805-dma-support-layerscape.patch 113 KB

From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Mon, 25 Sep 2017 12:12:20 +0800
Subject: [PATCH] dma: support layerscape

This is an integrated patch for Layerscape DMA support.

Signed-off-by: jiaheng.fan <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
 drivers/dma/Kconfig | 14 +
 drivers/dma/Makefile | 2 +
 drivers/dma/dpaa2-qdma/Kconfig | 8 +
 drivers/dma/dpaa2-qdma/Makefile | 8 +
 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
 drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
 drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
 10 files changed, 3678 insertions(+)
 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
 create mode 100644 drivers/dma/fsl-qdma.c
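Note for reviewers (not part of the patch): both engines added here are consumed
through the generic dmaengine client API rather than a driver-private interface.
A minimal client sketch follows; qdma_memcpy_example() and its polling loop are
hypothetical illustration, while the dmaengine calls themselves are the standard
upstream framework entry points that end up in this patch's
dpaa2_qdma_prep_memcpy() and dpaa2_qdma_issue_pending():

#include <linux/dmaengine.h>

/*
 * Hypothetical dmaengine client (illustration only). dst and src must
 * already be DMA-mapped bus addresses.
 */
static int qdma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab a memcpy-capable channel; the qDMA channels advertise DMA_MEMCPY. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Reaches dpaa2_qdma_prep_memcpy(), which builds the FD and frame list. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	/* Reaches dpaa2_qdma_issue_pending(), which enqueues the FD to the FQ. */
	dma_async_issue_pending(chan);

	/* Poll for completion; a real client would use a completion callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}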
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -192,6 +192,20 @@ config FSL_EDMA
 multiplexing capability for DMA request sources(slot).
 This module can be found on Freescale Vybrid and LS-1 SoCs.
+config FSL_QDMA
+ tristate "Freescale qDMA engine support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the Freescale qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on Freescale LS SoCs.
+
+source drivers/dma/dpaa2-qdma/Kconfig
+
 config FSL_RAID
 tristate "Freescale RAID engine Support"
 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/Kconfig
@@ -0,0 +1,8 @@
+menuconfig FSL_DPAA2_QDMA
+ tristate "NXP DPAA2 QDMA"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ NXP Data Path Acceleration Architecture 2 QDMA driver,
+ using the NXP MC bus driver.
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the NXP DPAA2 QDMA driver
+#
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
+
+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,986 @@
+/*
+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ *
+ * Copyright 2015-2017 NXP Semiconductor, Inc.
+ * Author: Changming Huang <[email protected]>
+ *
+ * Driver for the NXP QDMA engine with QMan mode.
+ * Channel virtualization is supported through enqueuing of DMA jobs to,
+ * or dequeuing DMA jobs from different work queues with QMan portal.
+ * This module can be found on NXP LS2 SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
+
+#include "../virt-dma.h"
+
+#include "../../../drivers/staging/fsl-mc/include/mc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include "fsl_dpdmai_cmd.h"
+#include "fsl_dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct dpaa2_qdma_comp *comp_temp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->comp_free)) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+ if (!comp_temp)
+ goto err;
+ comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
+ GFP_NOWAIT, &comp_temp->fd_bus_addr);
+ if (!comp_temp->fd_virt_addr)
+ goto err;
+
+ comp_temp->fl_virt_addr =
+ (void *)((struct dpaa2_fd *)
+ comp_temp->fd_virt_addr + 1);
+ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
+ sizeof(struct dpaa2_fd);
+ comp_temp->desc_virt_addr =
+ (void *)((struct dpaa2_frame_list *)
+ comp_temp->fl_virt_addr + 3);
+ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
+ sizeof(struct dpaa2_frame_list) * 3;
+
+ comp_temp->qchan = dpaa2_chan;
+ comp_temp->sg_blk_num = 0;
+ INIT_LIST_HEAD(&comp_temp->sg_src_head);
+ INIT_LIST_HEAD(&comp_temp->sg_dst_head);
+ return comp_temp;
+ }
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+ struct dpaa2_qdma_comp, list);
+ list_del(&comp_temp->list);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ comp_temp->qchan = dpaa2_chan;
+err:
+ return comp_temp;
+}
+
+static void dpaa2_qdma_populate_fd(uint32_t format,
+ struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_fd *fd;
+
+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
+ fd->simple.addr = dpaa2_comp->fl_bus_addr;
+ /* Bypass memory translation, Frame list format, short length disable */
+ /* we need to disable BMT if fsl-mc use iova addr */
+ if (smmu_disable)
+ fd->simple.bpid = QMAN_FD_BMT_ENABLE;
+ fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
+
+ fd->simple.frc = format | QDMA_SER_CTX;
+}
+
+/* first frame list for descriptor buffer */
+static void dpaa2_qdma_populate_first_framel(
+ struct dpaa2_frame_list *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+ /* source and destination descriptor */
+ sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
+ sdd++;
+ sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
+
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ /* first frame list to source descriptor */
+ f_list->addr_lo = dpaa2_comp->desc_bus_addr;
+ f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
+ f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
+ f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = 0; /* not the last frame list */
+}
+
+/* source and destination frame list */
+static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
+ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ f_list->addr_lo = src;
+ f_list->addr_hi = (src >> 32);
+ f_list->data_len.data_len_sl0 = len;
+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = 0; /* not the last frame list */
+
+ f_list++;
+ /* destination frame list to destination buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ f_list->addr_lo = dst;
+ f_list->addr_hi = (dst >> 32);
+ f_list->data_len.data_len_sl0 = len;
+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
+}
+
+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_frame_list *f_list;
+ uint32_t format;
+
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+
+#ifdef LONG_FORMAT
+ format = QDMA_FD_LONG_FORMAT;
+#else
+ format = QDMA_FD_SHORT_FORMAT;
+#endif
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(format, dpaa2_comp);
+
+ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
+
+#ifdef LONG_FORMAT
+ /* first frame list for descriptor buffer (long format) */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
+
+ f_list++;
+#endif
+
+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct qdma_sg_blk *sg_blk = NULL;
+ dma_addr_t phy_sgb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->sgb_free)) {
+ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
+ dpaa2_chan->sg_blk_pool,
+ GFP_NOWAIT, &phy_sgb);
+ if (!sg_blk) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ return sg_blk;
+ }
+ sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
+ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
+ } else {
+ sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ }
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ return sg_blk;
+}
+
+static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
+ struct dpaa2_qdma_chan *dpaa2_chan,
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ struct scatterlist *dst_sg, u32 dst_nents,
+ struct scatterlist *src_sg, u32 src_nents)
+{
+ struct dpaa2_qdma_sg *src_sge;
+ struct dpaa2_qdma_sg *dst_sge;
+ struct qdma_sg_blk *sg_blk;
+ struct qdma_sg_blk *sg_blk_dst;
+ dma_addr_t src;
+ dma_addr_t dst;
+ uint32_t num;
+ uint32_t blocks;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ int i, j = 0;
+
+ num = min(dst_nents, src_nents);
+ blocks = num / (NUM_SG_PER_BLK - 1);
+ if (num % (NUM_SG_PER_BLK - 1))
+ blocks += 1;
+ if (dpaa2_comp->sg_blk_num < blocks) {
+ len = blocks - dpaa2_comp->sg_blk_num;
+ for (i = 0; i < len; i++) {
+ /* source sg blocks */
+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
+ if (!sg_blk)
+ return 0;
+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
+ /* destination sg blocks */
+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
+ if (!sg_blk)
+ return 0;
+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
+ }
+ } else {
+ len = dpaa2_comp->sg_blk_num - blocks;
+ for (i = 0; i < len; i++) {
+ spin_lock(&dpaa2_chan->queue_lock);
+ /* handle source sg blocks */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
+ /* handle destination sg blocks */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
+ spin_unlock(&dpaa2_chan->queue_lock);
+ }
+ }
+ dpaa2_comp->sg_blk_num = blocks;
+
+ /* get the first source sg phy address */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
+ struct qdma_sg_blk, list);
+ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
+ /* get the first destination sg phy address */
+ sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
+ struct qdma_sg_blk, list);
+ dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
+
+ for (i = 0; i < blocks; i++) {
+ src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
+ dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
+
+ for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
+ len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
+ if (len == 0)
+ goto fetch;
+ total_len += len;
+ src = sg_dma_address(src_sg);
+ dst = sg_dma_address(dst_sg);
+
+ /* source SG */
+ src_sge->addr_lo = src;
+ src_sge->addr_hi = (src >> 32);
+ src_sge->data_len.data_len_sl0 = len;
+ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
+ src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
+ /* destination SG */
+ dst_sge->addr_lo = dst;
+ dst_sge->addr_hi = (dst >> 32);
+ dst_sge->data_len.data_len_sl0 = len;
+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
+fetch:
+ num--;
+ if (num == 0) {
+ src_sge->ctrl.f = QDMA_SG_F;
+ dst_sge->ctrl.f = QDMA_SG_F;
+ goto end;
+ }
+ dst_sg = sg_next(dst_sg);
+ src_sg = sg_next(src_sg);
+ src_sge++;
+ dst_sge++;
+ if (j == (NUM_SG_PER_BLK - 2)) {
+ /* for next blocks, extension */
+ sg_blk = list_next_entry(sg_blk, list);
+ sg_blk_dst = list_next_entry(sg_blk_dst, list);
+ src_sge->addr_lo = sg_blk->blk_bus_addr;
+ src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
+ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
+ src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
+ dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
+ dst_sge->addr_hi =
+ sg_blk_dst->blk_bus_addr >> 32;
+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
+ }
+ }
+ }
+
+end:
+ return total_len;
+}
+
+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, u32 dst_nents,
+ struct scatterlist *src_sg, u32 src_nents,
+ unsigned long flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_frame_list *f_list;
+ struct device *dev = dpaa2_chan->qdma->priv->dev;
+ uint32_t total_len = 0;
+
+ /* basic sanity checks */
+ if (dst_nents == 0 || src_nents == 0)
+ return NULL;
+
+ if (dst_sg == NULL || src_sg == NULL)
+ return NULL;
+
+ /* get the descriptors required */
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+ /* prepare Scatter gather entry for source and destination */
+ total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
+ dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
+
+ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
+ /* first frame list for descriptor buffer */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
+ f_list++;
+ /* prepare Scatter gather entry for source and destination */
+ /* populate source and destination frame list table */
+ dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
+ dpaa2_comp->sge_src_bus_addr,
+ total_len, QDMA_FL_FMT_SGE);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+ struct virt_dma_desc *vdesc;
+ struct dpaa2_fd *fd;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ spin_lock(&dpaa2_chan->vchan.lock);
+ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+ if (!vdesc)
+ goto err_enqueue;
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
+
+ list_del(&vdesc->node);
+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+ /* TODO: priority hard-coded to zero */
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[0].fqid, fd);
+ if (err) {
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
+ }
+
+ }
+err_enqueue:
+ spin_unlock(&dpaa2_chan->vchan.lock);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_qdma_priv *priv;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ uint8_t prio_def = DPDMAI_PRIO_NUM;
+ int err;
+ int i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpqdma_id = ls_dev->obj_desc.id;
+
+ /* Get the handle for the DPDMAI this interface is associated with */
+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_open() failed\n");
+ return err;
+ }
+ dev_info(dev, "Opened dpdmai object successfully\n");
+
+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpdmai_attr);
+ if (err) {
+ dev_err(dev, "dpdmai_get_attributes() failed\n");
+ return err;
+ }
+
+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ dev_err(dev, "DPDMAI major version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ }
+
+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ dev_err(dev, "DPDMAI minor version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ }
+
+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+ if (!ppriv) {
+ dev_err(dev, "kcalloc for ppriv failed\n");
+ return -1;
+ }
+ priv->ppriv = ppriv;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+ return err;
+ }
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+ return err;
+ }
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->prio = i;
+ ppriv->priv = priv;
+ ppriv++;
+ }
+
+ return 0;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+ struct dpaa2_qdma_priv_per_prio, nctx);
+ struct dpaa2_qdma_priv *priv = ppriv->priv;
+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+ struct dpaa2_qdma_chan *qchan;
+ const struct dpaa2_fd *fd;
+ const struct dpaa2_fd *fd_eq;
+ struct dpaa2_dq *dq;
+ int err;
+ int is_last = 0;
+ uint8_t status;
+ int i;
+ int found;
+ uint32_t n_chans = priv->dpaa2_qdma->n_chans;
+
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err);
+
+ while (!is_last) {
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ } while (!is_last && !dq);
+ if (!dq) {
+ dev_err(priv->dev, "FQID returned no valid frames!\n");
+ continue;
+ }
+
+ /* obtain FD and process the error */
+ fd = dpaa2_dq_fd(dq);
+ status = fd->simple.ctrl & 0xff;
+ if (status)
+ dev_err(priv->dev, "FD error occurred\n");
+ found = 0;
+ for (i = 0; i < n_chans; i++) {
+ qchan = &priv->dpaa2_qdma->chans[i];
+ spin_lock(&qchan->queue_lock);
+ if (list_empty(&qchan->comp_used)) {
+ spin_unlock(&qchan->queue_lock);
+ continue;
+ }
+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+ &qchan->comp_used, list) {
+ fd_eq = (struct dpaa2_fd *)
+ dpaa2_comp->fd_virt_addr;
+
+ if (fd_eq->simple.addr ==
+ fd->simple.addr) {
+
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ &qchan->comp_free);
+
+ spin_lock(&qchan->vchan.lock);
+ vchan_cookie_complete(
+ &dpaa2_comp->vdesc);
+ spin_unlock(&qchan->vchan.lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&qchan->queue_lock);
+ if (found)
+ break;
+ }
+ }
+
+ dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+ int err, i, num;
+ struct device *dev = priv->dev;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ ppriv->nctx.is_cdan = 0;
+ ppriv->nctx.desired_cpu = 1;
+ ppriv->nctx.id = ppriv->rsp_fqid;
+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, &ppriv->nctx);
+ if (err) {
+ dev_err(dev, "Notification register failed\n");
+ goto err_service;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
+ dev);
+ if (!ppriv->store) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err_store;
+ }
+
+ ppriv++;
+ }
+ return 0;
+
+err_store:
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+err_service:
+ ppriv--;
+ while (ppriv >= priv->ppriv) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv--;
+ }
+ return -1;
+}
+
+static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv++;
+ }
+}
+
+static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ ppriv++;
+ }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+ int err;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, num;
+
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+ DPDMAI_QUEUE_OPT_DEST;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+ return err;
+ }
+
+ ppriv++;
+ }
+
+ return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+ int err = 0;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv->nctx.qman64 = 0;
+ ppriv->nctx.dpio_id = 0;
+ ppriv++;
+ }
+
+ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpdmai_reset() failed\n");
+
+ return err;
+}
+
+static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
+ /* free the QDMA SG pool block */
+ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
+ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
+ sgb_tmp->blk_virt_addr - 1);
+ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
+ - sizeof(*sgb_tmp);
+ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
+ sgb_tmp->blk_bus_addr);
+ }
+
+}
+
+static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+ /* free the QDMA comp resource */
+ list_for_each_entry_safe(comp_tmp, _comp_tmp,
+ head, list) {
+ dma_pool_free(qchan->fd_pool,
+ comp_tmp->fd_virt_addr,
+ comp_tmp->fd_bus_addr);
+ /* free the SG source block on comp */
+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
+ /* free the SG destination block on comp */
+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
+ list_del(&comp_tmp->list);
+ kfree(comp_tmp);
+ }
+
+}
+
+static void __cold dpaa2_dpdmai_free_channels(
+ struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_chan *qchan;
+ int num, i;
+
+ num = dpaa2_qdma->n_chans;
+ for (i = 0; i < num; i++) {
+ qchan = &dpaa2_qdma->chans[i];
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
+ dma_pool_destroy(qchan->fd_pool);
+ dma_pool_destroy(qchan->sg_blk_pool);
+ }
+}
+
+static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan;
+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+ int i;
+
+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
+ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+ dpaa2_chan = &dpaa2_qdma->chans[i];
+ dpaa2_chan->qdma = dpaa2_qdma;
+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+
+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
+ dev, FD_POOL_SIZE, 32, 0);
+ if (!dpaa2_chan->fd_pool)
+ return -1;
+ dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
+ dev, SG_POOL_SIZE, 32, 0);
+ if (!dpaa2_chan->sg_blk_pool)
+ return -1;
+
+ spin_lock_init(&dpaa2_chan->queue_lock);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+ INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
+ }
+ return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev = &dpdmai_dev->dev;
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+ priv->dpdmai_dev = dpdmai_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (priv->iommu_domain)
+ smmu_disable = false;
+
+ /* obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+ if (err) {
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ /* DPDMAI initialization */
+ err = dpaa2_qdma_setup(dpdmai_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_qdma_setup() failed\n");
+ goto err_dpdmai_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_qdma_dpio_setup(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPDMAI binding to DPIO */
+ err = dpaa2_dpdmai_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPDMAI enable */
+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_enable() failed\n");
+ goto err_enable;
+ }
+
+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+ if (!dpaa2_qdma) {
+ err = -ENOMEM;
+ goto err_eng;
+ }
+
+ priv->dpaa2_qdma = dpaa2_qdma;
+ dpaa2_qdma->priv = priv;
+
+ dpaa2_qdma->n_chans = NUM_CH;
+
+ err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
+ if (err) {
+ dev_err(dev, "QDMA alloc channels failed\n");
+ goto err_reg;
+ }
+
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
+
+ dpaa2_qdma->dma_dev.dev = dev;
+ dpaa2_qdma->dma_dev.device_alloc_chan_resources
+ = dpaa2_qdma_alloc_chan_resources;
+ dpaa2_qdma->dma_dev.device_free_chan_resources
+ = dpaa2_qdma_free_chan_resources;
+ dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+ dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+ if (err) {
+ dev_err(dev, "Can't register NXP QDMA engine.\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
+ kfree(dpaa2_qdma);
+err_eng:
+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+ dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+ kfree(priv->ppriv);
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+ return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_qdma_priv *priv;
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+ dpaa2_qdma = priv->dpaa2_qdma;
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_io);
+ dev_set_drvdata(dev, NULL);
+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+ kfree(priv);
+ kfree(dpaa2_qdma);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmai",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+ .driver = {
+ .name = "dpaa2-qdma",
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_qdma_probe,
+ .remove = dpaa2_qdma_remove,
+ .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+ return fsl_mc_driver_register(&(dpaa2_qdma_driver));
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+ fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
+MODULE_LICENSE("Dual BSD/GPL");
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
@@ -0,0 +1,262 @@
+/* Copyright 2015 NXP Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define LONG_FORMAT 1
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+#define NUM_SG_PER_BLK 16
+
+#define QDMA_DMR_OFFSET 0x0
+#define QDMA_DQ_EN (0 << 30)
+#define QDMA_DQ_DIS (1 << 30)
+
+#define QDMA_DSR_M_OFFSET 0x10004
+
+struct dpaa2_qdma_sd_d {
+ uint32_t rsv:32;
+ union {
+ struct {
+ uint32_t ssd:12; /* source stride distance */
+ uint32_t sss:12; /* source stride size */
+ uint32_t rsv1:8;
+ } sdf;
+ struct {
+ uint32_t dsd:12; /* Destination stride distance */
+ uint32_t dss:12; /* Destination stride size */
+ uint32_t rsv2:8;
+ } ddf;
+ } df;
+ uint32_t rbpcmd; /* Route-by-port command */
+ uint32_t cmd;
+} __attribute__((__packed__));
+/* Source descriptor command read transaction type for RBP=0:
+ coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0:
+ coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+
+#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
+#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
+#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
+#define QDMA_SG_SL_SHORT 0x1 /* short length */
+#define QDMA_SG_SL_LONG 0x0 /* long length */
+#define QDMA_SG_F 0x1 /* last sg entry */
+struct dpaa2_qdma_sg {
+ uint32_t addr_lo; /* address 0:31 */
+ uint32_t addr_hi:17; /* address 32:48 */
+ uint32_t rsv:15;
+ union {
+ uint32_t data_len_sl0; /* SL=0, the long format */
+ struct {
+ uint32_t len:17; /* SL=1, the short format */
+ uint32_t reserve:3;
+ uint32_t sf:1;
+ uint32_t sr:1;
+ uint32_t size:10; /* buff size */
+ } data_len_sl1;
+ } data_len; /* AVAIL_LENGTH */
+ struct {
+ uint32_t bpid:14;
+ uint32_t ivp:1;
+ uint32_t mbt:1;
+ uint32_t offset:12;
+ uint32_t fmt:2;
+ uint32_t sl:1;
+ uint32_t f:1;
+ } ctrl;
+} __attribute__((__packed__));
+
+#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
  1178. +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
  1179. +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
  1180. +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
  1181. +
  1182. +#define QDMA_SB_FRAME (0 << 28) /* single frame */
  1183. +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
  1184. +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
  1185. +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
  1186. +
  1187. +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
  1188. +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
  1189. +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
  1190. +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
  1191. +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
  1192. +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
  1193. +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
  1194. +
  1195. +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
  1196. +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
  1197. +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
  1198. +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
  1199. +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
  1200. +
  1201. +#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
  1202. +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
  1203. +#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
  1204. +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
  1205. +#define QDMA_FL_SL_LONG 0x0 /* long length */
  1206. +#define QDMA_FL_SL_SHORT 0x1 /* short length */
  1207. +#define QDMA_FL_F 0x1 /* last frame list bit */
  1208. +/* Description of the frame list table structure */
  1209. +struct dpaa2_frame_list {
  1210. + uint32_t addr_lo; /* lower 32 bits of address */
  1211. + uint32_t addr_hi:17; /* upper 17 bits of address */
  1212. + uint32_t resrvd:15;
  1213. + union {
  1214. + uint32_t data_len_sl0; /* If SL=0, data length is 32 bits */
  1215. + struct {
  1216. + uint32_t data_len:18; /* If SL=1, length is 18 bits */
  1217. + uint32_t resrvd:2;
  1218. + uint32_t mem:12; /* Valid only when SL=1 */
  1219. + } data_len_sl1;
  1220. + } data_len;
  1221. + /* word 4 */
  1222. + uint32_t bpid:14; /* Frame buffer pool ID */
  1223. + uint32_t ivp:1; /* Invalid Pool ID. */
  1224. + uint32_t bmt:1; /* Bypass Memory Translation */
  1225. + uint32_t offset:12; /* Frame offset */
  1226. + uint32_t fmt:2; /* Frame Format */
  1227. + uint32_t sl:1; /* Short Length */
  1228. + uint32_t f:1; /* Final bit */
  1229. +
  1230. + uint32_t frc; /* Frame Context */
  1231. + /* word 6 */
  1232. + uint32_t err:8; /* Frame errors */
  1233. + uint32_t resrvd0:8;
  1234. + uint32_t asal:4; /* accelerator-specific annotation length */
  1235. + uint32_t resrvd1:1;
  1236. + uint32_t ptv2:1;
  1237. + uint32_t ptv1:1;
  1238. + uint32_t pta:1; /* pass-through annotation */
  1239. + uint32_t resrvd2:8;
  1240. +
  1241. + uint32_t flc_lo; /* lower 32 bits of flow context */
  1242. + uint32_t flc_hi; /* higher 32 bits of flow context */
  1243. +} __attribute__((__packed__));
  1244. +
  1245. +struct dpaa2_qdma_chan {
  1246. + struct virt_dma_chan vchan;
  1247. + struct virt_dma_desc vdesc;
  1248. + enum dma_status status;
  1249. + struct dpaa2_qdma_engine *qdma;
  1250. +
  1251. + struct mutex dpaa2_queue_mutex;
  1252. + spinlock_t queue_lock;
  1253. + struct dma_pool *fd_pool;
  1254. + struct dma_pool *sg_blk_pool;
  1255. +
  1256. + struct list_head comp_used;
  1257. + struct list_head comp_free;
  1258. +
  1259. + struct list_head sgb_free;
  1260. +};
  1261. +
  1262. +struct qdma_sg_blk {
  1263. + dma_addr_t blk_bus_addr;
  1264. + void *blk_virt_addr;
  1265. + struct list_head list;
  1266. +};
  1267. +
  1268. +struct dpaa2_qdma_comp {
  1269. + dma_addr_t fd_bus_addr;
  1270. + dma_addr_t fl_bus_addr;
  1271. + dma_addr_t desc_bus_addr;
  1272. + dma_addr_t sge_src_bus_addr;
  1273. + dma_addr_t sge_dst_bus_addr;
  1274. + void *fd_virt_addr;
  1275. + void *fl_virt_addr;
  1276. + void *desc_virt_addr;
  1277. + void *sg_src_virt_addr;
  1278. + void *sg_dst_virt_addr;
  1279. + struct qdma_sg_blk *sg_blk;
  1280. + uint32_t sg_blk_num;
  1281. + struct list_head sg_src_head;
  1282. + struct list_head sg_dst_head;
  1283. + struct dpaa2_qdma_chan *qchan;
  1284. + struct virt_dma_desc vdesc;
  1285. + struct list_head list;
  1286. +};
  1287. +
  1288. +struct dpaa2_qdma_engine {
  1289. + struct dma_device dma_dev;
  1290. + u32 n_chans;
  1291. + struct dpaa2_qdma_chan chans[NUM_CH];
  1292. +
  1293. + struct dpaa2_qdma_priv *priv;
  1294. +};
  1295. +
  1296. +/*
  1297. + * dpaa2_qdma_priv - driver private data
  1298. + */
  1299. +struct dpaa2_qdma_priv {
  1300. + int dpqdma_id;
  1301. +
  1302. + struct iommu_domain *iommu_domain;
  1303. + struct dpdmai_attr dpdmai_attr;
  1304. + struct device *dev;
  1305. + struct fsl_mc_io *mc_io;
  1306. + struct fsl_mc_device *dpdmai_dev;
  1307. +
  1308. + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
  1309. + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
  1310. +
  1311. + uint8_t num_pairs;
  1312. +
  1313. + struct dpaa2_qdma_engine *dpaa2_qdma;
  1314. + struct dpaa2_qdma_priv_per_prio *ppriv;
  1315. +};
  1316. +
  1317. +struct dpaa2_qdma_priv_per_prio {
  1318. + int req_fqid;
  1319. + int rsp_fqid;
  1320. + int prio;
  1321. +
  1322. + struct dpaa2_io_store *store;
  1323. + struct dpaa2_io_notification_ctx nctx;
  1324. +
  1325. + struct dpaa2_qdma_priv *priv;
  1326. +};
  1327. +
  1328. +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
  1329. +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
  1330. + sizeof(struct dpaa2_frame_list) * 3 + \
  1331. + sizeof(struct dpaa2_qdma_sd_d) * 2)
  1332. +
  1333. +/* qdma_sg_blk + 16 SGs */
  1334. +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
  1335. + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
  1336. +#endif /* __DPAA2_QDMA_H */
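
The two pool-size macros above fix the layout dpaa2-qdma carves out of its per-channel dma_pools: the FD, its three frame-list entries, and the two source/destination descriptors share one FD_POOL_SIZE allocation. A minimal sketch of that carving (not part of the patch; the function name, the `dev` argument, and the 32-byte alignment are assumptions for illustration):

	/* Hedged sketch: allocate one completion's descriptor memory from a
	 * dma_pool sized by FD_POOL_SIZE, then locate the frame list entries
	 * immediately after the FD inside the same block.
	 */
	static int alloc_comp_mem_sketch(struct device *dev,
					 struct dpaa2_qdma_comp *comp,
					 struct dma_pool **pool)
	{
		*pool = dma_pool_create("qdma_fd_pool", dev, FD_POOL_SIZE, 32, 0);
		if (!*pool)
			return -ENOMEM;

		comp->fd_virt_addr = dma_pool_alloc(*pool, GFP_KERNEL,
						    &comp->fd_bus_addr);
		if (!comp->fd_virt_addr)
			return -ENOMEM;

		/* frame list entries sit right after the FD in the same block */
		comp->fl_virt_addr = comp->fd_virt_addr + sizeof(struct dpaa2_fd);
		comp->fl_bus_addr = comp->fd_bus_addr + sizeof(struct dpaa2_fd);
		return 0;
	}
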
  1337. --- /dev/null
  1338. +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
  1339. @@ -0,0 +1,454 @@
  1340. +/* Copyright 2013-2015 Freescale Semiconductor Inc.
  1341. + *
  1342. + * Redistribution and use in source and binary forms, with or without
  1343. + * modification, are permitted provided that the following conditions are met:
  1344. + * * Redistributions of source code must retain the above copyright
  1345. + * notice, this list of conditions and the following disclaimer.
  1346. + * * Redistributions in binary form must reproduce the above copyright
  1347. + * notice, this list of conditions and the following disclaimer in the
  1348. + * documentation and/or other materials provided with the distribution.
  1349. + * * Neither the name of the above-listed copyright holders nor the
  1350. + * names of any contributors may be used to endorse or promote products
  1351. + * derived from this software without specific prior written permission.
  1352. + *
  1353. + *
  1354. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1355. + * GNU General Public License ("GPL") as published by the Free Software
  1356. + * Foundation, either version 2 of that License or (at your option) any
  1357. + * later version.
  1358. + *
  1359. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  1360. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  1361. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  1362. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  1363. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  1364. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  1365. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  1366. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  1367. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  1368. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  1369. + * POSSIBILITY OF SUCH DAMAGE.
  1370. + */
  1371. +#include <linux/types.h>
  1372. +#include <linux/io.h>
  1373. +#include "fsl_dpdmai.h"
  1374. +#include "fsl_dpdmai_cmd.h"
  1375. +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
  1376. +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
  1377. +
  1378. +int dpdmai_open(struct fsl_mc_io *mc_io,
  1379. + uint32_t cmd_flags,
  1380. + int dpdmai_id,
  1381. + uint16_t *token)
  1382. +{
  1383. + struct mc_command cmd = { 0 };
  1384. + int err;
  1385. +
  1386. + /* prepare command */
  1387. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
  1388. + cmd_flags,
  1389. + 0);
  1390. + DPDMAI_CMD_OPEN(cmd, dpdmai_id);
  1391. +
  1392. + /* send command to mc*/
  1393. + err = mc_send_command(mc_io, &cmd);
  1394. + if (err)
  1395. + return err;
  1396. +
  1397. + /* retrieve response parameters */
  1398. + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
  1399. +
  1400. + return 0;
  1401. +}
  1402. +
  1403. +int dpdmai_close(struct fsl_mc_io *mc_io,
  1404. + uint32_t cmd_flags,
  1405. + uint16_t token)
  1406. +{
  1407. + struct mc_command cmd = { 0 };
  1408. +
  1409. + /* prepare command */
  1410. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
  1411. + cmd_flags, token);
  1412. +
  1413. + /* send command to mc*/
  1414. + return mc_send_command(mc_io, &cmd);
  1415. +}
  1416. +
  1417. +int dpdmai_create(struct fsl_mc_io *mc_io,
  1418. + uint32_t cmd_flags,
  1419. + const struct dpdmai_cfg *cfg,
  1420. + uint16_t *token)
  1421. +{
  1422. + struct mc_command cmd = { 0 };
  1423. + int err;
  1424. +
  1425. + /* prepare command */
  1426. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
  1427. + cmd_flags,
  1428. + 0);
  1429. + DPDMAI_CMD_CREATE(cmd, cfg);
  1430. +
  1431. + /* send command to mc*/
  1432. + err = mc_send_command(mc_io, &cmd);
  1433. + if (err)
  1434. + return err;
  1435. +
  1436. + /* retrieve response parameters */
  1437. + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
  1438. +
  1439. + return 0;
  1440. +}
  1441. +
  1442. +int dpdmai_destroy(struct fsl_mc_io *mc_io,
  1443. + uint32_t cmd_flags,
  1444. + uint16_t token)
  1445. +{
  1446. + struct mc_command cmd = { 0 };
  1447. +
  1448. + /* prepare command */
  1449. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
  1450. + cmd_flags,
  1451. + token);
  1452. +
  1453. + /* send command to mc*/
  1454. + return mc_send_command(mc_io, &cmd);
  1455. +}
  1456. +
  1457. +int dpdmai_enable(struct fsl_mc_io *mc_io,
  1458. + uint32_t cmd_flags,
  1459. + uint16_t token)
  1460. +{
  1461. + struct mc_command cmd = { 0 };
  1462. +
  1463. + /* prepare command */
  1464. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
  1465. + cmd_flags,
  1466. + token);
  1467. +
  1468. + /* send command to mc*/
  1469. + return mc_send_command(mc_io, &cmd);
  1470. +}
  1471. +
  1472. +int dpdmai_disable(struct fsl_mc_io *mc_io,
  1473. + uint32_t cmd_flags,
  1474. + uint16_t token)
  1475. +{
  1476. + struct mc_command cmd = { 0 };
  1477. +
  1478. + /* prepare command */
  1479. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
  1480. + cmd_flags,
  1481. + token);
  1482. +
  1483. + /* send command to mc*/
  1484. + return mc_send_command(mc_io, &cmd);
  1485. +}
  1486. +
  1487. +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
  1488. + uint32_t cmd_flags,
  1489. + uint16_t token,
  1490. + int *en)
  1491. +{
  1492. + struct mc_command cmd = { 0 };
  1493. + int err;
  1494. + /* prepare command */
  1495. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
  1496. + cmd_flags,
  1497. + token);
  1498. +
  1499. + /* send command to mc*/
  1500. + err = mc_send_command(mc_io, &cmd);
  1501. + if (err)
  1502. + return err;
  1503. +
  1504. + /* retrieve response parameters */
  1505. + DPDMAI_RSP_IS_ENABLED(cmd, *en);
  1506. +
  1507. + return 0;
  1508. +}
  1509. +
  1510. +int dpdmai_reset(struct fsl_mc_io *mc_io,
  1511. + uint32_t cmd_flags,
  1512. + uint16_t token)
  1513. +{
  1514. + struct mc_command cmd = { 0 };
  1515. +
  1516. + /* prepare command */
  1517. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
  1518. + cmd_flags,
  1519. + token);
  1520. +
  1521. + /* send command to mc*/
  1522. + return mc_send_command(mc_io, &cmd);
  1523. +}
  1524. +
  1525. +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
  1526. + uint32_t cmd_flags,
  1527. + uint16_t token,
  1528. + uint8_t irq_index,
  1529. + int *type,
  1530. + struct dpdmai_irq_cfg *irq_cfg)
  1531. +{
  1532. + struct mc_command cmd = { 0 };
  1533. + int err;
  1534. +
  1535. + /* prepare command */
  1536. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
  1537. + cmd_flags,
  1538. + token);
  1539. + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
  1540. +
  1541. + /* send command to mc*/
  1542. + err = mc_send_command(mc_io, &cmd);
  1543. + if (err)
  1544. + return err;
  1545. +
  1546. + /* retrieve response parameters */
  1547. + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
  1548. +
  1549. + return 0;
  1550. +}
  1551. +
  1552. +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
  1553. + uint32_t cmd_flags,
  1554. + uint16_t token,
  1555. + uint8_t irq_index,
  1556. + struct dpdmai_irq_cfg *irq_cfg)
  1557. +{
  1558. + struct mc_command cmd = { 0 };
  1559. +
  1560. + /* prepare command */
  1561. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
  1562. + cmd_flags,
  1563. + token);
  1564. + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
  1565. +
  1566. + /* send command to mc*/
  1567. + return mc_send_command(mc_io, &cmd);
  1568. +}
  1569. +
  1570. +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
  1571. + uint32_t cmd_flags,
  1572. + uint16_t token,
  1573. + uint8_t irq_index,
  1574. + uint8_t *en)
  1575. +{
  1576. + struct mc_command cmd = { 0 };
  1577. + int err;
  1578. +
  1579. + /* prepare command */
  1580. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
  1581. + cmd_flags,
  1582. + token);
  1583. + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
  1584. +
  1585. + /* send command to mc*/
  1586. + err = mc_send_command(mc_io, &cmd);
  1587. + if (err)
  1588. + return err;
  1589. +
  1590. + /* retrieve response parameters */
  1591. + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
  1592. +
  1593. + return 0;
  1594. +}
  1595. +
  1596. +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
  1597. + uint32_t cmd_flags,
  1598. + uint16_t token,
  1599. + uint8_t irq_index,
  1600. + uint8_t en)
  1601. +{
  1602. + struct mc_command cmd = { 0 };
  1603. +
  1604. + /* prepare command */
  1605. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
  1606. + cmd_flags,
  1607. + token);
  1608. + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
  1609. +
  1610. + /* send command to mc*/
  1611. + return mc_send_command(mc_io, &cmd);
  1612. +}
  1613. +
  1614. +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
  1615. + uint32_t cmd_flags,
  1616. + uint16_t token,
  1617. + uint8_t irq_index,
  1618. + uint32_t *mask)
  1619. +{
  1620. + struct mc_command cmd = { 0 };
  1621. + int err;
  1622. +
  1623. + /* prepare command */
  1624. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
  1625. + cmd_flags,
  1626. + token);
  1627. + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
  1628. +
  1629. + /* send command to mc*/
  1630. + err = mc_send_command(mc_io, &cmd);
  1631. + if (err)
  1632. + return err;
  1633. +
  1634. + /* retrieve response parameters */
  1635. + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
  1636. +
  1637. + return 0;
  1638. +}
  1639. +
  1640. +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
  1641. + uint32_t cmd_flags,
  1642. + uint16_t token,
  1643. + uint8_t irq_index,
  1644. + uint32_t mask)
  1645. +{
  1646. + struct mc_command cmd = { 0 };
  1647. +
  1648. + /* prepare command */
  1649. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
  1650. + cmd_flags,
  1651. + token);
  1652. + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
  1653. +
  1654. + /* send command to mc*/
  1655. + return mc_send_command(mc_io, &cmd);
  1656. +}
  1657. +
  1658. +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
  1659. + uint32_t cmd_flags,
  1660. + uint16_t token,
  1661. + uint8_t irq_index,
  1662. + uint32_t *status)
  1663. +{
  1664. + struct mc_command cmd = { 0 };
  1665. + int err;
  1666. +
  1667. + /* prepare command */
  1668. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
  1669. + cmd_flags,
  1670. + token);
  1671. + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
  1672. +
  1673. + /* send command to mc*/
  1674. + err = mc_send_command(mc_io, &cmd);
  1675. + if (err)
  1676. + return err;
  1677. +
  1678. + /* retrieve response parameters */
  1679. + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
  1680. +
  1681. + return 0;
  1682. +}
  1683. +
  1684. +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
  1685. + uint32_t cmd_flags,
  1686. + uint16_t token,
  1687. + uint8_t irq_index,
  1688. + uint32_t status)
  1689. +{
  1690. + struct mc_command cmd = { 0 };
  1691. +
  1692. + /* prepare command */
  1693. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
  1694. + cmd_flags,
  1695. + token);
  1696. + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
  1697. +
  1698. + /* send command to mc*/
  1699. + return mc_send_command(mc_io, &cmd);
  1700. +}
  1701. +
  1702. +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
  1703. + uint32_t cmd_flags,
  1704. + uint16_t token,
  1705. + struct dpdmai_attr *attr)
  1706. +{
  1707. + struct mc_command cmd = { 0 };
  1708. + int err;
  1709. +
  1710. + /* prepare command */
  1711. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
  1712. + cmd_flags,
  1713. + token);
  1714. +
  1715. + /* send command to mc*/
  1716. + err = mc_send_command(mc_io, &cmd);
  1717. + if (err)
  1718. + return err;
  1719. +
  1720. + /* retrieve response parameters */
  1721. + DPDMAI_RSP_GET_ATTR(cmd, attr);
  1722. +
  1723. + return 0;
  1724. +}
  1725. +
  1726. +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
  1727. + uint32_t cmd_flags,
  1728. + uint16_t token,
  1729. + uint8_t priority,
  1730. + const struct dpdmai_rx_queue_cfg *cfg)
  1731. +{
  1732. + struct mc_command cmd = { 0 };
  1733. +
  1734. + /* prepare command */
  1735. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
  1736. + cmd_flags,
  1737. + token);
  1738. + DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
  1739. +
  1740. + /* send command to mc*/
  1741. + return mc_send_command(mc_io, &cmd);
  1742. +}
  1743. +
  1744. +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
  1745. + uint32_t cmd_flags,
  1746. + uint16_t token,
  1747. + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
  1748. +{
  1749. + struct mc_command cmd = { 0 };
  1750. + int err;
  1751. +
  1752. + /* prepare command */
  1753. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
  1754. + cmd_flags,
  1755. + token);
  1756. + DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
  1757. +
  1758. + /* send command to mc*/
  1759. + err = mc_send_command(mc_io, &cmd);
  1760. + if (err)
  1761. + return err;
  1762. +
  1763. + /* retrieve response parameters */
  1764. + DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
  1765. +
  1766. + return 0;
  1767. +}
  1768. +
  1769. +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
  1770. + uint32_t cmd_flags,
  1771. + uint16_t token,
  1772. + uint8_t priority,
  1773. + struct dpdmai_tx_queue_attr *attr)
  1774. +{
  1775. + struct mc_command cmd = { 0 };
  1776. + int err;
  1777. +
  1778. + /* prepare command */
  1779. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
  1780. + cmd_flags,
  1781. + token);
  1782. + DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
  1783. +
  1784. + /* send command to mc*/
  1785. + err = mc_send_command(mc_io, &cmd);
  1786. + if (err)
  1787. + return err;
  1788. +
  1789. + /* retrieve response parameters */
  1790. + DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
  1791. +
  1792. + return 0;
  1793. +}
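
Every wrapper above follows the same prepare/send/retrieve shape around mc_send_command(). For orientation, here is a hedged sketch of the object lifecycle a probe routine would drive with these calls (not part of the patch; the function name, the `dpdmai_id` source, and the zero cmd_flags are assumptions):

	/* Hedged lifecycle sketch using the dpdmai_* calls defined above.
	 * Assumes mc_io came from the fsl-mc bus and dpdmai_id from the
	 * matching fsl_mc_device.
	 */
	static int dpdmai_bringup_sketch(struct fsl_mc_io *mc_io, int dpdmai_id)
	{
		struct dpdmai_attr attr;
		uint16_t token;
		int err;

		err = dpdmai_open(mc_io, 0, dpdmai_id, &token); /* auth token */
		if (err)
			return err;

		err = dpdmai_get_attributes(mc_io, 0, token, &attr);
		if (!err)
			err = dpdmai_enable(mc_io, 0, token); /* start frame I/O */
		if (err)
			dpdmai_close(mc_io, 0, token);
		return err;
	}
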
  1794. --- /dev/null
  1795. +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
  1796. @@ -0,0 +1,521 @@
  1797. +/* Copyright 2013-2015 Freescale Semiconductor Inc.
  1798. + *
  1799. + * Redistribution and use in source and binary forms, with or without
  1800. + * modification, are permitted provided that the following conditions are met:
  1801. + * * Redistributions of source code must retain the above copyright
  1802. + * notice, this list of conditions and the following disclaimer.
  1803. + * * Redistributions in binary form must reproduce the above copyright
  1804. + * notice, this list of conditions and the following disclaimer in the
  1805. + * documentation and/or other materials provided with the distribution.
  1806. + * * Neither the name of the above-listed copyright holders nor the
  1807. + * names of any contributors may be used to endorse or promote products
  1808. + * derived from this software without specific prior written permission.
  1809. + *
  1810. + *
  1811. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1812. + * GNU General Public License ("GPL") as published by the Free Software
  1813. + * Foundation, either version 2 of that License or (at your option) any
  1814. + * later version.
  1815. + *
  1816. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  1817. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  1818. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  1819. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  1820. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  1821. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  1822. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  1823. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  1824. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  1825. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  1826. + * POSSIBILITY OF SUCH DAMAGE.
  1827. + */
  1828. +#ifndef __FSL_DPDMAI_H
  1829. +#define __FSL_DPDMAI_H
  1830. +
  1831. +struct fsl_mc_io;
  1832. +
  1833. +/* Data Path DMA Interface API
  1834. + * Contains initialization APIs and runtime control APIs for DPDMAI
  1835. + */
  1836. +
  1837. +/* General DPDMAI macros */
  1838. +
  1839. +/**
  1840. + * Maximum number of Tx/Rx priorities per DPDMAI object
  1841. + */
  1842. +#define DPDMAI_PRIO_NUM 2
  1843. +
  1844. +/**
  1845. + * All queues considered; see dpdmai_set_rx_queue()
  1846. + */
  1847. +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
  1848. +
  1849. +/**
  1850. + * dpdmai_open() - Open a control session for the specified object
  1851. + * @mc_io: Pointer to MC portal's I/O object
  1852. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1853. + * @dpdmai_id: DPDMAI unique ID
  1854. + * @token: Returned token; use in subsequent API calls
  1855. + *
  1856. + * This function can be used to open a control session for an
  1857. + * already created object; an object may have been declared in
  1858. + * the DPL or by calling the dpdmai_create() function.
  1859. + * This function returns a unique authentication token,
  1860. + * associated with the specific object ID and the specific MC
  1861. + * portal; this token must be used in all subsequent commands for
  1862. + * this specific object.
  1863. + *
  1864. + * Return: '0' on Success; Error code otherwise.
  1865. + */
  1866. +int dpdmai_open(struct fsl_mc_io *mc_io,
  1867. + uint32_t cmd_flags,
  1868. + int dpdmai_id,
  1869. + uint16_t *token);
  1870. +
  1871. +/**
  1872. + * dpdmai_close() - Close the control session of the object
  1873. + * @mc_io: Pointer to MC portal's I/O object
  1874. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1875. + * @token: Token of DPDMAI object
  1876. + *
  1877. + * After this function is called, no further operations are
  1878. + * allowed on the object without opening a new control session.
  1879. + *
  1880. + * Return: '0' on Success; Error code otherwise.
  1881. + */
  1882. +int dpdmai_close(struct fsl_mc_io *mc_io,
  1883. + uint32_t cmd_flags,
  1884. + uint16_t token);
  1885. +
  1886. +/**
  1887. + * struct dpdmai_cfg - Structure representing DPDMAI configuration
  1888. + * @priorities: Priorities for the DMA hardware processing; valid priorities are
  1889. + * configured with values 1-8; the entry following last valid entry
  1890. + * should be configured with 0
  1891. + */
  1892. +struct dpdmai_cfg {
  1893. + uint8_t priorities[DPDMAI_PRIO_NUM];
  1894. +};
  1895. +
  1896. +/**
  1897. + * dpdmai_create() - Create the DPDMAI object
  1898. + * @mc_io: Pointer to MC portal's I/O object
  1899. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1900. + * @cfg: Configuration structure
  1901. + * @token: Returned token; use in subsequent API calls
  1902. + *
  1903. + * Create the DPDMAI object, allocate required resources and
  1904. + * perform required initialization.
  1905. + *
  1906. + * The object can be created either by declaring it in the
  1907. + * DPL file, or by calling this function.
  1908. + *
  1909. + * This function returns a unique authentication token,
  1910. + * associated with the specific object ID and the specific MC
  1911. + * portal; this token must be used in all subsequent calls to
  1912. + * this specific object. For objects that are created using the
  1913. + * DPL file, call dpdmai_open() function to get an authentication
  1914. + * token first.
  1915. + *
  1916. + * Return: '0' on Success; Error code otherwise.
  1917. + */
  1918. +int dpdmai_create(struct fsl_mc_io *mc_io,
  1919. + uint32_t cmd_flags,
  1920. + const struct dpdmai_cfg *cfg,
  1921. + uint16_t *token);
  1922. +
  1923. +/**
  1924. + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
  1925. + * @mc_io: Pointer to MC portal's I/O object
  1926. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1927. + * @token: Token of DPDMAI object
  1928. + *
  1929. + * Return: '0' on Success; error code otherwise.
  1930. + */
  1931. +int dpdmai_destroy(struct fsl_mc_io *mc_io,
  1932. + uint32_t cmd_flags,
  1933. + uint16_t token);
  1934. +
  1935. +/**
  1936. + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
  1937. + * @mc_io: Pointer to MC portal's I/O object
  1938. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1939. + * @token: Token of DPDMAI object
  1940. + *
  1941. + * Return: '0' on Success; Error code otherwise.
  1942. + */
  1943. +int dpdmai_enable(struct fsl_mc_io *mc_io,
  1944. + uint32_t cmd_flags,
  1945. + uint16_t token);
  1946. +
  1947. +/**
  1948. + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
  1949. + * @mc_io: Pointer to MC portal's I/O object
  1950. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1951. + * @token: Token of DPDMAI object
  1952. + *
  1953. + * Return: '0' on Success; Error code otherwise.
  1954. + */
  1955. +int dpdmai_disable(struct fsl_mc_io *mc_io,
  1956. + uint32_t cmd_flags,
  1957. + uint16_t token);
  1958. +
  1959. +/**
  1960. + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
  1961. + * @mc_io: Pointer to MC portal's I/O object
  1962. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1963. + * @token: Token of DPDMAI object
  1964. + * @en: Returns '1' if object is enabled; '0' otherwise
  1965. + *
  1966. + * Return: '0' on Success; Error code otherwise.
  1967. + */
  1968. +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
  1969. + uint32_t cmd_flags,
  1970. + uint16_t token,
  1971. + int *en);
  1972. +
  1973. +/**
  1974. + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
  1975. + * @mc_io: Pointer to MC portal's I/O object
  1976. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  1977. + * @token: Token of DPDMAI object
  1978. + *
  1979. + * Return: '0' on Success; Error code otherwise.
  1980. + */
  1981. +int dpdmai_reset(struct fsl_mc_io *mc_io,
  1982. + uint32_t cmd_flags,
  1983. + uint16_t token);
  1984. +
  1985. +/**
  1986. + * struct dpdmai_irq_cfg - IRQ configuration
  1987. + * @addr: Address that must be written to signal a message-based interrupt
  1988. + * @val: Value to write into irq_addr address
  1989. + * @irq_num: A user defined number associated with this IRQ
  1990. + */
  1991. +struct dpdmai_irq_cfg {
  1992. + uint64_t addr;
  1993. + uint32_t val;
  1994. + int irq_num;
  1995. +};
  1996. +
  1997. +/**
  1998. + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
  1999. + * @mc_io: Pointer to MC portal's I/O object
  2000. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2001. + * @token: Token of DPDMAI object
  2002. + * @irq_index: Identifies the interrupt index to configure
  2003. + * @irq_cfg: IRQ configuration
  2004. + *
  2005. + * Return: '0' on Success; Error code otherwise.
  2006. + */
  2007. +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
  2008. + uint32_t cmd_flags,
  2009. + uint16_t token,
  2010. + uint8_t irq_index,
  2011. + struct dpdmai_irq_cfg *irq_cfg);
  2012. +
  2013. +/**
  2014. + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
  2015. + *
  2016. + * @mc_io: Pointer to MC portal's I/O object
  2017. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2018. + * @token: Token of DPDMAI object
  2019. + * @irq_index: The interrupt index to configure
  2020. + * @type: Interrupt type: 0 represents message interrupt
  2021. + * type (both irq_addr and irq_val are valid)
  2022. + * @irq_cfg: IRQ attributes
  2023. + *
  2024. + * Return: '0' on Success; Error code otherwise.
  2025. + */
  2026. +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
  2027. + uint32_t cmd_flags,
  2028. + uint16_t token,
  2029. + uint8_t irq_index,
  2030. + int *type,
  2031. + struct dpdmai_irq_cfg *irq_cfg);
  2032. +
  2033. +/**
  2034. + * dpdmai_set_irq_enable() - Set overall interrupt state.
  2035. + * @mc_io: Pointer to MC portal's I/O object
  2036. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2037. + * @token: Token of DPDMAI object
  2038. + * @irq_index: The interrupt index to configure
  2039. + * @en: Interrupt state - enable = 1, disable = 0
  2040. + *
  2041. + * Allows GPP software to control when interrupts are generated.
  2042. + * Each interrupt can have up to 32 causes. The enable/disable setting controls
  2043. + * the overall interrupt state: if the interrupt is disabled, none of the causes
  2044. + * can assert an interrupt.
  2045. + *
  2046. + * Return: '0' on Success; Error code otherwise.
  2047. + */
  2048. +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
  2049. + uint32_t cmd_flags,
  2050. + uint16_t token,
  2051. + uint8_t irq_index,
  2052. + uint8_t en);
  2053. +
  2054. +/**
  2055. + * dpdmai_get_irq_enable() - Get overall interrupt state
  2056. + * @mc_io: Pointer to MC portal's I/O object
  2057. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2058. + * @token: Token of DPDMAI object
  2059. + * @irq_index: The interrupt index to configure
  2060. + * @en: Returned Interrupt state - enable = 1, disable = 0
  2061. + *
  2062. + * Return: '0' on Success; Error code otherwise.
  2063. + */
  2064. +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
  2065. + uint32_t cmd_flags,
  2066. + uint16_t token,
  2067. + uint8_t irq_index,
  2068. + uint8_t *en);
  2069. +
  2070. +/**
  2071. + * dpdmai_set_irq_mask() - Set interrupt mask.
  2072. + * @mc_io: Pointer to MC portal's I/O object
  2073. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2074. + * @token: Token of DPDMAI object
  2075. + * @irq_index: The interrupt index to configure
  2076. + * @mask: event mask to trigger interrupt;
  2077. + * each bit:
  2078. + * 0 = ignore event
  2079. + * 1 = consider event for asserting IRQ
  2080. + *
  2081. + * Every interrupt can have up to 32 causes and the interrupt model supports
  2082. + * masking/unmasking each cause independently
  2083. + *
  2084. + * Return: '0' on Success; Error code otherwise.
  2085. + */
  2086. +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
  2087. + uint32_t cmd_flags,
  2088. + uint16_t token,
  2089. + uint8_t irq_index,
  2090. + uint32_t mask);
  2091. +
  2092. +/**
  2093. + * dpdmai_get_irq_mask() - Get interrupt mask.
  2094. + * @mc_io: Pointer to MC portal's I/O object
  2095. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2096. + * @token: Token of DPDMAI object
  2097. + * @irq_index: The interrupt index to configure
  2098. + * @mask: Returned event mask to trigger interrupt
  2099. + *
  2100. + * Every interrupt can have up to 32 causes and the interrupt model supports
  2101. + * masking/unmasking each cause independently
  2102. + *
  2103. + * Return: '0' on Success; Error code otherwise.
  2104. + */
  2105. +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
  2106. + uint32_t cmd_flags,
  2107. + uint16_t token,
  2108. + uint8_t irq_index,
  2109. + uint32_t *mask);
  2110. +
  2111. +/**
  2112. + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
  2113. + * @mc_io: Pointer to MC portal's I/O object
  2114. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2115. + * @token: Token of DPDMAI object
  2116. + * @irq_index: The interrupt index to configure
  2117. + * @status: Returned interrupts status - one bit per cause:
  2118. + * 0 = no interrupt pending
  2119. + * 1 = interrupt pending
  2120. + *
  2121. + * Return: '0' on Success; Error code otherwise.
  2122. + */
  2123. +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
  2124. + uint32_t cmd_flags,
  2125. + uint16_t token,
  2126. + uint8_t irq_index,
  2127. + uint32_t *status);
  2128. +
  2129. +/**
  2130. + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
  2131. + * @mc_io: Pointer to MC portal's I/O object
  2132. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2133. + * @token: Token of DPDMAI object
  2134. + * @irq_index: The interrupt index to configure
  2135. + * @status: bits to clear (W1C) - one bit per cause:
  2136. + * 0 = don't change
  2137. + * 1 = clear status bit
  2138. + *
  2139. + * Return: '0' on Success; Error code otherwise.
  2140. + */
  2141. +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
  2142. + uint32_t cmd_flags,
  2143. + uint16_t token,
  2144. + uint8_t irq_index,
  2145. + uint32_t status);
  2146. +
  2147. +/**
  2148. + * struct dpdmai_attr - Structure representing DPDMAI attributes
  2149. + * @id: DPDMAI object ID
  2150. + * @version: DPDMAI version
  2151. + * @num_of_priorities: number of priorities
  2152. + */
  2153. +struct dpdmai_attr {
  2154. + int id;
  2155. + /**
  2156. + * struct version - DPDMAI version
  2157. + * @major: DPDMAI major version
  2158. + * @minor: DPDMAI minor version
  2159. + */
  2160. + struct {
  2161. + uint16_t major;
  2162. + uint16_t minor;
  2163. + } version;
  2164. + uint8_t num_of_priorities;
  2165. +};
  2166. +
  2167. +/**
  2168. + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
  2169. + * @mc_io: Pointer to MC portal's I/O object
  2170. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2171. + * @token: Token of DPDMAI object
  2172. + * @attr: Returned object's attributes
  2173. + *
  2174. + * Return: '0' on Success; Error code otherwise.
  2175. + */
  2176. +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
  2177. + uint32_t cmd_flags,
  2178. + uint16_t token,
  2179. + struct dpdmai_attr *attr);
  2180. +
  2181. +/**
  2182. + * enum dpdmai_dest - DPDMAI destination types
  2183. + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
  2184. + * and does not generate FQDAN notifications; user is expected to dequeue
  2185. + * from the queue based on polling or other user-defined method
  2186. + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
  2187. + * notifications to the specified DPIO; user is expected to dequeue
  2188. + * from the queue only after notification is received
  2189. + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
  2190. + * FQDAN notifications, but is connected to the specified DPCON object;
  2191. + * user is expected to dequeue from the DPCON channel
  2192. + */
  2193. +enum dpdmai_dest {
  2194. + DPDMAI_DEST_NONE = 0,
  2195. + DPDMAI_DEST_DPIO = 1,
  2196. + DPDMAI_DEST_DPCON = 2
  2197. +};
  2198. +
  2199. +/**
  2200. + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
  2201. + * @dest_type: Destination type
  2202. + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
  2203. + * @priority: Priority selection within the DPIO or DPCON channel; valid values
  2204. + * are 0-1 or 0-7, depending on the number of priorities in that
  2205. + * channel; not relevant for 'DPDMAI_DEST_NONE' option
  2206. + */
  2207. +struct dpdmai_dest_cfg {
  2208. + enum dpdmai_dest dest_type;
  2209. + int dest_id;
  2210. + uint8_t priority;
  2211. +};
  2212. +
  2213. +/* DPDMAI queue modification options */
  2214. +
  2215. +/**
  2216. + * Select to modify the user's context associated with the queue
  2217. + */
  2218. +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
  2219. +
  2220. +/**
  2221. + * Select to modify the queue's destination
  2222. + */
  2223. +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
  2224. +
  2225. +/**
  2226. + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
  2227. + * @options: Flags representing the suggested modifications to the queue;
  2228. + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
  2229. + * @user_ctx: User context value provided in the frame descriptor of each
  2230. + * dequeued frame;
  2231. + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
  2232. + * @dest_cfg: Queue destination parameters;
  2233. + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
  2234. + */
  2235. +struct dpdmai_rx_queue_cfg {
  2236. + uint32_t options;
  2237. + uint64_t user_ctx;
  2238. + struct dpdmai_dest_cfg dest_cfg;
  2239. +
  2240. +};
  2241. +
  2242. +/**
  2243. + * dpdmai_set_rx_queue() - Set Rx queue configuration
  2244. + * @mc_io: Pointer to MC portal's I/O object
  2245. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2246. + * @token: Token of DPDMAI object
  2247. + * @priority: Select the queue relative to number of
  2248. + * priorities configured at DPDMAI creation; use
  2249. + * DPDMAI_ALL_QUEUES to configure all Rx queues
  2250. + * identically.
  2251. + * @cfg: Rx queue configuration
  2252. + *
  2253. + * Return: '0' on Success; Error code otherwise.
  2254. + */
  2255. +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
  2256. + uint32_t cmd_flags,
  2257. + uint16_t token,
  2258. + uint8_t priority,
  2259. + const struct dpdmai_rx_queue_cfg *cfg);
  2260. +
  2261. +/**
  2262. + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
  2263. + * @user_ctx: User context value provided in the frame descriptor of each
  2264. + * dequeued frame
  2265. + * @dest_cfg: Queue destination configuration
  2266. + * @fqid: Virtual FQID value to be used for dequeue operations
  2267. + */
  2268. +struct dpdmai_rx_queue_attr {
  2269. + uint64_t user_ctx;
  2270. + struct dpdmai_dest_cfg dest_cfg;
  2271. + uint32_t fqid;
  2272. +};
  2273. +
  2274. +/**
  2275. + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
  2276. + * @mc_io: Pointer to MC portal's I/O object
  2277. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2278. + * @token: Token of DPDMAI object
  2279. + * @priority: Select the queue relative to number of
  2280. + * priorities configured at DPDMAI creation
  2281. + * @attr: Returned Rx queue attributes
  2282. + *
  2283. + * Return: '0' on Success; Error code otherwise.
  2284. + */
  2285. +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
  2286. + uint32_t cmd_flags,
  2287. + uint16_t token,
  2288. + uint8_t priority,
  2289. + struct dpdmai_rx_queue_attr *attr);
  2290. +
  2291. +/**
  2292. + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
  2293. + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
  2294. + */
  2295. +
  2296. +struct dpdmai_tx_queue_attr {
  2297. + uint32_t fqid;
  2298. +};
  2299. +
  2300. +/**
  2301. + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
  2302. + * @mc_io: Pointer to MC portal's I/O object
  2303. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2304. + * @token: Token of DPDMAI object
  2305. + * @priority: Select the queue relative to number of
  2306. + * priorities configured at DPDMAI creation
  2307. + * @attr: Returned Tx queue attributes
  2308. + *
  2309. + * Return: '0' on Success; Error code otherwise.
  2310. + */
  2311. +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
  2312. + uint32_t cmd_flags,
  2313. + uint16_t token,
  2314. + uint8_t priority,
  2315. + struct dpdmai_tx_queue_attr *attr);
  2316. +
  2317. +#endif /* __FSL_DPDMAI_H */
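
The Rx-queue call above is options-driven: only the fields flagged in 'options' are applied. A hedged sketch (not part of the patch) of steering Rx priority 0 to a DPIO for FQDAN notifications; the function name, `dpio_id`, and the context pointer are assumptions:

	static int rx_queue_sketch(struct fsl_mc_io *mc_io, uint16_t token,
				   int dpio_id, void *drv_ctx)
	{
		struct dpdmai_rx_queue_cfg rx_cfg = {
			.options = DPDMAI_QUEUE_OPT_USER_CTX |
				   DPDMAI_QUEUE_OPT_DEST,
			/* echoed back in each dequeued frame descriptor */
			.user_ctx = (uint64_t)(uintptr_t)drv_ctx,
			.dest_cfg = {
				.dest_type = DPDMAI_DEST_DPIO,
				.dest_id = dpio_id,
				.priority = 0,
			},
		};

		return dpdmai_set_rx_queue(mc_io, 0, token,
					   0 /* queue priority */, &rx_cfg);
	}
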
  2318. --- /dev/null
  2319. +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
  2320. @@ -0,0 +1,222 @@
  2321. +/* Copyright 2013-2016 Freescale Semiconductor Inc.
  2322. + *
  2323. + * Redistribution and use in source and binary forms, with or without
  2324. + * modification, are permitted provided that the following conditions are met:
  2325. + * * Redistributions of source code must retain the above copyright
  2326. + * notice, this list of conditions and the following disclaimer.
  2327. + * * Redistributions in binary form must reproduce the above copyright
  2328. + * notice, this list of conditions and the following disclaimer in the
  2329. + * documentation and/or other materials provided with the distribution.
  2330. + * * Neither the name of the above-listed copyright holders nor the
  2331. + * names of any contributors may be used to endorse or promote products
  2332. + * derived from this software without specific prior written permission.
  2333. + *
  2334. + *
  2335. + * ALTERNATIVELY, this software may be distributed under the terms of the
  2336. + * GNU General Public License ("GPL") as published by the Free Software
  2337. + * Foundation, either version 2 of that License or (at your option) any
  2338. + * later version.
  2339. + *
  2340. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  2341. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  2342. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  2343. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  2344. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  2345. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  2346. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  2347. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  2348. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  2349. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  2350. + * POSSIBILITY OF SUCH DAMAGE.
  2351. + */
  2352. +#ifndef _FSL_DPDMAI_CMD_H
  2353. +#define _FSL_DPDMAI_CMD_H
  2354. +
  2355. +/* DPDMAI Version */
  2356. +#define DPDMAI_VER_MAJOR 2
  2357. +#define DPDMAI_VER_MINOR 2
  2358. +
  2359. +#define DPDMAI_CMD_BASE_VERSION 0
  2360. +#define DPDMAI_CMD_ID_OFFSET 4
  2361. +
  2362. +/* Command IDs */
  2363. +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2364. +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2365. +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2366. +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2367. +
  2368. +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2369. +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2370. +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2371. +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2372. +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2373. +
  2374. +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2375. +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2376. +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2377. +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2378. +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2379. +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2380. +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2381. +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2382. +
  2383. +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2384. +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2385. +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2386. +
  2387. +
  2388. +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
  2389. +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
  2390. +
  2391. +
  2392. +#define MAKE_UMASK64(_width) \
  2393. + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
  2394. + (uint64_t)-1))
  2395. +
  2396. +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
  2397. +{
  2398. + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
  2399. +}
  2400. +
  2401. +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
  2402. +{
  2403. + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
  2404. +}
  2405. +
  2406. +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
  2407. + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
  2408. +
  2409. +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
  2410. + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
  2411. +
  2412. +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
  2413. + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
  2414. +
  2415. +/* cmd, param, offset, width, type, arg_name */
  2416. +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
  2417. + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
  2418. +
  2419. +/* cmd, param, offset, width, type, arg_name */
  2420. +#define DPDMAI_CMD_CREATE(cmd, cfg) \
  2421. +do { \
  2422. + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
  2423. + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
  2424. +} while (0)
  2425. +
  2426. +/* cmd, param, offset, width, type, arg_name */
  2427. +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
  2428. + MC_RSP_OP(cmd, 0, 0, 1, int, en)
  2429. +
  2430. +/* cmd, param, offset, width, type, arg_name */
  2431. +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
  2432. +do { \
  2433. + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
  2434. + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
  2435. + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
  2436. + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
  2437. +} while (0)
  2438. +
  2439. +/* cmd, param, offset, width, type, arg_name */
  2440. +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
  2441. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  2442. +
  2443. +/* cmd, param, offset, width, type, arg_name */
  2444. +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
  2445. +do { \
  2446. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
  2447. + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
  2448. + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
  2449. + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
  2450. +} while (0)
  2451. +
  2452. +/* cmd, param, offset, width, type, arg_name */
  2453. +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
  2454. +do { \
  2455. + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
  2456. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  2457. +} while (0)
  2458. +
  2459. +/* cmd, param, offset, width, type, arg_name */
  2460. +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
  2461. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  2462. +
  2463. +/* cmd, param, offset, width, type, arg_name */
  2464. +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
  2465. + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
  2466. +
  2467. +/* cmd, param, offset, width, type, arg_name */
  2468. +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
  2469. +do { \
  2470. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
  2471. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  2472. +} while (0)
  2473. +
  2474. +/* cmd, param, offset, width, type, arg_name */
  2475. +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
  2476. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  2477. +
  2478. +/* cmd, param, offset, width, type, arg_name */
  2479. +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
  2480. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
  2481. +
  2482. +/* cmd, param, offset, width, type, arg_name */
  2483. +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
  2484. +do { \
  2485. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
  2486. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
  2487. +} while (0)
  2488. +
  2489. +/* cmd, param, offset, width, type, arg_name */
  2490. +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
  2491. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
  2492. +
  2493. +/* cmd, param, offset, width, type, arg_name */
  2494. +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
  2495. +do { \
  2496. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
  2497. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  2498. +} while (0)
  2499. +
  2500. +/* cmd, param, offset, width, type, arg_name */
  2501. +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
  2502. +do { \
  2503. + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
  2504. + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
  2505. + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
  2506. + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
  2507. +} while (0)
  2508. +
  2509. +/* cmd, param, offset, width, type, arg_name */
  2510. +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
  2511. +do { \
  2512. + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
  2513. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
  2514. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
  2515. + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
  2516. + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
  2517. + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
  2518. +} while (0)
  2519. +
  2520. +/* cmd, param, offset, width, type, arg_name */
  2521. +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
  2522. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
  2523. +
  2524. +/* cmd, param, offset, width, type, arg_name */
  2525. +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
  2526. +do { \
  2527. + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
  2528. + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
  2529. + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
  2530. + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
  2531. + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
  2532. +} while (0)
  2533. +
  2534. +/* cmd, param, offset, width, type, arg_name */
  2535. +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
  2536. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
  2537. +
  2538. +/* cmd, param, offset, width, type, arg_name */
  2539. +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
  2540. + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
  2541. +
  2542. +#endif /* _FSL_DPDMAI_CMD_H */
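
All the DPDMAI_CMD_*/DPDMAI_RSP_* macros above bottom out in mc_enc()/mc_dec() packing fields into the 64-bit command parameters. A worked round trip (not part of the patch) matching what DPDMAI_CMD_GET_IRQ() does on the encode side and MC_CMD_HDR_READ_TOKEN() on the decode side:

	/* Hedged sketch of the bit-packing helpers defined above: an 8-bit
	 * irq_index packed at bit offset 32 reads back unchanged.
	 */
	static void mc_enc_dec_sketch(void)
	{
		uint64_t param = 0;
		uint8_t irq_index = 0x2a;

		param |= mc_enc(32, 8, irq_index); /* param == 0x0000002a00000000 */
		WARN_ON((uint8_t)mc_dec(param, 32, 8) != 0x2a); /* round-trips */
	}
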
  2543. --- /dev/null
  2544. +++ b/drivers/dma/fsl-qdma.c
  2545. @@ -0,0 +1,1201 @@
  2546. +/*
  2547. + * drivers/dma/fsl-qdma.c
  2548. + *
  2549. + * Copyright 2014-2015 Freescale Semiconductor, Inc.
  2550. + *
  2551. + * Driver for the Freescale qDMA engine with software command queue mode.
  2552. + * Channel virtualization is supported through enqueuing of DMA jobs to,
  2553. + * or dequeuing DMA jobs from, different work queues.
  2554. + * This module can be found on Freescale LS SoCs.
  2555. + *
  2556. + * This program is free software; you can redistribute it and/or modify it
  2557. + * under the terms of the GNU General Public License as published by the
  2558. + * Free Software Foundation; either version 2 of the License, or (at your
  2559. + * option) any later version.
  2560. + */
  2561. +
  2562. +#include <asm/cacheflush.h>
  2563. +#include <linux/clk.h>
  2564. +#include <linux/delay.h>
  2565. +#include <linux/dma-mapping.h>
  2566. +#include <linux/dmapool.h>
  2567. +#include <linux/init.h>
  2568. +#include <linux/interrupt.h>
  2569. +#include <linux/module.h>
  2570. +#include <linux/of.h>
  2571. +#include <linux/of_address.h>
  2572. +#include <linux/of_device.h>
  2573. +#include <linux/of_dma.h>
  2574. +#include <linux/of_irq.h>
  2575. +#include <linux/slab.h>
  2576. +#include <linux/spinlock.h>
  2577. +
  2578. +#include "virt-dma.h"
  2579. +
  2580. +#define FSL_QDMA_DMR 0x0
  2581. +#define FSL_QDMA_DSR 0x4
  2582. +#define FSL_QDMA_DEIER 0xe00
  2583. +#define FSL_QDMA_DEDR 0xe04
  2584. +#define FSL_QDMA_DECFDW0R 0xe10
  2585. +#define FSL_QDMA_DECFDW1R 0xe14
  2586. +#define FSL_QDMA_DECFDW2R 0xe18
  2587. +#define FSL_QDMA_DECFDW3R 0xe1c
  2588. +#define FSL_QDMA_DECFQIDR 0xe30
  2589. +#define FSL_QDMA_DECBR 0xe34
  2590. +
  2591. +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
  2592. +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
  2593. +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
  2594. +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
  2595. +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
  2596. +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
  2597. +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
  2598. +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
  2599. +
  2600. +#define FSL_QDMA_SQDPAR 0x80c
  2601. +#define FSL_QDMA_SQEPAR 0x814
  2602. +#define FSL_QDMA_BSQMR 0x800
  2603. +#define FSL_QDMA_BSQSR 0x804
  2604. +#define FSL_QDMA_BSQICR 0x828
  2605. +#define FSL_QDMA_CQMR 0xa00
  2606. +#define FSL_QDMA_CQDSCR1 0xa08
  2607. +#define FSL_QDMA_CQDSCR2 0xa0c
  2608. +#define FSL_QDMA_CQIER 0xa10
  2609. +#define FSL_QDMA_CQEDR 0xa14
  2610. +#define FSL_QDMA_SQCCMR 0xa20
  2611. +
  2612. +#define FSL_QDMA_SQICR_ICEN
  2613. +
  2614. +#define FSL_QDMA_CQIDR_CQT 0xff000000
  2615. +#define FSL_QDMA_CQIDR_SQPE 0x800000
  2616. +#define FSL_QDMA_CQIDR_SQT 0x8000
  2617. +
  2618. +#define FSL_QDMA_BCQIER_CQTIE 0x8000
  2619. +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
  2620. +#define FSL_QDMA_BSQICR_ICEN 0x80000000
  2621. +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
  2622. +#define FSL_QDMA_CQIER_MEIE 0x80000000
  2623. +#define FSL_QDMA_CQIER_TEIE 0x1
  2624. +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
  2625. +
  2626. +#define FSL_QDMA_QUEUE_MAX 8
  2627. +
  2628. +#define FSL_QDMA_BCQMR_EN 0x80000000
  2629. +#define FSL_QDMA_BCQMR_EI 0x40000000
  2630. +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
  2631. +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
  2632. +
  2633. +#define FSL_QDMA_BCQSR_QF 0x10000
  2634. +#define FSL_QDMA_BCQSR_XOFF 0x1
  2635. +
  2636. +#define FSL_QDMA_BSQMR_EN 0x80000000
  2637. +#define FSL_QDMA_BSQMR_DI 0x40000000
  2638. +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
  2639. +
  2640. +#define FSL_QDMA_BSQSR_QE 0x20000
  2641. +
  2642. +#define FSL_QDMA_DMR_DQD 0x40000000
  2643. +#define FSL_QDMA_DSR_DB 0x80000000
  2644. +
  2645. +#define FSL_QDMA_BASE_BUFFER_SIZE 96
  2646. +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
  2647. +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
  2648. +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
  2649. +#define FSL_QDMA_QUEUE_NUM_MAX 8
  2650. +
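+/*
+ * Bit offsets of the fields in the sdf/ddf command word, as used by
+ * fsl_qdma_comp_fill_memcpy() and fsl_qdma_comp_fill_sg() below.
+ */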
  2651. +#define FSL_QDMA_CMD_RWTTYPE 0x4
  2652. +#define FSL_QDMA_CMD_LWC 0x2
  2653. +
  2654. +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
  2655. +#define FSL_QDMA_CMD_NS_OFFSET 27
  2656. +#define FSL_QDMA_CMD_DQOS_OFFSET 24
  2657. +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
  2658. +#define FSL_QDMA_CMD_DSEN_OFFSET 19
  2659. +#define FSL_QDMA_CMD_LWC_OFFSET 16
  2660. +
  2661. +#define FSL_QDMA_E_SG_TABLE 1
  2662. +#define FSL_QDMA_E_DATA_BUFFER 0
  2663. +#define FSL_QDMA_F_LAST_ENTRY 1
  2664. +
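+/*
+ * Queue index and address of the most recently handled status entry,
+ * used by fsl_qdma_queue_transfer_complete() to recognise duplicate
+ * completion notifications.
+ */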
   2665. +static u64 pre_addr, pre_queue;
  2666. +
  2667. +struct fsl_qdma_ccdf {
  2668. + u8 status;
  2669. + u32 rev1:22;
  2670. + u32 ser:1;
  2671. + u32 rev2:1;
  2672. + u32 rev3:20;
  2673. + u32 offset:9;
  2674. + u32 format:3;
  2675. + union {
  2676. + struct {
  2677. + u32 addr_lo; /* low 32-bits of 40-bit address */
  2678. + u32 addr_hi:8; /* high 8-bits of 40-bit address */
  2679. + u32 rev4:16;
  2680. + u32 queue:3;
  2681. + u32 rev5:3;
  2682. + u32 dd:2; /* dynamic debug */
  2683. + };
  2684. + struct {
  2685. + u64 addr:40;
  2686. + /* More efficient address accessor */
  2687. + u64 __notaddress:24;
  2688. + };
  2689. + };
  2690. +} __packed;
  2691. +
  2692. +struct fsl_qdma_csgf {
  2693. + u32 offset:13;
  2694. + u32 rev1:19;
  2695. + u32 length:30;
  2696. + u32 f:1;
  2697. + u32 e:1;
  2698. + union {
  2699. + struct {
  2700. + u32 addr_lo; /* low 32-bits of 40-bit address */
  2701. + u32 addr_hi:8; /* high 8-bits of 40-bit address */
  2702. + u32 rev2:24;
  2703. + };
  2704. + struct {
  2705. + u64 addr:40;
  2706. + /* More efficient address accessor */
  2707. + u64 __notaddress:24;
  2708. + };
  2709. + };
  2710. +} __packed;
  2711. +
  2712. +struct fsl_qdma_sdf {
  2713. + u32 rev3:32;
   2714. + u32 ssd:12; /* source stride distance */
   2715. + u32 sss:12; /* source stride size */
  2716. + u32 rev4:8;
  2717. + u32 rev5:32;
  2718. + u32 cmd;
  2719. +} __packed;
  2720. +
  2721. +struct fsl_qdma_ddf {
  2722. + u32 rev1:32;
  2723. + u32 dsd:12; /* Destination stride distance */
  2724. + u32 dss:12; /* Destination stride size */
  2725. + u32 rev2:8;
  2726. + u32 rev3:32;
  2727. + u32 cmd;
  2728. +} __packed;
  2729. +
  2730. +struct fsl_qdma_chan {
  2731. + struct virt_dma_chan vchan;
  2732. + struct virt_dma_desc vdesc;
  2733. + enum dma_status status;
  2734. + u32 slave_id;
  2735. + struct fsl_qdma_engine *qdma;
  2736. + struct fsl_qdma_queue *queue;
  2737. + struct list_head qcomp;
  2738. +};
  2739. +
  2740. +struct fsl_qdma_queue {
  2741. + struct fsl_qdma_ccdf *virt_head;
  2742. + struct fsl_qdma_ccdf *virt_tail;
  2743. + struct list_head comp_used;
  2744. + struct list_head comp_free;
  2745. + struct dma_pool *comp_pool;
  2746. + struct dma_pool *sg_pool;
  2747. + spinlock_t queue_lock;
  2748. + dma_addr_t bus_addr;
  2749. + u32 n_cq;
  2750. + u32 id;
  2751. + struct fsl_qdma_ccdf *cq;
  2752. +};
  2753. +
  2754. +struct fsl_qdma_sg {
  2755. + dma_addr_t bus_addr;
  2756. + void *virt_addr;
  2757. +};
  2758. +
  2759. +struct fsl_qdma_comp {
  2760. + dma_addr_t bus_addr;
  2761. + void *virt_addr;
  2762. + struct fsl_qdma_chan *qchan;
  2763. + struct fsl_qdma_sg *sg_block;
  2764. + struct virt_dma_desc vdesc;
  2765. + struct list_head list;
  2766. + u32 sg_block_src;
  2767. + u32 sg_block_dst;
  2768. +};
  2769. +
  2770. +struct fsl_qdma_engine {
  2771. + struct dma_device dma_dev;
  2772. + void __iomem *ctrl_base;
  2773. + void __iomem *status_base;
  2774. + void __iomem *block_base;
  2775. + u32 n_chans;
  2776. + u32 n_queues;
  2777. + struct mutex fsl_qdma_mutex;
  2778. + int error_irq;
  2779. + int queue_irq;
  2780. + bool big_endian;
  2781. + struct fsl_qdma_queue *queue;
  2782. + struct fsl_qdma_queue *status;
  2783. + struct fsl_qdma_chan chans[];
   2785. +};
  2786. +
  2787. +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
  2788. +{
  2789. + if (qdma->big_endian)
  2790. + return ioread32be(addr);
  2791. + else
  2792. + return ioread32(addr);
  2793. +}
  2794. +
  2795. +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
  2796. + void __iomem *addr)
  2797. +{
  2798. + if (qdma->big_endian)
  2799. + iowrite32be(val, addr);
  2800. + else
  2801. + iowrite32(val, addr);
  2802. +}
  2803. +
  2804. +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
  2805. +{
  2806. + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
  2807. +}
  2808. +
  2809. +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
  2810. +{
  2811. + return container_of(vd, struct fsl_qdma_comp, vdesc);
  2812. +}
  2813. +
  2814. +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
  2815. +{
   2816. + /*
   2817. + * In qDMA mode, nothing needs to be allocated per channel.
   2818. + */
  2819. + return 0;
  2820. +}
  2821. +
  2822. +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
  2823. +{
  2824. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  2825. + unsigned long flags;
  2826. + LIST_HEAD(head);
  2827. +
  2828. + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
  2829. + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
  2830. + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
  2831. +
  2832. + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
  2833. +}
  2834. +
  2835. +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
  2836. + dma_addr_t dst, dma_addr_t src, u32 len)
  2837. +{
  2838. + struct fsl_qdma_ccdf *ccdf;
  2839. + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
  2840. + struct fsl_qdma_sdf *sdf;
  2841. + struct fsl_qdma_ddf *ddf;
  2842. +
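+ /*
+ * Layout of the 96-byte comp buffer, in 16-byte slots: the frame
+ * descriptor (ccdf) at offset 0, the frame list table (csgf
+ * entries) at offset 16, and the source/destination descriptors
+ * (sdf/ddf) at offset 64; the pointer arithmetic below indexes
+ * these slots.
+ */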
  2843. + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
  2844. + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
  2845. + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
  2846. + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
  2847. + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
  2848. + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
  2849. +
  2850. + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
   2851. + /* Head Command Descriptor (Frame Descriptor) */
   2852. + ccdf->addr = fsl_comp->bus_addr + 16;
   2853. + ccdf->format = 1; /* Compound S/G format */
   2854. + /* Status notification is enqueued to the status queue. */
   2855. + ccdf->ser = 1;
   2856. + /* Compound Command Descriptor (Frame List Table) */
   2857. + csgf_desc->addr = fsl_comp->bus_addr + 64;
   2858. + /* Length is 32 bytes: the sdf plus ddf descriptor buffer. */
   2859. + csgf_desc->length = 32;
  2860. + csgf_src->addr = src;
  2861. + csgf_src->length = len;
  2862. + csgf_dest->addr = dst;
  2863. + csgf_dest->length = len;
  2864. + /* This entry is the last entry. */
  2865. + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
  2866. + /* Descriptor Buffer */
  2867. + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  2868. + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  2869. + ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
  2870. +}
  2871. +
  2872. +static void fsl_qdma_comp_fill_sg(
  2873. + struct fsl_qdma_comp *fsl_comp,
  2874. + struct scatterlist *dst_sg, unsigned int dst_nents,
  2875. + struct scatterlist *src_sg, unsigned int src_nents)
  2876. +{
  2877. + struct fsl_qdma_ccdf *ccdf;
  2878. + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
  2879. + struct fsl_qdma_sdf *sdf;
  2880. + struct fsl_qdma_ddf *ddf;
  2881. + struct fsl_qdma_sg *sg_block, *temp;
  2882. + struct scatterlist *sg;
  2883. + u64 total_src_len = 0;
  2884. + u64 total_dst_len = 0;
  2885. + u32 i;
  2886. +
  2887. + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
  2888. + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
  2889. + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
  2890. + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
  2891. + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
  2892. + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
  2893. +
  2894. + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
   2895. + /* Head Command Descriptor (Frame Descriptor) */
   2896. + ccdf->addr = fsl_comp->bus_addr + 16;
   2897. + ccdf->format = 1; /* Compound S/G format */
   2898. + /* Status notification is enqueued to the status queue. */
  2899. + ccdf->ser = 1;
  2900. +
   2901. + /* Compound Command Descriptor (Frame List Table) */
   2902. + csgf_desc->addr = fsl_comp->bus_addr + 64;
   2903. + /* Length is 32 bytes: the sdf plus ddf descriptor buffer. */
  2904. + csgf_desc->length = 32;
  2905. +
  2906. + sg_block = fsl_comp->sg_block;
  2907. + csgf_src->addr = sg_block->bus_addr;
   2908. + /* This entry links to the source s/g table. */
  2909. + csgf_src->e = FSL_QDMA_E_SG_TABLE;
  2910. +
  2911. + temp = sg_block + fsl_comp->sg_block_src;
  2912. + csgf_dest->addr = temp->bus_addr;
  2913. + /* This entry is the last entry. */
  2914. + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
   2915. + /* This entry links to the destination s/g table. */
  2916. + csgf_dest->e = FSL_QDMA_E_SG_TABLE;
  2917. +
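+ /*
+ * Fill the per-block s/g tables: each table holds
+ * FSL_QDMA_EXPECT_SG_ENTRY_NUM (16) entries, of which the first
+ * 15 describe data buffers and the last one links to the next
+ * table, so scatterlist entries are spread in groups of 15.
+ */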
  2918. + for_each_sg(src_sg, sg, src_nents, i) {
  2919. + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  2920. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  2921. + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  2922. + csgf_sg->addr = sg_dma_address(sg);
  2923. + csgf_sg->length = sg_dma_len(sg);
  2924. + total_src_len += sg_dma_len(sg);
  2925. +
  2926. + if (i == src_nents - 1)
  2927. + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
  2928. + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
  2929. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
  2930. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  2931. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
  2932. + temp = sg_block +
  2933. + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  2934. + csgf_sg->addr = temp->bus_addr;
  2935. + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
  2936. + }
  2937. + }
  2938. +
  2939. + sg_block += fsl_comp->sg_block_src;
  2940. + for_each_sg(dst_sg, sg, dst_nents, i) {
  2941. + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  2942. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  2943. + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  2944. + csgf_sg->addr = sg_dma_address(sg);
  2945. + csgf_sg->length = sg_dma_len(sg);
  2946. + total_dst_len += sg_dma_len(sg);
  2947. +
  2948. + if (i == dst_nents - 1)
  2949. + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
  2950. + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
  2951. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
  2952. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  2953. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
  2954. + temp = sg_block +
  2955. + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  2956. + csgf_sg->addr = temp->bus_addr;
  2957. + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
  2958. + }
  2959. + }
  2960. +
  2961. + if (total_src_len != total_dst_len)
  2962. + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
  2963. + "The data length for src and dst isn't match.\n");
  2964. +
  2965. + csgf_src->length = total_src_len;
  2966. + csgf_dest->length = total_dst_len;
  2967. +
  2968. + /* Descriptor Buffer */
  2969. + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  2970. + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  2971. +}
  2972. +
  2973. +/*
   2974. + * Pre-allocate command descriptors for enqueue, one per ring slot.
  2975. + */
  2976. +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
  2977. +{
  2978. + struct fsl_qdma_comp *comp_temp;
  2979. + int i;
  2980. +
  2981. + for (i = 0; i < queue->n_cq; i++) {
  2982. + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
  2983. + if (!comp_temp)
   2984. + return -ENOMEM;
  2985. + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
  2986. + GFP_NOWAIT,
  2987. + &comp_temp->bus_addr);
   2988. + if (!comp_temp->virt_addr) {
+ kfree(comp_temp);
   2989. + return -ENOMEM;
+ }
  2990. + list_add_tail(&comp_temp->list, &queue->comp_free);
  2991. + }
  2992. + return 0;
  2993. +}
  2994. +
  2995. +/*
  2996. + * Request a command descriptor for enqueue.
  2997. + */
  2998. +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
  2999. + struct fsl_qdma_chan *fsl_chan,
  3000. + unsigned int dst_nents,
  3001. + unsigned int src_nents)
  3002. +{
  3003. + struct fsl_qdma_comp *comp_temp;
  3004. + struct fsl_qdma_sg *sg_block;
  3005. + struct fsl_qdma_queue *queue = fsl_chan->queue;
  3006. + unsigned long flags;
  3007. + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
  3008. +
  3009. + spin_lock_irqsave(&queue->queue_lock, flags);
  3010. + if (list_empty(&queue->comp_free)) {
  3011. + spin_unlock_irqrestore(&queue->queue_lock, flags);
  3012. + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
  3013. + if (!comp_temp)
  3014. + return NULL;
  3015. + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
  3016. + GFP_NOWAIT,
  3017. + &comp_temp->bus_addr);
   3018. + if (!comp_temp->virt_addr) {
+ kfree(comp_temp);
   3019. + return NULL;
+ }
  3020. + } else {
  3021. + comp_temp = list_first_entry(&queue->comp_free,
  3022. + struct fsl_qdma_comp,
  3023. + list);
  3024. + list_del(&comp_temp->list);
  3025. + spin_unlock_irqrestore(&queue->queue_lock, flags);
  3026. + }
  3027. +
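+ /*
+ * Each s/g table carries up to FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1
+ * data entries (the last slot links to the next table); the
+ * "+ 1" rounds the division up, at the cost of one spare table
+ * when nents is an exact multiple of 15.
+ */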
  3028. + if (dst_nents != 0)
  3029. + dst_sg_entry_block = dst_nents /
  3030. + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3031. + else
  3032. + dst_sg_entry_block = 0;
  3033. +
  3034. + if (src_nents != 0)
  3035. + src_sg_entry_block = src_nents /
  3036. + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3037. + else
  3038. + src_sg_entry_block = 0;
  3039. +
  3040. + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
  3041. + if (sg_entry_total) {
   3042. + sg_block = kcalloc(sg_entry_total, sizeof(*sg_block),
   3043. + GFP_KERNEL);
  3045. + if (!sg_block)
  3046. + return NULL;
  3047. + comp_temp->sg_block = sg_block;
  3048. + for (i = 0; i < sg_entry_total; i++) {
   3049. + sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
   3050. + GFP_NOWAIT,
   3051. + &sg_block->bus_addr);
+ if (!sg_block->virt_addr)
+ return NULL;
   3052. + memset(sg_block->virt_addr, 0,
   3053. + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
  3054. + sg_block++;
  3055. + }
  3056. + }
  3057. +
  3058. + comp_temp->sg_block_src = src_sg_entry_block;
  3059. + comp_temp->sg_block_dst = dst_sg_entry_block;
  3060. + comp_temp->qchan = fsl_chan;
  3061. +
  3062. + return comp_temp;
  3063. +}
  3064. +
  3065. +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
  3066. + struct platform_device *pdev,
  3067. + unsigned int queue_num)
  3068. +{
  3069. + struct device_node *np = pdev->dev.of_node;
  3070. + struct fsl_qdma_queue *queue_head, *queue_temp;
  3071. + int ret, len, i;
  3072. + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
  3073. +
  3074. + if (queue_num > FSL_QDMA_QUEUE_MAX)
  3075. + queue_num = FSL_QDMA_QUEUE_MAX;
  3076. + len = sizeof(*queue_head) * queue_num;
  3077. + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
  3078. + if (!queue_head)
  3079. + return NULL;
  3080. +
  3081. + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
  3082. + queue_num);
  3083. + if (ret) {
  3084. + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
  3085. + return NULL;
  3086. + }
  3087. +
  3088. + for (i = 0; i < queue_num; i++) {
  3089. + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
  3090. + || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
   3091. + dev_err(&pdev->dev, "Invalid queue-sizes value.\n");
  3092. + return NULL;
  3093. + }
  3094. + queue_temp = queue_head + i;
  3095. + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
  3096. + sizeof(struct fsl_qdma_ccdf) *
  3097. + queue_size[i],
  3098. + &queue_temp->bus_addr,
  3099. + GFP_KERNEL);
  3100. + if (!queue_temp->cq)
  3101. + return NULL;
  3102. + queue_temp->n_cq = queue_size[i];
  3103. + queue_temp->id = i;
  3104. + queue_temp->virt_head = queue_temp->cq;
  3105. + queue_temp->virt_tail = queue_temp->cq;
  3106. + /*
  3107. + * The dma pool for queue command buffer
  3108. + */
  3109. + queue_temp->comp_pool = dma_pool_create("comp_pool",
  3110. + &pdev->dev,
  3111. + FSL_QDMA_BASE_BUFFER_SIZE,
  3112. + 16, 0);
  3113. + if (!queue_temp->comp_pool) {
  3114. + dma_free_coherent(&pdev->dev,
  3115. + sizeof(struct fsl_qdma_ccdf) *
  3116. + queue_size[i],
  3117. + queue_temp->cq,
  3118. + queue_temp->bus_addr);
  3119. + return NULL;
  3120. + }
  3121. + /*
   3122. + * The dma pool for s/g table buffers
  3123. + */
  3124. + queue_temp->sg_pool = dma_pool_create("sg_pool",
  3125. + &pdev->dev,
  3126. + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
  3127. + 64, 0);
  3128. + if (!queue_temp->sg_pool) {
  3129. + dma_free_coherent(&pdev->dev,
  3130. + sizeof(struct fsl_qdma_ccdf) *
  3131. + queue_size[i],
  3132. + queue_temp->cq,
  3133. + queue_temp->bus_addr);
  3134. + dma_pool_destroy(queue_temp->comp_pool);
  3135. + return NULL;
  3136. + }
  3137. + /*
  3138. + * List for queue command buffer
  3139. + */
  3140. + INIT_LIST_HEAD(&queue_temp->comp_used);
  3141. + INIT_LIST_HEAD(&queue_temp->comp_free);
  3142. + spin_lock_init(&queue_temp->queue_lock);
  3143. + }
  3144. +
  3145. + return queue_head;
  3146. +}
  3147. +
  3148. +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
  3149. + struct platform_device *pdev)
  3150. +{
  3151. + struct device_node *np = pdev->dev.of_node;
  3152. + struct fsl_qdma_queue *status_head;
  3153. + unsigned int status_size;
  3154. + int ret;
  3155. +
  3156. + ret = of_property_read_u32(np, "status-sizes", &status_size);
  3157. + if (ret) {
  3158. + dev_err(&pdev->dev, "Can't get status-sizes.\n");
  3159. + return NULL;
  3160. + }
  3161. + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
  3162. + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
   3163. + dev_err(&pdev->dev, "Invalid status-sizes value.\n");
  3164. + return NULL;
  3165. + }
  3166. + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
  3167. + GFP_KERNEL);
  3168. + if (!status_head)
  3169. + return NULL;
  3170. +
  3171. + /*
  3172. + * Buffer for queue command
  3173. + */
  3174. + status_head->cq = dma_alloc_coherent(&pdev->dev,
  3175. + sizeof(struct fsl_qdma_ccdf) *
  3176. + status_size,
  3177. + &status_head->bus_addr,
  3178. + GFP_KERNEL);
  3179. + if (!status_head->cq)
  3180. + return NULL;
  3181. + status_head->n_cq = status_size;
  3182. + status_head->virt_head = status_head->cq;
  3183. + status_head->virt_tail = status_head->cq;
  3184. + status_head->comp_pool = NULL;
  3185. +
  3186. + return status_head;
  3187. +}
  3188. +
  3189. +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
  3190. +{
  3191. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  3192. + void __iomem *block = fsl_qdma->block_base;
  3193. + int i, count = 5;
  3194. + u32 reg;
  3195. +
  3196. + /* Disable the command queue and wait for idle state. */
  3197. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  3198. + reg |= FSL_QDMA_DMR_DQD;
  3199. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  3200. + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
  3201. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
  3202. +
  3203. + while (1) {
  3204. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
  3205. + if (!(reg & FSL_QDMA_DSR_DB))
  3206. + break;
  3207. + if (count-- < 0)
  3208. + return -EBUSY;
  3209. + udelay(100);
  3210. + }
  3211. +
  3212. + /* Disable status queue. */
  3213. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
  3214. +
  3215. + /*
  3216. + * Clear the command queue interrupt detect register for all queues.
  3217. + */
  3218. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  3219. +
  3220. + return 0;
  3221. +}
  3222. +
  3223. +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
  3224. +{
  3225. + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
  3226. + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
  3227. + struct fsl_qdma_queue *temp_queue;
  3228. + struct fsl_qdma_comp *fsl_comp;
  3229. + struct fsl_qdma_ccdf *status_addr;
  3231. + void __iomem *block = fsl_qdma->block_base;
  3232. + u32 reg, i;
  3233. + bool duplicate, duplicate_handle;
  3234. +
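+ /*
+ * Pop completion entries from the status queue. An entry whose
+ * queue/address pair matches the previously seen one (pre_queue,
+ * pre_addr) is treated as a duplicate notification and is
+ * consumed without completing a descriptor a second time.
+ */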
  3235. + while (1) {
  3236. + duplicate = 0;
  3237. + duplicate_handle = 0;
  3238. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
  3239. + if (reg & FSL_QDMA_BSQSR_QE)
  3240. + return 0;
  3241. + status_addr = fsl_status->virt_head;
  3242. + if (status_addr->queue == pre_queue &&
  3243. + status_addr->addr == pre_addr)
  3244. + duplicate = 1;
  3245. +
  3246. + i = status_addr->queue;
  3247. + pre_queue = status_addr->queue;
  3248. + pre_addr = status_addr->addr;
  3249. + temp_queue = fsl_queue + i;
  3250. + spin_lock(&temp_queue->queue_lock);
  3251. + if (list_empty(&temp_queue->comp_used)) {
  3252. + if (duplicate)
  3253. + duplicate_handle = 1;
  3254. + else {
  3255. + spin_unlock(&temp_queue->queue_lock);
  3256. + return -1;
  3257. + }
  3258. + } else {
  3259. + fsl_comp = list_first_entry(&temp_queue->comp_used,
  3260. + struct fsl_qdma_comp,
  3261. + list);
  3264. + if (fsl_comp->bus_addr + 16 !=
  3265. + (dma_addr_t)status_addr->addr) {
  3266. + if (duplicate)
  3267. + duplicate_handle = 1;
  3268. + else {
  3269. + spin_unlock(&temp_queue->queue_lock);
  3270. + return -1;
  3271. + }
  3272. + }
  3273. + }
  3274. +
  3275. + if (duplicate_handle) {
  3276. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
  3277. + reg |= FSL_QDMA_BSQMR_DI;
  3278. + status_addr->addr = 0x0;
  3279. + fsl_status->virt_head++;
  3280. + if (fsl_status->virt_head == fsl_status->cq
  3281. + + fsl_status->n_cq)
  3282. + fsl_status->virt_head = fsl_status->cq;
  3283. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  3284. + spin_unlock(&temp_queue->queue_lock);
  3285. + continue;
  3286. + }
  3287. + list_del(&fsl_comp->list);
  3288. +
  3289. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
  3290. + reg |= FSL_QDMA_BSQMR_DI;
  3291. + status_addr->addr = 0x0;
  3292. + fsl_status->virt_head++;
  3293. + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
  3294. + fsl_status->virt_head = fsl_status->cq;
  3295. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  3296. + spin_unlock(&temp_queue->queue_lock);
  3297. +
  3298. + spin_lock(&fsl_comp->qchan->vchan.lock);
  3299. + vchan_cookie_complete(&fsl_comp->vdesc);
  3300. + fsl_comp->qchan->status = DMA_COMPLETE;
  3301. + spin_unlock(&fsl_comp->qchan->vchan.lock);
  3302. + }
  3303. + return 0;
  3304. +}
  3305. +
  3306. +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
  3307. +{
  3308. + struct fsl_qdma_engine *fsl_qdma = dev_id;
  3309. + unsigned int intr;
  3310. + void __iomem *status = fsl_qdma->status_base;
  3311. +
  3312. + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
  3313. +
  3314. + if (intr)
  3315. + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
  3316. +
  3317. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
  3318. + return IRQ_HANDLED;
  3319. +}
  3320. +
  3321. +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
  3322. +{
  3323. + struct fsl_qdma_engine *fsl_qdma = dev_id;
  3324. + unsigned int intr, reg;
  3325. + void __iomem *block = fsl_qdma->block_base;
  3326. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  3327. +
  3328. + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
  3329. +
  3330. + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
  3331. + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
  3332. +
  3333. + if (intr != 0) {
  3334. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  3335. + reg |= FSL_QDMA_DMR_DQD;
  3336. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  3337. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
  3338. + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
  3339. + }
  3340. +
  3341. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  3342. +
  3343. + return IRQ_HANDLED;
  3344. +}
  3345. +
  3346. +static int
  3347. +fsl_qdma_irq_init(struct platform_device *pdev,
  3348. + struct fsl_qdma_engine *fsl_qdma)
  3349. +{
  3350. + int ret;
  3351. +
  3352. + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
  3353. + "qdma-error");
  3354. + if (fsl_qdma->error_irq < 0) {
   3355. + dev_err(&pdev->dev, "Can't get qdma error irq.\n");
  3356. + return fsl_qdma->error_irq;
  3357. + }
  3358. +
  3359. + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
  3360. + if (fsl_qdma->queue_irq < 0) {
  3361. + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
  3362. + return fsl_qdma->queue_irq;
  3363. + }
  3364. +
  3365. + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
  3366. + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
  3367. + if (ret) {
  3368. + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
  3369. + return ret;
  3370. + }
  3371. + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
  3372. + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
  3373. + if (ret) {
  3374. + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
  3375. + return ret;
  3376. + }
  3377. +
  3378. + return 0;
  3379. +}
  3380. +
  3381. +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
  3382. +{
  3383. + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
  3384. + struct fsl_qdma_queue *temp;
  3385. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  3386. + void __iomem *status = fsl_qdma->status_base;
  3387. + void __iomem *block = fsl_qdma->block_base;
  3388. + int i, ret;
  3389. + u32 reg;
  3390. +
  3391. + /* Try to halt the qDMA engine first. */
  3392. + ret = fsl_qdma_halt(fsl_qdma);
  3393. + if (ret) {
  3394. + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
  3395. + return ret;
  3396. + }
  3397. +
  3398. + /*
  3399. + * Clear the command queue interrupt detect register for all queues.
  3400. + */
  3401. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  3402. +
  3403. + for (i = 0; i < fsl_qdma->n_queues; i++) {
  3404. + temp = fsl_queue + i;
  3405. + /*
  3406. + * Initialize Command Queue registers to point to the first
  3407. + * command descriptor in memory.
  3408. + * Dequeue Pointer Address Registers
  3409. + * Enqueue Pointer Address Registers
  3410. + */
  3411. + qdma_writel(fsl_qdma, temp->bus_addr,
  3412. + block + FSL_QDMA_BCQDPA_SADDR(i));
  3413. + qdma_writel(fsl_qdma, temp->bus_addr,
  3414. + block + FSL_QDMA_BCQEPA_SADDR(i));
  3415. +
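+ /*
+ * CQ_SIZE encodes log2(n_cq) - 6, so the minimum ring of 64
+ * descriptors maps to 0; CD_THLD appears to use a similar
+ * logarithmic encoding (ilog2(n_cq) - 4).
+ */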
  3416. + /* Initialize the queue mode. */
  3417. + reg = FSL_QDMA_BCQMR_EN;
   3418. + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
   3419. + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
  3420. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
  3421. + }
  3422. +
  3423. + /*
  3424. + * Workaround for erratum: ERR010812.
   3425. + * We must enable XOFF to avoid enqueue rejections.
  3426. + * Setting SQCCMR ENTER_WM to 0x20.
  3427. + */
  3428. + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
  3429. + block + FSL_QDMA_SQCCMR);
  3430. + /*
  3431. + * Initialize status queue registers to point to the first
  3432. + * command descriptor in memory.
  3433. + * Dequeue Pointer Address Registers
  3434. + * Enqueue Pointer Address Registers
  3435. + */
  3436. + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
  3437. + block + FSL_QDMA_SQEPAR);
  3438. + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
  3439. + block + FSL_QDMA_SQDPAR);
  3440. + /* Initialize status queue interrupt. */
  3441. + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
  3442. + block + FSL_QDMA_BCQIER(0));
  3443. + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
  3444. + | 0x8000,
  3445. + block + FSL_QDMA_BSQICR);
  3446. + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
  3447. + block + FSL_QDMA_CQIER);
  3448. + /* Initialize controller interrupt register. */
  3449. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
  3450. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
  3451. +
  3452. + /* Initialize the status queue mode. */
  3453. + reg = FSL_QDMA_BSQMR_EN;
   3454. + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq) - 6);
  3455. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  3456. +
  3457. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  3458. + reg &= ~FSL_QDMA_DMR_DQD;
  3459. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  3460. +
  3461. + return 0;
  3462. +}
  3463. +
  3464. +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
  3465. + struct dma_chan *chan,
  3466. + struct scatterlist *dst_sg, unsigned int dst_nents,
  3467. + struct scatterlist *src_sg, unsigned int src_nents,
  3468. + unsigned long flags)
  3469. +{
  3470. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  3471. + struct fsl_qdma_comp *fsl_comp;
  3472. +
   3473. + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
   3474. + dst_nents,
   3475. + src_nents);
+ if (!fsl_comp)
+ return NULL;
   3476. + fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
  3477. +
  3478. + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
  3479. +}
  3480. +
  3481. +static struct dma_async_tx_descriptor *
  3482. +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
  3483. + dma_addr_t src, size_t len, unsigned long flags)
  3484. +{
  3485. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  3486. + struct fsl_qdma_comp *fsl_comp;
  3487. +
   3488. + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+ if (!fsl_comp)
+ return NULL;
   3489. + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
  3490. +
  3491. + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
  3492. +}
  3493. +
  3494. +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
  3495. +{
  3496. + void __iomem *block = fsl_chan->qdma->block_base;
  3497. + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
  3498. + struct fsl_qdma_comp *fsl_comp;
  3499. + struct virt_dma_desc *vdesc;
  3500. + u32 reg;
  3501. +
  3502. + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
  3503. + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
  3504. + return;
  3505. + vdesc = vchan_next_desc(&fsl_chan->vchan);
  3506. + if (!vdesc)
  3507. + return;
  3508. + list_del(&vdesc->node);
  3509. + fsl_comp = to_fsl_qdma_comp(vdesc);
  3510. +
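+ /*
+ * Copy the 16-byte frame descriptor head into the command queue
+ * ring, then set BCQMR_EI so the engine picks up the new entry.
+ */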
  3511. + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
  3512. + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
  3513. + fsl_queue->virt_head = fsl_queue->cq;
  3514. +
  3515. + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
  3516. + barrier();
  3517. + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
  3518. + reg |= FSL_QDMA_BCQMR_EI;
  3519. + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
  3520. + fsl_chan->status = DMA_IN_PROGRESS;
  3521. +}
  3522. +
  3523. +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
  3524. + dma_cookie_t cookie, struct dma_tx_state *txstate)
  3525. +{
  3526. + return dma_cookie_status(chan, cookie, txstate);
  3527. +}
  3528. +
  3529. +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
  3530. +{
  3531. + struct fsl_qdma_comp *fsl_comp;
  3532. + struct fsl_qdma_queue *fsl_queue;
  3533. + struct fsl_qdma_sg *sg_block;
  3534. + unsigned long flags;
  3535. + unsigned int i;
  3536. +
  3537. + fsl_comp = to_fsl_qdma_comp(vdesc);
  3538. + fsl_queue = fsl_comp->qchan->queue;
  3539. +
  3540. + if (fsl_comp->sg_block) {
  3541. + for (i = 0; i < fsl_comp->sg_block_src +
  3542. + fsl_comp->sg_block_dst; i++) {
  3543. + sg_block = fsl_comp->sg_block + i;
  3544. + dma_pool_free(fsl_queue->sg_pool,
  3545. + sg_block->virt_addr,
  3546. + sg_block->bus_addr);
  3547. + }
  3548. + kfree(fsl_comp->sg_block);
  3549. + }
  3550. +
  3551. + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
  3552. + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
  3553. + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
  3554. +}
  3555. +
  3556. +static void fsl_qdma_issue_pending(struct dma_chan *chan)
  3557. +{
  3558. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  3559. + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
  3560. + unsigned long flags;
  3561. +
  3562. + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
  3563. + spin_lock(&fsl_chan->vchan.lock);
  3564. + if (vchan_issue_pending(&fsl_chan->vchan))
  3565. + fsl_qdma_enqueue_desc(fsl_chan);
  3566. + spin_unlock(&fsl_chan->vchan.lock);
  3567. + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
  3568. +}
  3569. +
  3570. +static int fsl_qdma_probe(struct platform_device *pdev)
  3571. +{
  3572. + struct device_node *np = pdev->dev.of_node;
  3573. + struct fsl_qdma_engine *fsl_qdma;
  3574. + struct fsl_qdma_chan *fsl_chan;
  3575. + struct resource *res;
  3576. + unsigned int len, chans, queues;
  3577. + int ret, i;
  3578. +
  3579. + ret = of_property_read_u32(np, "channels", &chans);
  3580. + if (ret) {
  3581. + dev_err(&pdev->dev, "Can't get channels.\n");
  3582. + return ret;
  3583. + }
  3584. +
  3585. + len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
  3586. + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
  3587. + if (!fsl_qdma)
  3588. + return -ENOMEM;
  3589. +
  3590. + ret = of_property_read_u32(np, "queues", &queues);
  3591. + if (ret) {
  3592. + dev_err(&pdev->dev, "Can't get queues.\n");
  3593. + return ret;
  3594. + }
  3595. +
  3596. + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
  3597. + if (!fsl_qdma->queue)
  3598. + return -ENOMEM;
  3599. +
  3600. + fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
  3601. + if (!fsl_qdma->status)
  3602. + return -ENOMEM;
  3603. +
  3604. + fsl_qdma->n_chans = chans;
  3605. + fsl_qdma->n_queues = queues;
  3606. + mutex_init(&fsl_qdma->fsl_qdma_mutex);
  3607. +
  3608. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  3609. + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
  3610. + if (IS_ERR(fsl_qdma->ctrl_base))
  3611. + return PTR_ERR(fsl_qdma->ctrl_base);
  3612. +
  3613. + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  3614. + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
  3615. + if (IS_ERR(fsl_qdma->status_base))
  3616. + return PTR_ERR(fsl_qdma->status_base);
  3617. +
  3618. + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
  3619. + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
  3620. + if (IS_ERR(fsl_qdma->block_base))
  3621. + return PTR_ERR(fsl_qdma->block_base);
  3622. +
  3623. + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
  3624. + if (ret)
  3625. + return ret;
  3626. +
  3627. + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
  3628. + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
  3629. + for (i = 0; i < fsl_qdma->n_chans; i++) {
  3630. + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
  3631. +
  3632. + fsl_chan->qdma = fsl_qdma;
  3633. + fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
  3634. + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
  3635. + INIT_LIST_HEAD(&fsl_chan->qcomp);
  3636. + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
  3637. + }
   3638. + for (i = 0; i < fsl_qdma->n_queues; i++) {
   3639. + ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+ if (ret)
+ return ret;
+ }
  3640. +
  3641. + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
  3642. + dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
  3643. +
  3644. + fsl_qdma->dma_dev.dev = &pdev->dev;
  3645. + fsl_qdma->dma_dev.device_alloc_chan_resources
  3646. + = fsl_qdma_alloc_chan_resources;
  3647. + fsl_qdma->dma_dev.device_free_chan_resources
  3648. + = fsl_qdma_free_chan_resources;
  3649. + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
  3650. + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
  3651. + fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
  3652. + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
  3653. +
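+ /* Frame descriptor address fields are 40 bits wide (see fsl_qdma_ccdf). */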
  3654. + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
  3655. +
  3656. + platform_set_drvdata(pdev, fsl_qdma);
  3657. +
  3658. + ret = dma_async_device_register(&fsl_qdma->dma_dev);
  3659. + if (ret) {
  3660. + dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
  3661. + return ret;
  3662. + }
  3663. +
   3664. + ret = fsl_qdma_reg_init(fsl_qdma);
   3665. + if (ret) {
   3666. + dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
+ dma_async_device_unregister(&fsl_qdma->dma_dev);
   3667. + return ret;
   3668. + }
   3669. +
   3671. + return 0;
  3672. +}
  3673. +
  3674. +static int fsl_qdma_remove(struct platform_device *pdev)
  3675. +{
  3676. + struct device_node *np = pdev->dev.of_node;
  3677. + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
  3678. + struct fsl_qdma_queue *queue_temp;
  3679. + struct fsl_qdma_queue *status = fsl_qdma->status;
  3680. + struct fsl_qdma_comp *comp_temp, *_comp_temp;
  3681. + int i;
  3682. +
  3683. + of_dma_controller_free(np);
  3684. + dma_async_device_unregister(&fsl_qdma->dma_dev);
  3685. +
  3686. + /* Free descriptor areas */
  3687. + for (i = 0; i < fsl_qdma->n_queues; i++) {
  3688. + queue_temp = fsl_qdma->queue + i;
  3689. + list_for_each_entry_safe(comp_temp, _comp_temp,
  3690. + &queue_temp->comp_used, list) {
  3691. + dma_pool_free(queue_temp->comp_pool,
  3692. + comp_temp->virt_addr,
  3693. + comp_temp->bus_addr);
  3694. + list_del(&comp_temp->list);
  3695. + kfree(comp_temp);
  3696. + }
  3697. + list_for_each_entry_safe(comp_temp, _comp_temp,
  3698. + &queue_temp->comp_free, list) {
  3699. + dma_pool_free(queue_temp->comp_pool,
  3700. + comp_temp->virt_addr,
  3701. + comp_temp->bus_addr);
  3702. + list_del(&comp_temp->list);
  3703. + kfree(comp_temp);
  3704. + }
  3705. + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
  3706. + queue_temp->n_cq, queue_temp->cq,
  3707. + queue_temp->bus_addr);
  3708. + dma_pool_destroy(queue_temp->comp_pool);
  3709. + }
  3710. +
  3711. + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
  3712. + status->n_cq, status->cq, status->bus_addr);
  3713. + return 0;
  3714. +}
  3715. +
  3716. +static const struct of_device_id fsl_qdma_dt_ids[] = {
  3717. + { .compatible = "fsl,ls1021a-qdma", },
  3718. + { /* sentinel */ }
  3719. +};
  3720. +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
  3721. +
  3722. +static struct platform_driver fsl_qdma_driver = {
  3723. + .driver = {
  3724. + .name = "fsl-qdma",
  3726. + .of_match_table = fsl_qdma_dt_ids,
  3727. + },
  3728. + .probe = fsl_qdma_probe,
  3729. + .remove = fsl_qdma_remove,
  3730. +};
  3731. +
  3732. +static int __init fsl_qdma_init(void)
  3733. +{
  3734. + return platform_driver_register(&fsl_qdma_driver);
  3735. +}
  3736. +subsys_initcall(fsl_qdma_init);
  3737. +
  3738. +static void __exit fsl_qdma_exit(void)
  3739. +{
  3740. + platform_driver_unregister(&fsl_qdma_driver);
  3741. +}
  3742. +module_exit(fsl_qdma_exit);
  3743. +
  3744. +MODULE_ALIAS("platform:fsl-qdma");
  3745. +MODULE_DESCRIPTION("Freescale qDMA engine driver");
  3746. +MODULE_LICENSE("GPL v2");