805-dma-support-layerscape.patch

From 854c1f0e9574e9b25a55b439608c71e013b34a56 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Mon, 25 Sep 2017 12:12:20 +0800
Subject: [PATCH] dma: support layerscape

This is an integrated patch for Layerscape DMA support.
Signed-off-by: jiaheng.fan <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
 drivers/dma/Kconfig                     |   31 +
 drivers/dma/Makefile                    |    3 +
 drivers/dma/caam_dma.c                  |  563 +++++++++++++++
 drivers/dma/dpaa2-qdma/Kconfig          |    8 +
 drivers/dma/dpaa2-qdma/Makefile         |    8 +
 drivers/dma/dpaa2-qdma/dpaa2-qdma.c     |  986 +++++++++++++++++++++++++
 drivers/dma/dpaa2-qdma/dpaa2-qdma.h     |  262 +++++++
 drivers/dma/dpaa2-qdma/dpdmai.c         |  454 ++++++++++++
 drivers/dma/dpaa2-qdma/fsl_dpdmai.h     |  521 ++++++++++++++
 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h |  222 ++++++
 drivers/dma/fsl-qdma.c                  | 1201 +++++++++++++++++++++++++++++++
 11 files changed, 4259 insertions(+)
 create mode 100644 drivers/dma/caam_dma.c
 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
 create mode 100644 drivers/dma/fsl-qdma.c
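
[Editor's note: all three drivers added by this patch register with the generic
dmaengine framework, so kernel clients drive them through the standard API
rather than driver-specific entry points. The sketch below (not part of the
patch) shows how a client might exercise the DMA_MEMCPY capability that
caam_dma.c and the qDMA drivers advertise; it assumes already-DMA-mapped
src/dst addresses, and it busy-waits where a real client would normally use a
completion callback.]

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hedged sketch: a minimal dmaengine memcpy client. */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* grab any channel advertising DMA_MEMCPY (CAAM, qDMA, ...) */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (IS_ERR_OR_NULL(tx)) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);	/* e.g. caam_dma_tx_submit() below */
	dma_async_issue_pending(chan);	/* e.g. caam_dma_issue_pending() */

	/* busy-wait helper; production code would use a callback */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}
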
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 141aefbe..8caaf091 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -192,6 +192,20 @@ config FSL_EDMA
multiplexing capability for DMA request sources(slot).
This module can be found on Freescale Vybrid and LS-1 SoCs.
+config FSL_QDMA
+ tristate "Freescale qDMA engine support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the Freescale qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on Freescale LS SoCs.
+
+source drivers/dma/dpaa2-qdma/Kconfig
+
config FSL_RAID
tristate "Freescale RAID engine Support"
depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -564,6 +578,23 @@ config ZX_DMA
help
Support the DMA engine for ZTE ZX296702 platform devices.
+config CRYPTO_DEV_FSL_CAAM_DMA
+ tristate "CAAM DMA engine support"
+ depends on CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select DMA_ENGINE
+ select ASYNC_CORE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Selecting this will offload the DMA operations for users of
+ the scatter gather memcopy API to the CAAM via job rings. The
+ CAAM is a hardware module that provides hardware acceleration to
+ cryptographic operations. It has a built-in DMA controller that can
+ be programmed to read/write cryptographic data. This module defines
+ a DMA driver that uses the DMA capabilities of the CAAM.
+
+ To compile this as a module, choose M here: the module
+ will be called caam_dma.
# driver files
source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e4dc9cac..a694da0e 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
@@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/caam_dma.c b/drivers/dma/caam_dma.c
new file mode 100644
index 00000000..e430b320
--- /dev/null
+++ b/drivers/dma/caam_dma.c
@@ -0,0 +1,563 @@
+/*
+ * caam support for SG DMA
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include <linux/dmaengine.h>
+#include "dmaengine.h"
+
+#include "../crypto/caam/regs.h"
+#include "../crypto/caam/jr.h"
+#include "../crypto/caam/error.h"
+#include "../crypto/caam/intern.h"
+#include "../crypto/caam/desc_constr.h"
+#include "../crypto/caam/sg_sw_sec4.h"
+
+#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
+ CAAM_CMD_SZ)
+
+/* This is the max chunk size of a DMA transfer. If a buffer is larger than
+ * this value, it is internally broken into chunks of at most
+ * CAAM_DMA_CHUNK_SIZE bytes, and a DMA transfer request is issued for each
+ * chunk. This value is the largest 16-bit number that is a multiple of
+ * 256 bytes (the largest configurable CAAM DMA burst size).
+ */
+#define CAAM_DMA_CHUNK_SIZE 65280
+
+struct caam_dma_sh_desc {
+ u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
+ dma_addr_t desc_dma;
+};
+
+/* caam dma extended descriptor */
+struct caam_dma_edesc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head node;
+ struct caam_dma_ctx *ctx;
+ dma_addr_t src_dma;
+ dma_addr_t dst_dma;
+ unsigned int src_len;
+ unsigned int dst_len;
+ struct sec4_sg_entry *sec4_sg;
+ u32 jd[] ____cacheline_aligned;
+};
+
+/*
+ * caam_dma_ctx - per jr/channel context
+ * @chan: dma channel used by async_tx API
+ * @node: list_head used to attach to the global dma_ctx_list
+ * @jrdev: Job Ring device
+ * @submit_q: queue of pending (submitted, but not enqueued) jobs
+ * @done_not_acked: jobs that have been completed by jr, but maybe not acked
+ * @edesc_lock: protects extended descriptor
+ */
+struct caam_dma_ctx {
+ struct dma_chan chan;
+ struct list_head node;
+ struct device *jrdev;
+ struct list_head submit_q;
+ struct list_head done_not_acked;
+ spinlock_t edesc_lock;
+};
+
+static struct dma_device *dma_dev;
+static struct caam_dma_sh_desc *dma_sh_desc;
+static LIST_HEAD(dma_ctx_list);
+
+static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct caam_dma_edesc *edesc = NULL;
+ struct caam_dma_ctx *ctx = NULL;
+ dma_cookie_t cookie;
+
+ edesc = container_of(tx, struct caam_dma_edesc, async_tx);
+ ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
+
+ spin_lock_bh(&ctx->edesc_lock);
+
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&edesc->node, &ctx->submit_q);
+
+ spin_unlock_bh(&ctx->edesc_lock);
+
+ return cookie;
+}
+
+static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
+ unsigned int nents)
+{
+ unsigned int len;
+
+ for (len = 0; sg && nents; sg = sg_next(sg), nents--)
+ len += sg_dma_len(sg);
+
+ return len;
+}
+
+static struct caam_dma_edesc *
+caam_dma_sg_edesc_alloc(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+ struct device *jrdev = ctx->jrdev;
+ struct caam_dma_edesc *edesc;
+ struct sec4_sg_entry *sec4_sg;
+ dma_addr_t sec4_sg_dma_src;
+ unsigned int sec4_sg_bytes;
+
+ if (!dst_sg || !src_sg || !dst_nents || !src_nents)
+ return NULL;
+
+ sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg);
+
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | GFP_NOWAIT);
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+
+ edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents);
+ edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents);
+ if (edesc->src_len != edesc->dst_len) {
+ dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n",
+ __func__, edesc->src_len, edesc->dst_len);
+ kfree(edesc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_async_tx_descriptor_init(&edesc->async_tx, chan);
+ edesc->async_tx.tx_submit = caam_dma_tx_submit;
+ edesc->async_tx.flags = flags;
+ edesc->async_tx.cookie = -EBUSY;
+
+ /* Prepare SEC SGs */
+ edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) +
+ DESC_JOB_IO_LEN;
+
+ sec4_sg = edesc->sec4_sg;
+ sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0);
+
+ sec4_sg += src_nents;
+ sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0);
+
+ sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, sec4_sg_dma_src)) {
+ dev_err(jrdev, "error mapping segments to device\n");
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_dma = sec4_sg_dma_src;
+ edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
+ edesc->ctx = ctx;
+
+ return edesc;
+}
+
+static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
+{
+ struct caam_dma_ctx *ctx = edesc->ctx;
+ struct caam_dma_edesc *_edesc = NULL;
+
+ spin_lock_bh(&ctx->edesc_lock);
+
+ list_add_tail(&edesc->node, &ctx->done_not_acked);
+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
+ if (async_tx_test_ack(&edesc->async_tx)) {
+ list_del(&edesc->node);
+ kfree(edesc);
+ }
+ }
+
+ spin_unlock_bh(&ctx->edesc_lock);
+}
+
+static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
+ void *context)
+{
+ struct caam_dma_edesc *edesc = context;
+ struct caam_dma_ctx *ctx = edesc->ctx;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ if (err)
+ caam_jr_strstatus(ctx->jrdev, err);
+
+ dma_run_dependencies(&edesc->async_tx);
+
+ spin_lock_bh(&ctx->edesc_lock);
+ dma_cookie_complete(&edesc->async_tx);
+ spin_unlock_bh(&ctx->edesc_lock);
+
+ callback = edesc->async_tx.callback;
+ callback_param = edesc->async_tx.callback_param;
+
+ dma_descriptor_unmap(&edesc->async_tx);
+
+ caam_jr_chan_free_edesc(edesc);
+
+ if (callback)
+ callback(callback_param);
+}
+
+static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
+{
+ u32 *jd = edesc->jd;
+ u32 *sh_desc = dma_sh_desc->desc;
+ dma_addr_t desc_dma = dma_sh_desc->desc_dma;
+
+ /* init the job descriptor */
+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
+
+ /* set SEQIN PTR */
+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
+
+ /* set SEQOUT PTR */
+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
+#endif
+}
+
+/* This function can be called from an interrupt context */
+static struct dma_async_tx_descriptor *
+caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
+{
+ struct caam_dma_edesc *edesc;
+
+ /* allocate extended descriptor */
+ edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
+ src_nents, flags);
+ if (IS_ERR_OR_NULL(edesc))
+ return ERR_CAST(edesc);
+
+ /* Initialize job descriptor */
+ caam_dma_sg_init_job_desc(edesc);
+
+ return &edesc->async_tx;
+}
+
+static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
+{
+ u32 *jd = edesc->jd;
+ u32 *sh_desc = dma_sh_desc->desc;
+ dma_addr_t desc_dma = dma_sh_desc->desc_dma;
+
+ /* init the job descriptor */
+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
+
+ /* set SEQIN PTR */
+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
+
+ /* set SEQOUT PTR */
+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
+#endif
+}
+
+static struct dma_async_tx_descriptor *
+caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct caam_dma_edesc *edesc;
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+
+ dma_async_tx_descriptor_init(&edesc->async_tx, chan);
+ edesc->async_tx.tx_submit = caam_dma_tx_submit;
+ edesc->async_tx.flags = flags;
+ edesc->async_tx.cookie = -EBUSY;
+
+ edesc->src_dma = src;
+ edesc->src_len = len;
+ edesc->dst_dma = dst;
+ edesc->dst_len = len;
+ edesc->ctx = ctx;
+
+ caam_dma_memcpy_init_job_desc(edesc);
+
+ return &edesc->async_tx;
+}
+
+/* This function can be called in an interrupt context */
+static void caam_dma_issue_pending(struct dma_chan *chan)
+{
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+ struct caam_dma_edesc *edesc, *_edesc;
+
+ spin_lock_bh(&ctx->edesc_lock);
+ list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
+ if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
+ caam_dma_done, edesc) < 0)
+ break;
+ list_del(&edesc->node);
+ }
+ spin_unlock_bh(&ctx->edesc_lock);
+}
+
+static void caam_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+ struct caam_dma_edesc *edesc, *_edesc;
+
+ spin_lock_bh(&ctx->edesc_lock);
+ list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
+ list_del(&edesc->node);
+ kfree(edesc);
+ }
+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
+ list_del(&edesc->node);
+ kfree(edesc);
+ }
+ spin_unlock_bh(&ctx->edesc_lock);
+}
+
+static int caam_dma_jr_chan_bind(void)
+{
+ struct device *jrdev;
+ struct caam_dma_ctx *ctx;
+ int bonds = 0;
+ int i;
+
+ for (i = 0; i < caam_jr_driver_probed(); i++) {
+ jrdev = caam_jridx_alloc(i);
+ if (IS_ERR(jrdev)) {
+ pr_err("job ring device %d allocation failed\n", i);
+ continue;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ caam_jr_free(jrdev);
+ continue;
+ }
+
+ ctx->chan.device = dma_dev;
+ ctx->chan.private = ctx;
+
+ ctx->jrdev = jrdev;
+
+ INIT_LIST_HEAD(&ctx->submit_q);
+ INIT_LIST_HEAD(&ctx->done_not_acked);
+ INIT_LIST_HEAD(&ctx->node);
+ spin_lock_init(&ctx->edesc_lock);
+
+ dma_cookie_init(&ctx->chan);
+
+ /* add the context of this channel to the context list */
+ list_add_tail(&ctx->node, &dma_ctx_list);
+
+ /* add this channel to the device chan list */
+ list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
+
+ bonds++;
+ }
+
+ return bonds;
+}
+
+static inline void caam_jr_dma_free(struct dma_chan *chan)
+{
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+
+ list_del(&ctx->node);
+ list_del(&chan->device_node);
+ caam_jr_free(ctx->jrdev);
+ kfree(ctx);
+}
+
+static void set_caam_dma_desc(u32 *desc)
+{
+ u32 *jmp_cmd;
+
+ /* dma shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
+
+ /* REG1 = CAAM_DMA_CHUNK_SIZE */
+ append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
+
+ /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
+ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
+
+ /* if (REG0 > 0)
+ * jmp to LABEL1
+ */
+ jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
+ JUMP_COND_MATH_Z);
+
+ /* REG1 = SEQINLEN */
+ append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
+
+ /* LABEL1 */
+ set_jump_tgt_here(desc, jmp_cmd);
+
+ /* VARSEQINLEN = REG1 */
+ append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
+
+ /* VARSEQOUTLEN = REG1 */
+ append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
+
+ /* do FIFO STORE */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
+
+ /* do FIFO LOAD */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IFIFO | LDST_VLF);
+
+ /* if (REG0 > 0)
+ * jmp 0xF8 (after shared desc header)
+ */
+ append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
+ JUMP_COND_MATH_Z | 0xF8);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+
+static int __init caam_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *ctrldev = dev->parent;
+ struct dma_chan *chan, *_chan;
+ u32 *sh_desc;
+ int err = -ENOMEM;
+ int bonds;
+
+ if (!caam_jr_driver_probed()) {
+ dev_info(dev, "Defer probing after JR driver probing\n");
+ return -EPROBE_DEFER;
+ }
+
+ dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
+ if (!dma_dev)
+ return -ENOMEM;
+
+ dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
+ if (!dma_sh_desc)
+ goto desc_err;
+
+ sh_desc = dma_sh_desc->desc;
+ set_caam_dma_desc(sh_desc);
+ dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
+ desc_bytes(sh_desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
+ dev_err(dev, "unable to map dma descriptor\n");
+ goto map_err;
+ }
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ bonds = caam_dma_jr_chan_bind();
+ if (!bonds) {
+ err = -ENODEV;
+ goto jr_bind_err;
+ }
+
+ dma_dev->dev = dev;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = caam_dma_issue_pending;
+ dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
+ dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
+ dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
+
+ err = dma_async_device_register(dma_dev);
+ if (err) {
+ dev_err(dev, "Failed to register CAAM DMA engine\n");
+ goto jr_bind_err;
+ }
+
+ dev_info(dev, "caam dma support with %d job rings\n", bonds);
+
+ return err;
+
+jr_bind_err:
+ list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
+ caam_jr_dma_free(chan);
+
+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
+ DMA_TO_DEVICE);
+map_err:
+ kfree(dma_sh_desc);
+desc_err:
+ kfree(dma_dev);
+ return err;
+}
+
+static int caam_dma_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *ctrldev = dev->parent;
+ struct caam_dma_ctx *ctx, *_ctx;
+
+ dma_async_device_unregister(dma_dev);
+
+ list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
+ list_del(&ctx->node);
+ caam_jr_free(ctx->jrdev);
+ kfree(ctx);
+ }
+
+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
+ desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
+
+ kfree(dma_sh_desc);
+ kfree(dma_dev);
+
+ dev_info(dev, "caam dma support disabled\n");
+ return 0;
+}
+
+static const struct of_device_id caam_dma_match[] = {
+ { .compatible = "fsl,sec-v5.4-dma", },
+ { .compatible = "fsl,sec-v5.0-dma", },
+ { .compatible = "fsl,sec-v4.0-dma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_dma_match);
+
+static struct platform_driver caam_dma_driver = {
+ .driver = {
+ .name = "caam-dma",
+ .of_match_table = caam_dma_match,
+ },
+ .probe = caam_dma_probe,
+ .remove = caam_dma_remove,
+};
+module_platform_driver(caam_dma_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NXP CAAM support for SG DMA");
+MODULE_AUTHOR("NXP Semiconductors");
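
[Editor's note: the shared descriptor built in set_caam_dma_desc() above moves
data in CAAM_DMA_CHUNK_SIZE slices, looping until SEQINLEN is exhausted. The
stand-alone user-space sketch below (not part of the patch) only illustrates
the arithmetic behind the value 65280: it is the largest 16-bit quantity that
is still a multiple of the 256-byte maximum CAAM burst size, and it determines
how many slices a given transfer length decomposes into.]

#include <stdio.h>

#define CAAM_DMA_CHUNK_SIZE 65280u	/* == (0xFFFFu / 256) * 256 == 0xFF00 */

int main(void)
{
	unsigned int len = 200000;	/* example transfer length in bytes */
	unsigned int full = len / CAAM_DMA_CHUNK_SIZE;
	unsigned int tail = len % CAAM_DMA_CHUNK_SIZE;

	/* the two properties the driver comment claims */
	printf("fits in u16: %d, multiple of 256: %d\n",
	       CAAM_DMA_CHUNK_SIZE <= 0xFFFFu,
	       CAAM_DMA_CHUNK_SIZE % 256 == 0);
	printf("%u bytes -> %u full chunks + %u-byte tail\n", len, full, tail);
	return 0;
}
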
diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
new file mode 100644
index 00000000..084e34bf
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/Kconfig
@@ -0,0 +1,8 @@
+menuconfig FSL_DPAA2_QDMA
+ tristate "NXP DPAA2 QDMA"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ NXP Data Path Acceleration Architecture 2 QDMA driver,
+ using the NXP MC bus driver.
diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
new file mode 100644
index 00000000..ba599ac6
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the NXP DPAA2 qDMA controllers
+#
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
+
+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
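
[Editor's note: in dpaa2-qdma.c below, dpaa2_qdma_request_desc() carves a
single DMA-pool object into a frame descriptor, three frame-list entries, and
a source/destination descriptor pair, deriving each bus address from the size
of the previous field. The stand-alone sketch below only illustrates that
offset arithmetic; the 32-byte sizes are assumptions for illustration, the
real values come from struct dpaa2_fd and struct dpaa2_frame_list in the
driver headers, which are not shown in this section.]

#include <stdio.h>

#define FD_SIZE 32u	/* assumed sizeof(struct dpaa2_fd) */
#define FL_SIZE 32u	/* assumed sizeof(struct dpaa2_frame_list) */

int main(void)
{
	unsigned int fd_off = 0;			/* frame descriptor */
	unsigned int fl_off = fd_off + FD_SIZE;		/* frame list[3] */
	unsigned int sdd_off = fl_off + 3 * FL_SIZE;	/* source/dest descs */

	printf("fd @ +%u, frame lists @ +%u, SD/DD pair @ +%u\n",
	       fd_off, fl_off, sdd_off);
	return 0;
}
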
diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
new file mode 100644
index 00000000..ad6b03f7
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,986 @@
+/*
+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ *
+ * Copyright 2015-2017 NXP Semiconductor, Inc.
+ * Author: Changming Huang <[email protected]>
+ *
+ * Driver for the NXP QDMA engine with QMan mode.
+ * Channel virtualization is supported through enqueuing of DMA jobs to,
+ * or dequeuing DMA jobs from, different work queues with the QMan portal.
+ * This module can be found on NXP LS2 SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
+
+#include "../virt-dma.h"
+
+#include "../../../drivers/staging/fsl-mc/include/mc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include "fsl_dpdmai_cmd.h"
+#include "fsl_dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct dpaa2_qdma_comp *comp_temp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->comp_free)) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+ if (!comp_temp)
+ goto err;
+ comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
+ GFP_NOWAIT, &comp_temp->fd_bus_addr);
+ if (!comp_temp->fd_virt_addr)
+ goto err;
+
+ comp_temp->fl_virt_addr =
+ (void *)((struct dpaa2_fd *)
+ comp_temp->fd_virt_addr + 1);
+ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
+ sizeof(struct dpaa2_fd);
+ comp_temp->desc_virt_addr =
+ (void *)((struct dpaa2_frame_list *)
+ comp_temp->fl_virt_addr + 3);
+ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
+ sizeof(struct dpaa2_frame_list) * 3;
+
+ comp_temp->qchan = dpaa2_chan;
+ comp_temp->sg_blk_num = 0;
+ INIT_LIST_HEAD(&comp_temp->sg_src_head);
+ INIT_LIST_HEAD(&comp_temp->sg_dst_head);
+ return comp_temp;
+ }
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+ struct dpaa2_qdma_comp, list);
+ list_del(&comp_temp->list);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ comp_temp->qchan = dpaa2_chan;
+err:
+ return comp_temp;
+}
+
+static void dpaa2_qdma_populate_fd(uint32_t format,
+ struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_fd *fd;
+
+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
+ fd->simple.addr = dpaa2_comp->fl_bus_addr;
+ /* Bypass memory translation, Frame list format, short length disable */
+ /* we need to disable BMT if fsl-mc uses IOVA addresses */
+ if (smmu_disable)
+ fd->simple.bpid = QMAN_FD_BMT_ENABLE;
+ fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
+
+ fd->simple.frc = format | QDMA_SER_CTX;
+}
+
+/* first frame list for descriptor buffer */
+static void dpaa2_qdma_populate_first_framel(
+ struct dpaa2_frame_list *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+ /* source and destination descriptor */
+ sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
+ sdd++;
+ sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
+
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ /* first frame list to source descriptor */
+ f_list->addr_lo = dpaa2_comp->desc_bus_addr;
+ f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
+ f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
+ f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = 0; /* not the last frame list */
+}
+
+/* source and destination frame list */
+static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
+ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ f_list->addr_lo = src;
+ f_list->addr_hi = (src >> 32);
+ f_list->data_len.data_len_sl0 = len;
+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = 0; /* not the last frame list */
+
+ f_list++;
+ /* destination frame list to destination buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
+ f_list->addr_lo = dst;
+ f_list->addr_hi = (dst >> 32);
+ f_list->data_len.data_len_sl0 = len;
+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
+ f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
+}
+
+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_frame_list *f_list;
+ uint32_t format;
+
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+
+#ifdef LONG_FORMAT
+ format = QDMA_FD_LONG_FORMAT;
+#else
+ format = QDMA_FD_SHORT_FORMAT;
+#endif
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(format, dpaa2_comp);
+
+ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
+
+#ifdef LONG_FORMAT
+ /* first frame list for descriptor buffer (long format) */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
+
+ f_list++;
+#endif
+
+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct qdma_sg_blk *sg_blk = NULL;
+ dma_addr_t phy_sgb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->sgb_free)) {
+ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
+ dpaa2_chan->sg_blk_pool,
+ GFP_NOWAIT, &phy_sgb);
+ if (!sg_blk) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ return sg_blk;
+ }
+ sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
+ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
+ } else {
+ sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ }
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ return sg_blk;
+}
+
+static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
+ struct dpaa2_qdma_chan *dpaa2_chan,
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ struct scatterlist *dst_sg, u32 dst_nents,
+ struct scatterlist *src_sg, u32 src_nents)
+{
+ struct dpaa2_qdma_sg *src_sge;
+ struct dpaa2_qdma_sg *dst_sge;
+ struct qdma_sg_blk *sg_blk;
+ struct qdma_sg_blk *sg_blk_dst;
+ dma_addr_t src;
+ dma_addr_t dst;
+ uint32_t num;
+ uint32_t blocks;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ int i, j = 0;
+
+ num = min(dst_nents, src_nents);
+ blocks = num / (NUM_SG_PER_BLK - 1);
+ if (num % (NUM_SG_PER_BLK - 1))
+ blocks += 1;
+ if (dpaa2_comp->sg_blk_num < blocks) {
+ len = blocks - dpaa2_comp->sg_blk_num;
+ for (i = 0; i < len; i++) {
+ /* source sg blocks */
+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
+ if (!sg_blk)
+ return 0;
+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
+ /* destination sg blocks */
+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
+ if (!sg_blk)
+ return 0;
+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
+ }
+ } else {
+ len = dpaa2_comp->sg_blk_num - blocks;
+ for (i = 0; i < len; i++) {
+ spin_lock(&dpaa2_chan->queue_lock);
+ /* handle source sg blocks */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
+ /* handle destination sg blocks */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
+ struct qdma_sg_blk, list);
+ list_del(&sg_blk->list);
+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
+ spin_unlock(&dpaa2_chan->queue_lock);
+ }
+ }
+ dpaa2_comp->sg_blk_num = blocks;
+
+ /* get the first source sg phy address */
+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
+ struct qdma_sg_blk, list);
+ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
+ /* get the first destination sg phy address */
  1014. + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
  1015. + struct qdma_sg_blk, list);
  1016. + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
  1017. +
  1018. + for (i = 0; i < blocks; i++) {
  1019. + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
  1020. + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
  1021. +
  1022. + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
  1023. + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
  1024. + if (0 == len)
  1025. + goto fetch;
  1026. + total_len += len;
  1027. + src = sg_dma_address(src_sg);
  1028. + dst = sg_dma_address(dst_sg);
  1029. +
  1030. + /* source SG */
  1031. + src_sge->addr_lo = src;
  1032. + src_sge->addr_hi = (src >> 32);
  1033. + src_sge->data_len.data_len_sl0 = len;
  1034. + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
  1035. + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
  1036. + /* destination SG */
  1037. + dst_sge->addr_lo = dst;
  1038. + dst_sge->addr_hi = (dst >> 32);
  1039. + dst_sge->data_len.data_len_sl0 = len;
  1040. + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
  1041. + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
  1042. +fetch:
  1043. + num--;
  1044. + if (0 == num) {
  1045. + src_sge->ctrl.f = QDMA_SG_F;
  1046. + dst_sge->ctrl.f = QDMA_SG_F;
  1047. + goto end;
  1048. + }
  1049. + dst_sg = sg_next(dst_sg);
  1050. + src_sg = sg_next(src_sg);
  1051. + src_sge++;
  1052. + dst_sge++;
  1053. + if (j == (NUM_SG_PER_BLK - 2)) {
  1054. + /* for next blocks, extension */
  1055. + sg_blk = list_next_entry(sg_blk, list);
  1056. + sg_blk_dst = list_next_entry(sg_blk_dst, list);
  1057. + src_sge->addr_lo = sg_blk->blk_bus_addr;
  1058. + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
  1059. + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
  1060. + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
  1061. + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
  1062. + dst_sge->addr_hi =
  1063. + sg_blk_dst->blk_bus_addr >> 32;
  1064. + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
  1065. + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
  1066. + }
  1067. + }
  1068. + }
  1069. +
  1070. +end:
  1071. + return total_len;
  1072. +}
  1073. +
  1074. +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
  1075. + struct dma_chan *chan,
  1076. + struct scatterlist *dst_sg, u32 dst_nents,
  1077. + struct scatterlist *src_sg, u32 src_nents,
  1078. + unsigned long flags)
  1079. +{
  1080. + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
  1081. + struct dpaa2_qdma_comp *dpaa2_comp;
  1082. + struct dpaa2_frame_list *f_list;
  1083. + struct device *dev = dpaa2_chan->qdma->priv->dev;
  1084. + uint32_t total_len = 0;
  1085. +
  1086. + /* basic sanity checks */
  1087. + if (dst_nents == 0 || src_nents == 0)
  1088. + return NULL;
  1089. +
  1090. + if (dst_sg == NULL || src_sg == NULL)
  1091. + return NULL;
  1092. +
  1093. + /* get the descriptors required */
  1094. + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
  1095. +
  1096. + /* populate Frame descriptor */
  1097. + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
  1098. +
  1099. + /* prepare Scatter gather entry for source and destination */
  1100. + total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
  1101. + dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
  1102. +
  1103. + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
  1104. + /* first frame list for descriptor buffer */
  1105. + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
  1106. + f_list++;
   1108. + /* populate the source and destination frame list table */
  1109. + dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
  1110. + dpaa2_comp->sge_src_bus_addr,
  1111. + total_len, QDMA_FL_FMT_SGE);
  1112. +
  1113. + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
  1114. +}
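+
+/*
+ * A minimal usage sketch (illustrative only, not part of this patch):
+ * a dmaengine client could exercise the SG path above roughly as
+ * follows; my_filter_fn and the wait step are hypothetical.
+ *
+ *	chan = dma_request_channel(mask, my_filter_fn, NULL);
+ *	desc = dmaengine_prep_dma_sg(chan, dst_sgl, dst_nents,
+ *				     src_sgl, src_nents, DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ *	...
+ *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ */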
  1115. +
  1116. +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
  1117. + dma_cookie_t cookie, struct dma_tx_state *txstate)
  1118. +{
  1119. + return dma_cookie_status(chan, cookie, txstate);
  1120. +}
  1121. +
  1122. +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
  1123. +{
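+ /* nothing to free here: completion descriptors are recycled via the
+ * channel's comp_free list instead of being released per descriptor */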
  1124. +}
  1125. +
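+/*
+ * Take the next pending descriptor off the virt-dma queue, move its
+ * completion context to comp_used and enqueue the frame descriptor to
+ * the (currently hard-coded priority 0) DPDMAI tx frame queue; on
+ * enqueue failure the completion context is recycled to comp_free.
+ */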
  1126. +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
  1127. +{
  1128. + struct dpaa2_qdma_comp *dpaa2_comp;
  1129. + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
  1130. + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
  1131. + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
  1132. + struct virt_dma_desc *vdesc;
  1133. + struct dpaa2_fd *fd;
  1134. + int err;
  1135. + unsigned long flags;
  1136. +
  1137. + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
  1138. + spin_lock(&dpaa2_chan->vchan.lock);
  1139. + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
  1140. + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
  1141. + if (!vdesc)
  1142. + goto err_enqueue;
  1143. + dpaa2_comp = to_fsl_qdma_comp(vdesc);
  1144. +
  1145. + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
  1146. +
  1147. + list_del(&vdesc->node);
  1148. + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
  1149. +
   1150. + /* TODO: priority hard-coded to zero */
  1151. + err = dpaa2_io_service_enqueue_fq(NULL,
  1152. + priv->tx_queue_attr[0].fqid, fd);
  1153. + if (err) {
  1154. + list_del(&dpaa2_comp->list);
  1155. + list_add_tail(&dpaa2_comp->list,
  1156. + &dpaa2_chan->comp_free);
  1157. + }
   1159. + }
  1160. +err_enqueue:
  1161. + spin_unlock(&dpaa2_chan->vchan.lock);
  1162. + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
  1163. +}
  1164. +
  1165. +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
  1166. +{
  1167. + struct device *dev = &ls_dev->dev;
  1168. + struct dpaa2_qdma_priv *priv;
  1169. + struct dpaa2_qdma_priv_per_prio *ppriv;
  1170. + uint8_t prio_def = DPDMAI_PRIO_NUM;
  1171. + int err;
  1172. + int i;
  1173. +
  1174. + priv = dev_get_drvdata(dev);
  1175. +
  1176. + priv->dev = dev;
  1177. + priv->dpqdma_id = ls_dev->obj_desc.id;
  1178. +
   1179. + /* Get the handle for the DPDMAI this interface is associated with */
  1180. + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
  1181. + if (err) {
  1182. + dev_err(dev, "dpdmai_open() failed\n");
  1183. + return err;
  1184. + }
  1185. + dev_info(dev, "Opened dpdmai object successfully\n");
  1186. +
  1187. + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
  1188. + &priv->dpdmai_attr);
  1189. + if (err) {
  1190. + dev_err(dev, "dpdmai_get_attributes() failed\n");
  1191. + return err;
  1192. + }
  1193. +
  1194. + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
  1195. + dev_err(dev, "DPDMAI major version mismatch\n"
  1196. + "Found %u.%u, supported version is %u.%u\n",
  1197. + priv->dpdmai_attr.version.major,
  1198. + priv->dpdmai_attr.version.minor,
  1199. + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
  1200. + }
  1201. +
  1202. + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
  1203. + dev_err(dev, "DPDMAI minor version mismatch\n"
  1204. + "Found %u.%u, supported version is %u.%u\n",
  1205. + priv->dpdmai_attr.version.major,
  1206. + priv->dpdmai_attr.version.minor,
  1207. + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
  1208. + }
  1209. +
  1210. + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
  1211. + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
  1212. + if (!ppriv) {
   1213. + dev_err(dev, "kcalloc for ppriv failed\n");
   1214. + return -ENOMEM;
  1215. + }
  1216. + priv->ppriv = ppriv;
  1217. +
  1218. + for (i = 0; i < priv->num_pairs; i++) {
  1219. + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
  1220. + i, &priv->rx_queue_attr[i]);
  1221. + if (err) {
  1222. + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
  1223. + return err;
  1224. + }
  1225. + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
  1226. +
  1227. + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
  1228. + i, &priv->tx_queue_attr[i]);
  1229. + if (err) {
  1230. + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
  1231. + return err;
  1232. + }
  1233. + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
  1234. + ppriv->prio = i;
  1235. + ppriv->priv = priv;
  1236. + ppriv++;
  1237. + }
  1238. +
  1239. + return 0;
  1240. +}
  1241. +
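+/*
+ * FQDAN notification callback: drain the response frame queue into the
+ * per-priority store, then match each dequeued FD back to its in-flight
+ * completion context by FD address. The linear scan over channels and
+ * comp_used entries is cheap for NUM_CH (8) channels.
+ */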
  1242. +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
  1243. +{
  1244. + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
  1245. + struct dpaa2_qdma_priv_per_prio, nctx);
  1246. + struct dpaa2_qdma_priv *priv = ppriv->priv;
  1247. + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
  1248. + struct dpaa2_qdma_chan *qchan;
  1249. + const struct dpaa2_fd *fd;
  1250. + const struct dpaa2_fd *fd_eq;
  1251. + struct dpaa2_dq *dq;
  1252. + int err;
  1253. + int is_last = 0;
  1254. + uint8_t status;
  1255. + int i;
  1256. + int found;
  1257. + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
  1258. +
  1259. + do {
  1260. + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
  1261. + ppriv->store);
  1262. + } while (err);
  1263. +
  1264. + while (!is_last) {
  1265. + do {
  1266. + dq = dpaa2_io_store_next(ppriv->store, &is_last);
  1267. + } while (!is_last && !dq);
  1268. + if (!dq) {
  1269. + dev_err(priv->dev, "FQID returned no valid frames!\n");
  1270. + continue;
  1271. + }
  1272. +
  1273. + /* obtain FD and process the error */
  1274. + fd = dpaa2_dq_fd(dq);
  1275. + status = fd->simple.ctrl & 0xff;
  1276. + if (status)
  1277. + dev_err(priv->dev, "FD error occurred\n");
  1278. + found = 0;
  1279. + for (i = 0; i < n_chans; i++) {
  1280. + qchan = &priv->dpaa2_qdma->chans[i];
  1281. + spin_lock(&qchan->queue_lock);
  1282. + if (list_empty(&qchan->comp_used)) {
  1283. + spin_unlock(&qchan->queue_lock);
  1284. + continue;
  1285. + }
  1286. + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
  1287. + &qchan->comp_used, list) {
  1288. + fd_eq = (struct dpaa2_fd *)
  1289. + dpaa2_comp->fd_virt_addr;
  1290. +
  1291. + if (fd_eq->simple.addr ==
   1292. + fd->simple.addr) {
  1294. + list_del(&dpaa2_comp->list);
  1295. + list_add_tail(&dpaa2_comp->list,
  1296. + &qchan->comp_free);
  1297. +
  1298. + spin_lock(&qchan->vchan.lock);
  1299. + vchan_cookie_complete(
  1300. + &dpaa2_comp->vdesc);
  1301. + spin_unlock(&qchan->vchan.lock);
  1302. + found = 1;
  1303. + break;
  1304. + }
  1305. + }
  1306. + spin_unlock(&qchan->queue_lock);
  1307. + if (found)
  1308. + break;
  1309. + }
  1310. + }
  1311. +
  1312. + dpaa2_io_service_rearm(NULL, ctx);
  1313. +}
  1314. +
  1315. +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
  1316. +{
  1317. + int err, i, num;
  1318. + struct device *dev = priv->dev;
  1319. + struct dpaa2_qdma_priv_per_prio *ppriv;
  1320. +
  1321. + num = priv->num_pairs;
  1322. + ppriv = priv->ppriv;
  1323. + for (i = 0; i < num; i++) {
  1324. + ppriv->nctx.is_cdan = 0;
  1325. + ppriv->nctx.desired_cpu = 1;
  1326. + ppriv->nctx.id = ppriv->rsp_fqid;
  1327. + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
  1328. + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
  1329. + if (err) {
  1330. + dev_err(dev, "Notification register failed\n");
  1331. + goto err_service;
  1332. + }
  1333. +
  1334. + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
  1335. + dev);
  1336. + if (!ppriv->store) {
  1337. + dev_err(dev, "dpaa2_io_store_create() failed\n");
  1338. + goto err_store;
  1339. + }
  1340. +
  1341. + ppriv++;
  1342. + }
  1343. + return 0;
  1344. +
  1345. +err_store:
  1346. + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
  1347. +err_service:
  1348. + ppriv--;
  1349. + while (ppriv >= priv->ppriv) {
  1350. + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
  1351. + dpaa2_io_store_destroy(ppriv->store);
  1352. + ppriv--;
  1353. + }
  1354. + return -1;
  1355. +}
  1356. +
   1357. +static void __cold dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
  1358. +{
  1359. + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
  1360. + int i;
  1361. +
  1362. + for (i = 0; i < priv->num_pairs; i++) {
  1363. + dpaa2_io_store_destroy(ppriv->store);
  1364. + ppriv++;
  1365. + }
  1366. +}
  1367. +
  1368. +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
  1369. +{
  1370. + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
  1371. + int i;
  1372. +
  1373. + for (i = 0; i < priv->num_pairs; i++) {
  1374. + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
  1375. + ppriv++;
  1376. + }
  1377. +}
  1378. +
  1379. +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
  1380. +{
  1381. + int err;
  1382. + struct dpdmai_rx_queue_cfg rx_queue_cfg;
  1383. + struct device *dev = priv->dev;
  1384. + struct dpaa2_qdma_priv_per_prio *ppriv;
  1385. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  1386. + int i, num;
  1387. +
  1388. + num = priv->num_pairs;
  1389. + ppriv = priv->ppriv;
  1390. + for (i = 0; i < num; i++) {
  1391. + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
  1392. + DPDMAI_QUEUE_OPT_DEST;
  1393. + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
  1394. + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
  1395. + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
  1396. + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
  1397. + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
  1398. + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
  1399. + if (err) {
  1400. + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
  1401. + return err;
  1402. + }
  1403. +
  1404. + ppriv++;
  1405. + }
  1406. +
  1407. + return 0;
  1408. +}
  1409. +
  1410. +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
  1411. +{
  1412. + int err = 0;
  1413. + struct device *dev = priv->dev;
  1414. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  1415. + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
  1416. + int i;
  1417. +
  1418. + for (i = 0; i < priv->num_pairs; i++) {
  1419. + ppriv->nctx.qman64 = 0;
  1420. + ppriv->nctx.dpio_id = 0;
  1421. + ppriv++;
  1422. + }
  1423. +
  1424. + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
  1425. + if (err)
  1426. + dev_err(dev, "dpdmai_reset() failed\n");
  1427. +
  1428. + return err;
  1429. +}
  1430. +
  1431. +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
  1432. + struct list_head *head)
  1433. +{
  1434. + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
   1435. + /* free each SG block; step back over the embedded qdma_sg_blk header to recover the original pool address */
  1436. + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
  1437. + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
  1438. + sgb_tmp->blk_virt_addr - 1);
  1439. + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
  1440. + - sizeof(*sgb_tmp);
  1441. + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
  1442. + sgb_tmp->blk_bus_addr);
  1443. + }
   1445. +}
  1446. +
  1447. +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
  1448. + struct list_head *head)
  1449. +{
  1450. + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
  1451. + /* free the QDMA comp resource */
  1452. + list_for_each_entry_safe(comp_tmp, _comp_tmp,
  1453. + head, list) {
  1454. + dma_pool_free(qchan->fd_pool,
  1455. + comp_tmp->fd_virt_addr,
  1456. + comp_tmp->fd_bus_addr);
  1457. + /* free the SG source block on comp */
  1458. + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
  1459. + /* free the SG destination block on comp */
  1460. + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
  1461. + list_del(&comp_tmp->list);
  1462. + kfree(comp_tmp);
  1463. + }
   1465. +}
  1466. +
  1467. +static void __cold dpaa2_dpdmai_free_channels(
  1468. + struct dpaa2_qdma_engine *dpaa2_qdma)
  1469. +{
  1470. + struct dpaa2_qdma_chan *qchan;
  1471. + int num, i;
  1472. +
  1473. + num = dpaa2_qdma->n_chans;
  1474. + for (i = 0; i < num; i++) {
  1475. + qchan = &dpaa2_qdma->chans[i];
  1476. + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
  1477. + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
  1478. + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
  1479. + dma_pool_destroy(qchan->fd_pool);
  1480. + dma_pool_destroy(qchan->sg_blk_pool);
  1481. + }
  1482. +}
  1483. +
  1484. +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
  1485. +{
  1486. + struct dpaa2_qdma_chan *dpaa2_chan;
  1487. + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
  1488. + int i;
  1489. +
  1490. + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
  1491. + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
  1492. + dpaa2_chan = &dpaa2_qdma->chans[i];
  1493. + dpaa2_chan->qdma = dpaa2_qdma;
  1494. + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
  1495. + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
  1496. +
  1497. + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
  1498. + dev, FD_POOL_SIZE, 32, 0);
  1499. + if (!dpaa2_chan->fd_pool)
   1500. + return -ENOMEM;
  1501. + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
  1502. + dev, SG_POOL_SIZE, 32, 0);
  1503. + if (!dpaa2_chan->sg_blk_pool)
   1504. + return -ENOMEM;
  1505. +
  1506. + spin_lock_init(&dpaa2_chan->queue_lock);
  1507. + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
  1508. + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
  1509. + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
  1510. + }
  1511. + return 0;
  1512. +}
  1513. +
  1514. +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
  1515. +{
  1516. + struct dpaa2_qdma_priv *priv;
  1517. + struct device *dev = &dpdmai_dev->dev;
  1518. + struct dpaa2_qdma_engine *dpaa2_qdma;
  1519. + int err;
  1520. +
  1521. + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  1522. + if (!priv)
  1523. + return -ENOMEM;
  1524. + dev_set_drvdata(dev, priv);
  1525. + priv->dpdmai_dev = dpdmai_dev;
  1526. +
  1527. + priv->iommu_domain = iommu_get_domain_for_dev(dev);
  1528. + if (priv->iommu_domain)
  1529. + smmu_disable = false;
  1530. +
  1531. + /* obtain a MC portal */
  1532. + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
  1533. + if (err) {
  1534. + dev_err(dev, "MC portal allocation failed\n");
  1535. + goto err_mcportal;
  1536. + }
  1537. +
  1538. + /* DPDMAI initialization */
  1539. + err = dpaa2_qdma_setup(dpdmai_dev);
  1540. + if (err) {
   1541. + dev_err(dev, "dpaa2_qdma_setup() failed\n");
  1542. + goto err_dpdmai_setup;
  1543. + }
  1544. +
  1545. + /* DPIO */
  1546. + err = dpaa2_qdma_dpio_setup(priv);
  1547. + if (err) {
   1548. + dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
  1549. + goto err_dpio_setup;
  1550. + }
  1551. +
  1552. + /* DPDMAI binding to DPIO */
  1553. + err = dpaa2_dpdmai_bind(priv);
  1554. + if (err) {
  1555. + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
  1556. + goto err_bind;
  1557. + }
  1558. +
  1559. + /* DPDMAI enable */
  1560. + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
  1561. + if (err) {
   1562. + dev_err(dev, "dpdmai_enable() failed\n");
  1563. + goto err_enable;
  1564. + }
  1565. +
  1566. + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
  1567. + if (!dpaa2_qdma) {
  1568. + err = -ENOMEM;
  1569. + goto err_eng;
  1570. + }
  1571. +
  1572. + priv->dpaa2_qdma = dpaa2_qdma;
  1573. + dpaa2_qdma->priv = priv;
  1574. +
  1575. + dpaa2_qdma->n_chans = NUM_CH;
  1576. +
  1577. + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
  1578. + if (err) {
   1579. + dev_err(dev, "QDMA alloc channels failed\n");
  1580. + goto err_reg;
  1581. + }
  1582. +
  1583. + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
  1584. + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
  1585. + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
  1586. + dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
  1587. +
  1588. + dpaa2_qdma->dma_dev.dev = dev;
  1589. + dpaa2_qdma->dma_dev.device_alloc_chan_resources
  1590. + = dpaa2_qdma_alloc_chan_resources;
  1591. + dpaa2_qdma->dma_dev.device_free_chan_resources
  1592. + = dpaa2_qdma_free_chan_resources;
  1593. + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
  1594. + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
  1595. + dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
  1596. + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
  1597. +
  1598. + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
  1599. + if (err) {
  1600. + dev_err(dev, "Can't register NXP QDMA engine.\n");
  1601. + goto err_reg;
  1602. + }
  1603. +
  1604. + return 0;
  1605. +
  1606. +err_reg:
  1607. + dpaa2_dpdmai_free_channels(dpaa2_qdma);
  1608. + kfree(dpaa2_qdma);
  1609. +err_eng:
  1610. + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
  1611. +err_enable:
  1612. + dpaa2_dpdmai_dpio_unbind(priv);
  1613. +err_bind:
   1614. + dpaa2_dpdmai_store_free(priv);
  1615. + dpaa2_dpdmai_dpio_free(priv);
  1616. +err_dpio_setup:
  1617. + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
  1618. +err_dpdmai_setup:
  1619. + fsl_mc_portal_free(priv->mc_io);
  1620. +err_mcportal:
  1621. + kfree(priv->ppriv);
  1622. + kfree(priv);
  1623. + dev_set_drvdata(dev, NULL);
  1624. + return err;
  1625. +}
  1626. +
  1627. +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
  1628. +{
  1629. + struct device *dev;
  1630. + struct dpaa2_qdma_priv *priv;
  1631. + struct dpaa2_qdma_engine *dpaa2_qdma;
  1632. +
  1633. + dev = &ls_dev->dev;
  1634. + priv = dev_get_drvdata(dev);
  1635. + dpaa2_qdma = priv->dpaa2_qdma;
  1636. +
  1637. + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
  1638. + dpaa2_dpdmai_dpio_unbind(priv);
   1639. + dpaa2_dpdmai_store_free(priv);
  1640. + dpaa2_dpdmai_dpio_free(priv);
  1641. + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
  1642. + fsl_mc_portal_free(priv->mc_io);
  1643. + dev_set_drvdata(dev, NULL);
   1644. + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
   1645. + dpaa2_dpdmai_free_channels(dpaa2_qdma);
   1646. +
  1647. + kfree(priv);
  1648. + kfree(dpaa2_qdma);
  1649. +
  1650. + return 0;
  1651. +}
  1652. +
  1653. +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
  1654. + {
  1655. + .vendor = FSL_MC_VENDOR_FREESCALE,
  1656. + .obj_type = "dpdmai",
  1657. + },
  1658. + { .vendor = 0x0 }
  1659. +};
  1660. +
  1661. +static struct fsl_mc_driver dpaa2_qdma_driver = {
  1662. + .driver = {
  1663. + .name = "dpaa2-qdma",
  1664. + .owner = THIS_MODULE,
  1665. + },
  1666. + .probe = dpaa2_qdma_probe,
  1667. + .remove = dpaa2_qdma_remove,
  1668. + .match_id_table = dpaa2_qdma_id_table
  1669. +};
  1670. +
  1671. +static int __init dpaa2_qdma_driver_init(void)
  1672. +{
   1673. + return fsl_mc_driver_register(&dpaa2_qdma_driver);
  1674. +}
  1675. +late_initcall(dpaa2_qdma_driver_init);
  1676. +
  1677. +static void __exit fsl_qdma_exit(void)
  1678. +{
   1679. + fsl_mc_driver_unregister(&dpaa2_qdma_driver);
  1680. +}
  1681. +module_exit(fsl_qdma_exit);
  1682. +
  1683. +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
  1684. +MODULE_LICENSE("Dual BSD/GPL");
  1685. diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
  1686. new file mode 100644
  1687. index 00000000..71a00db8
  1688. --- /dev/null
  1689. +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
  1690. @@ -0,0 +1,262 @@
  1691. +/* Copyright 2015 NXP Semiconductor Inc.
  1692. + *
  1693. + * Redistribution and use in source and binary forms, with or without
  1694. + * modification, are permitted provided that the following conditions are met:
  1695. + * * Redistributions of source code must retain the above copyright
  1696. + * notice, this list of conditions and the following disclaimer.
  1697. + * * Redistributions in binary form must reproduce the above copyright
  1698. + * notice, this list of conditions and the following disclaimer in the
  1699. + * documentation and/or other materials provided with the distribution.
  1700. + * * Neither the name of NXP Semiconductor nor the
  1701. + * names of its contributors may be used to endorse or promote products
  1702. + * derived from this software without specific prior written permission.
  1703. + *
  1704. + *
  1705. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1706. + * GNU General Public License ("GPL") as published by the Free Software
  1707. + * Foundation, either version 2 of that License or (at your option) any
  1708. + * later version.
  1709. + *
  1710. + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
  1711. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1712. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1713. + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
  1714. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1715. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1716. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1717. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1718. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1719. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1720. + */
  1721. +
  1722. +#ifndef __DPAA2_QDMA_H
  1723. +#define __DPAA2_QDMA_H
  1724. +
  1725. +#define LONG_FORMAT 1
  1726. +
  1727. +#define DPAA2_QDMA_STORE_SIZE 16
  1728. +#define NUM_CH 8
  1729. +#define NUM_SG_PER_BLK 16
  1730. +
  1731. +#define QDMA_DMR_OFFSET 0x0
  1732. +#define QDMA_DQ_EN (0 << 30)
  1733. +#define QDMA_DQ_DIS (1 << 30)
  1734. +
  1735. +#define QDMA_DSR_M_OFFSET 0x10004
  1736. +
  1737. +struct dpaa2_qdma_sd_d {
  1738. + uint32_t rsv:32;
  1739. + union {
  1740. + struct {
   1741. + uint32_t ssd:12; /* source stride distance */
   1742. + uint32_t sss:12; /* source stride size */
  1743. + uint32_t rsv1:8;
  1744. + } sdf;
  1745. + struct {
  1746. + uint32_t dsd:12; /* Destination stride distance */
  1747. + uint32_t dss:12; /* Destination stride size */
  1748. + uint32_t rsv2:8;
  1749. + } ddf;
  1750. + } df;
  1751. + uint32_t rbpcmd; /* Route-by-port command */
  1752. + uint32_t cmd;
  1753. +} __attribute__((__packed__));
  1754. +/* Source descriptor command read transaction type for RBP=0:
  1755. + coherent copy of cacheable memory */
  1756. +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
  1757. +/* Destination descriptor command write transaction type for RBP=0:
  1758. + coherent copy of cacheable memory */
  1759. +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
  1760. +
  1761. +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
  1762. +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
  1763. +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
  1764. +#define QDMA_SG_SL_SHORT 0x1 /* short length */
   1765. +#define QDMA_SG_SL_LONG 0x0 /* long length */
  1766. +#define QDMA_SG_F 0x1 /* last sg entry */
  1767. +struct dpaa2_qdma_sg {
  1768. + uint32_t addr_lo; /* address 0:31 */
  1769. + uint32_t addr_hi:17; /* address 32:48 */
  1770. + uint32_t rsv:15;
  1771. + union {
  1772. + uint32_t data_len_sl0; /* SL=0, the long format */
  1773. + struct {
  1774. + uint32_t len:17; /* SL=1, the short format */
  1775. + uint32_t reserve:3;
  1776. + uint32_t sf:1;
  1777. + uint32_t sr:1;
  1778. + uint32_t size:10; /* buff size */
  1779. + } data_len_sl1;
  1780. + } data_len; /* AVAIL_LENGTH */
  1781. + struct {
  1782. + uint32_t bpid:14;
  1783. + uint32_t ivp:1;
  1784. + uint32_t mbt:1;
  1785. + uint32_t offset:12;
  1786. + uint32_t fmt:2;
  1787. + uint32_t sl:1;
  1788. + uint32_t f:1;
  1789. + } ctrl;
  1790. +} __attribute__((__packed__));
  1791. +
  1792. +#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
  1793. +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
  1794. +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
   1795. +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
   1796. +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
  1797. +
  1798. +#define QDMA_SB_FRAME (0 << 28) /* single frame */
  1799. +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
  1800. +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
  1801. +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
  1802. +
  1803. +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
  1804. +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
  1805. +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
  1806. +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
  1807. +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
   1808. +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
  1809. +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
  1810. +
  1811. +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
  1812. +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
  1813. +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
  1814. +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
  1815. +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
  1816. +
  1817. +#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
  1818. +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
  1819. +#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
   1820. +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
  1821. +#define QDMA_FL_SL_LONG 0x0 /* long length */
  1822. +#define QDMA_FL_SL_SHORT 0x1 /* short length */
  1823. +#define QDMA_FL_F 0x1 /* last frame list bit */
   1824. +/* Description of the frame list table structure */
  1825. +struct dpaa2_frame_list {
  1826. + uint32_t addr_lo; /* lower 32 bits of address */
  1827. + uint32_t addr_hi:17; /* upper 17 bits of address */
  1828. + uint32_t resrvd:15;
  1829. + union {
   1830. + uint32_t data_len_sl0; /* SL=0: data length is 32-bit */
  1831. + struct {
   1832. + uint32_t data_len:18; /* SL=1: data length is 18-bit */
  1833. + uint32_t resrvd:2;
  1834. + uint32_t mem:12; /* Valid only when SL=1 */
  1835. + } data_len_sl1;
  1836. + } data_len;
  1837. + /* word 4 */
  1838. + uint32_t bpid:14; /* Frame buffer pool ID */
  1839. + uint32_t ivp:1; /* Invalid Pool ID. */
  1840. + uint32_t bmt:1; /* Bypass Memory Translation */
  1841. + uint32_t offset:12; /* Frame offset */
  1842. + uint32_t fmt:2; /* Frame Format */
  1843. + uint32_t sl:1; /* Short Length */
  1844. + uint32_t f:1; /* Final bit */
  1845. +
  1846. + uint32_t frc; /* Frame Context */
  1847. + /* word 6 */
  1848. + uint32_t err:8; /* Frame errors */
  1849. + uint32_t resrvd0:8;
  1850. + uint32_t asal:4; /* accelerator-specific annotation length */
  1851. + uint32_t resrvd1:1;
  1852. + uint32_t ptv2:1;
  1853. + uint32_t ptv1:1;
  1854. + uint32_t pta:1; /* pass-through annotation */
  1855. + uint32_t resrvd2:8;
  1856. +
   1857. + uint32_t flc_lo; /* lower 32 bits of flow context */
   1858. + uint32_t flc_hi; /* upper 32 bits of flow context */
  1859. +} __attribute__((__packed__));
  1860. +
  1861. +struct dpaa2_qdma_chan {
  1862. + struct virt_dma_chan vchan;
  1863. + struct virt_dma_desc vdesc;
  1864. + enum dma_status status;
  1865. + struct dpaa2_qdma_engine *qdma;
  1866. +
  1867. + struct mutex dpaa2_queue_mutex;
  1868. + spinlock_t queue_lock;
  1869. + struct dma_pool *fd_pool;
  1870. + struct dma_pool *sg_blk_pool;
  1871. +
  1872. + struct list_head comp_used;
  1873. + struct list_head comp_free;
  1874. +
  1875. + struct list_head sgb_free;
  1876. +};
  1877. +
  1878. +struct qdma_sg_blk {
  1879. + dma_addr_t blk_bus_addr;
  1880. + void *blk_virt_addr;
  1881. + struct list_head list;
  1882. +};
  1883. +
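+/*
+ * Per-transaction completion context. Instances cycle between a
+ * channel's comp_free and comp_used lists: taken (or allocated) at
+ * prepare time, moved to comp_used when the FD is enqueued, and
+ * returned to comp_free once the response FD is matched in the FQDAN
+ * callback.
+ */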
  1884. +struct dpaa2_qdma_comp {
  1885. + dma_addr_t fd_bus_addr;
  1886. + dma_addr_t fl_bus_addr;
  1887. + dma_addr_t desc_bus_addr;
  1888. + dma_addr_t sge_src_bus_addr;
  1889. + dma_addr_t sge_dst_bus_addr;
  1890. + void *fd_virt_addr;
  1891. + void *fl_virt_addr;
  1892. + void *desc_virt_addr;
  1893. + void *sg_src_virt_addr;
  1894. + void *sg_dst_virt_addr;
  1895. + struct qdma_sg_blk *sg_blk;
  1896. + uint32_t sg_blk_num;
  1897. + struct list_head sg_src_head;
  1898. + struct list_head sg_dst_head;
  1899. + struct dpaa2_qdma_chan *qchan;
  1900. + struct virt_dma_desc vdesc;
  1901. + struct list_head list;
  1902. +};
  1903. +
  1904. +struct dpaa2_qdma_engine {
  1905. + struct dma_device dma_dev;
  1906. + u32 n_chans;
  1907. + struct dpaa2_qdma_chan chans[NUM_CH];
  1908. +
  1909. + struct dpaa2_qdma_priv *priv;
  1910. +};
  1911. +
  1912. +/*
  1913. + * dpaa2_qdma_priv - driver private data
  1914. + */
  1915. +struct dpaa2_qdma_priv {
  1916. + int dpqdma_id;
  1917. +
  1918. + struct iommu_domain *iommu_domain;
  1919. + struct dpdmai_attr dpdmai_attr;
  1920. + struct device *dev;
  1921. + struct fsl_mc_io *mc_io;
  1922. + struct fsl_mc_device *dpdmai_dev;
  1923. +
  1924. + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
  1925. + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
  1926. +
  1927. + uint8_t num_pairs;
  1928. +
  1929. + struct dpaa2_qdma_engine *dpaa2_qdma;
  1930. + struct dpaa2_qdma_priv_per_prio *ppriv;
  1931. +};
  1932. +
  1933. +struct dpaa2_qdma_priv_per_prio {
  1934. + int req_fqid;
  1935. + int rsp_fqid;
  1936. + int prio;
  1937. +
  1938. + struct dpaa2_io_store *store;
  1939. + struct dpaa2_io_notification_ctx nctx;
  1940. +
  1941. + struct dpaa2_qdma_priv *priv;
  1942. +};
  1943. +
   1944. +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
  1945. +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
  1946. + sizeof(struct dpaa2_frame_list) * 3 + \
  1947. + sizeof(struct dpaa2_qdma_sd_d) * 2)
  1948. +
   1949. +/* SG pool size: one qdma_sg_blk header + NUM_SG_PER_BLK SG entries */
  1950. +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
  1951. + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
  1952. +#endif /* __DPAA2_QDMA_H */
  1953. diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
  1954. new file mode 100644
  1955. index 00000000..ad13fc1e
  1956. --- /dev/null
  1957. +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
  1958. @@ -0,0 +1,454 @@
  1959. +/* Copyright 2013-2015 Freescale Semiconductor Inc.
  1960. + *
  1961. + * Redistribution and use in source and binary forms, with or without
  1962. + * modification, are permitted provided that the following conditions are met:
  1963. + * * Redistributions of source code must retain the above copyright
  1964. + * notice, this list of conditions and the following disclaimer.
  1965. + * * Redistributions in binary form must reproduce the above copyright
  1966. + * notice, this list of conditions and the following disclaimer in the
  1967. + * documentation and/or other materials provided with the distribution.
  1968. + * * Neither the name of the above-listed copyright holders nor the
  1969. + * names of any contributors may be used to endorse or promote products
  1970. + * derived from this software without specific prior written permission.
  1971. + *
  1972. + *
  1973. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1974. + * GNU General Public License ("GPL") as published by the Free Software
  1975. + * Foundation, either version 2 of that License or (at your option) any
  1976. + * later version.
  1977. + *
  1978. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  1979. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  1980. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  1981. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  1982. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  1983. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  1984. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  1985. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  1986. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  1987. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  1988. + * POSSIBILITY OF SUCH DAMAGE.
  1989. + */
  1990. +#include <linux/types.h>
  1991. +#include <linux/io.h>
  1992. +#include "fsl_dpdmai.h"
  1993. +#include "fsl_dpdmai_cmd.h"
  1994. +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
  1995. +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
  1996. +
  1997. +int dpdmai_open(struct fsl_mc_io *mc_io,
  1998. + uint32_t cmd_flags,
  1999. + int dpdmai_id,
  2000. + uint16_t *token)
  2001. +{
  2002. + struct mc_command cmd = { 0 };
  2003. + int err;
  2004. +
  2005. + /* prepare command */
  2006. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
  2007. + cmd_flags,
  2008. + 0);
  2009. + DPDMAI_CMD_OPEN(cmd, dpdmai_id);
  2010. +
   2011. + /* send command to MC */
  2012. + err = mc_send_command(mc_io, &cmd);
  2013. + if (err)
  2014. + return err;
  2015. +
  2016. + /* retrieve response parameters */
  2017. + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
  2018. +
  2019. + return 0;
  2020. +}
  2021. +
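+/*
+ * The commands below all follow the same shape: encode a header with
+ * the command ID and authentication token, send it over the MC portal,
+ * and (for queries) decode the response. A typical control flow,
+ * sketched for illustration only:
+ *
+ *	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
+ *	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
+ *	err = dpdmai_enable(mc_io, 0, token);
+ *	...
+ *	err = dpdmai_disable(mc_io, 0, token);
+ *	err = dpdmai_close(mc_io, 0, token);
+ */
+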
  2022. +int dpdmai_close(struct fsl_mc_io *mc_io,
  2023. + uint32_t cmd_flags,
  2024. + uint16_t token)
  2025. +{
  2026. + struct mc_command cmd = { 0 };
  2027. +
  2028. + /* prepare command */
  2029. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
  2030. + cmd_flags, token);
  2031. +
   2032. + /* send command to MC */
  2033. + return mc_send_command(mc_io, &cmd);
  2034. +}
  2035. +
  2036. +int dpdmai_create(struct fsl_mc_io *mc_io,
  2037. + uint32_t cmd_flags,
  2038. + const struct dpdmai_cfg *cfg,
  2039. + uint16_t *token)
  2040. +{
  2041. + struct mc_command cmd = { 0 };
  2042. + int err;
  2043. +
  2044. + /* prepare command */
  2045. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
  2046. + cmd_flags,
  2047. + 0);
  2048. + DPDMAI_CMD_CREATE(cmd, cfg);
  2049. +
   2050. + /* send command to MC */
  2051. + err = mc_send_command(mc_io, &cmd);
  2052. + if (err)
  2053. + return err;
  2054. +
  2055. + /* retrieve response parameters */
  2056. + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
  2057. +
  2058. + return 0;
  2059. +}
  2060. +
  2061. +int dpdmai_destroy(struct fsl_mc_io *mc_io,
  2062. + uint32_t cmd_flags,
  2063. + uint16_t token)
  2064. +{
  2065. + struct mc_command cmd = { 0 };
  2066. +
  2067. + /* prepare command */
  2068. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
  2069. + cmd_flags,
  2070. + token);
  2071. +
   2072. + /* send command to MC */
  2073. + return mc_send_command(mc_io, &cmd);
  2074. +}
  2075. +
  2076. +int dpdmai_enable(struct fsl_mc_io *mc_io,
  2077. + uint32_t cmd_flags,
  2078. + uint16_t token)
  2079. +{
  2080. + struct mc_command cmd = { 0 };
  2081. +
  2082. + /* prepare command */
  2083. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
  2084. + cmd_flags,
  2085. + token);
  2086. +
   2087. + /* send command to MC */
  2088. + return mc_send_command(mc_io, &cmd);
  2089. +}
  2090. +
  2091. +int dpdmai_disable(struct fsl_mc_io *mc_io,
  2092. + uint32_t cmd_flags,
  2093. + uint16_t token)
  2094. +{
  2095. + struct mc_command cmd = { 0 };
  2096. +
  2097. + /* prepare command */
  2098. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
  2099. + cmd_flags,
  2100. + token);
  2101. +
   2102. + /* send command to MC */
  2103. + return mc_send_command(mc_io, &cmd);
  2104. +}
  2105. +
  2106. +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
  2107. + uint32_t cmd_flags,
  2108. + uint16_t token,
  2109. + int *en)
  2110. +{
  2111. + struct mc_command cmd = { 0 };
  2112. + int err;
  2113. + /* prepare command */
  2114. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
  2115. + cmd_flags,
  2116. + token);
  2117. +
   2118. + /* send command to MC */
  2119. + err = mc_send_command(mc_io, &cmd);
  2120. + if (err)
  2121. + return err;
  2122. +
  2123. + /* retrieve response parameters */
  2124. + DPDMAI_RSP_IS_ENABLED(cmd, *en);
  2125. +
  2126. + return 0;
  2127. +}
  2128. +
  2129. +int dpdmai_reset(struct fsl_mc_io *mc_io,
  2130. + uint32_t cmd_flags,
  2131. + uint16_t token)
  2132. +{
  2133. + struct mc_command cmd = { 0 };
  2134. +
  2135. + /* prepare command */
  2136. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
  2137. + cmd_flags,
  2138. + token);
  2139. +
   2140. + /* send command to MC */
  2141. + return mc_send_command(mc_io, &cmd);
  2142. +}
  2143. +
  2144. +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
  2145. + uint32_t cmd_flags,
  2146. + uint16_t token,
  2147. + uint8_t irq_index,
  2148. + int *type,
  2149. + struct dpdmai_irq_cfg *irq_cfg)
  2150. +{
  2151. + struct mc_command cmd = { 0 };
  2152. + int err;
  2153. +
  2154. + /* prepare command */
  2155. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
  2156. + cmd_flags,
  2157. + token);
  2158. + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
  2159. +
   2160. + /* send command to MC */
  2161. + err = mc_send_command(mc_io, &cmd);
  2162. + if (err)
  2163. + return err;
  2164. +
  2165. + /* retrieve response parameters */
  2166. + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
  2167. +
  2168. + return 0;
  2169. +}
  2170. +
  2171. +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
  2172. + uint32_t cmd_flags,
  2173. + uint16_t token,
  2174. + uint8_t irq_index,
  2175. + struct dpdmai_irq_cfg *irq_cfg)
  2176. +{
  2177. + struct mc_command cmd = { 0 };
  2178. +
  2179. + /* prepare command */
  2180. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
  2181. + cmd_flags,
  2182. + token);
  2183. + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
  2184. +
   2185. + /* send command to MC */
  2186. + return mc_send_command(mc_io, &cmd);
  2187. +}
  2188. +
  2189. +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
  2190. + uint32_t cmd_flags,
  2191. + uint16_t token,
  2192. + uint8_t irq_index,
  2193. + uint8_t *en)
  2194. +{
  2195. + struct mc_command cmd = { 0 };
  2196. + int err;
  2197. +
  2198. + /* prepare command */
  2199. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
  2200. + cmd_flags,
  2201. + token);
  2202. + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
  2203. +
   2204. + /* send command to MC */
  2205. + err = mc_send_command(mc_io, &cmd);
  2206. + if (err)
  2207. + return err;
  2208. +
  2209. + /* retrieve response parameters */
  2210. + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
  2211. +
  2212. + return 0;
  2213. +}
  2214. +
  2215. +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
  2216. + uint32_t cmd_flags,
  2217. + uint16_t token,
  2218. + uint8_t irq_index,
  2219. + uint8_t en)
  2220. +{
  2221. + struct mc_command cmd = { 0 };
  2222. +
  2223. + /* prepare command */
  2224. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
  2225. + cmd_flags,
  2226. + token);
  2227. + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
  2228. +
   2229. + /* send command to MC */
  2230. + return mc_send_command(mc_io, &cmd);
  2231. +}
  2232. +
  2233. +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
  2234. + uint32_t cmd_flags,
  2235. + uint16_t token,
  2236. + uint8_t irq_index,
  2237. + uint32_t *mask)
  2238. +{
  2239. + struct mc_command cmd = { 0 };
  2240. + int err;
  2241. +
  2242. + /* prepare command */
  2243. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
  2244. + cmd_flags,
  2245. + token);
  2246. + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
  2247. +
   2248. + /* send command to MC */
  2249. + err = mc_send_command(mc_io, &cmd);
  2250. + if (err)
  2251. + return err;
  2252. +
  2253. + /* retrieve response parameters */
  2254. + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
  2255. +
  2256. + return 0;
  2257. +}
  2258. +
  2259. +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
  2260. + uint32_t cmd_flags,
  2261. + uint16_t token,
  2262. + uint8_t irq_index,
  2263. + uint32_t mask)
  2264. +{
  2265. + struct mc_command cmd = { 0 };
  2266. +
  2267. + /* prepare command */
  2268. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
  2269. + cmd_flags,
  2270. + token);
  2271. + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
  2272. +
   2273. + /* send command to MC */
  2274. + return mc_send_command(mc_io, &cmd);
  2275. +}
  2276. +
  2277. +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
  2278. + uint32_t cmd_flags,
  2279. + uint16_t token,
  2280. + uint8_t irq_index,
  2281. + uint32_t *status)
  2282. +{
  2283. + struct mc_command cmd = { 0 };
  2284. + int err;
  2285. +
  2286. + /* prepare command */
  2287. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
  2288. + cmd_flags,
  2289. + token);
  2290. + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
  2291. +
   2292. + /* send command to MC */
  2293. + err = mc_send_command(mc_io, &cmd);
  2294. + if (err)
  2295. + return err;
  2296. +
  2297. + /* retrieve response parameters */
  2298. + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
  2299. +
  2300. + return 0;
  2301. +}
  2302. +
  2303. +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
  2304. + uint32_t cmd_flags,
  2305. + uint16_t token,
  2306. + uint8_t irq_index,
  2307. + uint32_t status)
  2308. +{
  2309. + struct mc_command cmd = { 0 };
  2310. +
  2311. + /* prepare command */
  2312. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
  2313. + cmd_flags,
  2314. + token);
  2315. + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
  2316. +
   2317. + /* send command to MC */
  2318. + return mc_send_command(mc_io, &cmd);
  2319. +}
  2320. +
  2321. +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
  2322. + uint32_t cmd_flags,
  2323. + uint16_t token,
  2324. + struct dpdmai_attr *attr)
  2325. +{
  2326. + struct mc_command cmd = { 0 };
  2327. + int err;
  2328. +
  2329. + /* prepare command */
  2330. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
  2331. + cmd_flags,
  2332. + token);
  2333. +
   2334. + /* send command to MC */
  2335. + err = mc_send_command(mc_io, &cmd);
  2336. + if (err)
  2337. + return err;
  2338. +
  2339. + /* retrieve response parameters */
  2340. + DPDMAI_RSP_GET_ATTR(cmd, attr);
  2341. +
  2342. + return 0;
  2343. +}
  2344. +
  2345. +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
  2346. + uint32_t cmd_flags,
  2347. + uint16_t token,
  2348. + uint8_t priority,
  2349. + const struct dpdmai_rx_queue_cfg *cfg)
  2350. +{
  2351. + struct mc_command cmd = { 0 };
  2352. +
  2353. + /* prepare command */
  2354. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
  2355. + cmd_flags,
  2356. + token);
  2357. + DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
  2358. +
   2359. + /* send command to MC */
  2360. + return mc_send_command(mc_io, &cmd);
  2361. +}
  2362. +
  2363. +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
  2364. + uint32_t cmd_flags,
  2365. + uint16_t token,
  2366. + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
  2367. +{
  2368. + struct mc_command cmd = { 0 };
  2369. + int err;
  2370. +
  2371. + /* prepare command */
  2372. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
  2373. + cmd_flags,
  2374. + token);
  2375. + DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
  2376. +
   2377. + /* send command to MC */
  2378. + err = mc_send_command(mc_io, &cmd);
  2379. + if (err)
  2380. + return err;
  2381. +
  2382. + /* retrieve response parameters */
  2383. + DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
  2384. +
  2385. + return 0;
  2386. +}
  2387. +
  2388. +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
  2389. + uint32_t cmd_flags,
  2390. + uint16_t token,
  2391. + uint8_t priority,
  2392. + struct dpdmai_tx_queue_attr *attr)
  2393. +{
  2394. + struct mc_command cmd = { 0 };
  2395. + int err;
  2396. +
  2397. + /* prepare command */
  2398. + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
  2399. + cmd_flags,
  2400. + token);
  2401. + DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
  2402. +
   2403. + /* send command to MC */
  2404. + err = mc_send_command(mc_io, &cmd);
  2405. + if (err)
  2406. + return err;
  2407. +
  2408. + /* retrieve response parameters */
  2409. + DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
  2410. +
  2411. + return 0;
  2412. +}
  2413. diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
  2414. new file mode 100644
  2415. index 00000000..e931ce16
  2416. --- /dev/null
  2417. +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
  2418. @@ -0,0 +1,521 @@
  2419. +/* Copyright 2013-2015 Freescale Semiconductor Inc.
  2420. + *
  2421. + * Redistribution and use in source and binary forms, with or without
  2422. + * modification, are permitted provided that the following conditions are met:
  2423. + * * Redistributions of source code must retain the above copyright
  2424. + * notice, this list of conditions and the following disclaimer.
  2425. + * * Redistributions in binary form must reproduce the above copyright
  2426. + * notice, this list of conditions and the following disclaimer in the
  2427. + * documentation and/or other materials provided with the distribution.
  2428. + * * Neither the name of the above-listed copyright holders nor the
  2429. + * names of any contributors may be used to endorse or promote products
  2430. + * derived from this software without specific prior written permission.
  2431. + *
  2432. + *
  2433. + * ALTERNATIVELY, this software may be distributed under the terms of the
  2434. + * GNU General Public License ("GPL") as published by the Free Software
  2435. + * Foundation, either version 2 of that License or (at your option) any
  2436. + * later version.
  2437. + *
  2438. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  2439. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  2440. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  2441. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  2442. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  2443. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  2444. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  2445. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  2446. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  2447. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  2448. + * POSSIBILITY OF SUCH DAMAGE.
  2449. + */
  2450. +#ifndef __FSL_DPDMAI_H
  2451. +#define __FSL_DPDMAI_H
  2452. +
  2453. +struct fsl_mc_io;
  2454. +
  2455. +/* Data Path DMA Interface API
  2456. + * Contains initialization APIs and runtime control APIs for DPDMAI
  2457. + */
  2458. +
  2459. +/* General DPDMAI macros */
  2460. +
  2461. +/**
  2462. + * Maximum number of Tx/Rx priorities per DPDMAI object
  2463. + */
  2464. +#define DPDMAI_PRIO_NUM 2
  2465. +
  2466. +/**
  2467. + * All queues considered; see dpdmai_set_rx_queue()
  2468. + */
  2469. +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
  2470. +
  2471. +/**
  2472. + * dpdmai_open() - Open a control session for the specified object
  2473. + * @mc_io: Pointer to MC portal's I/O object
  2474. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2475. + * @dpdmai_id: DPDMAI unique ID
  2476. + * @token: Returned token; use in subsequent API calls
  2477. + *
  2478. + * This function can be used to open a control session for an
  2479. + * already created object; an object may have been declared in
  2480. + * the DPL or by calling the dpdmai_create() function.
  2481. + * This function returns a unique authentication token,
  2482. + * associated with the specific object ID and the specific MC
  2483. + * portal; this token must be used in all subsequent commands for
  2484. + * this specific object.
  2485. + *
  2486. + * Return: '0' on Success; Error code otherwise.
  2487. + */
  2488. +int dpdmai_open(struct fsl_mc_io *mc_io,
  2489. + uint32_t cmd_flags,
  2490. + int dpdmai_id,
  2491. + uint16_t *token);
  2492. +
  2493. +/**
  2494. + * dpdmai_close() - Close the control session of the object
  2495. + * @mc_io: Pointer to MC portal's I/O object
  2496. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2497. + * @token: Token of DPDMAI object
  2498. + *
  2499. + * After this function is called, no further operations are
  2500. + * allowed on the object without opening a new control session.
  2501. + *
  2502. + * Return: '0' on Success; Error code otherwise.
  2503. + */
  2504. +int dpdmai_close(struct fsl_mc_io *mc_io,
  2505. + uint32_t cmd_flags,
  2506. + uint16_t token);
  2507. +
  2508. +/**
  2509. + * struct dpdmai_cfg - Structure representing DPDMAI configuration
  2510. + * @priorities: Priorities for the DMA hardware processing; valid priorities are
  2511. + * configured with values 1-8; the entry following last valid entry
  2512. + * should be configured with 0
  2513. + */
  2514. +struct dpdmai_cfg {
  2515. + uint8_t priorities[DPDMAI_PRIO_NUM];
  2516. +};
  2517. +
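+/*
+ * Example (illustrative only): a two-priority configuration; trailing
+ * unused entries stay 0 per the rule above.
+ *
+ *	struct dpdmai_cfg cfg = {
+ *		.priorities = { 1, 2 },
+ *	};
+ */
+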
  2518. +/**
  2519. + * dpdmai_create() - Create the DPDMAI object
  2520. + * @mc_io: Pointer to MC portal's I/O object
  2521. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2522. + * @cfg: Configuration structure
  2523. + * @token: Returned token; use in subsequent API calls
  2524. + *
  2525. + * Create the DPDMAI object, allocate required resources and
  2526. + * perform required initialization.
  2527. + *
  2528. + * The object can be created either by declaring it in the
  2529. + * DPL file, or by calling this function.
  2530. + *
  2531. + * This function returns a unique authentication token,
  2532. + * associated with the specific object ID and the specific MC
  2533. + * portal; this token must be used in all subsequent calls to
  2534. + * this specific object. For objects that are created using the
  2535. + * DPL file, call dpdmai_open() function to get an authentication
  2536. + * token first.
  2537. + *
  2538. + * Return: '0' on Success; Error code otherwise.
  2539. + */
  2540. +int dpdmai_create(struct fsl_mc_io *mc_io,
  2541. + uint32_t cmd_flags,
  2542. + const struct dpdmai_cfg *cfg,
  2543. + uint16_t *token);
  2544. +
  2545. +/**
  2546. + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
  2547. + * @mc_io: Pointer to MC portal's I/O object
  2548. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2549. + * @token: Token of DPDMAI object
  2550. + *
  2551. + * Return: '0' on Success; error code otherwise.
  2552. + */
  2553. +int dpdmai_destroy(struct fsl_mc_io *mc_io,
  2554. + uint32_t cmd_flags,
  2555. + uint16_t token);
  2556. +
  2557. +/**
  2558. + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
  2559. + * @mc_io: Pointer to MC portal's I/O object
  2560. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2561. + * @token: Token of DPDMAI object
  2562. + *
  2563. + * Return: '0' on Success; Error code otherwise.
  2564. + */
  2565. +int dpdmai_enable(struct fsl_mc_io *mc_io,
  2566. + uint32_t cmd_flags,
  2567. + uint16_t token);
  2568. +
  2569. +/**
  2570. + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
  2571. + * @mc_io: Pointer to MC portal's I/O object
  2572. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2573. + * @token: Token of DPDMAI object
  2574. + *
  2575. + * Return: '0' on Success; Error code otherwise.
  2576. + */
  2577. +int dpdmai_disable(struct fsl_mc_io *mc_io,
  2578. + uint32_t cmd_flags,
  2579. + uint16_t token);
  2580. +
  2581. +/**
  2582. + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
  2583. + * @mc_io: Pointer to MC portal's I/O object
  2584. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2585. + * @token: Token of DPDMAI object
  2586. + * @en: Returns '1' if object is enabled; '0' otherwise
  2587. + *
  2588. + * Return: '0' on Success; Error code otherwise.
  2589. + */
  2590. +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
  2591. + uint32_t cmd_flags,
  2592. + uint16_t token,
  2593. + int *en);
  2594. +
  2595. +/**
  2596. + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
  2597. + * @mc_io: Pointer to MC portal's I/O object
  2598. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2599. + * @token: Token of DPDMAI object
  2600. + *
  2601. + * Return: '0' on Success; Error code otherwise.
  2602. + */
  2603. +int dpdmai_reset(struct fsl_mc_io *mc_io,
  2604. + uint32_t cmd_flags,
  2605. + uint16_t token);
  2606. +
  2607. +/**
  2608. + * struct dpdmai_irq_cfg - IRQ configuration
  2609. + * @addr: Address that must be written to signal a message-based interrupt
  2610. + * @val: Value to write into irq_addr address
  2611. + * @irq_num: A user defined number associated with this IRQ
  2612. + */
  2613. +struct dpdmai_irq_cfg {
  2614. + uint64_t addr;
  2615. + uint32_t val;
  2616. + int irq_num;
  2617. +};
  2618. +
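+/*
+ * Example (illustrative only): a message-based interrupt where writing
+ * 'val' to 'addr' raises the IRQ; msg_addr and msg_data are hypothetical
+ * values obtained from the MSI setup.
+ *
+ *	struct dpdmai_irq_cfg irq_cfg = {
+ *		.addr    = msg_addr,
+ *		.val     = msg_data,
+ *		.irq_num = 0,
+ *	};
+ */
+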
  2619. +/**
  2620. + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
  2621. + * @mc_io: Pointer to MC portal's I/O object
  2622. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2623. + * @token: Token of DPDMAI object
  2624. + * @irq_index: Identifies the interrupt index to configure
  2625. + * @irq_cfg: IRQ configuration
  2626. + *
  2627. + * Return: '0' on Success; Error code otherwise.
  2628. + */
  2629. +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
  2630. + uint32_t cmd_flags,
  2631. + uint16_t token,
  2632. + uint8_t irq_index,
  2633. + struct dpdmai_irq_cfg *irq_cfg);
  2634. +
  2635. +/**
  2636. + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
  2637. + *
  2638. + * @mc_io: Pointer to MC portal's I/O object
  2639. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2640. + * @token: Token of DPDMAI object
  2641. + * @irq_index: The interrupt index to configure
  2642. + * @type: Interrupt type: 0 represents message interrupt
  2643. + * type (both irq_addr and irq_val are valid)
  2644. + * @irq_cfg: IRQ attributes
  2645. + *
  2646. + * Return: '0' on Success; Error code otherwise.
  2647. + */
  2648. +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
  2649. + uint32_t cmd_flags,
  2650. + uint16_t token,
  2651. + uint8_t irq_index,
  2652. + int *type,
  2653. + struct dpdmai_irq_cfg *irq_cfg);
  2654. +
  2655. +/**
  2656. + * dpdmai_set_irq_enable() - Set overall interrupt state.
  2657. + * @mc_io: Pointer to MC portal's I/O object
  2658. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2659. + * @token: Token of DPDMAI object
  2660. + * @irq_index: The interrupt index to configure
  2661. + * @en: Interrupt state - enable = 1, disable = 0
  2662. + *
  2663. + * Allows GPP software to control when interrupts are generated.
   2664. + * Each interrupt can have up to 32 causes. The enable/disable controls the
   2665. + * overall interrupt state: if the interrupt is disabled, no cause will raise
   2666. + * an interrupt.
  2667. + *
  2668. + * Return: '0' on Success; Error code otherwise.
  2669. + */
  2670. +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
  2671. + uint32_t cmd_flags,
  2672. + uint16_t token,
  2673. + uint8_t irq_index,
  2674. + uint8_t en);
  2675. +
  2676. +/**
  2677. + * dpdmai_get_irq_enable() - Get overall interrupt state
  2678. + * @mc_io: Pointer to MC portal's I/O object
  2679. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2680. + * @token: Token of DPDMAI object
  2681. + * @irq_index: The interrupt index to configure
  2682. + * @en: Returned Interrupt state - enable = 1, disable = 0
  2683. + *
  2684. + * Return: '0' on Success; Error code otherwise.
  2685. + */
  2686. +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
  2687. + uint32_t cmd_flags,
  2688. + uint16_t token,
  2689. + uint8_t irq_index,
  2690. + uint8_t *en);
  2691. +
  2692. +/**
  2693. + * dpdmai_set_irq_mask() - Set interrupt mask.
  2694. + * @mc_io: Pointer to MC portal's I/O object
  2695. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2696. + * @token: Token of DPDMAI object
  2697. + * @irq_index: The interrupt index to configure
  2698. + * @mask: event mask to trigger interrupt;
  2699. + * each bit:
  2700. + * 0 = ignore event
  2701. + * 1 = consider event for asserting IRQ
  2702. + *
  2703. + * Every interrupt can have up to 32 causes and the interrupt model supports
  2704. + * masking/unmasking each cause independently.
  2705. + *
  2706. + * Return: '0' on Success; Error code otherwise.
  2707. + */
  2708. +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
  2709. + uint32_t cmd_flags,
  2710. + uint16_t token,
  2711. + uint8_t irq_index,
  2712. + uint32_t mask);
  2713. +
  2714. +/**
  2715. + * dpdmai_get_irq_mask() - Get interrupt mask.
  2716. + * @mc_io: Pointer to MC portal's I/O object
  2717. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2718. + * @token: Token of DPDMAI object
  2719. + * @irq_index: The interrupt index to configure
  2720. + * @mask: Returned event mask to trigger interrupt
  2721. + *
  2722. + * Every interrupt can have up to 32 causes and the interrupt model supports
  2723. + * masking/unmasking each cause independently.
  2724. + *
  2725. + * Return: '0' on Success; Error code otherwise.
  2726. + */
  2727. +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
  2728. + uint32_t cmd_flags,
  2729. + uint16_t token,
  2730. + uint8_t irq_index,
  2731. + uint32_t *mask);
  2732. +
  2733. +/**
  2734. + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
  2735. + * @mc_io: Pointer to MC portal's I/O object
  2736. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2737. + * @token: Token of DPDMAI object
  2738. + * @irq_index: The interrupt index to configure
  2739. + * @status: Returned interrupts status - one bit per cause:
  2740. + * 0 = no interrupt pending
  2741. + * 1 = interrupt pending
  2742. + *
  2743. + * Return: '0' on Success; Error code otherwise.
  2744. + */
  2745. +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
  2746. + uint32_t cmd_flags,
  2747. + uint16_t token,
  2748. + uint8_t irq_index,
  2749. + uint32_t *status);
  2750. +
  2751. +/**
  2752. + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
  2753. + * @mc_io: Pointer to MC portal's I/O object
  2754. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2755. + * @token: Token of DPDMAI object
  2756. + * @irq_index: The interrupt index to configure
  2757. + * @status: bits to clear (W1C) - one bit per cause:
  2758. + * 0 = don't change
  2759. + * 1 = clear status bit
  2760. + *
  2761. + * Return: '0' on Success; Error code otherwise.
  2762. + */
  2763. +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
  2764. + uint32_t cmd_flags,
  2765. + uint16_t token,
  2766. + uint8_t irq_index,
  2767. + uint32_t status);
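Putting the status calls together: a handler would read the pending causes, service them, then write the same bits back, since the status is write-1-to-clear. A minimal sketch under the same assumptions as above (open token, MC portal):

    uint32_t status = 0;

    if (!dpdmai_get_irq_status(mc_io, 0, token, 0, &status) && status) {
        /* ... service each set cause bit ... */
        dpdmai_clear_irq_status(mc_io, 0, token, 0, status);
    }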
  2768. +
  2769. +/**
  2770. + * struct dpdmai_attr - Structure representing DPDMAI attributes
  2771. + * @id: DPDMAI object ID
  2772. + * @version: DPDMAI version
  2773. + * @num_of_priorities: number of priorities
  2774. + */
  2775. +struct dpdmai_attr {
  2776. + int id;
  2777. + /**
  2778. + * struct version - DPDMAI version
  2779. + * @major: DPDMAI major version
  2780. + * @minor: DPDMAI minor version
  2781. + */
  2782. + struct {
  2783. + uint16_t major;
  2784. + uint16_t minor;
  2785. + } version;
  2786. + uint8_t num_of_priorities;
  2787. +};
  2788. +
  2789. +/**
  2790. + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
  2791. + * @mc_io: Pointer to MC portal's I/O object
  2792. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2793. + * @token: Token of DPDMAI object
  2794. + * @attr: Returned object's attributes
  2795. + *
  2796. + * Return: '0' on Success; Error code otherwise.
  2797. + */
  2798. +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
  2799. + uint32_t cmd_flags,
  2800. + uint16_t token,
  2801. + struct dpdmai_attr *attr);
  2802. +
  2803. +/**
  2804. + * enum dpdmai_dest - DPDMAI destination types
  2805. + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
  2806. + * and does not generate FQDAN notifications; user is expected to dequeue
  2807. + * from the queue based on polling or other user-defined method
  2808. + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
  2809. + * notifications to the specified DPIO; user is expected to dequeue
  2810. + * from the queue only after notification is received
  2811. + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
  2812. + * FQDAN notifications, but is connected to the specified DPCON object;
  2813. + * user is expected to dequeue from the DPCON channel
  2814. + */
  2815. +enum dpdmai_dest {
  2816. + DPDMAI_DEST_NONE = 0,
  2817. + DPDMAI_DEST_DPIO = 1,
  2818. + DPDMAI_DEST_DPCON = 2
  2819. +};
  2820. +
  2821. +/**
  2822. + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
  2823. + * @dest_type: Destination type
  2824. + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
  2825. + * @priority: Priority selection within the DPIO or DPCON channel; valid values
  2826. + * are 0-1 or 0-7, depending on the number of priorities in that
  2827. + * channel; not relevant for 'DPDMAI_DEST_NONE' option
  2828. + */
  2829. +struct dpdmai_dest_cfg {
  2830. + enum dpdmai_dest dest_type;
  2831. + int dest_id;
  2832. + uint8_t priority;
  2833. +};
  2834. +
  2835. +/* DPDMAI queue modification options */
  2836. +
  2837. +/**
  2838. + * Select to modify the user's context associated with the queue
  2839. + */
  2840. +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
  2841. +
  2842. +/**
  2843. + * Select to modify the queue's destination
  2844. + */
  2845. +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
  2846. +
  2847. +/**
  2848. + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
  2849. + * @options: Flags representing the suggested modifications to the queue;
  2850. + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
  2851. + * @user_ctx: User context value provided in the frame descriptor of each
  2852. + * dequeued frame;
  2853. + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
  2854. + * @dest_cfg: Queue destination parameters;
  2855. + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
  2856. + */
  2857. +struct dpdmai_rx_queue_cfg {
  2858. + uint32_t options;
  2859. + uint64_t user_ctx;
  2860. + struct dpdmai_dest_cfg dest_cfg;
  2861. +};
  2863. +
  2864. +/**
  2865. + * dpdmai_set_rx_queue() - Set Rx queue configuration
  2866. + * @mc_io: Pointer to MC portal's I/O object
  2867. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2868. + * @token: Token of DPDMAI object
  2869. + * @priority: Select the queue relative to number of
  2870. + * priorities configured at DPDMAI creation; use
  2871. + * DPDMAI_ALL_QUEUES to configure all Rx queues
  2872. + * identically.
  2873. + * @cfg: Rx queue configuration
  2874. + *
  2875. + * Return: '0' on Success; Error code otherwise.
  2876. + */
  2877. +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
  2878. + uint32_t cmd_flags,
  2879. + uint16_t token,
  2880. + uint8_t priority,
  2881. + const struct dpdmai_rx_queue_cfg *cfg);
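For example, steering completions of all priorities to a DPIO channel (FQDAN notifications) takes both option flags; a sketch in which `dpio_id` and `ctx` are placeholders:

    struct dpdmai_rx_queue_cfg rx_cfg = {
        .options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
        .user_ctx = ctx,                  /* returned in each dequeued frame */
        .dest_cfg = {
            .dest_type = DPDMAI_DEST_DPIO,
            .dest_id   = dpio_id,
            .priority  = 0,
        },
    };
    int err = dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &rx_cfg);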
  2882. +
  2883. +/**
  2884. + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
  2885. + * @user_ctx: User context value provided in the frame descriptor of each
  2886. + * dequeued frame
  2887. + * @dest_cfg: Queue destination configuration
  2888. + * @fqid: Virtual FQID value to be used for dequeue operations
  2889. + */
  2890. +struct dpdmai_rx_queue_attr {
  2891. + uint64_t user_ctx;
  2892. + struct dpdmai_dest_cfg dest_cfg;
  2893. + uint32_t fqid;
  2894. +};
  2895. +
  2896. +/**
  2897. + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
  2898. + * @mc_io: Pointer to MC portal's I/O object
  2899. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2900. + * @token: Token of DPDMAI object
  2901. + * @priority: Select the queue relative to number of
  2902. + * priorities configured at DPDMAI creation
  2903. + * @attr: Returned Rx queue attributes
  2904. + *
  2905. + * Return: '0' on Success; Error code otherwise.
  2906. + */
  2907. +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
  2908. + uint32_t cmd_flags,
  2909. + uint16_t token,
  2910. + uint8_t priority,
  2911. + struct dpdmai_rx_queue_attr *attr);
  2912. +
  2913. +/**
  2914. + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
  2915. + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
  2916. + */
  2918. +struct dpdmai_tx_queue_attr {
  2919. + uint32_t fqid;
  2920. +};
  2921. +
  2922. +/**
  2923. + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
  2924. + * @mc_io: Pointer to MC portal's I/O object
  2925. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  2926. + * @token: Token of DPDMAI object
  2927. + * @priority: Select the queue relative to number of
  2928. + * priorities configured at DPDMAI creation
  2929. + * @attr: Returned Tx queue attributes
  2930. + *
  2931. + * Return: '0' on Success; Error code otherwise.
  2932. + */
  2933. +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
  2934. + uint32_t cmd_flags,
  2935. + uint16_t token,
  2936. + uint8_t priority,
  2937. + struct dpdmai_tx_queue_attr *attr);
  2938. +
  2939. +#endif /* __FSL_DPDMAI_H */
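Taken as a whole, this header implies an open/query/configure/enable lifecycle. A condensed sketch of that sequence (error handling elided; `dpdmai_id` is a placeholder, and the open/enable/disable/close calls are assumed from the earlier declarations matching the command IDs in the file below):

    uint16_t token;
    struct dpdmai_attr attr;
    struct dpdmai_rx_queue_attr rx_attr;
    int i;

    dpdmai_open(mc_io, 0, dpdmai_id, &token);
    dpdmai_get_attributes(mc_io, 0, token, &attr);
    for (i = 0; i < attr.num_of_priorities; i++)
        dpdmai_get_rx_queue(mc_io, 0, token, i, &rx_attr);
    dpdmai_enable(mc_io, 0, token);
    /* ... enqueue/dequeue via the returned FQIDs ... */
    dpdmai_disable(mc_io, 0, token);
    dpdmai_close(mc_io, 0, token);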
  2940. diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
  2941. new file mode 100644
  2942. index 00000000..7d403c01
  2943. --- /dev/null
  2944. +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
  2945. @@ -0,0 +1,222 @@
  2946. +/* Copyright 2013-2016 Freescale Semiconductor Inc.
  2947. + *
  2948. + * Redistribution and use in source and binary forms, with or without
  2949. + * modification, are permitted provided that the following conditions are met:
  2950. + * * Redistributions of source code must retain the above copyright
  2951. + * notice, this list of conditions and the following disclaimer.
  2952. + * * Redistributions in binary form must reproduce the above copyright
  2953. + * notice, this list of conditions and the following disclaimer in the
  2954. + * documentation and/or other materials provided with the distribution.
  2955. + * * Neither the name of the above-listed copyright holders nor the
  2956. + * names of any contributors may be used to endorse or promote products
  2957. + * derived from this software without specific prior written permission.
  2958. + *
  2959. + *
  2960. + * ALTERNATIVELY, this software may be distributed under the terms of the
  2961. + * GNU General Public License ("GPL") as published by the Free Software
  2962. + * Foundation, either version 2 of that License or (at your option) any
  2963. + * later version.
  2964. + *
  2965. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  2966. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  2967. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  2968. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  2969. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  2970. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  2971. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  2972. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  2973. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  2974. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  2975. + * POSSIBILITY OF SUCH DAMAGE.
  2976. + */
  2977. +#ifndef _FSL_DPDMAI_CMD_H
  2978. +#define _FSL_DPDMAI_CMD_H
  2979. +
  2980. +/* DPDMAI Version */
  2981. +#define DPDMAI_VER_MAJOR 2
  2982. +#define DPDMAI_VER_MINOR 2
  2983. +
  2984. +#define DPDMAI_CMD_BASE_VERSION 0
  2985. +#define DPDMAI_CMD_ID_OFFSET 4
  2986. +
  2987. +/* Command IDs */
  2988. +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2989. +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2990. +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2991. +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2992. +
  2993. +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2994. +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2995. +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2996. +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2997. +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  2998. +
  2999. +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3000. +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3001. +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3002. +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3003. +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3004. +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3005. +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3006. +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3007. +
  3008. +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3009. +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3010. +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
  3011. +
  3012. +
  3013. +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
  3014. +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
  3015. +
  3016. +
  3017. +#define MAKE_UMASK64(_width) \
  3018. + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
  3019. + (uint64_t)-1))
  3020. +
  3021. +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
  3022. +{
  3023. + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
  3024. +}
  3025. +
  3026. +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
  3027. +{
  3028. + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
  3029. +}
  3030. +
  3031. +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
  3032. + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
  3033. +
  3034. +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
  3035. + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
  3036. +
  3037. +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
  3038. + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
  3039. +
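To make the packing concrete: each MC_CMD_OP ORs one field into a 64-bit command parameter at a bit offset. For DPDMAI_CMD_CREATE below, priorities[0] lands in bits 8-15 of params[0] and priorities[1] in bits 16-23. A worked example, illustrative values only:

    /* cfg->priorities = {2, 4} */
    uint64_t p0 = 0;

    p0 |= mc_enc(8, 8, 2);    /* priorities[0] -> bits 8..15  == 0x200   */
    p0 |= mc_enc(16, 8, 4);   /* priorities[1] -> bits 16..23 == 0x40000 */
    /* p0 == 0x0000000000040200 */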
  3040. +/* cmd, param, offset, width, type, arg_name */
  3041. +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
  3042. + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
  3043. +
  3044. +/* cmd, param, offset, width, type, arg_name */
  3045. +#define DPDMAI_CMD_CREATE(cmd, cfg) \
  3046. +do { \
  3047. + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
  3048. + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
  3049. +} while (0)
  3050. +
  3051. +/* cmd, param, offset, width, type, arg_name */
  3052. +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
  3053. + MC_RSP_OP(cmd, 0, 0, 1, int, en)
  3054. +
  3055. +/* cmd, param, offset, width, type, arg_name */
  3056. +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
  3057. +do { \
  3058. + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
  3059. + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
  3060. + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
  3061. + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
  3062. +} while (0)
  3063. +
  3064. +/* cmd, param, offset, width, type, arg_name */
  3065. +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
  3066. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  3067. +
  3068. +/* cmd, param, offset, width, type, arg_name */
  3069. +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
  3070. +do { \
  3071. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
  3072. + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
  3073. + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
  3074. + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
  3075. +} while (0)
  3076. +
  3077. +/* cmd, param, offset, width, type, arg_name */
  3078. +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
  3079. +do { \
  3080. + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
  3081. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  3082. +} while (0)
  3083. +
  3084. +/* cmd, param, offset, width, type, arg_name */
  3085. +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
  3086. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  3087. +
  3088. +/* cmd, param, offset, width, type, arg_name */
  3089. +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
  3090. + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
  3091. +
  3092. +/* cmd, param, offset, width, type, arg_name */
  3093. +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
  3094. +do { \
  3095. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
  3096. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  3097. +} while (0)
  3098. +
  3099. +/* cmd, param, offset, width, type, arg_name */
  3100. +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
  3101. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
  3102. +
  3103. +/* cmd, param, offset, width, type, arg_name */
  3104. +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
  3105. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
  3106. +
  3107. +/* cmd, param, offset, width, type, arg_name */
  3108. +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
  3109. +do { \
  3110. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
  3111. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
  3112. +} while (0)
  3113. +
  3114. +/* cmd, param, offset, width, type, arg_name */
  3115. +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
  3116. + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
  3117. +
  3118. +/* cmd, param, offset, width, type, arg_name */
  3119. +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
  3120. +do { \
  3121. + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
  3122. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
  3123. +} while (0)
  3124. +
  3125. +/* cmd, param, offset, width, type, arg_name */
  3126. +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
  3127. +do { \
  3128. + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
  3129. + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
  3130. + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
  3131. + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
  3132. +} while (0)
  3133. +
  3134. +/* cmd, param, offset, width, type, arg_name */
  3135. +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
  3136. +do { \
  3137. + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
  3138. + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
  3139. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
  3140. + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
  3141. + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
  3142. + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
  3143. +} while (0)
  3144. +
  3145. +/* cmd, param, offset, width, type, arg_name */
  3146. +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
  3147. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
  3148. +
  3149. +/* cmd, param, offset, width, type, arg_name */
  3150. +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
  3151. +do { \
  3152. + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
  3153. + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
  3154. + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
  3155. + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
  3156. + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
  3157. +} while (0)
  3158. +
  3159. +/* cmd, param, offset, width, type, arg_name */
  3160. +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
  3161. + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
  3162. +
  3163. +/* cmd, param, offset, width, type, arg_name */
  3164. +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
  3165. + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
  3166. +
  3167. +#endif /* _FSL_DPDMAI_CMD_H */
  3168. diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
  3169. new file mode 100644
  3170. index 00000000..6c4c2813
  3171. --- /dev/null
  3172. +++ b/drivers/dma/fsl-qdma.c
  3173. @@ -0,0 +1,1201 @@
  3174. +/*
  3175. + * drivers/dma/fsl-qdma.c
  3176. + *
  3177. + * Copyright 2014-2015 Freescale Semiconductor, Inc.
  3178. + *
  3179. + * Driver for the Freescale qDMA engine with software command queue mode.
  3180. + * Channel virtualization is supported through enqueuing of DMA jobs to,
  3181. + * or dequeuing DMA jobs from, different work queues.
  3182. + * This module can be found on Freescale LS SoCs.
  3183. + *
  3184. + * This program is free software; you can redistribute it and/or modify it
  3185. + * under the terms of the GNU General Public License as published by the
  3186. + * Free Software Foundation; either version 2 of the License, or (at your
  3187. + * option) any later version.
  3188. + */
  3189. +
  3190. +#include <asm/cacheflush.h>
  3191. +#include <linux/clk.h>
  3192. +#include <linux/delay.h>
  3193. +#include <linux/dma-mapping.h>
  3194. +#include <linux/dmapool.h>
  3195. +#include <linux/init.h>
  3196. +#include <linux/interrupt.h>
  3197. +#include <linux/module.h>
  3198. +#include <linux/of.h>
  3199. +#include <linux/of_address.h>
  3200. +#include <linux/of_device.h>
  3201. +#include <linux/of_dma.h>
  3202. +#include <linux/of_irq.h>
  3203. +#include <linux/slab.h>
  3204. +#include <linux/spinlock.h>
  3205. +
  3206. +#include "virt-dma.h"
  3207. +
  3208. +#define FSL_QDMA_DMR 0x0
  3209. +#define FSL_QDMA_DSR 0x4
  3210. +#define FSL_QDMA_DEIER 0xe00
  3211. +#define FSL_QDMA_DEDR 0xe04
  3212. +#define FSL_QDMA_DECFDW0R 0xe10
  3213. +#define FSL_QDMA_DECFDW1R 0xe14
  3214. +#define FSL_QDMA_DECFDW2R 0xe18
  3215. +#define FSL_QDMA_DECFDW3R 0xe1c
  3216. +#define FSL_QDMA_DECFQIDR 0xe30
  3217. +#define FSL_QDMA_DECBR 0xe34
  3218. +
  3219. +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
  3220. +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
  3221. +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
  3222. +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
  3223. +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
  3224. +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
  3225. +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
  3226. +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
  3227. +
  3228. +#define FSL_QDMA_SQDPAR 0x80c
  3229. +#define FSL_QDMA_SQEPAR 0x814
  3230. +#define FSL_QDMA_BSQMR 0x800
  3231. +#define FSL_QDMA_BSQSR 0x804
  3232. +#define FSL_QDMA_BSQICR 0x828
  3233. +#define FSL_QDMA_CQMR 0xa00
  3234. +#define FSL_QDMA_CQDSCR1 0xa08
  3235. +#define FSL_QDMA_CQDSCR2 0xa0c
  3236. +#define FSL_QDMA_CQIER 0xa10
  3237. +#define FSL_QDMA_CQEDR 0xa14
  3238. +#define FSL_QDMA_SQCCMR 0xa20
  3239. +
  3240. +#define FSL_QDMA_SQICR_ICEN
  3241. +
  3242. +#define FSL_QDMA_CQIDR_CQT 0xff000000
  3243. +#define FSL_QDMA_CQIDR_SQPE 0x800000
  3244. +#define FSL_QDMA_CQIDR_SQT 0x8000
  3245. +
  3246. +#define FSL_QDMA_BCQIER_CQTIE 0x8000
  3247. +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
  3248. +#define FSL_QDMA_BSQICR_ICEN 0x80000000
  3249. +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
  3250. +#define FSL_QDMA_CQIER_MEIE 0x80000000
  3251. +#define FSL_QDMA_CQIER_TEIE 0x1
  3252. +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
  3253. +
  3254. +#define FSL_QDMA_QUEUE_MAX 8
  3255. +
  3256. +#define FSL_QDMA_BCQMR_EN 0x80000000
  3257. +#define FSL_QDMA_BCQMR_EI 0x40000000
  3258. +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
  3259. +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
  3260. +
  3261. +#define FSL_QDMA_BCQSR_QF 0x10000
  3262. +#define FSL_QDMA_BCQSR_XOFF 0x1
  3263. +
  3264. +#define FSL_QDMA_BSQMR_EN 0x80000000
  3265. +#define FSL_QDMA_BSQMR_DI 0x40000000
  3266. +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
  3267. +
  3268. +#define FSL_QDMA_BSQSR_QE 0x20000
  3269. +
  3270. +#define FSL_QDMA_DMR_DQD 0x40000000
  3271. +#define FSL_QDMA_DSR_DB 0x80000000
  3272. +
  3273. +#define FSL_QDMA_BASE_BUFFER_SIZE 96
  3274. +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
  3275. +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
  3276. +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
  3277. +#define FSL_QDMA_QUEUE_NUM_MAX 8
  3278. +
  3279. +#define FSL_QDMA_CMD_RWTTYPE 0x4
  3280. +#define FSL_QDMA_CMD_LWC 0x2
  3281. +
  3282. +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
  3283. +#define FSL_QDMA_CMD_NS_OFFSET 27
  3284. +#define FSL_QDMA_CMD_DQOS_OFFSET 24
  3285. +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
  3286. +#define FSL_QDMA_CMD_DSEN_OFFSET 19
  3287. +#define FSL_QDMA_CMD_LWC_OFFSET 16
  3288. +
  3289. +#define FSL_QDMA_E_SG_TABLE 1
  3290. +#define FSL_QDMA_E_DATA_BUFFER 0
  3291. +#define FSL_QDMA_F_LAST_ENTRY 1
  3292. +
  3293. +u64 pre_addr, pre_queue; /* last status-queue entry seen; used to detect duplicates */
  3294. +
  3295. +struct fsl_qdma_ccdf {
  3296. + u8 status;
  3297. + u32 rev1:22;
  3298. + u32 ser:1;
  3299. + u32 rev2:1;
  3300. + u32 rev3:20;
  3301. + u32 offset:9;
  3302. + u32 format:3;
  3303. + union {
  3304. + struct {
  3305. + u32 addr_lo; /* low 32-bits of 40-bit address */
  3306. + u32 addr_hi:8; /* high 8-bits of 40-bit address */
  3307. + u32 rev4:16;
  3308. + u32 queue:3;
  3309. + u32 rev5:3;
  3310. + u32 dd:2; /* dynamic debug */
  3311. + };
  3312. + struct {
  3313. + u64 addr:40;
  3314. + /* More efficient address accessor */
  3315. + u64 __notaddress:24;
  3316. + };
  3317. + };
  3318. +} __packed;
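The union above gives two views of the same 40-bit bus address; this relies on the compiler's bitfield layout, hence __packed, and is only valid on the layouts this driver targets. Illustrative values under that assumption:

    struct fsl_qdma_ccdf d = { 0 };

    d.addr = 0x1234567890ULL;   /* a 40-bit address */
    /* then d.addr_lo == 0x34567890 and d.addr_hi == 0x12 */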
  3319. +
  3320. +struct fsl_qdma_csgf {
  3321. + u32 offset:13;
  3322. + u32 rev1:19;
  3323. + u32 length:30;
  3324. + u32 f:1;
  3325. + u32 e:1;
  3326. + union {
  3327. + struct {
  3328. + u32 addr_lo; /* low 32-bits of 40-bit address */
  3329. + u32 addr_hi:8; /* high 8-bits of 40-bit address */
  3330. + u32 rev2:24;
  3331. + };
  3332. + struct {
  3333. + u64 addr:40;
  3334. + /* More efficient address accessor */
  3335. + u64 __notaddress:24;
  3336. + };
  3337. + };
  3338. +} __packed;
  3339. +
  3340. +struct fsl_qdma_sdf {
  3341. + u32 rev3:32;
  3342. + u32 ssd:12; /* source stride distance */
  3343. + u32 sss:12; /* source stride size */
  3344. + u32 rev4:8;
  3345. + u32 rev5:32;
  3346. + u32 cmd;
  3347. +} __packed;
  3348. +
  3349. +struct fsl_qdma_ddf {
  3350. + u32 rev1:32;
  3351. + u32 dsd:12; /* Destination stride distance */
  3352. + u32 dss:12; /* Destination stride size */
  3353. + u32 rev2:8;
  3354. + u32 rev3:32;
  3355. + u32 cmd;
  3356. +} __packed;
  3357. +
  3358. +struct fsl_qdma_chan {
  3359. + struct virt_dma_chan vchan;
  3360. + struct virt_dma_desc vdesc;
  3361. + enum dma_status status;
  3362. + u32 slave_id;
  3363. + struct fsl_qdma_engine *qdma;
  3364. + struct fsl_qdma_queue *queue;
  3365. + struct list_head qcomp;
  3366. +};
  3367. +
  3368. +struct fsl_qdma_queue {
  3369. + struct fsl_qdma_ccdf *virt_head;
  3370. + struct fsl_qdma_ccdf *virt_tail;
  3371. + struct list_head comp_used;
  3372. + struct list_head comp_free;
  3373. + struct dma_pool *comp_pool;
  3374. + struct dma_pool *sg_pool;
  3375. + spinlock_t queue_lock;
  3376. + dma_addr_t bus_addr;
  3377. + u32 n_cq;
  3378. + u32 id;
  3379. + struct fsl_qdma_ccdf *cq;
  3380. +};
  3381. +
  3382. +struct fsl_qdma_sg {
  3383. + dma_addr_t bus_addr;
  3384. + void *virt_addr;
  3385. +};
  3386. +
  3387. +struct fsl_qdma_comp {
  3388. + dma_addr_t bus_addr;
  3389. + void *virt_addr;
  3390. + struct fsl_qdma_chan *qchan;
  3391. + struct fsl_qdma_sg *sg_block;
  3392. + struct virt_dma_desc vdesc;
  3393. + struct list_head list;
  3394. + u32 sg_block_src;
  3395. + u32 sg_block_dst;
  3396. +};
  3397. +
  3398. +struct fsl_qdma_engine {
  3399. + struct dma_device dma_dev;
  3400. + void __iomem *ctrl_base;
  3401. + void __iomem *status_base;
  3402. + void __iomem *block_base;
  3403. + u32 n_chans;
  3404. + u32 n_queues;
  3405. + struct mutex fsl_qdma_mutex;
  3406. + int error_irq;
  3407. + int queue_irq;
  3408. + bool big_endian;
  3409. + struct fsl_qdma_queue *queue;
  3410. + struct fsl_qdma_queue *status;
  3411. + struct fsl_qdma_chan chans[];
  3413. +};
  3414. +
  3415. +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
  3416. +{
  3417. + if (qdma->big_endian)
  3418. + return ioread32be(addr);
  3419. + else
  3420. + return ioread32(addr);
  3421. +}
  3422. +
  3423. +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
  3424. + void __iomem *addr)
  3425. +{
  3426. + if (qdma->big_endian)
  3427. + iowrite32be(val, addr);
  3428. + else
  3429. + iowrite32(val, addr);
  3430. +}
  3431. +
  3432. +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
  3433. +{
  3434. + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
  3435. +}
  3436. +
  3437. +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
  3438. +{
  3439. + return container_of(vd, struct fsl_qdma_comp, vdesc);
  3440. +}
  3441. +
  3442. +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
  3443. +{
  3444. + /*
  3445. + * In qDMA mode, we don't need to do anything.
  3446. + */
  3447. + return 0;
  3448. +}
  3449. +
  3450. +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
  3451. +{
  3452. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  3453. + unsigned long flags;
  3454. + LIST_HEAD(head);
  3455. +
  3456. + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
  3457. + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
  3458. + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
  3459. +
  3460. + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
  3461. +}
  3462. +
  3463. +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
  3464. + dma_addr_t dst, dma_addr_t src, u32 len)
  3465. +{
  3466. + struct fsl_qdma_ccdf *ccdf;
  3467. + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
  3468. + struct fsl_qdma_sdf *sdf;
  3469. + struct fsl_qdma_ddf *ddf;
  3470. +
  3471. + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
  3472. + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
  3473. + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
  3474. + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
  3475. + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
  3476. + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
  3477. +
  3478. + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
  3479. + /* Head Command Descriptor (Frame Descriptor) */
  3480. + ccdf->addr = fsl_comp->bus_addr + 16;
  3481. + ccdf->format = 1; /* Compound S/G format */
  3482. + /* Status notification is enqueued to status queue. */
  3483. + ccdf->ser = 1;
  3484. + /* Compound Command Descriptor (Frame List Table) */
  3485. + csgf_desc->addr = fsl_comp->bus_addr + 64;
  3486. + /* Length must be 32, as required for the compound S/G descriptor. */
  3487. + csgf_desc->length = 32;
  3488. + csgf_src->addr = src;
  3489. + csgf_src->length = len;
  3490. + csgf_dest->addr = dst;
  3491. + csgf_dest->length = len;
  3492. + /* This entry is the last entry. */
  3493. + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
  3494. + /* Descriptor Buffer */
  3495. + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  3496. + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  3497. + ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
  3498. +}
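For reference, the pointer arithmetic above (each descriptor struct is 16 bytes) gives the pre-allocated FSL_QDMA_BASE_BUFFER_SIZE buffer this layout, as set up by fsl_qdma_comp_fill_memcpy:

    /*
     * fsl_comp->virt_addr, 96 bytes total:
     *   +0   ccdf       frame descriptor; ccdf->addr -> bus_addr + 16
     *   +16  csgf_desc  frame list entry -> bus_addr + 64, length 32
     *   +32  csgf_src   source S/G entry
     *   +48  csgf_dest  destination S/G entry (F set: last entry)
     *   +64  sdf        source descriptor (read command)
     *   +80  ddf        destination descriptor (write command)
     */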
  3499. +
  3500. +static void fsl_qdma_comp_fill_sg(
  3501. + struct fsl_qdma_comp *fsl_comp,
  3502. + struct scatterlist *dst_sg, unsigned int dst_nents,
  3503. + struct scatterlist *src_sg, unsigned int src_nents)
  3504. +{
  3505. + struct fsl_qdma_ccdf *ccdf;
  3506. + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
  3507. + struct fsl_qdma_sdf *sdf;
  3508. + struct fsl_qdma_ddf *ddf;
  3509. + struct fsl_qdma_sg *sg_block, *temp;
  3510. + struct scatterlist *sg;
  3511. + u64 total_src_len = 0;
  3512. + u64 total_dst_len = 0;
  3513. + u32 i;
  3514. +
  3515. + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
  3516. + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
  3517. + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
  3518. + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
  3519. + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
  3520. + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
  3521. +
  3522. + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
  3523. + /* Head Command Descriptor (Frame Descriptor) */
  3524. + ccdf->addr = fsl_comp->bus_addr + 16;
  3525. + ccdf->format = 1; /* Compound S/G format */
  3526. + /* Status notification is enqueued to status queue. */
  3527. + ccdf->ser = 1;
  3528. +
  3529. + /* Compound Command Descriptor (Frame List Table) */
  3530. + csgf_desc->addr = fsl_comp->bus_addr + 64;
  3531. + /* Length must be 32, as required for the compound S/G descriptor. */
  3532. + csgf_desc->length = 32;
  3533. +
  3534. + sg_block = fsl_comp->sg_block;
  3535. + csgf_src->addr = sg_block->bus_addr;
  3536. + /* This entry links to the S/G table. */
  3537. + csgf_src->e = FSL_QDMA_E_SG_TABLE;
  3538. +
  3539. + temp = sg_block + fsl_comp->sg_block_src;
  3540. + csgf_dest->addr = temp->bus_addr;
  3541. + /* This entry is the last entry. */
  3542. + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
  3543. + /* This entry links to the S/G table. */
  3544. + csgf_dest->e = FSL_QDMA_E_SG_TABLE;
  3545. +
  3546. + for_each_sg(src_sg, sg, src_nents, i) {
  3547. + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  3548. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  3549. + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  3550. + csgf_sg->addr = sg_dma_address(sg);
  3551. + csgf_sg->length = sg_dma_len(sg);
  3552. + total_src_len += sg_dma_len(sg);
  3553. +
  3554. + if (i == src_nents - 1)
  3555. + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
  3556. + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
  3557. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
  3558. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  3559. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
  3560. + temp = sg_block +
  3561. + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3562. + csgf_sg->addr = temp->bus_addr;
  3563. + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
  3564. + }
  3565. + }
  3566. +
  3567. + sg_block += fsl_comp->sg_block_src;
  3568. + for_each_sg(dst_sg, sg, dst_nents, i) {
  3569. + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  3570. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  3571. + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
  3572. + csgf_sg->addr = sg_dma_address(sg);
  3573. + csgf_sg->length = sg_dma_len(sg);
  3574. + total_dst_len += sg_dma_len(sg);
  3575. +
  3576. + if (i == dst_nents - 1)
  3577. + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
  3578. + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
  3579. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
  3580. + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
  3581. + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
  3582. + temp = sg_block +
  3583. + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3584. + csgf_sg->addr = temp->bus_addr;
  3585. + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
  3586. + }
  3587. + }
  3588. +
  3589. + if (total_src_len != total_dst_len)
  3590. + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
  3591. + "The data length for src and dst isn't match.\n");
  3592. +
  3593. + csgf_src->length = total_src_len;
  3594. + csgf_dest->length = total_dst_len;
  3595. +
  3596. + /* Descriptor Buffer */
  3597. + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  3598. + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
  3599. +}
  3600. +
  3601. +/*
  3602. + * Pre-allocate the full set of command descriptors for enqueue.
  3603. + */
  3604. +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
  3605. +{
  3606. + struct fsl_qdma_comp *comp_temp;
  3607. + int i;
  3608. +
  3609. + for (i = 0; i < queue->n_cq; i++) {
  3610. + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
  3611. + if (!comp_temp)
  3612. + return -ENOMEM;
  3613. + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
  3614. + GFP_NOWAIT,
  3615. + &comp_temp->bus_addr);
  3616. + if (!comp_temp->virt_addr) {
  3616. + kfree(comp_temp);
  3617. + return -ENOMEM;
  3617. + }
  3618. + list_add_tail(&comp_temp->list, &queue->comp_free);
  3619. + }
  3620. + return 0;
  3621. +}
  3622. +
  3623. +/*
  3624. + * Request a command descriptor for enqueue.
  3625. + */
  3626. +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
  3627. + struct fsl_qdma_chan *fsl_chan,
  3628. + unsigned int dst_nents,
  3629. + unsigned int src_nents)
  3630. +{
  3631. + struct fsl_qdma_comp *comp_temp;
  3632. + struct fsl_qdma_sg *sg_block;
  3633. + struct fsl_qdma_queue *queue = fsl_chan->queue;
  3634. + unsigned long flags;
  3635. + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
  3636. +
  3637. + spin_lock_irqsave(&queue->queue_lock, flags);
  3638. + if (list_empty(&queue->comp_free)) {
  3639. + spin_unlock_irqrestore(&queue->queue_lock, flags);
  3640. + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
  3641. + if (!comp_temp)
  3642. + return NULL;
  3643. + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
  3644. + GFP_NOWAIT,
  3645. + &comp_temp->bus_addr);
  3646. + if (!comp_temp->virt_addr) {
  3646. + kfree(comp_temp);
  3647. + return NULL;
  3647. + }
  3648. + } else {
  3649. + comp_temp = list_first_entry(&queue->comp_free,
  3650. + struct fsl_qdma_comp,
  3651. + list);
  3652. + list_del(&comp_temp->list);
  3653. + spin_unlock_irqrestore(&queue->queue_lock, flags);
  3654. + }
  3655. +
  3656. + if (dst_nents != 0)
  3657. + dst_sg_entry_block = dst_nents /
  3658. + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3659. + else
  3660. + dst_sg_entry_block = 0;
  3661. +
  3662. + if (src_nents != 0)
  3663. + src_sg_entry_block = src_nents /
  3664. + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
  3665. + else
  3666. + src_sg_entry_block = 0;
  3667. +
  3668. + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
  3669. + if (sg_entry_total) {
  3670. + sg_block = kzalloc(sizeof(*sg_block) *
  3671. + sg_entry_total,
  3672. + GFP_KERNEL);
  3673. + if (!sg_block)
  3674. + return NULL;
  3675. + comp_temp->sg_block = sg_block;
  3676. + for (i = 0; i < sg_entry_total; i++) {
  3677. + sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
  3678. + GFP_NOWAIT,
  3679. + &sg_block->bus_addr);
  3679. + if (!sg_block->virt_addr)
  3679. + return NULL;
  3680. + memset(sg_block->virt_addr, 0,
  3681. + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
  3682. + sg_block++;
  3683. + }
  3684. + }
  3685. +
  3686. + comp_temp->sg_block_src = src_sg_entry_block;
  3687. + comp_temp->sg_block_dst = dst_sg_entry_block;
  3688. + comp_temp->qchan = fsl_chan;
  3689. +
  3690. + return comp_temp;
  3691. +}
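The block arithmetic above follows from FSL_QDMA_EXPECT_SG_ENTRY_NUM = 16: each S/G table holds 15 payload entries plus one link entry that chains to the next table (fsl_qdma_comp_fill_sg writes that link into slot 15). A worked example:

    /* src_nents = 20, FSL_QDMA_EXPECT_SG_ENTRY_NUM = 16             */
    /* src_sg_entry_block = 20 / (16 - 1) + 1 = 2 tables             */
    /* table 0: entries 0..14, slot 15 links to table 1              */
    /* table 1: entries 15..19, entry 19 gets FSL_QDMA_F_LAST_ENTRY  */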
  3692. +
  3693. +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
  3694. + struct platform_device *pdev,
  3695. + unsigned int queue_num)
  3696. +{
  3697. + struct device_node *np = pdev->dev.of_node;
  3698. + struct fsl_qdma_queue *queue_head, *queue_temp;
  3699. + int ret, len, i;
  3700. + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
  3701. +
  3702. + if (queue_num > FSL_QDMA_QUEUE_MAX)
  3703. + queue_num = FSL_QDMA_QUEUE_MAX;
  3704. + len = sizeof(*queue_head) * queue_num;
  3705. + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
  3706. + if (!queue_head)
  3707. + return NULL;
  3708. +
  3709. + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
  3710. + queue_num);
  3711. + if (ret) {
  3712. + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
  3713. + return NULL;
  3714. + }
  3715. +
  3716. + for (i = 0; i < queue_num; i++) {
  3717. + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
  3718. + || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
  3719. + dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
  3720. + return NULL;
  3721. + }
  3722. + queue_temp = queue_head + i;
  3723. + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
  3724. + sizeof(struct fsl_qdma_ccdf) *
  3725. + queue_size[i],
  3726. + &queue_temp->bus_addr,
  3727. + GFP_KERNEL);
  3728. + if (!queue_temp->cq)
  3729. + return NULL;
  3730. + queue_temp->n_cq = queue_size[i];
  3731. + queue_temp->id = i;
  3732. + queue_temp->virt_head = queue_temp->cq;
  3733. + queue_temp->virt_tail = queue_temp->cq;
  3734. + /*
  3735. + * The dma pool for queue command buffer
  3736. + */
  3737. + queue_temp->comp_pool = dma_pool_create("comp_pool",
  3738. + &pdev->dev,
  3739. + FSL_QDMA_BASE_BUFFER_SIZE,
  3740. + 16, 0);
  3741. + if (!queue_temp->comp_pool) {
  3742. + dma_free_coherent(&pdev->dev,
  3743. + sizeof(struct fsl_qdma_ccdf) *
  3744. + queue_size[i],
  3745. + queue_temp->cq,
  3746. + queue_temp->bus_addr);
  3747. + return NULL;
  3748. + }
  3749. + /*
  3750. + * The dma pool for S/G table buffers
  3751. + */
  3752. + queue_temp->sg_pool = dma_pool_create("sg_pool",
  3753. + &pdev->dev,
  3754. + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
  3755. + 64, 0);
  3756. + if (!queue_temp->sg_pool) {
  3757. + dma_free_coherent(&pdev->dev,
  3758. + sizeof(struct fsl_qdma_ccdf) *
  3759. + queue_size[i],
  3760. + queue_temp->cq,
  3761. + queue_temp->bus_addr);
  3762. + dma_pool_destroy(queue_temp->comp_pool);
  3763. + return NULL;
  3764. + }
  3765. + /*
  3766. + * List for queue command buffer
  3767. + */
  3768. + INIT_LIST_HEAD(&queue_temp->comp_used);
  3769. + INIT_LIST_HEAD(&queue_temp->comp_free);
  3770. + spin_lock_init(&queue_temp->queue_lock);
  3771. + }
  3772. +
  3773. + return queue_head;
  3774. +}
  3775. +
  3776. +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
  3777. + struct platform_device *pdev)
  3778. +{
  3779. + struct device_node *np = pdev->dev.of_node;
  3780. + struct fsl_qdma_queue *status_head;
  3781. + unsigned int status_size;
  3782. + int ret;
  3783. +
  3784. + ret = of_property_read_u32(np, "status-sizes", &status_size);
  3785. + if (ret) {
  3786. + dev_err(&pdev->dev, "Can't get status-sizes.\n");
  3787. + return NULL;
  3788. + }
  3789. + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
  3790. + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
  3791. + dev_err(&pdev->dev, "Get wrong status_size.\n");
  3792. + return NULL;
  3793. + }
  3794. + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
  3795. + GFP_KERNEL);
  3796. + if (!status_head)
  3797. + return NULL;
  3798. +
  3799. + /*
  3800. + * Buffer for the status queue ring
  3801. + */
  3802. + status_head->cq = dma_alloc_coherent(&pdev->dev,
  3803. + sizeof(struct fsl_qdma_ccdf) *
  3804. + status_size,
  3805. + &status_head->bus_addr,
  3806. + GFP_KERNEL);
  3807. + if (!status_head->cq)
  3808. + return NULL;
  3809. + status_head->n_cq = status_size;
  3810. + status_head->virt_head = status_head->cq;
  3811. + status_head->virt_tail = status_head->cq;
  3812. + status_head->comp_pool = NULL;
  3813. +
  3814. + return status_head;
  3815. +}
  3816. +
  3817. +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
  3818. +{
  3819. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  3820. + void __iomem *block = fsl_qdma->block_base;
  3821. + int i, count = 5;
  3822. + u32 reg;
  3823. +
  3824. + /* Disable the command queue and wait for idle state. */
  3825. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  3826. + reg |= FSL_QDMA_DMR_DQD;
  3827. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  3828. + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
  3829. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
  3830. +
  3831. + while (1) {
  3832. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
  3833. + if (!(reg & FSL_QDMA_DSR_DB))
  3834. + break;
  3835. + if (count-- < 0)
  3836. + return -EBUSY;
  3837. + udelay(100);
  3838. + }
  3839. +
  3840. + /* Disable status queue. */
  3841. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
  3842. +
  3843. + /*
  3844. + * Clear the command queue interrupt detect register for all queues.
  3845. + */
  3846. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  3847. +
  3848. + return 0;
  3849. +}
  3850. +
  3851. +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
  3852. +{
  3853. + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
  3854. + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
  3855. + struct fsl_qdma_queue *temp_queue;
  3856. + struct fsl_qdma_comp *fsl_comp;
  3857. + struct fsl_qdma_ccdf *status_addr;
  3858. + struct fsl_qdma_csgf *csgf_src;
  3859. + void __iomem *block = fsl_qdma->block_base;
  3860. + u32 reg, i;
  3861. + bool duplicate, duplicate_handle;
  3862. +
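  3862. + /*
  3862. + * The status queue can report the same completion entry more than
  3862. + * once; the pre_addr/pre_queue globals record the last entry seen
  3862. + * so that a duplicate is consumed below by advancing the status
  3862. + * ring without completing a descriptor a second time.
  3862. + */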
  3863. + while (1) {
  3864. + duplicate = 0;
  3865. + duplicate_handle = 0;
  3866. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
  3867. + if (reg & FSL_QDMA_BSQSR_QE)
  3868. + return 0;
  3869. + status_addr = fsl_status->virt_head;
  3870. + if (status_addr->queue == pre_queue &&
  3871. + status_addr->addr == pre_addr)
  3872. + duplicate = 1;
  3873. +
  3874. + i = status_addr->queue;
  3875. + pre_queue = status_addr->queue;
  3876. + pre_addr = status_addr->addr;
  3877. + temp_queue = fsl_queue + i;
  3878. + spin_lock(&temp_queue->queue_lock);
  3879. + if (list_empty(&temp_queue->comp_used)) {
  3880. + if (duplicate)
  3881. + duplicate_handle = 1;
  3882. + else {
  3883. + spin_unlock(&temp_queue->queue_lock);
  3884. + return -1;
  3885. + }
  3886. + } else {
  3887. + fsl_comp = list_first_entry(&temp_queue->comp_used,
  3888. + struct fsl_qdma_comp,
  3889. + list);
  3890. + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
  3891. + + 2;
  3892. + if (fsl_comp->bus_addr + 16 !=
  3893. + (dma_addr_t)status_addr->addr) {
  3894. + if (duplicate)
  3895. + duplicate_handle = 1;
  3896. + else {
  3897. + spin_unlock(&temp_queue->queue_lock);
  3898. + return -1;
  3899. + }
  3900. + }
  3901. + }
  3902. +
  3903. + if (duplicate_handle) {
  3904. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
  3905. + reg |= FSL_QDMA_BSQMR_DI;
  3906. + status_addr->addr = 0x0;
  3907. + fsl_status->virt_head++;
  3908. + if (fsl_status->virt_head == fsl_status->cq
  3909. + + fsl_status->n_cq)
  3910. + fsl_status->virt_head = fsl_status->cq;
  3911. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  3912. + spin_unlock(&temp_queue->queue_lock);
  3913. + continue;
  3914. + }
  3915. + list_del(&fsl_comp->list);
  3916. +
  3917. + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
  3918. + reg |= FSL_QDMA_BSQMR_DI;
  3919. + status_addr->addr = 0x0;
  3920. + fsl_status->virt_head++;
  3921. + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
  3922. + fsl_status->virt_head = fsl_status->cq;
  3923. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  3924. + spin_unlock(&temp_queue->queue_lock);
  3925. +
  3926. + spin_lock(&fsl_comp->qchan->vchan.lock);
  3927. + vchan_cookie_complete(&fsl_comp->vdesc);
  3928. + fsl_comp->qchan->status = DMA_COMPLETE;
  3929. + spin_unlock(&fsl_comp->qchan->vchan.lock);
  3930. + }
  3931. + return 0;
  3932. +}
  3933. +
  3934. +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
  3935. +{
  3936. + struct fsl_qdma_engine *fsl_qdma = dev_id;
  3937. + unsigned int intr;
  3938. + void __iomem *status = fsl_qdma->status_base;
  3939. +
  3940. + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
  3941. +
  3942. + if (intr)
  3943. + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
  3944. +
  3945. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
  3946. + return IRQ_HANDLED;
  3947. +}
  3948. +
  3949. +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
  3950. +{
  3951. + struct fsl_qdma_engine *fsl_qdma = dev_id;
  3952. + unsigned int intr, reg;
  3953. + void __iomem *block = fsl_qdma->block_base;
  3954. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  3955. +
  3956. + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
  3957. +
  3958. + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
  3959. + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
  3960. +
  3961. + if (intr != 0) {
  3962. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  3963. + reg |= FSL_QDMA_DMR_DQD;
  3964. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  3965. + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
  3966. + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
  3967. + }
  3968. +
  3969. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  3970. +
  3971. + return IRQ_HANDLED;
  3972. +}
  3973. +
  3974. +static int
  3975. +fsl_qdma_irq_init(struct platform_device *pdev,
  3976. + struct fsl_qdma_engine *fsl_qdma)
  3977. +{
  3978. + int ret;
  3979. +
  3980. + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
  3981. + "qdma-error");
  3982. + if (fsl_qdma->error_irq < 0) {
  3983. + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
  3984. + return fsl_qdma->error_irq;
  3985. + }
  3986. +
  3987. + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
  3988. + if (fsl_qdma->queue_irq < 0) {
  3989. + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
  3990. + return fsl_qdma->queue_irq;
  3991. + }
  3992. +
  3993. + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
  3994. + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
  3995. + if (ret) {
  3996. + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
  3997. + return ret;
  3998. + }
  3999. + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
  4000. + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
  4001. + if (ret) {
  4002. + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
  4003. + return ret;
  4004. + }
  4005. +
  4006. + return 0;
  4007. +}
  4008. +
  4009. +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
  4010. +{
  4011. + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
  4012. + struct fsl_qdma_queue *temp;
  4013. + void __iomem *ctrl = fsl_qdma->ctrl_base;
  4014. + void __iomem *status = fsl_qdma->status_base;
  4015. + void __iomem *block = fsl_qdma->block_base;
  4016. + int i, ret;
  4017. + u32 reg;
  4018. +
  4019. + /* Try to halt the qDMA engine first. */
  4020. + ret = fsl_qdma_halt(fsl_qdma);
  4021. + if (ret) {
  4022. + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
  4023. + return ret;
  4024. + }
  4025. +
  4026. + /*
  4027. + * Clear the command queue interrupt detect register for all queues.
  4028. + */
  4029. + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
  4030. +
  4031. + for (i = 0; i < fsl_qdma->n_queues; i++) {
  4032. + temp = fsl_queue + i;
  4033. + /*
  4034. + * Initialize Command Queue registers to point to the first
  4035. + * command descriptor in memory.
  4036. + * Dequeue Pointer Address Registers
  4037. + * Enqueue Pointer Address Registers
  4038. + */
  4039. + qdma_writel(fsl_qdma, temp->bus_addr,
  4040. + block + FSL_QDMA_BCQDPA_SADDR(i));
  4041. + qdma_writel(fsl_qdma, temp->bus_addr,
  4042. + block + FSL_QDMA_BCQEPA_SADDR(i));
  4043. +
  4044. + /* Initialize the queue mode. */
  4045. + reg = FSL_QDMA_BCQMR_EN;
  4046. + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
  4047. + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
  4048. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
  4049. + }
  4050. +
  4051. + /*
  4052. + * Workaround for erratum: ERR010812.
  4053. + * We must enable XOFF to avoid enqueue rejections.
  4054. + * Setting SQCCMR ENTER_WM to 0x20.
  4055. + */
  4056. + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
  4057. + block + FSL_QDMA_SQCCMR);
  4058. + /*
  4059. + * Initialize status queue registers to point to the first
  4060. + * command descriptor in memory.
  4061. + * Dequeue Pointer Address Registers
  4062. + * Enqueue Pointer Address Registers
  4063. + */
  4064. + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
  4065. + block + FSL_QDMA_SQEPAR);
  4066. + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
  4067. + block + FSL_QDMA_SQDPAR);
  4068. + /* Initialize status queue interrupt. */
  4069. + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
  4070. + block + FSL_QDMA_BCQIER(0));
  4071. + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
  4072. + | 0x8000,
  4073. + block + FSL_QDMA_BSQICR);
  4074. + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
  4075. + block + FSL_QDMA_CQIER);
  4076. + /* Initialize controller interrupt register. */
  4077. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
  4078. + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
  4079. +
  4080. + /* Initialize the status queue mode. */
  4081. + reg = FSL_QDMA_BSQMR_EN;
  4082. + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq) - 6);
  4083. + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
  4084. +
  4085. + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
  4086. + reg &= ~FSL_QDMA_DMR_DQD;
  4087. + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
  4088. +
  4089. + return 0;
  4090. +}
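The mode-register size fields encode log2 of the ring size: CQ_SIZE takes ilog2(n_cq) - 6, so the 64-entry minimum (FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) encodes as 0, and CD_THLD takes ilog2(n_cq) - 4. A worked example for a 256-entry command queue:

    /* temp->n_cq = 256, ilog2(256) = 8         */
    /* FSL_QDMA_BCQMR_CQ_SIZE(8 - 6) -> code 2  */
    /* FSL_QDMA_BCQMR_CD_THLD(8 - 4) -> code 4  */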
  4091. +
  4092. +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
  4093. + struct dma_chan *chan,
  4094. + struct scatterlist *dst_sg, unsigned int dst_nents,
  4095. + struct scatterlist *src_sg, unsigned int src_nents,
  4096. + unsigned long flags)
  4097. +{
  4098. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  4099. + struct fsl_qdma_comp *fsl_comp;
  4100. +
  4101. + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
  4102. + dst_nents,
  4103. + src_nents);
  4103. + if (!fsl_comp)
  4103. + return NULL;
  4104. + fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
  4105. +
  4106. + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
  4107. +}
  4108. +
  4109. +static struct dma_async_tx_descriptor *
  4110. +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
  4111. + dma_addr_t src, size_t len, unsigned long flags)
  4112. +{
  4113. + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
  4114. + struct fsl_qdma_comp *fsl_comp;
  4115. +
  4116. + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
  4116. + if (!fsl_comp)
  4116. + return NULL;
  4117. + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
  4118. +
  4119. + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
  4120. +}
  4121. +
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *block = fsl_chan->qdma->block_base;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct virt_dma_desc *vdesc;
+	u32 reg;
+
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+		return;
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	list_del(&vdesc->node);
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+
+	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+		fsl_queue->virt_head = fsl_queue->cq;
+
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+	/* Make sure the descriptor write lands before the EI doorbell. */
+	barrier();
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	reg |= FSL_QDMA_BCQMR_EI;
+	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
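+/*
+ * Report transfer status from the cookie state alone; no residue
+ * accounting is implemented for partially completed descriptors.
+ */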
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
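+/*
+ * virt-dma desc_free callback: return any scatter-gather blocks to the
+ * SG dma_pool, then recycle the completion descriptor onto the queue's
+ * free list instead of freeing it outright.
+ */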
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_queue *fsl_queue;
+	struct fsl_qdma_sg *sg_block;
+	unsigned long flags;
+	unsigned int i;
+
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+	fsl_queue = fsl_comp->qchan->queue;
+
+	if (fsl_comp->sg_block) {
+		for (i = 0; i < fsl_comp->sg_block_src +
+				fsl_comp->sg_block_dst; i++) {
+			sg_block = fsl_comp->sg_block + i;
+			dma_pool_free(fsl_queue->sg_pool,
+				      sg_block->virt_addr,
+				      sg_block->bus_addr);
+		}
+		kfree(fsl_comp->sg_block);
+	}
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
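+/*
+ * Move cookies submitted on this virtual channel to the issued list
+ * and try to enqueue them immediately. The queue lock is taken first
+ * because several channels may map onto the same hardware queue.
+ */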
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	spin_lock(&fsl_chan->vchan.lock);
+	if (vchan_issue_pending(&fsl_chan->vchan))
+		fsl_qdma_enqueue_desc(fsl_chan);
+	spin_unlock(&fsl_chan->vchan.lock);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
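+/*
+ * Probe expects three register regions (controller, status and block,
+ * in that order), "channels" and "queues" counts in the device tree,
+ * and an optional "big-endian" property for register access.
+ */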
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct fsl_qdma_chan *fsl_chan;
+	struct resource *res;
+	unsigned int len, chans, queues;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get channels.\n");
+		return ret;
+	}
+
+	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "queues", &queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queues.\n");
+		return ret;
+	}
+
+	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
+	if (!fsl_qdma->queue)
+		return -ENOMEM;
+
+	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
+	if (!fsl_qdma->status)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	fsl_qdma->n_queues = queues;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->ctrl_base))
+		return PTR_ERR(fsl_qdma->ctrl_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->status_base))
+		return PTR_ERR(fsl_qdma->status_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->block_base))
+		return PTR_ERR(fsl_qdma->block_base);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		INIT_LIST_HEAD(&fsl_chan->qcomp);
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+	for (i = 0; i < fsl_qdma->n_queues; i++)
+		fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_alloc_chan_resources
+		= fsl_qdma_alloc_chan_resources;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "Can't set DMA mask.\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
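+/*
+ * Tear down in reverse: unregister from the dmaengine core, then free
+ * every completion descriptor still on the used and free lists before
+ * releasing the command and status queue rings themselves.
+ */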
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *status = fsl_qdma->status;
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+	int i;
+
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+	/* Free descriptor areas */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		queue_temp = fsl_qdma->queue + i;
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					 &queue_temp->comp_used, list) {
+			dma_pool_free(queue_temp->comp_pool,
+				      comp_temp->virt_addr,
+				      comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					 &queue_temp->comp_free, list) {
+			dma_pool_free(queue_temp->comp_pool,
+				      comp_temp->virt_addr,
+				      comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
+				  queue_temp->n_cq, queue_temp->cq,
+				  queue_temp->bus_addr);
+		dma_pool_destroy(queue_temp->comp_pool);
+	}
+
+	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
+			  status->n_cq, status->cq, status->bus_addr);
+	return 0;
+}
+
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls1021a-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver = {
+		.name = "fsl-qdma",
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe = fsl_qdma_probe,
+	.remove = fsl_qdma_remove,
+};
+
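+/*
+ * Registered at subsys_initcall level rather than module_init,
+ * presumably so the engine is available before device_initcall-time
+ * clients try to request DMA channels.
+ */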
+static int __init fsl_qdma_init(void)
+{
+	return platform_driver_register(&fsl_qdma_driver);
+}
+subsys_initcall(fsl_qdma_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+	platform_driver_unregister(&fsl_qdma_driver);
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("Freescale qDMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
2.14.1