820-sec-support-layerscape.patch 397 KB

From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
From: Biwen Li <[email protected]>
Date: Tue, 30 Oct 2018 18:28:03 +0800
Subject: [PATCH 37/40] sec: support layerscape

This is an integrated patch of sec for layerscape

Signed-off-by: Alex Porosanu <[email protected]>
Signed-off-by: Cristian Stoica <[email protected]>
Signed-off-by: Guanhua Gao <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Horia Geantă <[email protected]>
Signed-off-by: Horia Geantă <[email protected]>
Signed-off-by: Radu Alexe <[email protected]>
Signed-off-by: Tudor Ambarus <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
Signed-off-by: Zhao Qiang <[email protected]>
Signed-off-by: Biwen Li <[email protected]>
---
 crypto/Kconfig | 20 +
 crypto/Makefile | 1 +
 crypto/tcrypt.c | 27 +-
 crypto/testmgr.c | 244 ++
 crypto/testmgr.h | 219 ++
 crypto/tls.c | 607 +++
 drivers/crypto/Makefile | 2 +-
 drivers/crypto/caam/Kconfig | 57 +-
 drivers/crypto/caam/Makefile | 10 +-
 drivers/crypto/caam/caamalg.c | 131 +-
 drivers/crypto/caam/caamalg_desc.c | 761 +++-
 drivers/crypto/caam/caamalg_desc.h | 47 +-
 drivers/crypto/caam/caamalg_qi.c | 927 ++++-
 drivers/crypto/caam/caamalg_qi2.c | 5691 +++++++++++++++++++++++++++
 drivers/crypto/caam/caamalg_qi2.h | 274 ++
 drivers/crypto/caam/caamhash.c | 132 +-
 drivers/crypto/caam/caamhash_desc.c | 108 +
 drivers/crypto/caam/caamhash_desc.h | 49 +
 drivers/crypto/caam/compat.h | 2 +
 drivers/crypto/caam/ctrl.c | 23 +-
 drivers/crypto/caam/desc.h | 62 +-
 drivers/crypto/caam/desc_constr.h | 52 +-
 drivers/crypto/caam/dpseci.c | 865 ++++
 drivers/crypto/caam/dpseci.h | 433 ++
 drivers/crypto/caam/dpseci_cmd.h | 287 ++
 drivers/crypto/caam/error.c | 75 +-
 drivers/crypto/caam/error.h | 6 +-
 drivers/crypto/caam/intern.h | 1 +
 drivers/crypto/caam/jr.c | 42 +
 drivers/crypto/caam/jr.h | 2 +
 drivers/crypto/caam/key_gen.c | 30 -
 drivers/crypto/caam/key_gen.h | 30 +
 drivers/crypto/caam/qi.c | 85 +-
 drivers/crypto/caam/qi.h | 2 +-
 drivers/crypto/caam/regs.h | 2 +
 drivers/crypto/caam/sg_sw_qm.h | 46 +-
 drivers/crypto/talitos.c | 8 +
 37 files changed, 11006 insertions(+), 354 deletions(-)
 create mode 100644 crypto/tls.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
 create mode 100644 drivers/crypto/caam/caamhash_desc.c
 create mode 100644 drivers/crypto/caam/caamhash_desc.h
 create mode 100644 drivers/crypto/caam/dpseci.c
 create mode 100644 drivers/crypto/caam/dpseci.h
 create mode 100644 drivers/crypto/caam/dpseci_cmd.h

  64. --- a/crypto/Kconfig
  65. +++ b/crypto/Kconfig
  66. @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
  67. a sequence number xored with a salt. This is the default
  68. algorithm for CBC.
  69. +config CRYPTO_TLS
  70. + tristate "TLS support"
  71. + select CRYPTO_AEAD
  72. + select CRYPTO_BLKCIPHER
  73. + select CRYPTO_MANAGER
  74. + select CRYPTO_HASH
  75. + select CRYPTO_NULL
  76. + select CRYPTO_AUTHENC
  77. + help
  78. + Support for TLS 1.0 record encryption and decryption
  79. +
  80. + This module adds support for encryption/decryption of TLS 1.0 frames
  81. + using blockcipher algorithms. The name of the resulting algorithm is
  82. + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
  83. + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
  84. + accelerated versions will be used automatically if available.
  85. +
  86. + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
  87. + operations through AF_ALG or cryptodev interfaces.
  88. +
  89. comment "Block modes"
  90. config CRYPTO_CBC
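
The CRYPTO_TLS help text above mentions user-space offload through AF_ALG. A minimal sketch of binding the tls10 template through the AF_ALG "aead" interface could look as follows (illustration only, not part of the patch; it assumes a little-endian host, that the template is reachable via AF_ALG, and the usual SOL_ALG fallback value). The key blob follows the authenc convention used by the testmgr.h vectors later in this patch.

    /* Illustration only -- not part of this patch. */
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "aead",
            .salg_name   = "tls10(hmac(sha1),cbc(aes))",
        };
        /* authenc-style key blob, as in the testmgr.h vectors:
         * rtattr header + big-endian enc key length + 20-byte HMAC key
         * + 16-byte AES key (little-endian host assumed for the header).
         */
        static const unsigned char key[] =
            "\x08\x00\x01\x00"      /* rta_len = 8, rta_type = 1 */
            "\x00\x00\x00\x10"      /* enc key length = 16 */
            "authenticationkey20benckeyis16_bytes";
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)))
            return 1;
        if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key) - 1))
            return 1;
        /* ICV size: SHA-1 digest length */
        if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20))
            return 1;

        opfd = accept(tfmfd, NULL, 0);
        /* TLS records would then be submitted with sendmsg() using
         * ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control
         * messages, and the result read back from opfd.
         */
        close(opfd);
        close(tfmfd);
        return 0;
    }
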
  91. --- a/crypto/Makefile
  92. +++ b/crypto/Makefile
  93. @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
  94. obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
  95. obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
  96. obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
  97. +obj-$(CONFIG_CRYPTO_TLS) += tls.o
  98. obj-$(CONFIG_CRYPTO_LZO) += lzo.o
  99. obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
  100. obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
  101. --- a/crypto/tcrypt.c
  102. +++ b/crypto/tcrypt.c
  103. @@ -76,7 +76,7 @@ static char *check[] = {
  104. "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
  105. "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
  106. "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
  107. - NULL
  108. + "rsa", NULL
  109. };
  110. struct tcrypt_result {
  111. @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
  112. iv);
  113. aead_request_set_ad(req, aad_size);
  114. - if (secs)
  115. + if (secs) {
  116. ret = test_aead_jiffies(req, enc, *b_size,
  117. secs);
  118. - else
  119. + cond_resched();
  120. + } else {
  121. ret = test_aead_cycles(req, enc, *b_size);
  122. + }
  123. if (ret) {
  124. pr_err("%s() failed return code=%d\n", e, ret);
  125. @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
  126. ahash_request_set_crypt(req, sg, output, speed[i].plen);
  127. - if (secs)
  128. + if (secs) {
  129. ret = test_ahash_jiffies(req, speed[i].blen,
  130. speed[i].plen, output, secs);
  131. - else
  132. + cond_resched();
  133. + } else {
  134. ret = test_ahash_cycles(req, speed[i].blen,
  135. speed[i].plen, output);
  136. + }
  137. if (ret) {
  138. pr_err("hashing failed ret=%d\n", ret);
  139. @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
  140. skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
  141. - if (secs)
  142. + if (secs) {
  143. ret = test_acipher_jiffies(req, enc,
  144. *b_size, secs);
  145. - else
  146. + cond_resched();
  147. + } else {
  148. ret = test_acipher_cycles(req, enc,
  149. *b_size);
  150. + }
  151. if (ret) {
  152. pr_err("%s() failed flags=%x\n", e,
  153. @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
  154. ret += tcrypt_test("hmac(sha3-512)");
  155. break;
  156. + case 115:
  157. + ret += tcrypt_test("rsa");
  158. + break;
  159. +
  160. case 150:
  161. ret += tcrypt_test("ansi_cprng");
  162. break;
  163. @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
  164. case 190:
  165. ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
  166. break;
  167. + case 191:
  168. + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
  169. + break;
  170. case 200:
  171. test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
  172. speed_template_16_24_32);
  173. --- a/crypto/testmgr.c
  174. +++ b/crypto/testmgr.c
  175. @@ -117,6 +117,13 @@ struct drbg_test_suite {
  176. unsigned int count;
  177. };
  178. +struct tls_test_suite {
  179. + struct {
  180. + struct tls_testvec *vecs;
  181. + unsigned int count;
  182. + } enc, dec;
  183. +};
  184. +
  185. struct akcipher_test_suite {
  186. const struct akcipher_testvec *vecs;
  187. unsigned int count;
  188. @@ -140,6 +147,7 @@ struct alg_test_desc {
  189. struct hash_test_suite hash;
  190. struct cprng_test_suite cprng;
  191. struct drbg_test_suite drbg;
  192. + struct tls_test_suite tls;
  193. struct akcipher_test_suite akcipher;
  194. struct kpp_test_suite kpp;
  195. } suite;
  196. @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
  197. return 0;
  198. }
  199. +static int __test_tls(struct crypto_aead *tfm, int enc,
  200. + struct tls_testvec *template, unsigned int tcount,
  201. + const bool diff_dst)
  202. +{
  203. + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
  204. + unsigned int i, k, authsize;
  205. + char *q;
  206. + struct aead_request *req;
  207. + struct scatterlist *sg;
  208. + struct scatterlist *sgout;
  209. + const char *e, *d;
  210. + struct tcrypt_result result;
  211. + void *input;
  212. + void *output;
  213. + void *assoc;
  214. + char *iv;
  215. + char *key;
  216. + char *xbuf[XBUFSIZE];
  217. + char *xoutbuf[XBUFSIZE];
  218. + char *axbuf[XBUFSIZE];
  219. + int ret = -ENOMEM;
  220. +
  221. + if (testmgr_alloc_buf(xbuf))
  222. + goto out_noxbuf;
  223. +
  224. + if (diff_dst && testmgr_alloc_buf(xoutbuf))
  225. + goto out_nooutbuf;
  226. +
  227. + if (testmgr_alloc_buf(axbuf))
  228. + goto out_noaxbuf;
  229. +
  230. + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
  231. + if (!iv)
  232. + goto out_noiv;
  233. +
  234. + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
  235. + if (!key)
  236. + goto out_nokey;
  237. +
  238. + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
  239. + if (!sg)
  240. + goto out_nosg;
  241. +
  242. + sgout = sg + 8;
  243. +
  244. + d = diff_dst ? "-ddst" : "";
  245. + e = enc ? "encryption" : "decryption";
  246. +
  247. + init_completion(&result.completion);
  248. +
  249. + req = aead_request_alloc(tfm, GFP_KERNEL);
  250. + if (!req) {
  251. + pr_err("alg: tls%s: Failed to allocate request for %s\n",
  252. + d, algo);
  253. + goto out;
  254. + }
  255. +
  256. + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
  257. + tcrypt_complete, &result);
  258. +
  259. + for (i = 0; i < tcount; i++) {
  260. + input = xbuf[0];
  261. + assoc = axbuf[0];
  262. +
  263. + ret = -EINVAL;
  264. + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
  265. + template[i].alen > PAGE_SIZE))
  266. + goto out;
  267. +
  268. + memcpy(assoc, template[i].assoc, template[i].alen);
  269. + memcpy(input, template[i].input, template[i].ilen);
  270. +
  271. + if (template[i].iv)
  272. + memcpy(iv, template[i].iv, MAX_IVLEN);
  273. + else
  274. + memset(iv, 0, MAX_IVLEN);
  275. +
  276. + crypto_aead_clear_flags(tfm, ~0);
  277. +
  278. + if (template[i].klen > MAX_KEYLEN) {
  279. + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
  280. + d, i, algo, template[i].klen, MAX_KEYLEN);
  281. + ret = -EINVAL;
  282. + goto out;
  283. + }
  284. + memcpy(key, template[i].key, template[i].klen);
  285. +
  286. + ret = crypto_aead_setkey(tfm, key, template[i].klen);
  287. + if (!ret == template[i].fail) {
  288. + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
  289. + d, i, algo, crypto_aead_get_flags(tfm));
  290. + goto out;
  291. + } else if (ret)
  292. + continue;
  293. +
  294. + authsize = 20;
  295. + ret = crypto_aead_setauthsize(tfm, authsize);
  296. + if (ret) {
  297. + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
  298. + d, authsize, i, algo);
  299. + goto out;
  300. + }
  301. +
  302. + k = !!template[i].alen;
  303. + sg_init_table(sg, k + 1);
  304. + sg_set_buf(&sg[0], assoc, template[i].alen);
  305. + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
  306. + template[i].ilen));
  307. + output = input;
  308. +
  309. + if (diff_dst) {
  310. + sg_init_table(sgout, k + 1);
  311. + sg_set_buf(&sgout[0], assoc, template[i].alen);
  312. +
  313. + output = xoutbuf[0];
  314. + sg_set_buf(&sgout[k], output,
  315. + (enc ? template[i].rlen : template[i].ilen));
  316. + }
  317. +
  318. + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
  319. + template[i].ilen, iv);
  320. +
  321. + aead_request_set_ad(req, template[i].alen);
  322. +
  323. + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
  324. +
  325. + switch (ret) {
  326. + case 0:
  327. + if (template[i].novrfy) {
  328. + /* verification was supposed to fail */
  329. + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
  330. + d, e, i, algo);
  331. + /* so really, we got a bad message */
  332. + ret = -EBADMSG;
  333. + goto out;
  334. + }
  335. + break;
  336. + case -EINPROGRESS:
  337. + case -EBUSY:
  338. + wait_for_completion(&result.completion);
  339. + reinit_completion(&result.completion);
  340. + ret = result.err;
  341. + if (!ret)
  342. + break;
  343. + case -EBADMSG:
  344. + /* verification failure was expected */
  345. + if (template[i].novrfy)
  346. + continue;
  347. + /* fall through */
  348. + default:
  349. + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
  350. + d, e, i, algo, -ret);
  351. + goto out;
  352. + }
  353. +
  354. + q = output;
  355. + if (memcmp(q, template[i].result, template[i].rlen)) {
  356. + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
  357. + d, i, e, algo);
  358. + hexdump(q, template[i].rlen);
  359. + pr_err("should be:\n");
  360. + hexdump(template[i].result, template[i].rlen);
  361. + ret = -EINVAL;
  362. + goto out;
  363. + }
  364. + }
  365. +
  366. +out:
  367. + aead_request_free(req);
  368. +
  369. + kfree(sg);
  370. +out_nosg:
  371. + kfree(key);
  372. +out_nokey:
  373. + kfree(iv);
  374. +out_noiv:
  375. + testmgr_free_buf(axbuf);
  376. +out_noaxbuf:
  377. + if (diff_dst)
  378. + testmgr_free_buf(xoutbuf);
  379. +out_nooutbuf:
  380. + testmgr_free_buf(xbuf);
  381. +out_noxbuf:
  382. + return ret;
  383. +}
  384. +
  385. +static int test_tls(struct crypto_aead *tfm, int enc,
  386. + struct tls_testvec *template, unsigned int tcount)
  387. +{
  388. + int ret;
  389. + /* test 'dst == src' case */
  390. + ret = __test_tls(tfm, enc, template, tcount, false);
  391. + if (ret)
  392. + return ret;
  393. + /* test 'dst != src' case */
  394. + return __test_tls(tfm, enc, template, tcount, true);
  395. +}
  396. +
  397. +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
  398. + u32 type, u32 mask)
  399. +{
  400. + struct crypto_aead *tfm;
  401. + int err = 0;
  402. +
  403. + tfm = crypto_alloc_aead(driver, type, mask);
  404. + if (IS_ERR(tfm)) {
  405. + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
  406. + driver, PTR_ERR(tfm));
  407. + return PTR_ERR(tfm);
  408. + }
  409. +
  410. + if (desc->suite.tls.enc.vecs) {
  411. + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
  412. + desc->suite.tls.enc.count);
  413. + if (err)
  414. + goto out;
  415. + }
  416. +
  417. + if (!err && desc->suite.tls.dec.vecs)
  418. + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
  419. + desc->suite.tls.dec.count);
  420. +
  421. +out:
  422. + crypto_free_aead(tfm);
  423. + return err;
  424. +}
  425. +
  426. static int test_cipher(struct crypto_cipher *tfm, int enc,
  427. const struct cipher_testvec *template,
  428. unsigned int tcount)
  429. @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
  430. .hash = __VECS(tgr192_tv_template)
  431. }
  432. }, {
  433. + .alg = "tls10(hmac(sha1),cbc(aes))",
  434. + .test = alg_test_tls,
  435. + .suite = {
  436. + .tls = {
  437. + .enc = __VECS(tls_enc_tv_template),
  438. + .dec = __VECS(tls_dec_tv_template)
  439. + }
  440. + }
  441. + }, {
  442. .alg = "vmac(aes)",
  443. .test = alg_test_hash,
  444. .suite = {
  445. --- a/crypto/testmgr.h
  446. +++ b/crypto/testmgr.h
  447. @@ -125,6 +125,20 @@ struct drbg_testvec {
  448. size_t expectedlen;
  449. };
  450. +struct tls_testvec {
  451. + char *key; /* wrapped keys for encryption and authentication */
  452. + char *iv; /* initialization vector */
  453. + char *input; /* input data */
  454. + char *assoc; /* associated data: seq num, type, version, input len */
  455. + char *result; /* result data */
  456. + unsigned char fail; /* the test failure is expected */
  457. + unsigned char novrfy; /* dec verification failure expected */
  458. + unsigned char klen; /* key length */
  459. + unsigned short ilen; /* input data length */
  460. + unsigned short alen; /* associated data length */
  461. + unsigned short rlen; /* result length */
  462. +};
  463. +
  464. struct akcipher_testvec {
  465. const unsigned char *key;
  466. const unsigned char *m;
  467. @@ -153,6 +167,211 @@ struct kpp_testvec {
  468. static const char zeroed_string[48];
  469. /*
  470. + * TLS1.0 synthetic test vectors
  471. + */
  472. +static struct tls_testvec tls_enc_tv_template[] = {
  473. + {
  474. +#ifdef __LITTLE_ENDIAN
  475. + .key = "\x08\x00" /* rta length */
  476. + "\x01\x00" /* rta type */
  477. +#else
  478. + .key = "\x00\x08" /* rta length */
  479. + "\x00\x01" /* rta type */
  480. +#endif
  481. + "\x00\x00\x00\x10" /* enc key length */
  482. + "authenticationkey20benckeyis16_bytes",
  483. + .klen = 8 + 20 + 16,
  484. + .iv = "iv0123456789abcd",
  485. + .input = "Single block msg",
  486. + .ilen = 16,
  487. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  488. + "\x00\x03\x01\x00\x10",
  489. + .alen = 13,
  490. + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
  491. + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
  492. + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
  493. + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
  494. + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
  495. + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
  496. + .rlen = 16 + 20 + 12,
  497. + }, {
  498. +#ifdef __LITTLE_ENDIAN
  499. + .key = "\x08\x00" /* rta length */
  500. + "\x01\x00" /* rta type */
  501. +#else
  502. + .key = "\x00\x08" /* rta length */
  503. + "\x00\x01" /* rta type */
  504. +#endif
  505. + "\x00\x00\x00\x10" /* enc key length */
  506. + "authenticationkey20benckeyis16_bytes",
  507. + .klen = 8 + 20 + 16,
  508. + .iv = "iv0123456789abcd",
  509. + .input = "",
  510. + .ilen = 0,
  511. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  512. + "\x00\x03\x01\x00\x00",
  513. + .alen = 13,
  514. + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
  515. + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
  516. + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
  517. + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
  518. + .rlen = 20 + 12,
  519. + }, {
  520. +#ifdef __LITTLE_ENDIAN
  521. + .key = "\x08\x00" /* rta length */
  522. + "\x01\x00" /* rta type */
  523. +#else
  524. + .key = "\x00\x08" /* rta length */
  525. + "\x00\x01" /* rta type */
  526. +#endif
  527. + "\x00\x00\x00\x10" /* enc key length */
  528. + "authenticationkey20benckeyis16_bytes",
  529. + .klen = 8 + 20 + 16,
  530. + .iv = "iv0123456789abcd",
  531. + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
  532. + " plaintext285 bytes plaintext285 bytes plaintext285"
  533. + " bytes plaintext285 bytes plaintext285 bytes"
  534. + " plaintext285 bytes plaintext285 bytes plaintext285"
  535. + " bytes plaintext285 bytes plaintext285 bytes"
  536. + " plaintext285 bytes plaintext285 bytes plaintext285"
  537. + " bytes plaintext285 bytes plaintext",
  538. + .ilen = 285,
  539. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  540. + "\x00\x03\x01\x01\x1d",
  541. + .alen = 13,
  542. + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
  543. + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
  544. + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
  545. + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
  546. + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
  547. + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
  548. + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
  549. + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
  550. + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
  551. + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
  552. + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
  553. + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
  554. + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
  555. + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
  556. + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
  557. + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
  558. + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
  559. + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
  560. + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
  561. + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
  562. + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
  563. + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
  564. + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
  565. + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
  566. + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
  567. + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
  568. + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
  569. + .rlen = 285 + 20 + 15,
  570. + }
  571. +};
  572. +
  573. +static struct tls_testvec tls_dec_tv_template[] = {
  574. + {
  575. +#ifdef __LITTLE_ENDIAN
  576. + .key = "\x08\x00" /* rta length */
  577. + "\x01\x00" /* rta type */
  578. +#else
  579. + .key = "\x00\x08" /* rta length */
  580. + "\x00\x01" /* rta type */
  581. +#endif
  582. + "\x00\x00\x00\x10" /* enc key length */
  583. + "authenticationkey20benckeyis16_bytes",
  584. + .klen = 8 + 20 + 16,
  585. + .iv = "iv0123456789abcd",
  586. + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
  587. + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
  588. + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
  589. + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
  590. + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
  591. + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
  592. + .ilen = 16 + 20 + 12,
  593. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  594. + "\x00\x03\x01\x00\x30",
  595. + .alen = 13,
  596. + .result = "Single block msg",
  597. + .rlen = 16,
  598. + }, {
  599. +#ifdef __LITTLE_ENDIAN
  600. + .key = "\x08\x00" /* rta length */
  601. + "\x01\x00" /* rta type */
  602. +#else
  603. + .key = "\x00\x08" /* rta length */
  604. + "\x00\x01" /* rta type */
  605. +#endif
  606. + "\x00\x00\x00\x10" /* enc key length */
  607. + "authenticationkey20benckeyis16_bytes",
  608. + .klen = 8 + 20 + 16,
  609. + .iv = "iv0123456789abcd",
  610. + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
  611. + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
  612. + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
  613. + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
  614. + .ilen = 20 + 12,
  615. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  616. + "\x00\x03\x01\x00\x20",
  617. + .alen = 13,
  618. + .result = "",
  619. + .rlen = 0,
  620. + }, {
  621. +#ifdef __LITTLE_ENDIAN
  622. + .key = "\x08\x00" /* rta length */
  623. + "\x01\x00" /* rta type */
  624. +#else
  625. + .key = "\x00\x08" /* rta length */
  626. + "\x00\x01" /* rta type */
  627. +#endif
  628. + "\x00\x00\x00\x10" /* enc key length */
  629. + "authenticationkey20benckeyis16_bytes",
  630. + .klen = 8 + 20 + 16,
  631. + .iv = "iv0123456789abcd",
  632. + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
  633. + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
  634. + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
  635. + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
  636. + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
  637. + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
  638. + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
  639. + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
  640. + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
  641. + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
  642. + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
  643. + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
  644. + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
  645. + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
  646. + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
  647. + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
  648. + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
  649. + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
  650. + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
  651. + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
  652. + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
  653. + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
  654. + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
  655. + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
  656. + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
  657. + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
  658. + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
  659. +
  660. + .ilen = 285 + 20 + 15,
  661. + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
  662. + "\x00\x03\x01\x01\x40",
  663. + .alen = 13,
  664. + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
  665. + " plaintext285 bytes plaintext285 bytes plaintext285"
  666. + " bytes plaintext285 bytes plaintext285 bytes"
  667. + " plaintext285 bytes plaintext285 bytes plaintext285"
  668. + " bytes plaintext285 bytes plaintext285 bytes"
  669. + " plaintext285 bytes plaintext285 bytes plaintext",
  670. + .rlen = 285,
  671. + }
  672. +};
  673. +
  674. +/*
  675. * RSA test vectors. Borrowed from openSSL.
  676. */
  677. static const struct akcipher_testvec rsa_tv_template[] = {
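
The opaque .key strings in the tls_enc/tls_dec vectors above follow the generic authenc key layout parsed by crypto_authenc_extractkeys(): an rtattr header (rta_len = 8, rta_type = CRYPTO_AUTHENC_KEYA_PARAM), a big-endian encryption-key length, then the 20-byte HMAC key followed by the 16-byte AES key, giving klen = 8 + 20 + 16. A short sketch of how such a blob could be assembled in kernel code (illustration only, not part of the patch):

    /* Illustration only -- not part of this patch. */
    #include <linux/kernel.h>
    #include <linux/rtnetlink.h>
    #include <linux/string.h>
    #include <crypto/authenc.h>

    /* Assemble an authenc-format key blob as expected by
     * crypto_authenc_extractkeys(): rtattr header, big-endian enc key
     * length, auth key, then enc key. Returns the total blob length.
     */
    static unsigned int tls_build_key_blob(u8 *blob,
                                           const u8 *authkey, unsigned int authkeylen,
                                           const u8 *enckey, unsigned int enckeylen)
    {
        struct rtattr *rta = (struct rtattr *)blob;
        struct crypto_authenc_key_param *param;

        rta->rta_len = RTA_LENGTH(sizeof(*param));      /* 8 */
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;      /* 1 */

        /* encryption key length is stored big endian inside the attribute */
        param = RTA_DATA(rta);
        param->enckeylen = cpu_to_be32(enckeylen);

        /* auth key first, then enc key, in the order extractkeys() expects */
        memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
        memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
               enckey, enckeylen);

        return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
    }
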
  678. --- /dev/null
  679. +++ b/crypto/tls.c
  680. @@ -0,0 +1,607 @@
  681. +/*
  682. + * Copyright 2013 Freescale Semiconductor, Inc.
  683. + * Copyright 2017 NXP Semiconductor, Inc.
  684. + *
  685. + * This program is free software; you can redistribute it and/or modify it
  686. + * under the terms of the GNU General Public License as published by the Free
  687. + * Software Foundation; either version 2 of the License, or (at your option)
  688. + * any later version.
  689. + *
  690. + */
  691. +
  692. +#include <crypto/internal/aead.h>
  693. +#include <crypto/internal/hash.h>
  694. +#include <crypto/internal/skcipher.h>
  695. +#include <crypto/authenc.h>
  696. +#include <crypto/null.h>
  697. +#include <crypto/scatterwalk.h>
  698. +#include <linux/err.h>
  699. +#include <linux/init.h>
  700. +#include <linux/module.h>
  701. +#include <linux/rtnetlink.h>
  702. +
  703. +struct tls_instance_ctx {
  704. + struct crypto_ahash_spawn auth;
  705. + struct crypto_skcipher_spawn enc;
  706. +};
  707. +
  708. +struct crypto_tls_ctx {
  709. + unsigned int reqoff;
  710. + struct crypto_ahash *auth;
  711. + struct crypto_skcipher *enc;
  712. + struct crypto_skcipher *null;
  713. +};
  714. +
  715. +struct tls_request_ctx {
  716. + /*
  717. + * cryptlen holds the payload length in the case of encryption or
  718. + * payload_len + icv_len + padding_len in case of decryption
  719. + */
  720. + unsigned int cryptlen;
  721. + /* working space for partial results */
  722. + struct scatterlist tmp[2];
  723. + struct scatterlist cipher[2];
  724. + struct scatterlist dst[2];
  725. + char tail[];
  726. +};
  727. +
  728. +struct async_op {
  729. + struct completion completion;
  730. + int err;
  731. +};
  732. +
  733. +static void tls_async_op_done(struct crypto_async_request *req, int err)
  734. +{
  735. + struct async_op *areq = req->data;
  736. +
  737. + if (err == -EINPROGRESS)
  738. + return;
  739. +
  740. + areq->err = err;
  741. + complete(&areq->completion);
  742. +}
  743. +
  744. +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
  745. + unsigned int keylen)
  746. +{
  747. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
  748. + struct crypto_ahash *auth = ctx->auth;
  749. + struct crypto_skcipher *enc = ctx->enc;
  750. + struct crypto_authenc_keys keys;
  751. + int err = -EINVAL;
  752. +
  753. + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  754. + goto badkey;
  755. +
  756. + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
  757. + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
  758. + CRYPTO_TFM_REQ_MASK);
  759. + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
  760. + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
  761. + CRYPTO_TFM_RES_MASK);
  762. +
  763. + if (err)
  764. + goto out;
  765. +
  766. + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
  767. + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
  768. + CRYPTO_TFM_REQ_MASK);
  769. + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
  770. + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
  771. + CRYPTO_TFM_RES_MASK);
  772. +
  773. +out:
  774. + return err;
  775. +
  776. +badkey:
  777. + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
  778. + goto out;
  779. +}
  780. +
  781. +/**
  782. + * crypto_tls_genicv - Calculate hmac digest for a TLS record
  783. + * @hash: (output) buffer to save the digest into
  784. + * @src: (input) scatterlist with the assoc and payload data
  785. + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
  786. + * @req: (input) aead request
  787. + **/
  788. +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
  789. + unsigned int srclen, struct aead_request *req)
  790. +{
  791. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  792. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
  793. + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
  794. + struct async_op ahash_op;
  795. + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
  796. + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
  797. + int err = -EBADMSG;
  798. +
  799. + /* Bail out if the request assoc len is 0 */
  800. + if (!req->assoclen)
  801. + return err;
  802. +
  803. + init_completion(&ahash_op.completion);
  804. +
  805. + /* the hash transform to be executed comes from the original request */
  806. + ahash_request_set_tfm(ahreq, ctx->auth);
  807. + /* prepare the hash request with input data and result pointer */
  808. + ahash_request_set_crypt(ahreq, src, hash, srclen);
  809. + /* set the notifier for when the async hash function returns */
  810. + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
  811. + tls_async_op_done, &ahash_op);
  812. +
  813. + /* Calculate the digest on the given data. The result is put in hash */
  814. + err = crypto_ahash_digest(ahreq);
  815. + if (err == -EINPROGRESS) {
  816. + err = wait_for_completion_interruptible(&ahash_op.completion);
  817. + if (!err)
  818. + err = ahash_op.err;
  819. + }
  820. +
  821. + return err;
  822. +}
  823. +
  824. +/**
  825. + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
  826. + * @hash: (output) buffer to save the digest and padding into
  827. + * @phashlen: (output) the size of digest + padding
  828. + * @req: (input) aead request
  829. + **/
  830. +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
  831. + struct aead_request *req)
  832. +{
  833. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  834. + unsigned int hash_size = crypto_aead_authsize(tls);
  835. + unsigned int block_size = crypto_aead_blocksize(tls);
  836. + unsigned int srclen = req->cryptlen + hash_size;
  837. + unsigned int icvlen = req->cryptlen + req->assoclen;
  838. + unsigned int padlen;
  839. + int err;
  840. +
  841. + err = crypto_tls_genicv(hash, req->src, icvlen, req);
  842. + if (err)
  843. + goto out;
  844. +
  845. + /* add padding after digest */
  846. + padlen = block_size - (srclen % block_size);
  847. + memset(hash + hash_size, padlen - 1, padlen);
  848. +
  849. + *phashlen = hash_size + padlen;
  850. +out:
  851. + return err;
  852. +}
  853. +
  854. +static int crypto_tls_copy_data(struct aead_request *req,
  855. + struct scatterlist *src,
  856. + struct scatterlist *dst,
  857. + unsigned int len)
  858. +{
  859. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  860. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
  861. + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
  862. +
  863. + skcipher_request_set_tfm(skreq, ctx->null);
  864. + skcipher_request_set_callback(skreq, aead_request_flags(req),
  865. + NULL, NULL);
  866. + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
  867. +
  868. + return crypto_skcipher_encrypt(skreq);
  869. +}
  870. +
  871. +static int crypto_tls_encrypt(struct aead_request *req)
  872. +{
  873. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  874. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
  875. + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
  876. + struct skcipher_request *skreq;
  877. + struct scatterlist *cipher = treq_ctx->cipher;
  878. + struct scatterlist *tmp = treq_ctx->tmp;
  879. + struct scatterlist *sg, *src, *dst;
  880. + unsigned int cryptlen, phashlen;
  881. + u8 *hash = treq_ctx->tail;
  882. + int err;
  883. +
  884. + /*
  885. + * The hash result is saved at the beginning of the tls request ctx
  886. + * and is aligned as required by the hash transform. Enough space was
  887. + * allocated in crypto_tls_init_tfm to accommodate the difference. The
  888. + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
  889. + * the result is not overwritten by the second (cipher) request.
  890. + */
  891. + hash = (u8 *)ALIGN((unsigned long)hash +
  892. + crypto_ahash_alignmask(ctx->auth),
  893. + crypto_ahash_alignmask(ctx->auth) + 1);
  894. +
  895. + /*
  896. + * STEP 1: create ICV together with necessary padding
  897. + */
  898. + err = crypto_tls_gen_padicv(hash, &phashlen, req);
  899. + if (err)
  900. + return err;
  901. +
  902. + /*
  903. + * STEP 2: Hash and padding are combined with the payload
  904. + * depending on the form it arrives. Scatter tables must have at least
  905. + * one page of data before chaining with another table and can't have
  906. + * an empty data page. The following code addresses these requirements.
  907. + *
  908. + * If the payload is empty, only the hash is encrypted, otherwise the
  909. + * payload scatterlist is merged with the hash. A special merging case
  910. + * is when the payload has only one page of data. In that case the
  911. + * payload page is moved to another scatterlist and prepared there for
  912. + * encryption.
  913. + */
  914. + if (req->cryptlen) {
  915. + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
  916. +
  917. + sg_init_table(cipher, 2);
  918. + sg_set_buf(cipher + 1, hash, phashlen);
  919. +
  920. + if (sg_is_last(src)) {
  921. + sg_set_page(cipher, sg_page(src), req->cryptlen,
  922. + src->offset);
  923. + src = cipher;
  924. + } else {
  925. + unsigned int rem_len = req->cryptlen;
  926. +
  927. + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
  928. + rem_len -= min(rem_len, sg->length);
  929. +
  930. + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
  931. + sg_chain(sg, 1, cipher);
  932. + }
  933. + } else {
  934. + sg_init_one(cipher, hash, phashlen);
  935. + src = cipher;
  936. + }
  937. +
  938. + /**
  939. + * If src != dst copy the associated data from source to destination.
  940. + * In both cases fast-forward past the associated data in the dest.
  941. + */
  942. + if (req->src != req->dst) {
  943. + err = crypto_tls_copy_data(req, req->src, req->dst,
  944. + req->assoclen);
  945. + if (err)
  946. + return err;
  947. + }
  948. + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
  949. +
  950. + /*
  951. + * STEP 3: encrypt the frame and return the result
  952. + */
  953. + cryptlen = req->cryptlen + phashlen;
  954. +
  955. + /*
  956. + * The hash and the cipher are applied at different times and their
  957. + * requests can use the same memory space without interference
  958. + */
  959. + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
  960. + skcipher_request_set_tfm(skreq, ctx->enc);
  961. + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
  962. + skcipher_request_set_callback(skreq, aead_request_flags(req),
  963. + req->base.complete, req->base.data);
  964. + /*
  965. + * Apply the cipher transform. The result will be in req->dst when the
  966. + * asynchronous call terminates
  967. + */
  968. + err = crypto_skcipher_encrypt(skreq);
  969. +
  970. + return err;
  971. +}
  972. +
  973. +static int crypto_tls_decrypt(struct aead_request *req)
  974. +{
  975. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  976. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
  977. + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
  978. + unsigned int cryptlen = req->cryptlen;
  979. + unsigned int hash_size = crypto_aead_authsize(tls);
  980. + unsigned int block_size = crypto_aead_blocksize(tls);
  981. + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
  982. + struct scatterlist *tmp = treq_ctx->tmp;
  983. + struct scatterlist *src, *dst;
  984. +
  985. + u8 padding[255]; /* padding can be 0-255 bytes */
  986. + u8 pad_size;
  987. + u16 *len_field;
  988. + u8 *ihash, *hash = treq_ctx->tail;
  989. +
  990. + int paderr = 0;
  991. + int err = -EINVAL;
  992. + int i;
  993. + struct async_op ciph_op;
  994. +
  995. + /*
  996. + * Rule out bad packets. The input packet length must be at least one
  997. + * byte more than the hash_size
  998. + */
  999. + if (cryptlen <= hash_size || cryptlen % block_size)
  1000. + goto out;
  1001. +
  1002. + /*
  1003. + * Step 1 - Decrypt the source. Fast-forward past the associated data
  1004. + * to the encrypted data. The result will be overwritten in place so
  1005. + * that the decrypted data will be adjacent to the associated data. The
  1006. + * last step (computing the hash) will have its input data already
  1007. + * prepared and ready to be accessed at req->src.
  1008. + */
  1009. + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
  1010. + dst = src;
  1011. +
  1012. + init_completion(&ciph_op.completion);
  1013. + skcipher_request_set_tfm(skreq, ctx->enc);
  1014. + skcipher_request_set_callback(skreq, aead_request_flags(req),
  1015. + tls_async_op_done, &ciph_op);
  1016. + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
  1017. + err = crypto_skcipher_decrypt(skreq);
  1018. + if (err == -EINPROGRESS) {
  1019. + err = wait_for_completion_interruptible(&ciph_op.completion);
  1020. + if (!err)
  1021. + err = ciph_op.err;
  1022. + }
  1023. + if (err)
  1024. + goto out;
  1025. +
  1026. + /*
  1027. + * Step 2 - Verify padding
  1028. + * Retrieve the last byte of the payload; this is the padding size.
  1029. + */
  1030. + cryptlen -= 1;
  1031. + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
  1032. +
  1033. + /* RFC recommendation for invalid padding size. */
  1034. + if (cryptlen < pad_size + hash_size) {
  1035. + pad_size = 0;
  1036. + paderr = -EBADMSG;
  1037. + }
  1038. + cryptlen -= pad_size;
  1039. + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
  1040. +
  1041. + /* Padding content must be equal to pad_size. We verify it all */
  1042. + for (i = 0; i < pad_size; i++)
  1043. + if (padding[i] != pad_size)
  1044. + paderr = -EBADMSG;
  1045. +
  1046. + /*
  1047. + * Step 3 - Verify hash
  1048. + * Align the digest result as required by the hash transform. Enough
  1049. + * space was allocated in crypto_tls_init_tfm
  1050. + */
  1051. + hash = (u8 *)ALIGN((unsigned long)hash +
  1052. + crypto_ahash_alignmask(ctx->auth),
  1053. + crypto_ahash_alignmask(ctx->auth) + 1);
  1054. + /*
  1055. + * Two bytes at the end of the associated data make the length field.
  1056. + * It must be updated with the length of the cleartext message before
  1057. + * the hash is calculated.
  1058. + */
  1059. + len_field = sg_virt(req->src) + req->assoclen - 2;
  1060. + cryptlen -= hash_size;
  1061. + *len_field = htons(cryptlen);
  1062. +
  1063. + /* This is the hash from the decrypted packet. Save it for later */
  1064. + ihash = hash + hash_size;
  1065. + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
  1066. +
  1067. + /* Now compute and compare our ICV with the one from the packet */
  1068. + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
  1069. + if (!err)
  1070. + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
  1071. +
  1072. + if (req->src != req->dst) {
  1073. + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
  1074. + req->assoclen);
  1075. + if (err)
  1076. + goto out;
  1077. + }
  1078. +
  1079. + /* return the first found error */
  1080. + if (paderr)
  1081. + err = paderr;
  1082. +
  1083. +out:
  1084. + aead_request_complete(req, err);
  1085. + return err;
  1086. +}
  1087. +
  1088. +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
  1089. +{
  1090. + struct aead_instance *inst = aead_alg_instance(tfm);
  1091. + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
  1092. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
  1093. + struct crypto_ahash *auth;
  1094. + struct crypto_skcipher *enc;
  1095. + struct crypto_skcipher *null;
  1096. + int err;
  1097. +
  1098. + auth = crypto_spawn_ahash(&ictx->auth);
  1099. + if (IS_ERR(auth))
  1100. + return PTR_ERR(auth);
  1101. +
  1102. + enc = crypto_spawn_skcipher(&ictx->enc);
  1103. + err = PTR_ERR(enc);
  1104. + if (IS_ERR(enc))
  1105. + goto err_free_ahash;
  1106. +
  1107. + null = crypto_get_default_null_skcipher2();
  1108. + err = PTR_ERR(null);
  1109. + if (IS_ERR(null))
  1110. + goto err_free_skcipher;
  1111. +
  1112. + ctx->auth = auth;
  1113. + ctx->enc = enc;
  1114. + ctx->null = null;
  1115. +
  1116. + /*
  1117. + * Allow enough space for two digests. The two digests will be compared
  1118. + * during the decryption phase. One will come from the decrypted packet
  1119. + * and the other will be calculated. For encryption, one digest is
  1120. + * padded (up to a cipher blocksize) and chained with the payload
  1121. + */
  1122. + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
  1123. + crypto_ahash_alignmask(auth),
  1124. + crypto_ahash_alignmask(auth) + 1) +
  1125. + max(crypto_ahash_digestsize(auth),
  1126. + crypto_skcipher_blocksize(enc));
  1127. +
  1128. + crypto_aead_set_reqsize(tfm,
  1129. + sizeof(struct tls_request_ctx) +
  1130. + ctx->reqoff +
  1131. + max_t(unsigned int,
  1132. + crypto_ahash_reqsize(auth) +
  1133. + sizeof(struct ahash_request),
  1134. + crypto_skcipher_reqsize(enc) +
  1135. + sizeof(struct skcipher_request)));
  1136. +
  1137. + return 0;
  1138. +
  1139. +err_free_skcipher:
  1140. + crypto_free_skcipher(enc);
  1141. +err_free_ahash:
  1142. + crypto_free_ahash(auth);
  1143. + return err;
  1144. +}
  1145. +
  1146. +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
  1147. +{
  1148. + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
  1149. +
  1150. + crypto_free_ahash(ctx->auth);
  1151. + crypto_free_skcipher(ctx->enc);
  1152. + crypto_put_default_null_skcipher2();
  1153. +}
  1154. +
  1155. +static void crypto_tls_free(struct aead_instance *inst)
  1156. +{
  1157. + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
  1158. +
  1159. + crypto_drop_skcipher(&ctx->enc);
  1160. + crypto_drop_ahash(&ctx->auth);
  1161. + kfree(inst);
  1162. +}
  1163. +
  1164. +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
  1165. +{
  1166. + struct crypto_attr_type *algt;
  1167. + struct aead_instance *inst;
  1168. + struct hash_alg_common *auth;
  1169. + struct crypto_alg *auth_base;
  1170. + struct skcipher_alg *enc;
  1171. + struct tls_instance_ctx *ctx;
  1172. + const char *enc_name;
  1173. + int err;
  1174. +
  1175. + algt = crypto_get_attr_type(tb);
  1176. + if (IS_ERR(algt))
  1177. + return PTR_ERR(algt);
  1178. +
  1179. + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
  1180. + return -EINVAL;
  1181. +
  1182. + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
  1183. + CRYPTO_ALG_TYPE_AHASH_MASK |
  1184. + crypto_requires_sync(algt->type, algt->mask));
  1185. + if (IS_ERR(auth))
  1186. + return PTR_ERR(auth);
  1187. +
  1188. + auth_base = &auth->base;
  1189. +
  1190. + enc_name = crypto_attr_alg_name(tb[2]);
  1191. + err = PTR_ERR(enc_name);
  1192. + if (IS_ERR(enc_name))
  1193. + goto out_put_auth;
  1194. +
  1195. + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
  1196. + err = -ENOMEM;
  1197. + if (!inst)
  1198. + goto out_put_auth;
  1199. +
  1200. + ctx = aead_instance_ctx(inst);
  1201. +
  1202. + err = crypto_init_ahash_spawn(&ctx->auth, auth,
  1203. + aead_crypto_instance(inst));
  1204. + if (err)
  1205. + goto err_free_inst;
  1206. +
  1207. + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
  1208. + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
  1209. + crypto_requires_sync(algt->type,
  1210. + algt->mask));
  1211. + if (err)
  1212. + goto err_drop_auth;
  1213. +
  1214. + enc = crypto_spawn_skcipher_alg(&ctx->enc);
  1215. +
  1216. + err = -ENAMETOOLONG;
  1217. + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
  1218. + "tls10(%s,%s)", auth_base->cra_name,
  1219. + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
  1220. + goto err_drop_enc;
  1221. +
  1222. + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
  1223. + "tls10(%s,%s)", auth_base->cra_driver_name,
  1224. + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
  1225. + goto err_drop_enc;
  1226. +
  1227. + inst->alg.base.cra_flags = (auth_base->cra_flags |
  1228. + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
  1229. + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
  1230. + auth_base->cra_priority;
  1231. + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
  1232. + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
  1233. + enc->base.cra_alignmask;
  1234. + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
  1235. +
  1236. + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
  1237. + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
  1238. + inst->alg.maxauthsize = auth->digestsize;
  1239. +
  1240. + inst->alg.init = crypto_tls_init_tfm;
  1241. + inst->alg.exit = crypto_tls_exit_tfm;
  1242. +
  1243. + inst->alg.setkey = crypto_tls_setkey;
  1244. + inst->alg.encrypt = crypto_tls_encrypt;
  1245. + inst->alg.decrypt = crypto_tls_decrypt;
  1246. +
  1247. + inst->free = crypto_tls_free;
  1248. +
  1249. + err = aead_register_instance(tmpl, inst);
  1250. + if (err)
  1251. + goto err_drop_enc;
  1252. +
  1253. +out:
  1254. + crypto_mod_put(auth_base);
  1255. + return err;
  1256. +
  1257. +err_drop_enc:
  1258. + crypto_drop_skcipher(&ctx->enc);
  1259. +err_drop_auth:
  1260. + crypto_drop_ahash(&ctx->auth);
  1261. +err_free_inst:
  1262. + kfree(inst);
  1263. +out_put_auth:
  1264. + goto out;
  1265. +}
  1266. +
  1267. +static struct crypto_template crypto_tls_tmpl = {
  1268. + .name = "tls10",
  1269. + .create = crypto_tls_create,
  1270. + .module = THIS_MODULE,
  1271. +};
  1272. +
  1273. +static int __init crypto_tls_module_init(void)
  1274. +{
  1275. + return crypto_register_template(&crypto_tls_tmpl);
  1276. +}
  1277. +
  1278. +static void __exit crypto_tls_module_exit(void)
  1279. +{
  1280. + crypto_unregister_template(&crypto_tls_tmpl);
  1281. +}
  1282. +
  1283. +module_init(crypto_tls_module_init);
  1284. +module_exit(crypto_tls_module_exit);
  1285. +
  1286. +MODULE_LICENSE("GPL");
  1287. +MODULE_DESCRIPTION("TLS 1.0 record encryption");
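
For orientation, the sketch below (illustration only, not part of the patch) shows how a kernel caller could drive the new tls10 AEAD, mirroring the testmgr additions earlier in this patch: the 13-byte TLS record header is passed as associated data, encryption is done in place, and the output grows by the 20-byte HMAC-SHA1 ICV plus CBC padding. It assumes DECLARE_CRYPTO_WAIT()/crypto_wait_req() are available in the target kernel.

    /* Illustration only -- not part of this patch. */
    #include <crypto/aead.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int tls10_encrypt_one(const u8 *key, unsigned int keylen,
                                 u8 *buf, unsigned int assoclen,
                                 unsigned int plen, u8 *iv)
    {
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);  /* assumes crypto_wait_req() exists */
        int err;

        tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        err = crypto_aead_setauthsize(tfm, 20);      /* SHA-1 digest size */
        if (!err)
            err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
            goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            err = -ENOMEM;
            goto out_free_tfm;
        }

        /* buf holds the 13-byte record header (assoc) followed by the
         * payload; in-place encryption appends ICV + padding after the
         * payload, so the buffer must allow for plen + 20 + blocksize.
         */
        sg_init_one(&sg, buf, assoclen + plen + 20 + 16);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, &sg, &sg, plen, iv);

        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        aead_request_free(req);
    out_free_tfm:
        crypto_free_aead(tfm);
        return err;
    }
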
  1288. --- a/drivers/crypto/Makefile
  1289. +++ b/drivers/crypto/Makefile
  1290. @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
  1291. obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
  1292. obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
  1293. obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
  1294. -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
  1295. +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
  1296. obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
  1297. obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
  1298. obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
  1299. --- a/drivers/crypto/caam/Kconfig
  1300. +++ b/drivers/crypto/caam/Kconfig
  1301. @@ -1,7 +1,11 @@
  1302. +config CRYPTO_DEV_FSL_CAAM_COMMON
  1303. + tristate
  1304. +
  1305. config CRYPTO_DEV_FSL_CAAM
  1306. - tristate "Freescale CAAM-Multicore driver backend"
  1307. + tristate "Freescale CAAM-Multicore platform driver backend"
  1308. depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
  1309. select SOC_BUS
  1310. + select CRYPTO_DEV_FSL_CAAM_COMMON
  1311. help
  1312. Enables the driver module for Freescale's Cryptographic Accelerator
  1313. and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
  1314. @@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
  1315. To compile this driver as a module, choose M here: the module
  1316. will be called caam.
  1317. +if CRYPTO_DEV_FSL_CAAM
  1318. +
  1319. +config CRYPTO_DEV_FSL_CAAM_DEBUG
  1320. + bool "Enable debug output in CAAM driver"
  1321. + help
  1322. + Selecting this will enable printing of various debug
  1323. + information in the CAAM driver.
  1324. +
  1325. config CRYPTO_DEV_FSL_CAAM_JR
  1326. tristate "Freescale CAAM Job Ring driver backend"
  1327. - depends on CRYPTO_DEV_FSL_CAAM
  1328. default y
  1329. help
  1330. Enables the driver module for Job Rings which are part of
  1331. @@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
  1332. To compile this driver as a module, choose M here: the module
  1333. will be called caam_jr.
  1334. +if CRYPTO_DEV_FSL_CAAM_JR
  1335. +
  1336. config CRYPTO_DEV_FSL_CAAM_RINGSIZE
  1337. int "Job Ring size"
  1338. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1339. range 2 9
  1340. default "9"
  1341. help
  1342. @@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
  1343. config CRYPTO_DEV_FSL_CAAM_INTC
  1344. bool "Job Ring interrupt coalescing"
  1345. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1346. help
  1347. Enable the Job Ring's interrupt coalescing feature.
  1348. @@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
  1349. config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
  1350. tristate "Register algorithm implementations with the Crypto API"
  1351. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1352. default y
  1353. select CRYPTO_AEAD
  1354. select CRYPTO_AUTHENC
  1355. @@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
  1356. config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
  1357. tristate "Queue Interface as Crypto API backend"
  1358. - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
  1359. + depends on FSL_SDK_DPA && NET
  1360. default y
  1361. select CRYPTO_AUTHENC
  1362. select CRYPTO_BLKCIPHER
  1363. @@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
  1364. config CRYPTO_DEV_FSL_CAAM_AHASH_API
  1365. tristate "Register hash algorithm implementations with Crypto API"
  1366. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1367. default y
  1368. select CRYPTO_HASH
  1369. help
  1370. @@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
  1371. config CRYPTO_DEV_FSL_CAAM_PKC_API
  1372. tristate "Register public key cryptography implementations with Crypto API"
  1373. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1374. default y
  1375. select CRYPTO_RSA
  1376. help
  1377. @@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
  1378. config CRYPTO_DEV_FSL_CAAM_RNG_API
  1379. tristate "Register caam device for hwrng API"
  1380. - depends on CRYPTO_DEV_FSL_CAAM_JR
  1381. default y
  1382. select CRYPTO_RNG
  1383. select HW_RANDOM
  1384. @@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
  1385. To compile this as a module, choose M here: the module
  1386. will be called caamrng.
  1387. -config CRYPTO_DEV_FSL_CAAM_DEBUG
  1388. - bool "Enable debug output in CAAM driver"
  1389. - depends on CRYPTO_DEV_FSL_CAAM
  1390. - help
  1391. - Selecting this will enable printing of various debug
  1392. - information in the CAAM driver.
  1393. +endif # CRYPTO_DEV_FSL_CAAM_JR
  1394. +
  1395. +endif # CRYPTO_DEV_FSL_CAAM
  1396. +
  1397. +config CRYPTO_DEV_FSL_DPAA2_CAAM
  1398. + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
  1399. + depends on FSL_MC_DPIO
  1400. + select CRYPTO_DEV_FSL_CAAM_COMMON
  1401. + select CRYPTO_BLKCIPHER
  1402. + select CRYPTO_AUTHENC
  1403. + select CRYPTO_AEAD
  1404. + select CRYPTO_HASH
  1405. + ---help---
  1406. + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
  1407. + It handles DPSECI DPAA2 objects that sit on the Management Complex
  1408. + (MC) fsl-mc bus.
  1409. +
  1410. + To compile this as a module, choose M here: the module
  1411. + will be called dpaa2_caam.
  1412. config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
  1413. def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
  1414. - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
  1415. + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
  1416. + CRYPTO_DEV_FSL_DPAA2_CAAM)
  1417. +
  1418. +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
  1419. + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
  1420. + CRYPTO_DEV_FSL_DPAA2_CAAM)
  1421. --- a/drivers/crypto/caam/Makefile
  1422. +++ b/drivers/crypto/caam/Makefile
  1423. @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
  1424. ccflags-y := -DDEBUG
  1425. endif
  1426. +ccflags-y += -DVERSION=\"\"
  1427. +
  1428. +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
  1429. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
  1430. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
  1431. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
  1432. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
  1433. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
  1434. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
  1435. +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
  1436. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
  1437. obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
  1438. caam-objs := ctrl.o
  1439. -caam_jr-objs := jr.o key_gen.o error.o
  1440. +caam_jr-objs := jr.o key_gen.o
  1441. caam_pkc-y := caampkc.o pkc_desc.o
  1442. ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
  1443. ccflags-y += -DCONFIG_CAAM_QI
  1444. caam-objs += qi.o
  1445. endif
  1446. +
  1447. +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
  1448. +
  1449. +dpaa2_caam-y := caamalg_qi2.o dpseci.o
  1450. --- a/drivers/crypto/caam/caamalg.c
  1451. +++ b/drivers/crypto/caam/caamalg.c
  1452. @@ -108,6 +108,7 @@ struct caam_ctx {
  1453. dma_addr_t sh_desc_dec_dma;
  1454. dma_addr_t sh_desc_givenc_dma;
  1455. dma_addr_t key_dma;
  1456. + enum dma_data_direction dir;
  1457. struct device *jrdev;
  1458. struct alginfo adata;
  1459. struct alginfo cdata;
  1460. @@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
  1461. {
  1462. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1463. struct device *jrdev = ctx->jrdev;
  1464. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  1465. u32 *desc;
  1466. int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
  1467. ctx->adata.keylen_pad;
  1468. @@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
  1469. /* aead_encrypt shared descriptor */
  1470. desc = ctx->sh_desc_enc;
  1471. - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
  1472. + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
  1473. + ctrlpriv->era);
  1474. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1475. - desc_bytes(desc), DMA_TO_DEVICE);
  1476. + desc_bytes(desc), ctx->dir);
  1477. /*
  1478. * Job Descriptor and Shared Descriptors
  1479. @@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
  1480. /* aead_decrypt shared descriptor */
  1481. desc = ctx->sh_desc_dec;
  1482. - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
  1483. + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
  1484. + ctrlpriv->era);
  1485. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1486. - desc_bytes(desc), DMA_TO_DEVICE);
  1487. + desc_bytes(desc), ctx->dir);
  1488. return 0;
  1489. }
  1490. @@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
  1491. unsigned int ivsize = crypto_aead_ivsize(aead);
  1492. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1493. struct device *jrdev = ctx->jrdev;
  1494. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  1495. u32 ctx1_iv_off = 0;
  1496. u32 *desc, *nonce = NULL;
  1497. u32 inl_mask;
  1498. @@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
  1499. desc = ctx->sh_desc_enc;
  1500. cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
  1501. ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
  1502. - false);
  1503. + false, ctrlpriv->era);
  1504. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1505. - desc_bytes(desc), DMA_TO_DEVICE);
  1506. + desc_bytes(desc), ctx->dir);
  1507. skip_enc:
  1508. /*
  1509. @@ -266,9 +271,9 @@ skip_enc:
  1510. desc = ctx->sh_desc_dec;
  1511. cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
  1512. ctx->authsize, alg->caam.geniv, is_rfc3686,
  1513. - nonce, ctx1_iv_off, false);
  1514. + nonce, ctx1_iv_off, false, ctrlpriv->era);
  1515. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1516. - desc_bytes(desc), DMA_TO_DEVICE);
  1517. + desc_bytes(desc), ctx->dir);
  1518. if (!alg->caam.geniv)
  1519. goto skip_givenc;
  1520. @@ -300,9 +305,9 @@ skip_enc:
  1521. desc = ctx->sh_desc_enc;
  1522. cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
  1523. ctx->authsize, is_rfc3686, nonce,
  1524. - ctx1_iv_off, false);
  1525. + ctx1_iv_off, false, ctrlpriv->era);
  1526. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1527. - desc_bytes(desc), DMA_TO_DEVICE);
  1528. + desc_bytes(desc), ctx->dir);
  1529. skip_givenc:
  1530. return 0;
  1531. @@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
  1532. {
  1533. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1534. struct device *jrdev = ctx->jrdev;
  1535. + unsigned int ivsize = crypto_aead_ivsize(aead);
  1536. u32 *desc;
  1537. int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
  1538. ctx->cdata.keylen;
  1539. @@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
  1540. }
  1541. desc = ctx->sh_desc_enc;
  1542. - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
  1543. + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
  1544. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1545. - desc_bytes(desc), DMA_TO_DEVICE);
  1546. + desc_bytes(desc), ctx->dir);
  1547. /*
  1548. * Job Descriptor and Shared Descriptors
  1549. @@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
  1550. }
  1551. desc = ctx->sh_desc_dec;
  1552. - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
  1553. + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
  1554. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1555. - desc_bytes(desc), DMA_TO_DEVICE);
  1556. + desc_bytes(desc), ctx->dir);
  1557. return 0;
  1558. }
  1559. @@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
  1560. {
  1561. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1562. struct device *jrdev = ctx->jrdev;
  1563. + unsigned int ivsize = crypto_aead_ivsize(aead);
  1564. u32 *desc;
  1565. int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
  1566. ctx->cdata.keylen;
  1567. @@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
  1568. }
  1569. desc = ctx->sh_desc_enc;
  1570. - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
  1571. + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
  1572. + false);
  1573. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1574. - desc_bytes(desc), DMA_TO_DEVICE);
  1575. + desc_bytes(desc), ctx->dir);
  1576. /*
  1577. * Job Descriptor and Shared Descriptors
  1578. @@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
  1579. }
  1580. desc = ctx->sh_desc_dec;
  1581. - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
  1582. + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
  1583. + false);
  1584. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1585. - desc_bytes(desc), DMA_TO_DEVICE);
  1586. + desc_bytes(desc), ctx->dir);
  1587. return 0;
  1588. }
  1589. @@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
  1590. {
  1591. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1592. struct device *jrdev = ctx->jrdev;
  1593. + unsigned int ivsize = crypto_aead_ivsize(aead);
  1594. u32 *desc;
  1595. int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
  1596. ctx->cdata.keylen;
  1597. @@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
  1598. }
  1599. desc = ctx->sh_desc_enc;
  1600. - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
  1601. + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
  1602. + false);
  1603. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1604. - desc_bytes(desc), DMA_TO_DEVICE);
  1605. + desc_bytes(desc), ctx->dir);
  1606. /*
  1607. * Job Descriptor and Shared Descriptors
  1608. @@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
  1609. }
  1610. desc = ctx->sh_desc_dec;
  1611. - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
  1612. + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
  1613. + false);
  1614. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1615. - desc_bytes(desc), DMA_TO_DEVICE);
  1616. + desc_bytes(desc), ctx->dir);
  1617. return 0;
  1618. }
  1619. @@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
  1620. {
  1621. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1622. struct device *jrdev = ctx->jrdev;
  1623. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  1624. struct crypto_authenc_keys keys;
  1625. int ret = 0;
  1626. @@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
  1627. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1628. #endif
  1629. + /*
  1630. + * If DKP is supported, use it in the shared descriptor to generate
  1631. + * the split key.
  1632. + */
  1633. + if (ctrlpriv->era >= 6) {
  1634. + ctx->adata.keylen = keys.authkeylen;
  1635. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  1636. + OP_ALG_ALGSEL_MASK);
  1637. +
  1638. + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  1639. + goto badkey;
  1640. +
  1641. + memcpy(ctx->key, keys.authkey, keys.authkeylen);
  1642. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
  1643. + keys.enckeylen);
  1644. + dma_sync_single_for_device(jrdev, ctx->key_dma,
  1645. + ctx->adata.keylen_pad +
  1646. + keys.enckeylen, ctx->dir);
  1647. + goto skip_split_key;
  1648. + }
  1649. +
  1650. ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
  1651. keys.authkeylen, CAAM_MAX_KEY_SIZE -
  1652. keys.enckeylen);
  1653. @@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
  1654. /* postpend encryption key to auth split key */
  1655. memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  1656. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
  1657. - keys.enckeylen, DMA_TO_DEVICE);
  1658. + keys.enckeylen, ctx->dir);
  1659. #ifdef DEBUG
  1660. print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
  1661. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  1662. ctx->adata.keylen_pad + keys.enckeylen, 1);
  1663. #endif
  1664. +
  1665. +skip_split_key:
  1666. ctx->cdata.keylen = keys.enckeylen;
  1667. return aead_set_sh_desc(aead);
  1668. badkey:
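Note (illustrative, not part of the patch): the Era-dependent branch in aead_setkey() above leaves ctx->key in one of two layouts; the helper below is hypothetical, only the field names are taken from the hunk.

/*
 * Sketch of ctx->key after aead_setkey():
 *
 *   SEC Era >= 6 (DKP): [ plain HMAC key, padded to adata.keylen_pad | enckey ]
 *                       the shared descriptor derives the split key itself
 *   SEC Era <  6:       [ MDHA split key, adata.keylen_pad bytes     | enckey ]
 *                       gen_split_key() runs on the host first
 */
static inline unsigned int sketch_aead_keys_size(struct caam_ctx *ctx)
{
	/* number of bytes synced to the device in both cases */
	return ctx->adata.keylen_pad + ctx->cdata.keylen;
}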
  1669. @@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
  1670. #endif
  1671. memcpy(ctx->key, key, keylen);
  1672. - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
  1673. + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
  1674. ctx->cdata.keylen = keylen;
  1675. return gcm_set_sh_desc(aead);
  1676. @@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
  1677. */
  1678. ctx->cdata.keylen = keylen - 4;
  1679. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  1680. - DMA_TO_DEVICE);
  1681. + ctx->dir);
  1682. return rfc4106_set_sh_desc(aead);
  1683. }
  1684. @@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
  1685. */
  1686. ctx->cdata.keylen = keylen - 4;
  1687. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  1688. - DMA_TO_DEVICE);
  1689. + ctx->dir);
  1690. return rfc4543_set_sh_desc(aead);
  1691. }
  1692. @@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
  1693. cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
  1694. ctx1_iv_off);
  1695. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1696. - desc_bytes(desc), DMA_TO_DEVICE);
  1697. + desc_bytes(desc), ctx->dir);
  1698. /* ablkcipher_decrypt shared descriptor */
  1699. desc = ctx->sh_desc_dec;
  1700. cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
  1701. ctx1_iv_off);
  1702. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1703. - desc_bytes(desc), DMA_TO_DEVICE);
  1704. + desc_bytes(desc), ctx->dir);
  1705. /* ablkcipher_givencrypt shared descriptor */
  1706. desc = ctx->sh_desc_givenc;
  1707. cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
  1708. ctx1_iv_off);
  1709. dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
  1710. - desc_bytes(desc), DMA_TO_DEVICE);
  1711. + desc_bytes(desc), ctx->dir);
  1712. return 0;
  1713. }
  1714. @@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
  1715. desc = ctx->sh_desc_enc;
  1716. cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
  1717. dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
  1718. - desc_bytes(desc), DMA_TO_DEVICE);
  1719. + desc_bytes(desc), ctx->dir);
  1720. /* xts_ablkcipher_decrypt shared descriptor */
  1721. desc = ctx->sh_desc_dec;
  1722. cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
  1723. dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
  1724. - desc_bytes(desc), DMA_TO_DEVICE);
  1725. + desc_bytes(desc), ctx->dir);
  1726. return 0;
  1727. }
  1728. @@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
  1729. append_seq_out_ptr(desc, dst_dma,
  1730. req->assoclen + req->cryptlen - authsize,
  1731. out_options);
  1732. -
  1733. - /* REG3 = assoclen */
  1734. - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1735. }
  1736. static void init_gcm_job(struct aead_request *req,
  1737. @@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
  1738. unsigned int last;
  1739. init_aead_job(req, edesc, all_contig, encrypt);
  1740. + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1741. /* BUG This should not be specific to generic GCM. */
  1742. last = 0;
  1743. @@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
  1744. struct caam_aead_alg, aead);
  1745. unsigned int ivsize = crypto_aead_ivsize(aead);
  1746. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1747. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  1748. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  1749. OP_ALG_AAI_CTR_MOD128);
  1750. const bool is_rfc3686 = alg->caam.rfc3686;
  1751. @@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
  1752. init_aead_job(req, edesc, all_contig, encrypt);
  1753. + /*
  1754. + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
  1755. + * having DPOVRD as destination.
  1756. + */
  1757. + if (ctrlpriv->era < 3)
  1758. + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1759. + else
  1760. + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
  1761. +
  1762. if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
  1763. append_load_as_imm(desc, req->iv, ivsize,
  1764. LDST_CLASS_1_CCB |
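Note (illustrative, not part of the patch): the write side added to init_authenc_job() above must match the read side added to cnstr_shdsc_aead_{en,de,given}cap() in caamalg_desc.c further down; the helper name is hypothetical.

/* Sketch of the assoclen hand-off between job and shared descriptor. */
static void sketch_pass_assoclen(u32 *desc, int era, u32 assoclen)
{
	if (era < 3)
		/* MATH cannot write DPOVRD before Era 3, fall back to REG3 */
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
	else
		/* Era >= 3: use DPOVRD and leave REG3 free */
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, assoclen);
	/*
	 * The shared descriptor reads the same location back:
	 *   is_qi || era < 3 : MATH ADD REG3   -> VARSEQINLEN / VARSEQOUTLEN
	 *   otherwise        : MATH ADD DPOVRD -> VARSEQINLEN / VARSEQOUTLEN
	 */
}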
  1765. @@ -3204,9 +3248,11 @@ struct caam_crypto_alg {
  1766. struct caam_alg_entry caam;
  1767. };
  1768. -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  1769. +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
  1770. + bool uses_dkp)
  1771. {
  1772. dma_addr_t dma_addr;
  1773. + struct caam_drv_private *priv;
  1774. ctx->jrdev = caam_jr_alloc();
  1775. if (IS_ERR(ctx->jrdev)) {
  1776. @@ -3214,10 +3260,16 @@ static int caam_init_common(struct caam_
  1777. return PTR_ERR(ctx->jrdev);
  1778. }
  1779. + priv = dev_get_drvdata(ctx->jrdev->parent);
  1780. + if (priv->era >= 6 && uses_dkp)
  1781. + ctx->dir = DMA_BIDIRECTIONAL;
  1782. + else
  1783. + ctx->dir = DMA_TO_DEVICE;
  1784. +
  1785. dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
  1786. offsetof(struct caam_ctx,
  1787. sh_desc_enc_dma),
  1788. - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
  1789. + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
  1790. if (dma_mapping_error(ctx->jrdev, dma_addr)) {
  1791. dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
  1792. caam_jr_free(ctx->jrdev);
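Note (illustrative, not part of the patch): the DMA direction selected above, restated with its rationale; that DKP writes the derived split key back over the plain key is an assumption based on the protocol's behaviour, not stated in this hunk.

static enum dma_data_direction sketch_ctx_dma_dir(int era, bool uses_dkp)
{
	/*
	 * With DKP (Era >= 6) the CAAM derives the split key in place, i.e.
	 * it writes back into the key / shared descriptor area the CPU just
	 * filled in, so the mapping must work in both directions. Without
	 * DKP that area is only ever written by the CPU.
	 */
	return (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
}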
  1793. @@ -3245,7 +3297,7 @@ static int caam_cra_init(struct crypto_t
  1794. container_of(alg, struct caam_crypto_alg, crypto_alg);
  1795. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  1796. - return caam_init_common(ctx, &caam_alg->caam);
  1797. + return caam_init_common(ctx, &caam_alg->caam, false);
  1798. }
  1799. static int caam_aead_init(struct crypto_aead *tfm)
  1800. @@ -3255,14 +3307,15 @@ static int caam_aead_init(struct crypto_
  1801. container_of(alg, struct caam_aead_alg, aead);
  1802. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  1803. - return caam_init_common(ctx, &caam_alg->caam);
  1804. + return caam_init_common(ctx, &caam_alg->caam,
  1805. + alg->setkey == aead_setkey);
  1806. }
  1807. static void caam_exit_common(struct caam_ctx *ctx)
  1808. {
  1809. dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
  1810. offsetof(struct caam_ctx, sh_desc_enc_dma),
  1811. - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
  1812. + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
  1813. caam_jr_free(ctx->jrdev);
  1814. }
  1815. --- a/drivers/crypto/caam/caamalg_desc.c
  1816. +++ b/drivers/crypto/caam/caamalg_desc.c
  1817. @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
  1818. * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
  1819. * (non-protocol) with no (null) encryption.
  1820. * @desc: pointer to buffer used for descriptor construction
  1821. - * @adata: pointer to authentication transform definitions. Note that since a
  1822. - * split key is to be used, the size of the split key itself is
  1823. - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
  1824. - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  1825. + * @adata: pointer to authentication transform definitions.
  1826. + * A split key is required for SEC Era < 6; the size of the split key
  1827. + * is specified in this case. Valid algorithm values - one of
  1828. + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
  1829. + * with OP_ALG_AAI_HMAC_PRECOMP.
  1830. * @icvsize: integrity check value (ICV) size (truncated or full)
  1831. - *
  1832. - * Note: Requires an MDHA split key.
  1833. + * @era: SEC Era
  1834. */
  1835. void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
  1836. - unsigned int icvsize)
  1837. + unsigned int icvsize, int era)
  1838. {
  1839. u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
  1840. @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
  1841. /* Skip if already shared */
  1842. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1843. JUMP_COND_SHRD);
  1844. - if (adata->key_inline)
  1845. - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
  1846. - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
  1847. - KEY_ENC);
  1848. - else
  1849. - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
  1850. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1851. + if (era < 6) {
  1852. + if (adata->key_inline)
  1853. + append_key_as_imm(desc, adata->key_virt,
  1854. + adata->keylen_pad, adata->keylen,
  1855. + CLASS_2 | KEY_DEST_MDHA_SPLIT |
  1856. + KEY_ENC);
  1857. + else
  1858. + append_key(desc, adata->key_dma, adata->keylen,
  1859. + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1860. + } else {
  1861. + append_proto_dkp(desc, adata);
  1862. + }
  1863. set_jump_tgt_here(desc, key_jump_cmd);
  1864. /* assoclen + cryptlen = seqinlen */
  1865. @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
  1866. * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
  1867. * (non-protocol) with no (null) decryption.
  1868. * @desc: pointer to buffer used for descriptor construction
  1869. - * @adata: pointer to authentication transform definitions. Note that since a
  1870. - * split key is to be used, the size of the split key itself is
  1871. - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
  1872. - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  1873. + * @adata: pointer to authentication transform definitions.
  1874. + * A split key is required for SEC Era < 6; the size of the split key
  1875. + * is specified in this case. Valid algorithm values - one of
  1876. + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
  1877. + * with OP_ALG_AAI_HMAC_PRECOMP.
  1878. * @icvsize: integrity check value (ICV) size (truncated or full)
  1879. - *
  1880. - * Note: Requires an MDHA split key.
  1881. + * @era: SEC Era
  1882. */
  1883. void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
  1884. - unsigned int icvsize)
  1885. + unsigned int icvsize, int era)
  1886. {
  1887. u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
  1888. @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
  1889. /* Skip if already shared */
  1890. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1891. JUMP_COND_SHRD);
  1892. - if (adata->key_inline)
  1893. - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
  1894. - adata->keylen, CLASS_2 |
  1895. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1896. - else
  1897. - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
  1898. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1899. + if (era < 6) {
  1900. + if (adata->key_inline)
  1901. + append_key_as_imm(desc, adata->key_virt,
  1902. + adata->keylen_pad, adata->keylen,
  1903. + CLASS_2 | KEY_DEST_MDHA_SPLIT |
  1904. + KEY_ENC);
  1905. + else
  1906. + append_key(desc, adata->key_dma, adata->keylen,
  1907. + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1908. + } else {
  1909. + append_proto_dkp(desc, adata);
  1910. + }
  1911. set_jump_tgt_here(desc, key_jump_cmd);
  1912. /* Class 2 operation */
  1913. @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
  1914. static void init_sh_desc_key_aead(u32 * const desc,
  1915. struct alginfo * const cdata,
  1916. struct alginfo * const adata,
  1917. - const bool is_rfc3686, u32 *nonce)
  1918. + const bool is_rfc3686, u32 *nonce, int era)
  1919. {
  1920. u32 *key_jump_cmd;
  1921. unsigned int enckeylen = cdata->keylen;
  1922. @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
  1923. if (is_rfc3686)
  1924. enckeylen -= CTR_RFC3686_NONCE_SIZE;
  1925. - if (adata->key_inline)
  1926. - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
  1927. - adata->keylen, CLASS_2 |
  1928. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1929. - else
  1930. - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
  1931. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1932. + if (era < 6) {
  1933. + if (adata->key_inline)
  1934. + append_key_as_imm(desc, adata->key_virt,
  1935. + adata->keylen_pad, adata->keylen,
  1936. + CLASS_2 | KEY_DEST_MDHA_SPLIT |
  1937. + KEY_ENC);
  1938. + else
  1939. + append_key(desc, adata->key_dma, adata->keylen,
  1940. + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
  1941. + } else {
  1942. + append_proto_dkp(desc, adata);
  1943. + }
  1944. if (cdata->key_inline)
  1945. append_key_as_imm(desc, cdata->key_virt, enckeylen,
  1946. @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
  1947. * @cdata: pointer to block cipher transform definitions
  1948. * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
  1949. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
  1950. - * @adata: pointer to authentication transform definitions. Note that since a
  1951. - * split key is to be used, the size of the split key itself is
  1952. - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
  1953. - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  1954. + * @adata: pointer to authentication transform definitions.
  1955. + * A split key is required for SEC Era < 6; the size of the split key
  1956. + * is specified in this case. Valid algorithm values - one of
  1957. + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
  1958. + * with OP_ALG_AAI_HMAC_PRECOMP.
  1959. * @ivsize: initialization vector size
  1960. * @icvsize: integrity check value (ICV) size (truncated or full)
  1961. * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  1962. * @nonce: pointer to rfc3686 nonce
  1963. * @ctx1_iv_off: IV offset in CONTEXT1 register
  1964. * @is_qi: true when called from caam/qi
  1965. - *
  1966. - * Note: Requires an MDHA split key.
  1967. + * @era: SEC Era
  1968. */
  1969. void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
  1970. struct alginfo *adata, unsigned int ivsize,
  1971. unsigned int icvsize, const bool is_rfc3686,
  1972. - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
  1973. + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
  1974. + int era)
  1975. {
  1976. /* Note: Context registers are saved. */
  1977. - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
  1978. + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
  1979. /* Class 2 operation */
  1980. append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
  1981. @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
  1982. }
  1983. /* Read and write assoclen bytes */
  1984. - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  1985. - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  1986. + if (is_qi || era < 3) {
  1987. + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  1988. + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  1989. + } else {
  1990. + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
  1991. + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
  1992. + }
  1993. /* Skip assoc data */
  1994. append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
  1995. @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
  1996. * @cdata: pointer to block cipher transform definitions
  1997. * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
  1998. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
  1999. - * @adata: pointer to authentication transform definitions. Note that since a
  2000. - * split key is to be used, the size of the split key itself is
  2001. - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
  2002. - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  2003. + * @adata: pointer to authentication transform definitions.
  2004. + * A split key is required for SEC Era < 6; the size of the split key
  2005. + * is specified in this case. Valid algorithm values - one of
  2006. + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
  2007. + * with OP_ALG_AAI_HMAC_PRECOMP.
  2008. * @ivsize: initialization vector size
  2009. * @icvsize: integrity check value (ICV) size (truncated or full)
  2010. * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  2011. * @nonce: pointer to rfc3686 nonce
  2012. * @ctx1_iv_off: IV offset in CONTEXT1 register
  2013. * @is_qi: true when called from caam/qi
  2014. - *
  2015. - * Note: Requires an MDHA split key.
  2016. + * @era: SEC Era
  2017. */
  2018. void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
  2019. struct alginfo *adata, unsigned int ivsize,
  2020. unsigned int icvsize, const bool geniv,
  2021. const bool is_rfc3686, u32 *nonce,
  2022. - const u32 ctx1_iv_off, const bool is_qi)
  2023. + const u32 ctx1_iv_off, const bool is_qi, int era)
  2024. {
  2025. /* Note: Context registers are saved. */
  2026. - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
  2027. + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
  2028. /* Class 2 operation */
  2029. append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
  2030. @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
  2031. }
  2032. /* Read and write assoclen bytes */
  2033. - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2034. - if (geniv)
  2035. - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
  2036. - else
  2037. - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  2038. + if (is_qi || era < 3) {
  2039. + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2040. + if (geniv)
  2041. + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
  2042. + ivsize);
  2043. + else
  2044. + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
  2045. + CAAM_CMD_SZ);
  2046. + } else {
  2047. + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
  2048. + if (geniv)
  2049. + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
  2050. + ivsize);
  2051. + else
  2052. + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
  2053. + CAAM_CMD_SZ);
  2054. + }
  2055. /* Skip assoc data */
  2056. append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
  2057. @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
  2058. * @cdata: pointer to block cipher transform definitions
  2059. * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
  2060. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
  2061. - * @adata: pointer to authentication transform definitions. Note that since a
  2062. - * split key is to be used, the size of the split key itself is
  2063. - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
  2064. - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  2065. + * @adata: pointer to authentication transform definitions.
  2066. + * A split key is required for SEC Era < 6; the size of the split key
  2067. + * is specified in this case. Valid algorithm values - one of
  2068. + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
  2069. + * with OP_ALG_AAI_HMAC_PRECOMP.
  2070. * @ivsize: initialization vector size
  2071. * @icvsize: integrity check value (ICV) size (truncated or full)
  2072. * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  2073. * @nonce: pointer to rfc3686 nonce
  2074. * @ctx1_iv_off: IV offset in CONTEXT1 register
  2075. * @is_qi: true when called from caam/qi
  2076. - *
  2077. - * Note: Requires an MDHA split key.
  2078. + * @era: SEC Era
  2079. */
  2080. void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
  2081. struct alginfo *adata, unsigned int ivsize,
  2082. unsigned int icvsize, const bool is_rfc3686,
  2083. u32 *nonce, const u32 ctx1_iv_off,
  2084. - const bool is_qi)
  2085. + const bool is_qi, int era)
  2086. {
  2087. u32 geniv, moveiv;
  2088. /* Note: Context registers are saved. */
  2089. - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
  2090. + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
  2091. if (is_qi) {
  2092. u32 *wait_load_cmd;
  2093. @@ -528,8 +561,13 @@ copy_iv:
  2094. OP_ALG_ENCRYPT);
  2095. /* Read and write assoclen bytes */
  2096. - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2097. - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  2098. + if (is_qi || era < 3) {
  2099. + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2100. + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  2101. + } else {
  2102. + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
  2103. + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
  2104. + }
  2105. /* Skip assoc data */
  2106. append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
  2107. @@ -583,14 +621,431 @@ copy_iv:
  2108. EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
  2109. /**
  2110. + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
  2111. + * @desc: pointer to buffer used for descriptor construction
  2112. + * @cdata: pointer to block cipher transform definitions
  2113. + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
  2114. + * with OP_ALG_AAI_CBC
  2115. + * @adata: pointer to authentication transform definitions.
  2116. + * A split key is required for SEC Era < 6; the size of the split key
2117. + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
  2118. + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  2119. + * @assoclen: associated data length
  2120. + * @ivsize: initialization vector size
  2121. + * @authsize: authentication data size
  2122. + * @blocksize: block cipher size
  2123. + * @era: SEC Era
  2124. + */
  2125. +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
  2126. + struct alginfo *adata, unsigned int assoclen,
  2127. + unsigned int ivsize, unsigned int authsize,
  2128. + unsigned int blocksize, int era)
  2129. +{
  2130. + u32 *key_jump_cmd, *zero_payload_jump_cmd;
  2131. + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
  2132. +
  2133. + /*
  2134. + * Compute the index (in bytes) for the LOAD with destination of
  2135. + * Class 1 Data Size Register and for the LOAD that generates padding
  2136. + */
  2137. + if (adata->key_inline) {
  2138. + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
  2139. + cdata->keylen - 4 * CAAM_CMD_SZ;
  2140. + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
  2141. + cdata->keylen - 2 * CAAM_CMD_SZ;
  2142. + } else {
  2143. + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
  2144. + 4 * CAAM_CMD_SZ;
  2145. + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
  2146. + 2 * CAAM_CMD_SZ;
  2147. + }
  2148. +
  2149. + stidx = 1 << HDR_START_IDX_SHIFT;
  2150. + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
  2151. +
  2152. + /* skip key loading if they are loaded due to sharing */
  2153. + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2154. + JUMP_COND_SHRD);
  2155. +
  2156. + if (era < 6) {
  2157. + if (adata->key_inline)
  2158. + append_key_as_imm(desc, adata->key_virt,
  2159. + adata->keylen_pad, adata->keylen,
  2160. + CLASS_2 | KEY_DEST_MDHA_SPLIT |
  2161. + KEY_ENC);
  2162. + else
  2163. + append_key(desc, adata->key_dma, adata->keylen,
  2164. + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
  2165. + } else {
  2166. + append_proto_dkp(desc, adata);
  2167. + }
  2168. +
  2169. + if (cdata->key_inline)
  2170. + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
  2171. + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
  2172. + else
  2173. + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
  2174. + KEY_DEST_CLASS_REG);
  2175. +
  2176. + set_jump_tgt_here(desc, key_jump_cmd);
  2177. +
  2178. + /* class 2 operation */
  2179. + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
  2180. + OP_ALG_ENCRYPT);
  2181. + /* class 1 operation */
  2182. + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2183. + OP_ALG_ENCRYPT);
  2184. +
  2185. + /* payloadlen = input data length - (assoclen + ivlen) */
  2186. + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
  2187. +
  2188. + /* math1 = payloadlen + icvlen */
  2189. + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
  2190. +
  2191. + /* padlen = block_size - math1 % block_size */
  2192. + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
  2193. + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
  2194. +
  2195. + /* cryptlen = payloadlen + icvlen + padlen */
  2196. + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
  2197. +
  2198. + /*
  2199. + * update immediate data with the padding length value
  2200. + * for the LOAD in the class 1 data size register.
  2201. + */
  2202. + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
  2203. + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
  2204. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
  2205. + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
  2206. +
2207. + /* overwrite PL field for the padding info FIFO entry */
  2208. + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
  2209. + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
  2210. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
  2211. + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
  2212. +
  2213. + /* store encrypted payload, icv and padding */
  2214. + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
  2215. +
  2216. + /* if payload length is zero, jump to zero-payload commands */
  2217. + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
  2218. + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
  2219. + JUMP_COND_MATH_Z);
  2220. +
  2221. + /* load iv in context1 */
  2222. + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
  2223. + LDST_CLASS_1_CCB | ivsize);
  2224. +
  2225. + /* read assoc for authentication */
  2226. + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
  2227. + FIFOLD_TYPE_MSG);
  2228. + /* insnoop payload */
  2229. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
  2230. + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
  2231. +
  2232. + /* jump the zero-payload commands */
  2233. + append_jump(desc, JUMP_TEST_ALL | 3);
  2234. +
  2235. + /* zero-payload commands */
  2236. + set_jump_tgt_here(desc, zero_payload_jump_cmd);
  2237. +
  2238. + /* load iv in context1 */
  2239. + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
  2240. + LDST_CLASS_1_CCB | ivsize);
  2241. +
  2242. + /* assoc data is the only data for authentication */
  2243. + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
  2244. + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
  2245. +
  2246. + /* send icv to encryption */
  2247. + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
  2248. + authsize);
  2249. +
  2250. + /* update class 1 data size register with padding length */
  2251. + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
  2252. + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
  2253. +
  2254. + /* generate padding and send it to encryption */
  2255. + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
  2256. + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
  2257. + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
  2258. + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  2259. +
  2260. +#ifdef DEBUG
  2261. + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
  2262. + DUMP_PREFIX_ADDRESS, 16, 4, desc,
  2263. + desc_bytes(desc), 1);
  2264. +#endif
  2265. +}
  2266. +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
  2267. +
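Worked example (not part of the patch) of the padding arithmetic that the MATH commands in cnstr_shdsc_tls_encap() above implement, assuming AES-CBC (16-byte blocks) and HMAC-SHA1 (20-byte ICV); the helper name is hypothetical.

/*
 * TLS 1.0 CBC padding as computed by the descriptor:
 *   payloadlen = seqinlen - (assoclen + ivsize)
 *   padlen     = blocksize - ((payloadlen + authsize) & (blocksize - 1))
 *   cryptlen   = payloadlen + authsize + padlen
 * e.g. payloadlen = 100, authsize = 20, blocksize = 16:
 *   (100 + 20) & 15 = 8, padlen = 16 - 8 = 8, cryptlen = 128
 * When payloadlen + authsize is already block aligned, padlen is a full
 * block, so there is always at least one padding byte, as TLS requires.
 */
static unsigned int sketch_tls_cryptlen(unsigned int payloadlen,
					unsigned int authsize,
					unsigned int blocksize)
{
	unsigned int padlen = blocksize -
			      ((payloadlen + authsize) & (blocksize - 1));

	return payloadlen + authsize + padlen;
}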
  2268. +/**
  2269. + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
  2270. + * @desc: pointer to buffer used for descriptor construction
  2271. + * @cdata: pointer to block cipher transform definitions
  2272. + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
  2273. + * with OP_ALG_AAI_CBC
  2274. + * @adata: pointer to authentication transform definitions.
  2275. + * A split key is required for SEC Era < 6; the size of the split key
2276. + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
  2277. + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
  2278. + * @assoclen: associated data length
  2279. + * @ivsize: initialization vector size
  2280. + * @authsize: authentication data size
  2281. + * @blocksize: block cipher size
  2282. + * @era: SEC Era
  2283. + */
  2284. +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
  2285. + struct alginfo *adata, unsigned int assoclen,
  2286. + unsigned int ivsize, unsigned int authsize,
  2287. + unsigned int blocksize, int era)
  2288. +{
  2289. + u32 stidx, jumpback;
  2290. + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
  2291. + /*
  2292. + * Pointer Size bool determines the size of address pointers.
  2293. + * false - Pointers fit in one 32-bit word.
  2294. + * true - Pointers fit in two 32-bit words.
  2295. + */
  2296. + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
  2297. +
  2298. + stidx = 1 << HDR_START_IDX_SHIFT;
  2299. + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
  2300. +
  2301. + /* skip key loading if they are loaded due to sharing */
  2302. + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2303. + JUMP_COND_SHRD);
  2304. +
  2305. + if (era < 6)
  2306. + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
  2307. + KEY_DEST_MDHA_SPLIT | KEY_ENC);
  2308. + else
  2309. + append_proto_dkp(desc, adata);
  2310. +
  2311. + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
  2312. + KEY_DEST_CLASS_REG);
  2313. +
  2314. + set_jump_tgt_here(desc, key_jump_cmd);
  2315. +
  2316. + /* class 2 operation */
  2317. + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
  2318. + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  2319. + /* class 1 operation */
  2320. + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2321. + OP_ALG_DECRYPT);
  2322. +
  2323. + /* VSIL = input data length - 2 * block_size */
  2324. + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
  2325. + blocksize);
  2326. +
  2327. + /*
  2328. + * payloadlen + icvlen + padlen = input data length - (assoclen +
  2329. + * ivsize)
  2330. + */
  2331. + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
  2332. +
  2333. + /* skip data to the last but one cipher block */
  2334. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
  2335. +
  2336. + /* load iv for the last cipher block */
  2337. + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
  2338. + LDST_CLASS_1_CCB | ivsize);
  2339. +
  2340. + /* read last cipher block */
  2341. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
  2342. + FIFOLD_TYPE_LAST1 | blocksize);
  2343. +
  2344. + /* move decrypted block into math0 and math1 */
  2345. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
  2346. + blocksize);
  2347. +
  2348. + /* reset AES CHA */
  2349. + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
  2350. + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
  2351. +
  2352. + /* rewind input sequence */
  2353. + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
  2354. +
  2355. + /* key1 is in decryption form */
  2356. + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
  2357. + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
  2358. +
  2359. + /* load iv in context1 */
  2360. + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
  2361. + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
  2362. +
  2363. + /* read sequence number */
  2364. + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
  2365. + /* load Type, Version and Len fields in math0 */
  2366. + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
  2367. + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
  2368. +
  2369. + /* compute (padlen - 1) */
  2370. + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
  2371. +
  2372. + /* math2 = icvlen + (padlen - 1) + 1 */
  2373. + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
  2374. +
  2375. + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
  2376. +
  2377. + /* VSOL = payloadlen + icvlen + padlen */
  2378. + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
  2379. +
  2380. + if (caam_little_end)
  2381. + append_moveb(desc, MOVE_WAITCOMP |
  2382. + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
  2383. +
  2384. + /* update Len field */
  2385. + append_math_sub(desc, REG0, REG0, REG2, 8);
  2386. +
  2387. + /* store decrypted payload, icv and padding */
  2388. + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
  2389. +
  2390. + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
  2391. + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
  2392. +
  2393. + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
  2394. + JUMP_COND_MATH_Z);
  2395. +
  2396. + /* send Type, Version and Len(pre ICV) fields to authentication */
  2397. + append_move(desc, MOVE_WAITCOMP |
  2398. + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
  2399. + (3 << MOVE_OFFSET_SHIFT) | 5);
  2400. +
  2401. + /* outsnooping payload */
  2402. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
  2403. + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
  2404. + FIFOLDST_VLF);
  2405. + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
  2406. +
  2407. + set_jump_tgt_here(desc, zero_payload_jump_cmd);
  2408. + /* send Type, Version and Len(pre ICV) fields to authentication */
  2409. + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
  2410. + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
  2411. + (3 << MOVE_OFFSET_SHIFT) | 5);
  2412. +
  2413. + set_jump_tgt_here(desc, skip_zero_jump_cmd);
  2414. + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
  2415. +
  2416. + /* load icvlen and padlen */
  2417. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
  2418. + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
  2419. +
2420. + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
  2421. + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
  2422. +
  2423. + /*
  2424. + * Start a new input sequence using the SEQ OUT PTR command options,
  2425. + * pointer and length used when the current output sequence was defined.
  2426. + */
  2427. + if (ps) {
  2428. + /*
  2429. + * Move the lower 32 bits of Shared Descriptor address, the
  2430. + * SEQ OUT PTR command, Output Pointer (2 words) and
  2431. + * Output Length into math registers.
  2432. + */
  2433. + if (caam_little_end)
  2434. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
  2435. + MOVE_DEST_MATH0 |
  2436. + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
  2437. + else
  2438. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
  2439. + MOVE_DEST_MATH0 |
  2440. + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
  2441. +
  2442. + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
  2443. + append_math_and_imm_u32(desc, REG0, REG0, IMM,
  2444. + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
  2445. + /* Append a JUMP command after the copied fields */
  2446. + jumpback = CMD_JUMP | (char)-9;
  2447. + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
  2448. + LDST_SRCDST_WORD_DECO_MATH2 |
  2449. + (4 << LDST_OFFSET_SHIFT));
  2450. + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
  2451. + /* Move the updated fields back to the Job Descriptor */
  2452. + if (caam_little_end)
  2453. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
  2454. + MOVE_DEST_DESCBUF |
  2455. + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
  2456. + else
  2457. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
  2458. + MOVE_DEST_DESCBUF |
  2459. + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
  2460. +
  2461. + /*
  2462. + * Read the new SEQ IN PTR command, Input Pointer, Input Length
  2463. + * and then jump back to the next command from the
  2464. + * Shared Descriptor.
  2465. + */
  2466. + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
  2467. + } else {
  2468. + /*
  2469. + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
  2470. + * Output Length into math registers.
  2471. + */
  2472. + if (caam_little_end)
  2473. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
  2474. + MOVE_DEST_MATH0 |
  2475. + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
  2476. + else
  2477. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
  2478. + MOVE_DEST_MATH0 |
  2479. + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
  2480. +
  2481. + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
  2482. + append_math_and_imm_u64(desc, REG0, REG0, IMM,
  2483. + ~(((u64)(CMD_SEQ_IN_PTR ^
  2484. + CMD_SEQ_OUT_PTR)) << 32));
  2485. + /* Append a JUMP command after the copied fields */
  2486. + jumpback = CMD_JUMP | (char)-7;
  2487. + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
  2488. + LDST_SRCDST_WORD_DECO_MATH1 |
  2489. + (4 << LDST_OFFSET_SHIFT));
  2490. + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
  2491. + /* Move the updated fields back to the Job Descriptor */
  2492. + if (caam_little_end)
  2493. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
  2494. + MOVE_DEST_DESCBUF |
  2495. + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
  2496. + else
  2497. + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
  2498. + MOVE_DEST_DESCBUF |
  2499. + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
  2500. +
  2501. + /*
  2502. + * Read the new SEQ IN PTR command, Input Pointer, Input Length
  2503. + * and then jump back to the next command from the
  2504. + * Shared Descriptor.
  2505. + */
  2506. + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
  2507. + }
  2508. +
  2509. + /* skip payload */
  2510. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
  2511. + /* check icv */
  2512. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
  2513. + FIFOLD_TYPE_LAST2 | authsize);
  2514. +
  2515. +#ifdef DEBUG
  2516. + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
  2517. + DUMP_PREFIX_ADDRESS, 16, 4, desc,
  2518. + desc_bytes(desc), 1);
  2519. +#endif
  2520. +}
  2521. +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
  2522. +
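A hedged caller-side sketch (not part of the patch; function and variable names are hypothetical, only the cnstr_shdsc_tls_* signatures above and the dma_sync pattern used elsewhere in this patch are assumed) of how a tls10(hmac(sha1),cbc(aes)) tfm would rebuild its shared descriptors:

static int sketch_tls_set_sh_desc(struct caam_ctx *ctx, struct device *jrdev,
				  unsigned int assoclen, unsigned int ivsize,
				  unsigned int authsize, unsigned int blocksize,
				  int era)
{
	u32 *desc;

	/* tls10 encapsulation shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
			      authsize, blocksize, era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* tls10 decapsulation shared descriptor (keys referenced by DMA) */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
			      authsize, blocksize, era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}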
  2523. +/**
  2524. * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
  2525. * @desc: pointer to buffer used for descriptor construction
  2526. * @cdata: pointer to block cipher transform definitions
  2527. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2528. + * @ivsize: initialization vector size
  2529. * @icvsize: integrity check value (ICV) size (truncated or full)
  2530. + * @is_qi: true when called from caam/qi
  2531. */
  2532. void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
  2533. - unsigned int icvsize)
  2534. + unsigned int ivsize, unsigned int icvsize,
  2535. + const bool is_qi)
  2536. {
  2537. u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
  2538. *zero_assoc_jump_cmd2;
  2539. @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
  2540. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2541. OP_ALG_ENCRYPT);
  2542. + if (is_qi) {
  2543. + u32 *wait_load_cmd;
  2544. +
  2545. + /* REG3 = assoclen */
  2546. + append_seq_load(desc, 4, LDST_CLASS_DECO |
  2547. + LDST_SRCDST_WORD_DECO_MATH3 |
  2548. + (4 << LDST_OFFSET_SHIFT));
  2549. +
  2550. + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2551. + JUMP_COND_CALM | JUMP_COND_NCP |
  2552. + JUMP_COND_NOP | JUMP_COND_NIP |
  2553. + JUMP_COND_NIFP);
  2554. + set_jump_tgt_here(desc, wait_load_cmd);
  2555. +
  2556. + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
  2557. + ivsize);
  2558. + } else {
  2559. + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
  2560. + CAAM_CMD_SZ);
  2561. + }
  2562. +
  2563. /* if assoclen + cryptlen is ZERO, skip to ICV write */
  2564. - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  2565. zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
  2566. JUMP_COND_MATH_Z);
  2567. + if (is_qi)
  2568. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2569. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2570. +
  2571. /* if assoclen is ZERO, skip reading the assoc data */
  2572. append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2573. zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
  2574. @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
  2575. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  2576. FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
  2577. - /* jump the zero-payload commands */
  2578. - append_jump(desc, JUMP_TEST_ALL | 2);
  2579. + /* jump to ICV writing */
  2580. + if (is_qi)
  2581. + append_jump(desc, JUMP_TEST_ALL | 4);
  2582. + else
  2583. + append_jump(desc, JUMP_TEST_ALL | 2);
  2584. /* zero-payload commands */
  2585. set_jump_tgt_here(desc, zero_payload_jump_cmd);
  2586. @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
  2587. /* read assoc data */
  2588. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  2589. FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
  2590. + if (is_qi)
  2591. + /* jump to ICV writing */
  2592. + append_jump(desc, JUMP_TEST_ALL | 2);
  2593. /* There is no input data */
  2594. set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
  2595. + if (is_qi)
  2596. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2597. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
  2598. + FIFOLD_TYPE_LAST1);
  2599. +
  2600. /* write ICV */
  2601. append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
  2602. LDST_SRCDST_BYTE_CONTEXT);
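Note (illustrative, not part of the patch): the is_qi prologue added to the GCM descriptors above, restated as a helper; that caam/qi places a 4-byte assoclen word and the IV at the head of the input sequence is an assumption here, and the helper name is hypothetical.

static void sketch_qi_read_assoclen(u32 *desc)
{
	u32 *wait_load_cmd;

	/* REG3 = assoclen, read from the head of the input sequence */
	append_seq_load(desc, 4, LDST_CLASS_DECO |
			LDST_SRCDST_WORD_DECO_MATH3 |
			(4 << LDST_OFFSET_SHIFT));

	/* wait until the DECO load has landed before REG3 is consumed */
	wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				    JUMP_COND_CALM | JUMP_COND_NCP |
				    JUMP_COND_NOP | JUMP_COND_NIP |
				    JUMP_COND_NIFP);
	set_jump_tgt_here(desc, wait_load_cmd);
}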
  2603. @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
  2604. * @desc: pointer to buffer used for descriptor construction
  2605. * @cdata: pointer to block cipher transform definitions
  2606. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2607. + * @ivsize: initialization vector size
  2608. * @icvsize: integrity check value (ICV) size (truncated or full)
  2609. + * @is_qi: true when called from caam/qi
  2610. */
  2611. void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
  2612. - unsigned int icvsize)
  2613. + unsigned int ivsize, unsigned int icvsize,
  2614. + const bool is_qi)
  2615. {
  2616. u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
  2617. @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
  2618. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2619. OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  2620. + if (is_qi) {
  2621. + u32 *wait_load_cmd;
  2622. +
  2623. + /* REG3 = assoclen */
  2624. + append_seq_load(desc, 4, LDST_CLASS_DECO |
  2625. + LDST_SRCDST_WORD_DECO_MATH3 |
  2626. + (4 << LDST_OFFSET_SHIFT));
  2627. +
  2628. + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2629. + JUMP_COND_CALM | JUMP_COND_NCP |
  2630. + JUMP_COND_NOP | JUMP_COND_NIP |
  2631. + JUMP_COND_NIFP);
  2632. + set_jump_tgt_here(desc, wait_load_cmd);
  2633. +
  2634. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2635. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2636. + }
  2637. +
  2638. /* if assoclen is ZERO, skip reading the assoc data */
  2639. append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  2640. zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
  2641. @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
  2642. * @desc: pointer to buffer used for descriptor construction
  2643. * @cdata: pointer to block cipher transform definitions
  2644. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2645. + * @ivsize: initialization vector size
  2646. * @icvsize: integrity check value (ICV) size (truncated or full)
  2647. + * @is_qi: true when called from caam/qi
  2648. */
  2649. void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
  2650. - unsigned int icvsize)
  2651. + unsigned int ivsize, unsigned int icvsize,
  2652. + const bool is_qi)
  2653. {
  2654. u32 *key_jump_cmd;
  2655. @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
  2656. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2657. OP_ALG_ENCRYPT);
  2658. - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
  2659. + if (is_qi) {
  2660. + u32 *wait_load_cmd;
  2661. +
  2662. + /* REG3 = assoclen */
  2663. + append_seq_load(desc, 4, LDST_CLASS_DECO |
  2664. + LDST_SRCDST_WORD_DECO_MATH3 |
  2665. + (4 << LDST_OFFSET_SHIFT));
  2666. +
  2667. + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2668. + JUMP_COND_CALM | JUMP_COND_NCP |
  2669. + JUMP_COND_NOP | JUMP_COND_NIP |
  2670. + JUMP_COND_NIFP);
  2671. + set_jump_tgt_here(desc, wait_load_cmd);
  2672. +
  2673. + /* Read salt and IV */
  2674. + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
  2675. + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
  2676. + FIFOLD_TYPE_IV);
  2677. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2678. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2679. + }
  2680. +
  2681. + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
  2682. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  2683. /* Read assoc data */
  2684. @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
  2685. FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
  2686. /* Skip IV */
  2687. - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  2688. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
  2689. /* Will read cryptlen bytes */
  2690. append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
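Note (illustrative, not part of the patch): the salt handling in the QI branch above, spelled out; that the 4-byte salt sits right after the AES key follows from rfc4106_setkey() earlier in this patch (ctx->cdata.keylen = keylen - 4).

/*
 * RFC4106 nonce layout consumed by the descriptor:
 *
 *   12-byte GCM IV = salt (4 bytes, at cdata->key_virt + cdata->keylen)
 *                  || per-request IV (ivsize = 8 bytes, from the sequence)
 *
 * hence the FIFO LOAD of 4 immediate salt bytes followed by a SEQ FIFO LOAD
 * of ivsize bytes, both tagged FIFOLD_TYPE_IV.
 */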
  2691. @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
  2692. * @desc: pointer to buffer used for descriptor construction
  2693. * @cdata: pointer to block cipher transform definitions
  2694. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2695. + * @ivsize: initialization vector size
  2696. * @icvsize: integrity check value (ICV) size (truncated or full)
  2697. + * @is_qi: true when called from caam/qi
  2698. */
  2699. void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
  2700. - unsigned int icvsize)
  2701. + unsigned int ivsize, unsigned int icvsize,
  2702. + const bool is_qi)
  2703. {
  2704. u32 *key_jump_cmd;
  2705. @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
  2706. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2707. OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  2708. - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
  2709. + if (is_qi) {
  2710. + u32 *wait_load_cmd;
  2711. +
  2712. + /* REG3 = assoclen */
  2713. + append_seq_load(desc, 4, LDST_CLASS_DECO |
  2714. + LDST_SRCDST_WORD_DECO_MATH3 |
  2715. + (4 << LDST_OFFSET_SHIFT));
  2716. +
  2717. + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  2718. + JUMP_COND_CALM | JUMP_COND_NCP |
  2719. + JUMP_COND_NOP | JUMP_COND_NIP |
  2720. + JUMP_COND_NIFP);
  2721. + set_jump_tgt_here(desc, wait_load_cmd);
  2722. +
  2723. + /* Read salt and IV */
  2724. + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
  2725. + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
  2726. + FIFOLD_TYPE_IV);
  2727. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2728. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2729. + }
  2730. +
  2731. + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
  2732. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  2733. /* Read assoc data */
  2734. @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
  2735. FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
  2736. /* Skip IV */
  2737. - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  2738. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
  2739. /* Will read cryptlen bytes */
  2740. append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
  2741. @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
  2742. * @desc: pointer to buffer used for descriptor construction
  2743. * @cdata: pointer to block cipher transform definitions
  2744. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2745. + * @ivsize: initialization vector size
  2746. * @icvsize: integrity check value (ICV) size (truncated or full)
  2747. + * @is_qi: true when called from caam/qi
  2748. */
  2749. void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
  2750. - unsigned int icvsize)
  2751. + unsigned int ivsize, unsigned int icvsize,
  2752. + const bool is_qi)
  2753. {
  2754. u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
  2755. @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
  2756. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2757. OP_ALG_ENCRYPT);
  2758. + if (is_qi) {
  2759. + /* assoclen is not needed, skip it */
  2760. + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
  2761. +
  2762. + /* Read salt and IV */
  2763. + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
  2764. + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
  2765. + FIFOLD_TYPE_IV);
  2766. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2767. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2768. + }
  2769. +
  2770. /* assoclen + cryptlen = seqinlen */
  2771. append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
  2772. @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
  2773. * @desc: pointer to buffer used for descriptor construction
  2774. * @cdata: pointer to block cipher transform definitions
  2775. * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
  2776. + * @ivsize: initialization vector size
  2777. * @icvsize: integrity check value (ICV) size (truncated or full)
  2778. + * @is_qi: true when called from caam/qi
  2779. */
  2780. void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
  2781. - unsigned int icvsize)
  2782. + unsigned int ivsize, unsigned int icvsize,
  2783. + const bool is_qi)
  2784. {
  2785. u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
  2786. @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
  2787. append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
  2788. OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  2789. + if (is_qi) {
  2790. + /* assoclen is not needed, skip it */
  2791. + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
  2792. +
  2793. + /* Read salt and IV */
  2794. + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
  2795. + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
  2796. + FIFOLD_TYPE_IV);
  2797. + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
  2798. + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
  2799. + }
  2800. +
  2801. /* assoclen + cryptlen = seqoutlen */
  2802. append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  2803. @@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
  2804. /* Load nonce into CONTEXT1 reg */
  2805. if (is_rfc3686) {
  2806. - u8 *nonce = cdata->key_virt + cdata->keylen;
  2807. + const u8 *nonce = cdata->key_virt + cdata->keylen;
  2808. append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
  2809. LDST_CLASS_IND_CCB |
  2810. @@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
  2811. /* Load nonce into CONTEXT1 reg */
  2812. if (is_rfc3686) {
  2813. - u8 *nonce = cdata->key_virt + cdata->keylen;
  2814. + const u8 *nonce = cdata->key_virt + cdata->keylen;
  2815. append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
  2816. LDST_CLASS_IND_CCB |
  2817. @@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
  2818. /* Load Nonce into CONTEXT1 reg */
  2819. if (is_rfc3686) {
  2820. - u8 *nonce = cdata->key_virt + cdata->keylen;
  2821. + const u8 *nonce = cdata->key_virt + cdata->keylen;
  2822. append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
  2823. LDST_CLASS_IND_CCB |
  2824. --- a/drivers/crypto/caam/caamalg_desc.h
  2825. +++ b/drivers/crypto/caam/caamalg_desc.h
  2826. @@ -17,6 +17,9 @@
  2827. #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
  2828. #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
  2829. +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
  2830. +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
  2831. +
  2832. /* Note: Nonce is counted in cdata.keylen */
  2833. #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
  2834. @@ -27,14 +30,20 @@
  2835. #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
  2836. #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
  2837. #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
  2838. +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
  2839. +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
  2840. #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
  2841. #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
  2842. #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
  2843. +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
  2844. +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
  2845. #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
  2846. #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
  2847. #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
  2848. +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
  2849. +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
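The DESC_QI_* macros above budget shared-descriptor sizes in CAAM command words. As an illustrative aside (not part of the patch), and assuming CAAM_CMD_SZ is 4 bytes (one 32-bit command word, as in desc_constr.h), the new QI GCM encap length works out as follows:

#include <stdio.h>

#define CAAM_CMD_SZ 4					/* assumed: one 32-bit command word */
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)

int main(void)
{
	/* (3 + 16 + 6) words * 4 bytes = 100 bytes, well under the 64-word (256-byte) buffer */
	printf("QI GCM encap shared descriptor: %d bytes (%d words)\n",
	       DESC_QI_GCM_ENC_LEN, DESC_QI_GCM_ENC_LEN / CAAM_CMD_SZ);
	return 0;
}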
  2850. #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
  2851. #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
  2852. @@ -43,46 +52,62 @@
  2853. 15 * CAAM_CMD_SZ)
  2854. void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
  2855. - unsigned int icvsize);
  2856. + unsigned int icvsize, int era);
  2857. void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
  2858. - unsigned int icvsize);
  2859. + unsigned int icvsize, int era);
  2860. void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
  2861. struct alginfo *adata, unsigned int ivsize,
  2862. unsigned int icvsize, const bool is_rfc3686,
  2863. u32 *nonce, const u32 ctx1_iv_off,
  2864. - const bool is_qi);
  2865. + const bool is_qi, int era);
  2866. void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
  2867. struct alginfo *adata, unsigned int ivsize,
  2868. unsigned int icvsize, const bool geniv,
  2869. const bool is_rfc3686, u32 *nonce,
  2870. - const u32 ctx1_iv_off, const bool is_qi);
  2871. + const u32 ctx1_iv_off, const bool is_qi, int era);
  2872. void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
  2873. struct alginfo *adata, unsigned int ivsize,
  2874. unsigned int icvsize, const bool is_rfc3686,
  2875. u32 *nonce, const u32 ctx1_iv_off,
  2876. - const bool is_qi);
  2877. + const bool is_qi, int era);
  2878. +
2879. +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
  2880. + struct alginfo *adata, unsigned int assoclen,
  2881. + unsigned int ivsize, unsigned int authsize,
  2882. + unsigned int blocksize, int era);
  2883. +
2884. +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
  2885. + struct alginfo *adata, unsigned int assoclen,
  2886. + unsigned int ivsize, unsigned int authsize,
  2887. + unsigned int blocksize, int era);
  2888. void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
  2889. - unsigned int icvsize);
  2890. + unsigned int ivsize, unsigned int icvsize,
  2891. + const bool is_qi);
  2892. void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
  2893. - unsigned int icvsize);
  2894. + unsigned int ivsize, unsigned int icvsize,
  2895. + const bool is_qi);
  2896. void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
  2897. - unsigned int icvsize);
  2898. + unsigned int ivsize, unsigned int icvsize,
  2899. + const bool is_qi);
  2900. void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
  2901. - unsigned int icvsize);
  2902. + unsigned int ivsize, unsigned int icvsize,
  2903. + const bool is_qi);
  2904. void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
  2905. - unsigned int icvsize);
  2906. + unsigned int ivsize, unsigned int icvsize,
  2907. + const bool is_qi);
  2908. void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
  2909. - unsigned int icvsize);
  2910. + unsigned int ivsize, unsigned int icvsize,
  2911. + const bool is_qi);
  2912. void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
  2913. unsigned int ivsize, const bool is_rfc3686,
  2914. --- a/drivers/crypto/caam/caamalg_qi.c
  2915. +++ b/drivers/crypto/caam/caamalg_qi.c
  2916. @@ -7,7 +7,7 @@
  2917. */
  2918. #include "compat.h"
  2919. -
  2920. +#include "ctrl.h"
  2921. #include "regs.h"
  2922. #include "intern.h"
  2923. #include "desc_constr.h"
  2924. @@ -53,6 +53,7 @@ struct caam_ctx {
  2925. u32 sh_desc_givenc[DESC_MAX_USED_LEN];
  2926. u8 key[CAAM_MAX_KEY_SIZE];
  2927. dma_addr_t key_dma;
  2928. + enum dma_data_direction dir;
  2929. struct alginfo adata;
  2930. struct alginfo cdata;
  2931. unsigned int authsize;
  2932. @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
  2933. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  2934. OP_ALG_AAI_CTR_MOD128);
  2935. const bool is_rfc3686 = alg->caam.rfc3686;
  2936. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  2937. if (!ctx->cdata.keylen || !ctx->authsize)
  2938. return 0;
  2939. @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
  2940. cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  2941. ivsize, ctx->authsize, is_rfc3686, nonce,
  2942. - ctx1_iv_off, true);
  2943. + ctx1_iv_off, true, ctrlpriv->era);
  2944. skip_enc:
  2945. /* aead_decrypt shared descriptor */
  2946. @@ -149,7 +151,8 @@ skip_enc:
  2947. cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
  2948. ivsize, ctx->authsize, alg->caam.geniv,
  2949. - is_rfc3686, nonce, ctx1_iv_off, true);
  2950. + is_rfc3686, nonce, ctx1_iv_off, true,
  2951. + ctrlpriv->era);
  2952. if (!alg->caam.geniv)
  2953. goto skip_givenc;
  2954. @@ -176,7 +179,7 @@ skip_enc:
  2955. cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  2956. ivsize, ctx->authsize, is_rfc3686, nonce,
  2957. - ctx1_iv_off, true);
  2958. + ctx1_iv_off, true, ctrlpriv->era);
  2959. skip_givenc:
  2960. return 0;
  2961. @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
  2962. {
  2963. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2964. struct device *jrdev = ctx->jrdev;
  2965. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  2966. struct crypto_authenc_keys keys;
  2967. int ret = 0;
  2968. @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
  2969. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  2970. #endif
  2971. + /*
  2972. + * If DKP is supported, use it in the shared descriptor to generate
  2973. + * the split key.
  2974. + */
  2975. + if (ctrlpriv->era >= 6) {
  2976. + ctx->adata.keylen = keys.authkeylen;
  2977. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  2978. + OP_ALG_ALGSEL_MASK);
  2979. +
  2980. + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  2981. + goto badkey;
  2982. +
  2983. + memcpy(ctx->key, keys.authkey, keys.authkeylen);
  2984. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
  2985. + keys.enckeylen);
  2986. + dma_sync_single_for_device(jrdev, ctx->key_dma,
  2987. + ctx->adata.keylen_pad +
  2988. + keys.enckeylen, ctx->dir);
  2989. + goto skip_split_key;
  2990. + }
  2991. +
  2992. ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
  2993. keys.authkeylen, CAAM_MAX_KEY_SIZE -
  2994. keys.enckeylen);
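On Era 6+ parts the split key is produced by the DKP protocol inside the shared descriptor, so aead_setkey() only has to lay the raw keys out in ctx->key: the authentication key at offset 0, padded out to the split-key length, with the cipher key appended after the pad. A minimal sketch of that layout (the sizes are assumptions for the example, not the driver's values):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[128];
	size_t authkeylen = 20;	/* e.g. a raw HMAC-SHA1 key */
	size_t enckeylen  = 16;	/* e.g. an AES-128 key */
	size_t keylen_pad = 40;	/* assumed padded split-key length for SHA-1 */

	memset(key, 0, sizeof(key));
	memset(key, 0xAA, authkeylen);			/* auth key at offset 0 */
	memset(key + keylen_pad, 0xBB, enckeylen);	/* cipher key after the pad */

	printf("auth key [0, %zu), pad to %zu, enc key [%zu, %zu)\n",
	       authkeylen, keylen_pad, keylen_pad, keylen_pad + enckeylen);
	return 0;
}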
  2995. @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
  2996. /* postpend encryption key to auth split key */
  2997. memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  2998. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
  2999. - keys.enckeylen, DMA_TO_DEVICE);
  3000. + keys.enckeylen, ctx->dir);
  3001. #ifdef DEBUG
  3002. print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  3003. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  3004. ctx->adata.keylen_pad + keys.enckeylen, 1);
  3005. #endif
  3006. +skip_split_key:
  3007. ctx->cdata.keylen = keys.enckeylen;
  3008. ret = aead_set_sh_desc(aead);
  3009. @@ -258,6 +284,468 @@ badkey:
  3010. return -EINVAL;
  3011. }
  3012. +static int tls_set_sh_desc(struct crypto_aead *tls)
  3013. +{
  3014. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  3015. + unsigned int ivsize = crypto_aead_ivsize(tls);
  3016. + unsigned int blocksize = crypto_aead_blocksize(tls);
  3017. + unsigned int assoclen = 13; /* always 13 bytes for TLS */
  3018. + unsigned int data_len[2];
  3019. + u32 inl_mask;
  3020. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  3021. +
  3022. + if (!ctx->cdata.keylen || !ctx->authsize)
  3023. + return 0;
  3024. +
  3025. + /*
  3026. + * TLS 1.0 encrypt shared descriptor
  3027. + * Job Descriptor and Shared Descriptor
  3028. + * must fit into the 64-word Descriptor h/w Buffer
  3029. + */
  3030. + data_len[0] = ctx->adata.keylen_pad;
  3031. + data_len[1] = ctx->cdata.keylen;
  3032. +
  3033. + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
  3034. + &inl_mask, ARRAY_SIZE(data_len)) < 0)
  3035. + return -EINVAL;
  3036. +
  3037. + if (inl_mask & 1)
  3038. + ctx->adata.key_virt = ctx->key;
  3039. + else
  3040. + ctx->adata.key_dma = ctx->key_dma;
  3041. +
  3042. + if (inl_mask & 2)
  3043. + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  3044. + else
  3045. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  3046. +
  3047. + ctx->adata.key_inline = !!(inl_mask & 1);
  3048. + ctx->cdata.key_inline = !!(inl_mask & 2);
  3049. +
  3050. + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  3051. + assoclen, ivsize, ctx->authsize, blocksize,
  3052. + ctrlpriv->era);
  3053. +
  3054. + /*
  3055. + * TLS 1.0 decrypt shared descriptor
  3056. + * Keys do not fit inline, regardless of algorithms used
  3057. + */
  3058. + ctx->adata.key_inline = false;
  3059. + ctx->adata.key_dma = ctx->key_dma;
  3060. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  3061. +
  3062. + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
  3063. + assoclen, ivsize, ctx->authsize, blocksize,
  3064. + ctrlpriv->era);
  3065. +
  3066. + return 0;
  3067. +}
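desc_inline_query() decides, per key, whether the shared descriptor still has room to carry the key inline or whether it must be referenced by DMA address; bit 0 of inl_mask answers for data_len[0] (the split auth key) and bit 1 for data_len[1] (the cipher key). A rough, standalone version of that space budget, with assumed overhead values (the real DESC_JOB_IO_LEN lives in desc_constr.h):

#include <stdio.h>

int main(void)
{
	unsigned int desc_max   = 64 * 4;	/* 64-word descriptor buffer */
	unsigned int job_io_len = 40;		/* assumed DESC_JOB_IO_LEN overhead */
	unsigned int sh_desc    = 33 * 4;	/* DESC_TLS10_ENC_LEN: (4 + 29) words */
	unsigned int auth_key   = 40;		/* padded SHA-1 split key */
	unsigned int enc_key    = 16;		/* AES-128 key */
	unsigned int budget     = desc_max - job_io_len - sh_desc;

	printf("room left for inline keys: %u bytes\n", budget);
	printf("auth key inline? %s, cipher key inline too? %s\n",
	       auth_key <= budget ? "yes" : "no",
	       auth_key + enc_key <= budget ? "yes" : "no");
	return 0;
}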
  3068. +
  3069. +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
  3070. +{
  3071. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  3072. +
  3073. + ctx->authsize = authsize;
  3074. + tls_set_sh_desc(tls);
  3075. +
  3076. + return 0;
  3077. +}
  3078. +
  3079. +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
  3080. + unsigned int keylen)
  3081. +{
  3082. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  3083. + struct device *jrdev = ctx->jrdev;
  3084. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  3085. + struct crypto_authenc_keys keys;
  3086. + int ret = 0;
  3087. +
  3088. + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  3089. + goto badkey;
  3090. +
  3091. +#ifdef DEBUG
  3092. + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
  3093. + keys.authkeylen + keys.enckeylen, keys.enckeylen,
  3094. + keys.authkeylen);
  3095. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  3096. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  3097. +#endif
  3098. +
  3099. + /*
  3100. + * If DKP is supported, use it in the shared descriptor to generate
  3101. + * the split key.
  3102. + */
  3103. + if (ctrlpriv->era >= 6) {
  3104. + ctx->adata.keylen = keys.authkeylen;
  3105. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  3106. + OP_ALG_ALGSEL_MASK);
  3107. +
  3108. + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  3109. + goto badkey;
  3110. +
  3111. + memcpy(ctx->key, keys.authkey, keys.authkeylen);
  3112. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
  3113. + keys.enckeylen);
  3114. + dma_sync_single_for_device(jrdev, ctx->key_dma,
  3115. + ctx->adata.keylen_pad +
  3116. + keys.enckeylen, ctx->dir);
  3117. + goto skip_split_key;
  3118. + }
  3119. +
  3120. + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
  3121. + keys.authkeylen, CAAM_MAX_KEY_SIZE -
  3122. + keys.enckeylen);
  3123. + if (ret)
  3124. + goto badkey;
  3125. +
  3126. + /* postpend encryption key to auth split key */
  3127. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  3128. + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
  3129. + keys.enckeylen, ctx->dir);
  3130. +
  3131. +#ifdef DEBUG
  3132. + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
  3133. + ctx->adata.keylen, ctx->adata.keylen_pad);
  3134. + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  3135. + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  3136. + ctx->adata.keylen_pad + keys.enckeylen, 1);
  3137. +#endif
  3138. +
  3139. +skip_split_key:
  3140. + ctx->cdata.keylen = keys.enckeylen;
  3141. +
  3142. + ret = tls_set_sh_desc(tls);
  3143. + if (ret)
  3144. + goto badkey;
  3145. +
  3146. + /* Now update the driver contexts with the new shared descriptor */
  3147. + if (ctx->drv_ctx[ENCRYPT]) {
  3148. + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  3149. + ctx->sh_desc_enc);
  3150. + if (ret) {
  3151. + dev_err(jrdev, "driver enc context update failed\n");
  3152. + goto badkey;
  3153. + }
  3154. + }
  3155. +
  3156. + if (ctx->drv_ctx[DECRYPT]) {
  3157. + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  3158. + ctx->sh_desc_dec);
  3159. + if (ret) {
  3160. + dev_err(jrdev, "driver dec context update failed\n");
  3161. + goto badkey;
  3162. + }
  3163. + }
  3164. +
  3165. + return ret;
  3166. +badkey:
  3167. + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3168. + return -EINVAL;
  3169. +}
  3170. +
  3171. +static int gcm_set_sh_desc(struct crypto_aead *aead)
  3172. +{
  3173. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3174. + unsigned int ivsize = crypto_aead_ivsize(aead);
  3175. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  3176. + ctx->cdata.keylen;
  3177. +
  3178. + if (!ctx->cdata.keylen || !ctx->authsize)
  3179. + return 0;
  3180. +
  3181. + /*
  3182. + * Job Descriptor and Shared Descriptor
  3183. + * must fit into the 64-word Descriptor h/w Buffer
  3184. + */
  3185. + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
  3186. + ctx->cdata.key_inline = true;
  3187. + ctx->cdata.key_virt = ctx->key;
  3188. + } else {
  3189. + ctx->cdata.key_inline = false;
  3190. + ctx->cdata.key_dma = ctx->key_dma;
  3191. + }
  3192. +
  3193. + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  3194. + ctx->authsize, true);
  3195. +
  3196. + /*
  3197. + * Job Descriptor and Shared Descriptor
  3198. + * must fit into the 64-word Descriptor h/w Buffer
  3199. + */
  3200. + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
  3201. + ctx->cdata.key_inline = true;
  3202. + ctx->cdata.key_virt = ctx->key;
  3203. + } else {
  3204. + ctx->cdata.key_inline = false;
  3205. + ctx->cdata.key_dma = ctx->key_dma;
  3206. + }
  3207. +
  3208. + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  3209. + ctx->authsize, true);
  3210. +
  3211. + return 0;
  3212. +}
  3213. +
  3214. +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  3215. +{
  3216. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  3217. +
  3218. + ctx->authsize = authsize;
  3219. + gcm_set_sh_desc(authenc);
  3220. +
  3221. + return 0;
  3222. +}
  3223. +
  3224. +static int gcm_setkey(struct crypto_aead *aead,
  3225. + const u8 *key, unsigned int keylen)
  3226. +{
  3227. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3228. + struct device *jrdev = ctx->jrdev;
  3229. + int ret;
  3230. +
  3231. +#ifdef DEBUG
  3232. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  3233. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  3234. +#endif
  3235. +
  3236. + memcpy(ctx->key, key, keylen);
  3237. + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
  3238. + ctx->cdata.keylen = keylen;
  3239. +
  3240. + ret = gcm_set_sh_desc(aead);
  3241. + if (ret)
  3242. + return ret;
  3243. +
  3244. + /* Now update the driver contexts with the new shared descriptor */
  3245. + if (ctx->drv_ctx[ENCRYPT]) {
  3246. + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  3247. + ctx->sh_desc_enc);
  3248. + if (ret) {
  3249. + dev_err(jrdev, "driver enc context update failed\n");
  3250. + return ret;
  3251. + }
  3252. + }
  3253. +
  3254. + if (ctx->drv_ctx[DECRYPT]) {
  3255. + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  3256. + ctx->sh_desc_dec);
  3257. + if (ret) {
  3258. + dev_err(jrdev, "driver dec context update failed\n");
  3259. + return ret;
  3260. + }
  3261. + }
  3262. +
  3263. + return 0;
  3264. +}
  3265. +
  3266. +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
  3267. +{
  3268. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3269. + unsigned int ivsize = crypto_aead_ivsize(aead);
  3270. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  3271. + ctx->cdata.keylen;
  3272. +
  3273. + if (!ctx->cdata.keylen || !ctx->authsize)
  3274. + return 0;
  3275. +
  3276. + ctx->cdata.key_virt = ctx->key;
  3277. +
  3278. + /*
  3279. + * Job Descriptor and Shared Descriptor
  3280. + * must fit into the 64-word Descriptor h/w Buffer
  3281. + */
  3282. + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
  3283. + ctx->cdata.key_inline = true;
  3284. + } else {
  3285. + ctx->cdata.key_inline = false;
  3286. + ctx->cdata.key_dma = ctx->key_dma;
  3287. + }
  3288. +
  3289. + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  3290. + ctx->authsize, true);
  3291. +
  3292. + /*
  3293. + * Job Descriptor and Shared Descriptor
  3294. + * must fit into the 64-word Descriptor h/w Buffer
  3295. + */
  3296. + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
  3297. + ctx->cdata.key_inline = true;
  3298. + } else {
  3299. + ctx->cdata.key_inline = false;
  3300. + ctx->cdata.key_dma = ctx->key_dma;
  3301. + }
  3302. +
  3303. + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  3304. + ctx->authsize, true);
  3305. +
  3306. + return 0;
  3307. +}
  3308. +
  3309. +static int rfc4106_setauthsize(struct crypto_aead *authenc,
  3310. + unsigned int authsize)
  3311. +{
  3312. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  3313. +
  3314. + ctx->authsize = authsize;
  3315. + rfc4106_set_sh_desc(authenc);
  3316. +
  3317. + return 0;
  3318. +}
  3319. +
  3320. +static int rfc4106_setkey(struct crypto_aead *aead,
  3321. + const u8 *key, unsigned int keylen)
  3322. +{
  3323. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3324. + struct device *jrdev = ctx->jrdev;
  3325. + int ret;
  3326. +
  3327. + if (keylen < 4)
  3328. + return -EINVAL;
  3329. +
  3330. +#ifdef DEBUG
  3331. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  3332. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  3333. +#endif
  3334. +
  3335. + memcpy(ctx->key, key, keylen);
  3336. + /*
  3337. + * The last four bytes of the key material are used as the salt value
  3338. + * in the nonce. Update the AES key length.
  3339. + */
  3340. + ctx->cdata.keylen = keylen - 4;
  3341. + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  3342. + ctx->dir);
  3343. +
  3344. + ret = rfc4106_set_sh_desc(aead);
  3345. + if (ret)
  3346. + return ret;
  3347. +
  3348. + /* Now update the driver contexts with the new shared descriptor */
  3349. + if (ctx->drv_ctx[ENCRYPT]) {
  3350. + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  3351. + ctx->sh_desc_enc);
  3352. + if (ret) {
  3353. + dev_err(jrdev, "driver enc context update failed\n");
  3354. + return ret;
  3355. + }
  3356. + }
  3357. +
  3358. + if (ctx->drv_ctx[DECRYPT]) {
  3359. + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  3360. + ctx->sh_desc_dec);
  3361. + if (ret) {
  3362. + dev_err(jrdev, "driver dec context update failed\n");
  3363. + return ret;
  3364. + }
  3365. + }
  3366. +
  3367. + return 0;
  3368. +}
  3369. +
  3370. +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
  3371. +{
  3372. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3373. + unsigned int ivsize = crypto_aead_ivsize(aead);
  3374. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  3375. + ctx->cdata.keylen;
  3376. +
  3377. + if (!ctx->cdata.keylen || !ctx->authsize)
  3378. + return 0;
  3379. +
  3380. + ctx->cdata.key_virt = ctx->key;
  3381. +
  3382. + /*
  3383. + * Job Descriptor and Shared Descriptor
  3384. + * must fit into the 64-word Descriptor h/w Buffer
  3385. + */
  3386. + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
  3387. + ctx->cdata.key_inline = true;
  3388. + } else {
  3389. + ctx->cdata.key_inline = false;
  3390. + ctx->cdata.key_dma = ctx->key_dma;
  3391. + }
  3392. +
  3393. + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  3394. + ctx->authsize, true);
  3395. +
  3396. + /*
  3397. + * Job Descriptor and Shared Descriptor
  3398. + * must fit into the 64-word Descriptor h/w Buffer
  3399. + */
  3400. + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
  3401. + ctx->cdata.key_inline = true;
  3402. + } else {
  3403. + ctx->cdata.key_inline = false;
  3404. + ctx->cdata.key_dma = ctx->key_dma;
  3405. + }
  3406. +
  3407. + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  3408. + ctx->authsize, true);
  3409. +
  3410. + return 0;
  3411. +}
  3412. +
  3413. +static int rfc4543_setauthsize(struct crypto_aead *authenc,
  3414. + unsigned int authsize)
  3415. +{
  3416. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  3417. +
  3418. + ctx->authsize = authsize;
  3419. + rfc4543_set_sh_desc(authenc);
  3420. +
  3421. + return 0;
  3422. +}
  3423. +
  3424. +static int rfc4543_setkey(struct crypto_aead *aead,
  3425. + const u8 *key, unsigned int keylen)
  3426. +{
  3427. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3428. + struct device *jrdev = ctx->jrdev;
  3429. + int ret;
  3430. +
  3431. + if (keylen < 4)
  3432. + return -EINVAL;
  3433. +
  3434. +#ifdef DEBUG
  3435. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  3436. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  3437. +#endif
  3438. +
  3439. + memcpy(ctx->key, key, keylen);
  3440. + /*
  3441. + * The last four bytes of the key material are used as the salt value
  3442. + * in the nonce. Update the AES key length.
  3443. + */
  3444. + ctx->cdata.keylen = keylen - 4;
  3445. + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  3446. + ctx->dir);
  3447. +
  3448. + ret = rfc4543_set_sh_desc(aead);
  3449. + if (ret)
  3450. + return ret;
  3451. +
  3452. + /* Now update the driver contexts with the new shared descriptor */
  3453. + if (ctx->drv_ctx[ENCRYPT]) {
  3454. + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  3455. + ctx->sh_desc_enc);
  3456. + if (ret) {
  3457. + dev_err(jrdev, "driver enc context update failed\n");
  3458. + return ret;
  3459. + }
  3460. + }
  3461. +
  3462. + if (ctx->drv_ctx[DECRYPT]) {
  3463. + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  3464. + ctx->sh_desc_dec);
  3465. + if (ret) {
  3466. + dev_err(jrdev, "driver dec context update failed\n");
  3467. + return ret;
  3468. + }
  3469. + }
  3470. +
  3471. + return 0;
  3472. +}
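For both rfc4106 and rfc4543, the key blob handed to setkey() is the AES key with the 4-byte nonce salt appended, which is why ctx->cdata.keylen is set to keylen - 4 while the full blob stays in ctx->key. An illustrative split (not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 16-byte AES-128 key followed by 4 salt bytes, 20 bytes total */
	unsigned char blob[20] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		0xde, 0xad, 0xbe, 0xef
	};
	size_t keylen = sizeof(blob) - 4;	/* what ctx->cdata.keylen becomes */
	unsigned char salt[4];

	memcpy(salt, blob + keylen, 4);		/* last 4 bytes feed the nonce */
	printf("AES keylen %zu, salt %02x%02x%02x%02x\n",
	       keylen, salt[0], salt[1], salt[2], salt[3]);
	return 0;
}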
  3473. +
  3474. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  3475. const u8 *key, unsigned int keylen)
  3476. {
  3477. @@ -414,6 +902,29 @@ struct aead_edesc {
  3478. };
  3479. /*
  3480. + * tls_edesc - s/w-extended tls descriptor
  3481. + * @src_nents: number of segments in input scatterlist
  3482. + * @dst_nents: number of segments in output scatterlist
  3483. + * @iv_dma: dma address of iv for checking continuity and link table
  3484. + * @qm_sg_bytes: length of dma mapped h/w link table
  3485. + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
  3486. + * @qm_sg_dma: bus physical mapped address of h/w link table
  3487. + * @drv_req: driver-specific request structure
  3488. + * @sgt: the h/w link table, followed by IV
  3489. + */
  3490. +struct tls_edesc {
  3491. + int src_nents;
  3492. + int dst_nents;
  3493. + dma_addr_t iv_dma;
  3494. + int qm_sg_bytes;
  3495. + dma_addr_t qm_sg_dma;
  3496. + struct scatterlist tmp[2];
  3497. + struct scatterlist *dst;
  3498. + struct caam_drv_req drv_req;
  3499. + struct qm_sg_entry sgt[0];
  3500. +};
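The zero-length sgt[] member means the hardware link table, and the IV right after it, are carved out of the same qi_cache buffer as the tls_edesc itself; that is why the allocation code below computes iv as (u8 *)(sg_table + qm_sg_ents). A small layout sketch with assumed sizes (illustrative, not part of the patch):

#include <stdio.h>
#include <stddef.h>

struct sg_entry_sketch { unsigned long long addr; unsigned int len, bpid_off; };

struct tls_edesc_sketch {
	int src_nents, dst_nents;
	struct sg_entry_sketch sgt[];	/* h/w link table, followed by the IV */
};

int main(void)
{
	unsigned int qm_sg_ents = 4, ivsize = 16;
	size_t total = sizeof(struct tls_edesc_sketch) +
		       qm_sg_ents * sizeof(struct sg_entry_sketch) + ivsize;

	printf("edesc %zu + sg table %zu + iv %u = %zu bytes from one buffer\n",
	       sizeof(struct tls_edesc_sketch),
	       qm_sg_ents * sizeof(struct sg_entry_sketch), ivsize, total);
	return 0;
}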
  3501. +
  3502. +/*
  3503. * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  3504. * @src_nents: number of segments in input scatterlist
  3505. * @dst_nents: number of segments in output scatterlist
  3506. @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
  3507. dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  3508. }
  3509. +static void tls_unmap(struct device *dev,
  3510. + struct tls_edesc *edesc,
  3511. + struct aead_request *req)
  3512. +{
  3513. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  3514. + int ivsize = crypto_aead_ivsize(aead);
  3515. +
  3516. + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
  3517. + edesc->dst_nents, edesc->iv_dma, ivsize,
  3518. + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
  3519. + edesc->qm_sg_bytes);
  3520. +}
  3521. +
  3522. static void ablkcipher_unmap(struct device *dev,
  3523. struct ablkcipher_edesc *edesc,
  3524. struct ablkcipher_request *req)
  3525. @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
  3526. qidev = caam_ctx->qidev;
  3527. if (unlikely(status)) {
  3528. + u32 ssrc = status & JRSTA_SSRC_MASK;
  3529. + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
  3530. +
  3531. caam_jr_strstatus(qidev, status);
  3532. - ecode = -EIO;
  3533. + /*
3534. + * verify the hw ICV (authentication) check passed; otherwise return -EBADMSG
  3535. + */
  3536. + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
  3537. + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
  3538. + ecode = -EBADMSG;
  3539. + else
  3540. + ecode = -EIO;
  3541. }
  3542. edesc = container_of(drv_req, typeof(*edesc), drv_req);
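The extra decoding in aead_done() exists so that an ICV (authentication tag) mismatch reaches the crypto API as -EBADMSG instead of the generic -EIO. A standalone sketch of that mapping; the mask and value constants are placeholders standing in for the JRSTA_* definitions in regs.h:

#include <errno.h>
#include <stdio.h>

#define SKETCH_SSRC_MASK	0xf0000000u	/* placeholder for JRSTA_SSRC_MASK */
#define SKETCH_SSRC_CCB_ERROR	0x20000000u	/* placeholder */
#define SKETCH_ERRID_MASK	0x000000ffu	/* placeholder */
#define SKETCH_ERRID_ICVCHK	0x0000000au	/* placeholder */

static int sketch_status_to_errno(unsigned int status)
{
	if (!status)
		return 0;
	if ((status & SKETCH_SSRC_MASK) == SKETCH_SSRC_CCB_ERROR &&
	    (status & SKETCH_ERRID_MASK) == SKETCH_ERRID_ICVCHK)
		return -EBADMSG;	/* ICV check failed: bad authentication tag */
	return -EIO;			/* any other hardware error */
}

int main(void)
{
	printf("%d %d %d\n", sketch_status_to_errno(0),
	       sketch_status_to_errno(0x2000000a),
	       sketch_status_to_errno(0x40000001));
	return 0;
}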
  3543. @@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
  3544. return aead_crypt(req, false);
  3545. }
  3546. +static int ipsec_gcm_encrypt(struct aead_request *req)
  3547. +{
  3548. + if (req->assoclen < 8)
  3549. + return -EINVAL;
  3550. +
  3551. + return aead_crypt(req, true);
  3552. +}
  3553. +
  3554. +static int ipsec_gcm_decrypt(struct aead_request *req)
  3555. +{
  3556. + if (req->assoclen < 8)
  3557. + return -EINVAL;
  3558. +
  3559. + return aead_crypt(req, false);
  3560. +}
  3561. +
  3562. +static void tls_done(struct caam_drv_req *drv_req, u32 status)
  3563. +{
  3564. + struct device *qidev;
  3565. + struct tls_edesc *edesc;
  3566. + struct aead_request *aead_req = drv_req->app_ctx;
  3567. + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
  3568. + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
  3569. + int ecode = 0;
  3570. +
  3571. + qidev = caam_ctx->qidev;
  3572. +
  3573. + if (unlikely(status)) {
  3574. + caam_jr_strstatus(qidev, status);
  3575. + ecode = -EIO;
  3576. + }
  3577. +
  3578. + edesc = container_of(drv_req, typeof(*edesc), drv_req);
  3579. + tls_unmap(qidev, edesc, aead_req);
  3580. +
  3581. + aead_request_complete(aead_req, ecode);
  3582. + qi_cache_free(edesc);
  3583. +}
  3584. +
  3585. +/*
  3586. + * allocate and map the tls extended descriptor
  3587. + */
  3588. +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
  3589. +{
  3590. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  3591. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3592. + unsigned int blocksize = crypto_aead_blocksize(aead);
  3593. + unsigned int padsize, authsize;
  3594. + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  3595. + typeof(*alg), aead);
  3596. + struct device *qidev = ctx->qidev;
  3597. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  3598. + GFP_KERNEL : GFP_ATOMIC;
  3599. + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  3600. + struct tls_edesc *edesc;
  3601. + dma_addr_t qm_sg_dma, iv_dma = 0;
  3602. + int ivsize = 0;
  3603. + u8 *iv;
  3604. + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
  3605. + int in_len, out_len;
  3606. + struct qm_sg_entry *sg_table, *fd_sgt;
  3607. + struct caam_drv_ctx *drv_ctx;
  3608. + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
  3609. + struct scatterlist *dst;
  3610. +
  3611. + if (encrypt) {
  3612. + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
  3613. + blocksize);
  3614. + authsize = ctx->authsize + padsize;
  3615. + } else {
  3616. + authsize = ctx->authsize;
  3617. + }
  3618. +
  3619. + drv_ctx = get_drv_ctx(ctx, op_type);
  3620. + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  3621. + return (struct tls_edesc *)drv_ctx;
  3622. +
  3623. + /* allocate space for base edesc, link tables and IV */
  3624. + edesc = qi_cache_alloc(GFP_DMA | flags);
  3625. + if (unlikely(!edesc)) {
  3626. + dev_err(qidev, "could not allocate extended descriptor\n");
  3627. + return ERR_PTR(-ENOMEM);
  3628. + }
  3629. +
  3630. + if (likely(req->src == req->dst)) {
  3631. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  3632. + req->cryptlen +
  3633. + (encrypt ? authsize : 0));
  3634. + if (unlikely(src_nents < 0)) {
  3635. + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  3636. + req->assoclen + req->cryptlen +
  3637. + (encrypt ? authsize : 0));
  3638. + qi_cache_free(edesc);
  3639. + return ERR_PTR(src_nents);
  3640. + }
  3641. +
  3642. + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  3643. + DMA_BIDIRECTIONAL);
  3644. + if (unlikely(!mapped_src_nents)) {
  3645. + dev_err(qidev, "unable to map source\n");
  3646. + qi_cache_free(edesc);
  3647. + return ERR_PTR(-ENOMEM);
  3648. + }
  3649. + dst = req->dst;
  3650. + } else {
  3651. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  3652. + req->cryptlen);
  3653. + if (unlikely(src_nents < 0)) {
  3654. + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  3655. + req->assoclen + req->cryptlen);
  3656. + qi_cache_free(edesc);
  3657. + return ERR_PTR(src_nents);
  3658. + }
  3659. +
  3660. + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
  3661. + dst_nents = sg_nents_for_len(dst, req->cryptlen +
  3662. + (encrypt ? authsize : 0));
  3663. + if (unlikely(dst_nents < 0)) {
  3664. + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  3665. + req->cryptlen +
  3666. + (encrypt ? authsize : 0));
  3667. + qi_cache_free(edesc);
  3668. + return ERR_PTR(dst_nents);
  3669. + }
  3670. +
  3671. + if (src_nents) {
  3672. + mapped_src_nents = dma_map_sg(qidev, req->src,
  3673. + src_nents, DMA_TO_DEVICE);
  3674. + if (unlikely(!mapped_src_nents)) {
  3675. + dev_err(qidev, "unable to map source\n");
  3676. + qi_cache_free(edesc);
  3677. + return ERR_PTR(-ENOMEM);
  3678. + }
  3679. + } else {
  3680. + mapped_src_nents = 0;
  3681. + }
  3682. +
  3683. + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
  3684. + DMA_FROM_DEVICE);
  3685. + if (unlikely(!mapped_dst_nents)) {
  3686. + dev_err(qidev, "unable to map destination\n");
  3687. + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  3688. + qi_cache_free(edesc);
  3689. + return ERR_PTR(-ENOMEM);
  3690. + }
  3691. + }
  3692. +
  3693. + /*
  3694. + * Create S/G table: IV, src, dst.
  3695. + * Input is not contiguous.
  3696. + */
  3697. + qm_sg_ents = 1 + mapped_src_nents +
  3698. + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
  3699. + sg_table = &edesc->sgt[0];
  3700. + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
  3701. +
  3702. + ivsize = crypto_aead_ivsize(aead);
  3703. + iv = (u8 *)(sg_table + qm_sg_ents);
  3704. + /* Make sure IV is located in a DMAable area */
  3705. + memcpy(iv, req->iv, ivsize);
  3706. + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
  3707. + if (dma_mapping_error(qidev, iv_dma)) {
  3708. + dev_err(qidev, "unable to map IV\n");
  3709. + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
  3710. + 0, 0);
  3711. + qi_cache_free(edesc);
  3712. + return ERR_PTR(-ENOMEM);
  3713. + }
  3714. +
  3715. + edesc->src_nents = src_nents;
  3716. + edesc->dst_nents = dst_nents;
  3717. + edesc->dst = dst;
  3718. + edesc->iv_dma = iv_dma;
  3719. + edesc->drv_req.app_ctx = req;
  3720. + edesc->drv_req.cbk = tls_done;
  3721. + edesc->drv_req.drv_ctx = drv_ctx;
  3722. +
  3723. + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  3724. + qm_sg_index = 1;
  3725. +
  3726. + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
  3727. + qm_sg_index += mapped_src_nents;
  3728. +
  3729. + if (mapped_dst_nents > 1)
  3730. + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
  3731. + qm_sg_index, 0);
  3732. +
  3733. + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
  3734. + if (dma_mapping_error(qidev, qm_sg_dma)) {
  3735. + dev_err(qidev, "unable to map S/G table\n");
  3736. + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
  3737. + ivsize, op_type, 0, 0);
  3738. + qi_cache_free(edesc);
  3739. + return ERR_PTR(-ENOMEM);
  3740. + }
  3741. +
  3742. + edesc->qm_sg_dma = qm_sg_dma;
  3743. + edesc->qm_sg_bytes = qm_sg_bytes;
  3744. +
  3745. + out_len = req->cryptlen + (encrypt ? authsize : 0);
  3746. + in_len = ivsize + req->assoclen + req->cryptlen;
  3747. +
  3748. + fd_sgt = &edesc->drv_req.fd_sgt[0];
  3749. +
  3750. + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
  3751. +
  3752. + if (req->dst == req->src)
  3753. + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
  3754. + (sg_nents_for_len(req->src, req->assoclen) +
  3755. + 1) * sizeof(*sg_table), out_len, 0);
  3756. + else if (mapped_dst_nents == 1)
  3757. + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
  3758. + else
  3759. + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
  3760. + qm_sg_index, out_len, 0);
  3761. +
  3762. + return edesc;
  3763. +}
  3764. +
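On the encrypt path, tls_edesc_alloc() folds the TLS 1.0 CBC padding into the byte count appended to the ciphertext: the record is MAC-then-pad-then-encrypt, so cryptlen + MAC + padding must land on a cipher-block boundary. A worked example of that arithmetic (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 16;	/* AES block size */
	unsigned int authsize  = 20;	/* HMAC-SHA1 digest */
	unsigned int cryptlen  = 100;	/* plaintext bytes in the record */

	unsigned int padsize = blocksize - ((cryptlen + authsize) % blocksize);
	/* 100 + 20 = 120; 120 % 16 = 8; pad = 8, so 100 + 20 + 8 = 128 = 8 blocks */
	printf("padsize %u, bytes appended to the ciphertext %u\n",
	       padsize, authsize + padsize);
	return 0;
}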
  3765. +static int tls_crypt(struct aead_request *req, bool encrypt)
  3766. +{
  3767. + struct tls_edesc *edesc;
  3768. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  3769. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  3770. + int ret;
  3771. +
  3772. + if (unlikely(caam_congested))
  3773. + return -EAGAIN;
  3774. +
  3775. + edesc = tls_edesc_alloc(req, encrypt);
  3776. + if (IS_ERR_OR_NULL(edesc))
  3777. + return PTR_ERR(edesc);
  3778. +
  3779. + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  3780. + if (!ret) {
  3781. + ret = -EINPROGRESS;
  3782. + } else {
  3783. + tls_unmap(ctx->qidev, edesc, req);
  3784. + qi_cache_free(edesc);
  3785. + }
  3786. +
  3787. + return ret;
  3788. +}
  3789. +
  3790. +static int tls_encrypt(struct aead_request *req)
  3791. +{
  3792. + return tls_crypt(req, true);
  3793. +}
  3794. +
  3795. +static int tls_decrypt(struct aead_request *req)
  3796. +{
  3797. + return tls_crypt(req, false);
  3798. +}
  3799. +
  3800. static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
  3801. {
  3802. struct ablkcipher_edesc *edesc;
  3803. @@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
  3804. };
  3805. static struct caam_aead_alg driver_aeads[] = {
  3806. + {
  3807. + .aead = {
  3808. + .base = {
  3809. + .cra_name = "rfc4106(gcm(aes))",
  3810. + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
  3811. + .cra_blocksize = 1,
  3812. + },
  3813. + .setkey = rfc4106_setkey,
  3814. + .setauthsize = rfc4106_setauthsize,
  3815. + .encrypt = ipsec_gcm_encrypt,
  3816. + .decrypt = ipsec_gcm_decrypt,
  3817. + .ivsize = 8,
  3818. + .maxauthsize = AES_BLOCK_SIZE,
  3819. + },
  3820. + .caam = {
  3821. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3822. + },
  3823. + },
  3824. + {
  3825. + .aead = {
  3826. + .base = {
  3827. + .cra_name = "rfc4543(gcm(aes))",
  3828. + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
  3829. + .cra_blocksize = 1,
  3830. + },
  3831. + .setkey = rfc4543_setkey,
  3832. + .setauthsize = rfc4543_setauthsize,
  3833. + .encrypt = ipsec_gcm_encrypt,
  3834. + .decrypt = ipsec_gcm_decrypt,
  3835. + .ivsize = 8,
  3836. + .maxauthsize = AES_BLOCK_SIZE,
  3837. + },
  3838. + .caam = {
  3839. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3840. + },
  3841. + },
  3842. + /* Galois Counter Mode */
  3843. + {
  3844. + .aead = {
  3845. + .base = {
  3846. + .cra_name = "gcm(aes)",
  3847. + .cra_driver_name = "gcm-aes-caam-qi",
  3848. + .cra_blocksize = 1,
  3849. + },
  3850. + .setkey = gcm_setkey,
  3851. + .setauthsize = gcm_setauthsize,
  3852. + .encrypt = aead_encrypt,
  3853. + .decrypt = aead_decrypt,
  3854. + .ivsize = 12,
  3855. + .maxauthsize = AES_BLOCK_SIZE,
  3856. + },
  3857. + .caam = {
  3858. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3859. + }
  3860. + },
  3861. /* single-pass ipsec_esp descriptor */
  3862. {
  3863. .aead = {
  3864. @@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
  3865. .geniv = true,
  3866. }
  3867. },
  3868. + {
  3869. + .aead = {
  3870. + .base = {
  3871. + .cra_name = "tls10(hmac(sha1),cbc(aes))",
  3872. + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
  3873. + .cra_blocksize = AES_BLOCK_SIZE,
  3874. + },
  3875. + .setkey = tls_setkey,
  3876. + .setauthsize = tls_setauthsize,
  3877. + .encrypt = tls_encrypt,
  3878. + .decrypt = tls_decrypt,
  3879. + .ivsize = AES_BLOCK_SIZE,
  3880. + .maxauthsize = SHA1_DIGEST_SIZE,
  3881. + },
  3882. + .caam = {
  3883. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3884. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3885. + OP_ALG_AAI_HMAC_PRECOMP,
  3886. + }
  3887. + }
  3888. };
  3889. struct caam_crypto_alg {
  3890. @@ -2126,9 +2989,20 @@ struct caam_crypto_alg {
  3891. struct caam_alg_entry caam;
  3892. };
  3893. -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  3894. +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
  3895. + bool uses_dkp)
  3896. {
  3897. struct caam_drv_private *priv;
  3898. + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
  3899. + static const u8 digest_size[] = {
  3900. + MD5_DIGEST_SIZE,
  3901. + SHA1_DIGEST_SIZE,
  3902. + SHA224_DIGEST_SIZE,
  3903. + SHA256_DIGEST_SIZE,
  3904. + SHA384_DIGEST_SIZE,
  3905. + SHA512_DIGEST_SIZE
  3906. + };
  3907. + u8 op_id;
  3908. /*
  3909. * distribute tfms across job rings to ensure in-order
  3910. @@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_
  3911. return PTR_ERR(ctx->jrdev);
  3912. }
  3913. + priv = dev_get_drvdata(ctx->jrdev->parent);
  3914. + if (priv->era >= 6 && uses_dkp)
  3915. + ctx->dir = DMA_BIDIRECTIONAL;
  3916. + else
  3917. + ctx->dir = DMA_TO_DEVICE;
  3918. +
  3919. ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
  3920. - DMA_TO_DEVICE);
  3921. + ctx->dir);
  3922. if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
  3923. dev_err(ctx->jrdev, "unable to map key\n");
  3924. caam_jr_free(ctx->jrdev);
  3925. @@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_
  3926. ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  3927. ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  3928. - priv = dev_get_drvdata(ctx->jrdev->parent);
  3929. + if (ctx->adata.algtype) {
  3930. + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
  3931. + >> OP_ALG_ALGSEL_SHIFT;
  3932. + if (op_id < ARRAY_SIZE(digest_size)) {
  3933. + ctx->authsize = digest_size[op_id];
  3934. + } else {
  3935. + dev_err(ctx->jrdev,
  3936. + "incorrect op_id %d; must be less than %zu\n",
  3937. + op_id, ARRAY_SIZE(digest_size));
  3938. + caam_jr_free(ctx->jrdev);
  3939. + return -EINVAL;
  3940. + }
  3941. + } else {
  3942. + ctx->authsize = 0;
  3943. + }
  3944. +
  3945. ctx->qidev = priv->qidev;
  3946. spin_lock_init(&ctx->lock);
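caam_init_common() now also seeds a default authsize from the class-2 algorithm selector: the ALGSEL sub-field indexes digest_size[], which lists the digest lengths in selector order (MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512). A standalone sketch of that lookup; the shift/mask values below are placeholders for the OP_ALG_ALGSEL_* constants in desc.h:

#include <stdio.h>

static const unsigned char digest_size[] = { 16, 20, 28, 32, 48, 64 };
			/* MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 */

int main(void)
{
	unsigned int algsel_shift = 16;			/* placeholder */
	unsigned int algsel_submask = 0x0fu << 16;	/* placeholder: low selector bits */
	unsigned int algtype = 0x41u << 16;		/* placeholder: SHA-1 selector */

	unsigned int op_id = (algtype & algsel_submask) >> algsel_shift;
	if (op_id < sizeof(digest_size))
		printf("default authsize: %u bytes\n", digest_size[op_id]);
	else
		printf("unknown selector %u\n", op_id);
	return 0;
}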
  3947. @@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t
  3948. crypto_alg);
  3949. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  3950. - return caam_init_common(ctx, &caam_alg->caam);
  3951. + return caam_init_common(ctx, &caam_alg->caam, false);
  3952. }
  3953. static int caam_aead_init(struct crypto_aead *tfm)
  3954. @@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_
  3955. aead);
  3956. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  3957. - return caam_init_common(ctx, &caam_alg->caam);
  3958. + return caam_init_common(ctx, &caam_alg->caam,
  3959. + (alg->setkey == aead_setkey) ||
  3960. + (alg->setkey == tls_setkey));
  3961. }
  3962. static void caam_exit_common(struct caam_ctx *ctx)
  3963. @@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam
  3964. caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
  3965. caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
  3966. - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
  3967. - DMA_TO_DEVICE);
  3968. + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
  3969. caam_jr_free(ctx->jrdev);
  3970. }
  3971. @@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo
  3972. if (!priv || !priv->qi_present)
  3973. return -ENODEV;
  3974. + if (caam_dpaa2) {
  3975. + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
  3976. + return -ENODEV;
  3977. + }
  3978. +
  3979. INIT_LIST_HEAD(&alg_list);
  3980. /*
  3981. --- /dev/null
  3982. +++ b/drivers/crypto/caam/caamalg_qi2.c
  3983. @@ -0,0 +1,5691 @@
  3984. +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
  3985. +/*
  3986. + * Copyright 2015-2016 Freescale Semiconductor Inc.
  3987. + * Copyright 2017-2018 NXP
  3988. + */
  3989. +
  3990. +#include <linux/fsl/mc.h>
  3991. +#include "compat.h"
  3992. +#include "regs.h"
  3993. +#include "caamalg_qi2.h"
  3994. +#include "dpseci_cmd.h"
  3995. +#include "desc_constr.h"
  3996. +#include "error.h"
  3997. +#include "sg_sw_sec4.h"
  3998. +#include "sg_sw_qm2.h"
  3999. +#include "key_gen.h"
  4000. +#include "caamalg_desc.h"
  4001. +#include "caamhash_desc.h"
  4002. +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
  4003. +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
  4004. +
  4005. +#define CAAM_CRA_PRIORITY 2000
  4006. +
  4007. +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
  4008. +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
  4009. + SHA512_DIGEST_SIZE * 2)
  4010. +
  4011. +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
  4012. +bool caam_little_end;
  4013. +EXPORT_SYMBOL(caam_little_end);
  4014. +bool caam_imx;
  4015. +EXPORT_SYMBOL(caam_imx);
  4016. +#endif
  4017. +
  4018. +/*
4019. + * This is a cache of buffers from which users of the CAAM QI driver
  4020. + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
  4021. + * NOTE: A more elegant solution would be to have some headroom in the frames
  4022. + * being processed. This can be added by the dpaa2-eth driver. This would
  4023. + * pose a problem for userspace application processing which cannot
  4024. + * know of this limitation. So for now, this will work.
4025. + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
  4026. + */
  4027. +static struct kmem_cache *qi_cache;
  4028. +
  4029. +struct caam_alg_entry {
  4030. + struct device *dev;
  4031. + int class1_alg_type;
  4032. + int class2_alg_type;
  4033. + bool rfc3686;
  4034. + bool geniv;
  4035. +};
  4036. +
  4037. +struct caam_aead_alg {
  4038. + struct aead_alg aead;
  4039. + struct caam_alg_entry caam;
  4040. + bool registered;
  4041. +};
  4042. +
  4043. +struct caam_skcipher_alg {
  4044. + struct skcipher_alg skcipher;
  4045. + struct caam_alg_entry caam;
  4046. + bool registered;
  4047. +};
  4048. +
  4049. +/**
  4050. + * caam_ctx - per-session context
  4051. + * @flc: Flow Contexts array
  4052. + * @key: virtual address of the key(s): [authentication key], encryption key
  4053. + * @flc_dma: I/O virtual addresses of the Flow Contexts
  4054. + * @key_dma: I/O virtual address of the key
  4055. + * @dir: DMA direction for mapping key and Flow Contexts
  4056. + * @dev: dpseci device
  4057. + * @adata: authentication algorithm details
  4058. + * @cdata: encryption algorithm details
  4059. + * @authsize: authentication tag (a.k.a. ICV / MAC) size
  4060. + */
  4061. +struct caam_ctx {
  4062. + struct caam_flc flc[NUM_OP];
  4063. + u8 key[CAAM_MAX_KEY_SIZE];
  4064. + dma_addr_t flc_dma[NUM_OP];
  4065. + dma_addr_t key_dma;
  4066. + enum dma_data_direction dir;
  4067. + struct device *dev;
  4068. + struct alginfo adata;
  4069. + struct alginfo cdata;
  4070. + unsigned int authsize;
  4071. +};
  4072. +
  4073. +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
  4074. + dma_addr_t iova_addr)
  4075. +{
  4076. + phys_addr_t phys_addr;
  4077. +
  4078. + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
  4079. + iova_addr;
  4080. +
  4081. + return phys_to_virt(phys_addr);
  4082. +}
  4083. +
  4084. +/*
  4085. + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
  4086. + *
  4087. + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
  4088. + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
  4089. + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
  4090. + * hosting 16 SG entries.
  4091. + *
  4092. + * @flags - flags that would be used for the equivalent kmalloc(..) call
  4093. + *
  4094. + * Returns a pointer to a retrieved buffer on success or NULL on failure.
  4095. + */
  4096. +static inline void *qi_cache_zalloc(gfp_t flags)
  4097. +{
  4098. + return kmem_cache_zalloc(qi_cache, flags);
  4099. +}
  4100. +
  4101. +/*
  4102. + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
  4103. + *
  4104. + * @obj - buffer previously allocated by qi_cache_zalloc
  4105. + *
  4106. + * No checking is being done, the call is a passthrough call to
  4107. + * kmem_cache_free(...)
  4108. + */
  4109. +static inline void qi_cache_free(void *obj)
  4110. +{
  4111. + kmem_cache_free(qi_cache, obj);
  4112. +}
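qi_cache_zalloc() and qi_cache_free() are thin wrappers over a kmem_cache that is created once at module init. A minimal sketch of that pattern; the cache name and the 768-byte object size are assumptions for the example, not the driver's actual values:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	/* one fixed-size, cacheline/DMA-aligned pool for all hotpath buffers */
	example_cache = kmem_cache_create("example_qi_cache", 768,
					  dma_get_cache_alignment(), 0, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void *example_alloc(gfp_t flags)
{
	/* zeroed object from the cache; cheaper than kzalloc() on the hotpath */
	return kmem_cache_zalloc(example_cache, flags);
}

static void example_free(void *obj)
{
	kmem_cache_free(example_cache, obj);
}

static void example_cache_exit(void)
{
	kmem_cache_destroy(example_cache);
}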
  4113. +
  4114. +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
  4115. +{
  4116. + switch (crypto_tfm_alg_type(areq->tfm)) {
  4117. + case CRYPTO_ALG_TYPE_SKCIPHER:
  4118. + return skcipher_request_ctx(skcipher_request_cast(areq));
  4119. + case CRYPTO_ALG_TYPE_AEAD:
  4120. + return aead_request_ctx(container_of(areq, struct aead_request,
  4121. + base));
  4122. + case CRYPTO_ALG_TYPE_AHASH:
  4123. + return ahash_request_ctx(ahash_request_cast(areq));
  4124. + default:
  4125. + return ERR_PTR(-EINVAL);
  4126. + }
  4127. +}
  4128. +
  4129. +static void caam_unmap(struct device *dev, struct scatterlist *src,
  4130. + struct scatterlist *dst, int src_nents,
  4131. + int dst_nents, dma_addr_t iv_dma, int ivsize,
  4132. + dma_addr_t qm_sg_dma, int qm_sg_bytes)
  4133. +{
  4134. + if (dst != src) {
  4135. + if (src_nents)
  4136. + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
  4137. + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
  4138. + } else {
  4139. + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
  4140. + }
  4141. +
  4142. + if (iv_dma)
  4143. + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
  4144. +
  4145. + if (qm_sg_bytes)
  4146. + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
  4147. +}
  4148. +
  4149. +static int aead_set_sh_desc(struct crypto_aead *aead)
  4150. +{
  4151. + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  4152. + typeof(*alg), aead);
  4153. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4154. + unsigned int ivsize = crypto_aead_ivsize(aead);
  4155. + struct device *dev = ctx->dev;
  4156. + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
  4157. + struct caam_flc *flc;
  4158. + u32 *desc;
  4159. + u32 ctx1_iv_off = 0;
  4160. + u32 *nonce = NULL;
  4161. + unsigned int data_len[2];
  4162. + u32 inl_mask;
  4163. + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  4164. + OP_ALG_AAI_CTR_MOD128);
  4165. + const bool is_rfc3686 = alg->caam.rfc3686;
  4166. +
  4167. + if (!ctx->cdata.keylen || !ctx->authsize)
  4168. + return 0;
  4169. +
  4170. + /*
  4171. + * AES-CTR needs to load IV in CONTEXT1 reg
  4172. + * at an offset of 128bits (16bytes)
  4173. + * CONTEXT1[255:128] = IV
  4174. + */
  4175. + if (ctr_mode)
  4176. + ctx1_iv_off = 16;
  4177. +
  4178. + /*
  4179. + * RFC3686 specific:
  4180. + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  4181. + */
  4182. + if (is_rfc3686) {
  4183. + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  4184. + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
  4185. + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
  4186. + }
  4187. +
  4188. + data_len[0] = ctx->adata.keylen_pad;
  4189. + data_len[1] = ctx->cdata.keylen;
  4190. +
  4191. + /* aead_encrypt shared descriptor */
  4192. + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
  4193. + DESC_QI_AEAD_ENC_LEN) +
  4194. + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  4195. + DESC_JOB_IO_LEN, data_len, &inl_mask,
  4196. + ARRAY_SIZE(data_len)) < 0)
  4197. + return -EINVAL;
  4198. +
  4199. + if (inl_mask & 1)
  4200. + ctx->adata.key_virt = ctx->key;
  4201. + else
  4202. + ctx->adata.key_dma = ctx->key_dma;
  4203. +
  4204. + if (inl_mask & 2)
  4205. + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  4206. + else
  4207. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  4208. +
  4209. + ctx->adata.key_inline = !!(inl_mask & 1);
  4210. + ctx->cdata.key_inline = !!(inl_mask & 2);
  4211. +
  4212. + flc = &ctx->flc[ENCRYPT];
  4213. + desc = flc->sh_desc;
  4214. +
  4215. + if (alg->caam.geniv)
  4216. + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
  4217. + ivsize, ctx->authsize, is_rfc3686,
  4218. + nonce, ctx1_iv_off, true,
  4219. + priv->sec_attr.era);
  4220. + else
  4221. + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
  4222. + ivsize, ctx->authsize, is_rfc3686, nonce,
  4223. + ctx1_iv_off, true, priv->sec_attr.era);
  4224. +
  4225. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4226. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  4227. + sizeof(flc->flc) + desc_bytes(desc),
  4228. + ctx->dir);
  4229. +
  4230. + /* aead_decrypt shared descriptor */
  4231. + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
  4232. + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  4233. + DESC_JOB_IO_LEN, data_len, &inl_mask,
  4234. + ARRAY_SIZE(data_len)) < 0)
  4235. + return -EINVAL;
  4236. +
  4237. + if (inl_mask & 1)
  4238. + ctx->adata.key_virt = ctx->key;
  4239. + else
  4240. + ctx->adata.key_dma = ctx->key_dma;
  4241. +
  4242. + if (inl_mask & 2)
  4243. + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  4244. + else
  4245. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  4246. +
  4247. + ctx->adata.key_inline = !!(inl_mask & 1);
  4248. + ctx->cdata.key_inline = !!(inl_mask & 2);
  4249. +
  4250. + flc = &ctx->flc[DECRYPT];
  4251. + desc = flc->sh_desc;
  4252. + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
  4253. + ivsize, ctx->authsize, alg->caam.geniv,
  4254. + is_rfc3686, nonce, ctx1_iv_off, true,
  4255. + priv->sec_attr.era);
  4256. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4257. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  4258. + sizeof(flc->flc) + desc_bytes(desc),
  4259. + ctx->dir);
  4260. +
  4261. + return 0;
  4262. +}
  4263. +
  4264. +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  4265. +{
  4266. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  4267. +
  4268. + ctx->authsize = authsize;
  4269. + aead_set_sh_desc(authenc);
  4270. +
  4271. + return 0;
  4272. +}
  4273. +
  4274. +struct split_key_sh_result {
  4275. + struct completion completion;
  4276. + int err;
  4277. + struct device *dev;
  4278. +};
  4279. +
  4280. +static void split_key_sh_done(void *cbk_ctx, u32 err)
  4281. +{
  4282. + struct split_key_sh_result *res = cbk_ctx;
  4283. +
  4284. +#ifdef DEBUG
  4285. + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  4286. +#endif
  4287. +
  4288. + if (err)
  4289. + caam_qi2_strstatus(res->dev, err);
  4290. +
  4291. + res->err = err;
  4292. + complete(&res->completion);
  4293. +}
  4294. +
  4295. +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
  4296. + unsigned int keylen)
  4297. +{
  4298. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4299. + struct device *dev = ctx->dev;
  4300. + struct crypto_authenc_keys keys;
  4301. +
  4302. + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  4303. + goto badkey;
  4304. +
  4305. +#ifdef DEBUG
  4306. + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
  4307. + keys.authkeylen + keys.enckeylen, keys.enckeylen,
  4308. + keys.authkeylen);
  4309. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  4310. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  4311. +#endif
  4312. +
  4313. + ctx->adata.keylen = keys.authkeylen;
  4314. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  4315. + OP_ALG_ALGSEL_MASK);
  4316. +
  4317. + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  4318. + goto badkey;
  4319. +
  4320. + memcpy(ctx->key, keys.authkey, keys.authkeylen);
  4321. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  4322. + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
  4323. + keys.enckeylen, ctx->dir);
  4324. +#ifdef DEBUG
  4325. + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  4326. + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  4327. + ctx->adata.keylen_pad + keys.enckeylen, 1);
  4328. +#endif
  4329. +
  4330. + ctx->cdata.keylen = keys.enckeylen;
  4331. +
  4332. + return aead_set_sh_desc(aead);
  4333. +badkey:
  4334. + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  4335. + return -EINVAL;
  4336. +}
  4337. +
  4338. +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  4339. + bool encrypt)
  4340. +{
  4341. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  4342. + struct caam_request *req_ctx = aead_request_ctx(req);
  4343. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  4344. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  4345. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4346. + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  4347. + typeof(*alg), aead);
  4348. + struct device *dev = ctx->dev;
  4349. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  4350. + GFP_KERNEL : GFP_ATOMIC;
  4351. + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  4352. + struct aead_edesc *edesc;
  4353. + dma_addr_t qm_sg_dma, iv_dma = 0;
  4354. + int ivsize = 0;
  4355. + unsigned int authsize = ctx->authsize;
  4356. + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
  4357. + int in_len, out_len;
  4358. + struct dpaa2_sg_entry *sg_table;
  4359. +
  4360. + /* allocate space for base edesc, link tables and IV */
  4361. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  4362. + if (unlikely(!edesc)) {
  4363. + dev_err(dev, "could not allocate extended descriptor\n");
  4364. + return ERR_PTR(-ENOMEM);
  4365. + }
  4366. +
  4367. + if (unlikely(req->dst != req->src)) {
  4368. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  4369. + req->cryptlen);
  4370. + if (unlikely(src_nents < 0)) {
  4371. + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
  4372. + req->assoclen + req->cryptlen);
  4373. + qi_cache_free(edesc);
  4374. + return ERR_PTR(src_nents);
  4375. + }
  4376. +
  4377. + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
  4378. + req->cryptlen +
  4379. + (encrypt ? authsize :
  4380. + (-authsize)));
  4381. + if (unlikely(dst_nents < 0)) {
  4382. + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
  4383. + req->assoclen + req->cryptlen +
  4384. + (encrypt ? authsize : (-authsize)));
  4385. + qi_cache_free(edesc);
  4386. + return ERR_PTR(dst_nents);
  4387. + }
  4388. +
  4389. + if (src_nents) {
  4390. + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  4391. + DMA_TO_DEVICE);
  4392. + if (unlikely(!mapped_src_nents)) {
  4393. + dev_err(dev, "unable to map source\n");
  4394. + qi_cache_free(edesc);
  4395. + return ERR_PTR(-ENOMEM);
  4396. + }
  4397. + } else {
  4398. + mapped_src_nents = 0;
  4399. + }
  4400. +
  4401. + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
  4402. + DMA_FROM_DEVICE);
  4403. + if (unlikely(!mapped_dst_nents)) {
  4404. + dev_err(dev, "unable to map destination\n");
  4405. + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
  4406. + qi_cache_free(edesc);
  4407. + return ERR_PTR(-ENOMEM);
  4408. + }
  4409. + } else {
  4410. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  4411. + req->cryptlen +
  4412. + (encrypt ? authsize : 0));
  4413. + if (unlikely(src_nents < 0)) {
  4414. + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
  4415. + req->assoclen + req->cryptlen +
  4416. + (encrypt ? authsize : 0));
  4417. + qi_cache_free(edesc);
  4418. + return ERR_PTR(src_nents);
  4419. + }
  4420. +
  4421. + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  4422. + DMA_BIDIRECTIONAL);
  4423. + if (unlikely(!mapped_src_nents)) {
  4424. + dev_err(dev, "unable to map source\n");
  4425. + qi_cache_free(edesc);
  4426. + return ERR_PTR(-ENOMEM);
  4427. + }
  4428. + }
  4429. +
  4430. + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
  4431. + ivsize = crypto_aead_ivsize(aead);
  4432. +
  4433. + /*
  4434. + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
  4435. + * Input is not contiguous.
  4436. + */
  4437. + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
  4438. + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
  4439. + sg_table = &edesc->sgt[0];
  4440. + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
  4441. + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
  4442. + CAAM_QI_MEMCACHE_SIZE)) {
  4443. + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
  4444. + qm_sg_nents, ivsize);
  4445. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
  4446. + 0, 0, 0);
  4447. + qi_cache_free(edesc);
  4448. + return ERR_PTR(-ENOMEM);
  4449. + }
  4450. +
  4451. + if (ivsize) {
  4452. + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
  4453. +
  4454. + /* Make sure IV is located in a DMAable area */
  4455. + memcpy(iv, req->iv, ivsize);
  4456. +
  4457. + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
  4458. + if (dma_mapping_error(dev, iv_dma)) {
  4459. + dev_err(dev, "unable to map IV\n");
  4460. + caam_unmap(dev, req->src, req->dst, src_nents,
  4461. + dst_nents, 0, 0, 0, 0);
  4462. + qi_cache_free(edesc);
  4463. + return ERR_PTR(-ENOMEM);
  4464. + }
  4465. + }
  4466. +
  4467. + edesc->src_nents = src_nents;
  4468. + edesc->dst_nents = dst_nents;
  4469. + edesc->iv_dma = iv_dma;
  4470. +
  4471. + edesc->assoclen = cpu_to_caam32(req->assoclen);
  4472. + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
  4473. + DMA_TO_DEVICE);
  4474. + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
  4475. + dev_err(dev, "unable to map assoclen\n");
  4476. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
  4477. + iv_dma, ivsize, 0, 0);
  4478. + qi_cache_free(edesc);
  4479. + return ERR_PTR(-ENOMEM);
  4480. + }
  4481. +
  4482. + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
  4483. + qm_sg_index++;
  4484. + if (ivsize) {
  4485. + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
  4486. + qm_sg_index++;
  4487. + }
  4488. + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
  4489. + qm_sg_index += mapped_src_nents;
  4490. +
  4491. + if (mapped_dst_nents > 1)
  4492. + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  4493. + qm_sg_index, 0);
  4494. +
  4495. + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
  4496. + if (dma_mapping_error(dev, qm_sg_dma)) {
  4497. + dev_err(dev, "unable to map S/G table\n");
  4498. + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  4499. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
  4500. + iv_dma, ivsize, 0, 0);
  4501. + qi_cache_free(edesc);
  4502. + return ERR_PTR(-ENOMEM);
  4503. + }
  4504. +
  4505. + edesc->qm_sg_dma = qm_sg_dma;
  4506. + edesc->qm_sg_bytes = qm_sg_bytes;
  4507. +
  4508. + out_len = req->assoclen + req->cryptlen +
  4509. + (encrypt ? ctx->authsize : (-ctx->authsize));
  4510. + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
  4511. +
  4512. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  4513. + dpaa2_fl_set_final(in_fle, true);
  4514. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  4515. + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
  4516. + dpaa2_fl_set_len(in_fle, in_len);
  4517. +
  4518. + if (req->dst == req->src) {
  4519. + if (mapped_src_nents == 1) {
  4520. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  4521. + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
  4522. + } else {
  4523. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  4524. + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
  4525. + (1 + !!ivsize) * sizeof(*sg_table));
  4526. + }
  4527. + } else if (mapped_dst_nents == 1) {
  4528. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  4529. + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
  4530. + } else {
  4531. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  4532. + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
  4533. + sizeof(*sg_table));
  4534. + }
  4535. +
  4536. + dpaa2_fl_set_len(out_fle, out_len);
  4537. +
  4538. + return edesc;
  4539. +}
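+
+/*
+ * Informational sketch of the qman S/G table built above, for the case with
+ * an IV and separate source/destination buffers:
+ *
+ *   sg_table[0]             -> edesc->assoclen (4-byte copy)
+ *   sg_table[1]             -> IV (ivsize bytes)
+ *   sg_table[2..]           -> req->src segments (last entry marked final)
+ *   sg_table[qm_sg_index..] -> req->dst segments (only if dst has > 1 segment)
+ *
+ * The input frame-list entry then spans 4 + ivsize + assoclen + cryptlen
+ * bytes of this table, while the output entry points either at dst directly
+ * or at the dst portion of the same table.
+ */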
  4540. +
  4541. +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
  4542. + bool encrypt)
  4543. +{
  4544. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  4545. + unsigned int blocksize = crypto_aead_blocksize(tls);
  4546. + unsigned int padsize, authsize;
  4547. + struct caam_request *req_ctx = aead_request_ctx(req);
  4548. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  4549. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  4550. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  4551. + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
  4552. + typeof(*alg), aead);
  4553. + struct device *dev = ctx->dev;
  4554. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  4555. + GFP_KERNEL : GFP_ATOMIC;
  4556. + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  4557. + struct tls_edesc *edesc;
  4558. + dma_addr_t qm_sg_dma, iv_dma = 0;
  4559. + int ivsize = 0;
  4560. + u8 *iv;
  4561. + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
  4562. + int in_len, out_len;
  4563. + struct dpaa2_sg_entry *sg_table;
  4564. + struct scatterlist *dst;
  4565. +
  4566. + if (encrypt) {
  4567. + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
  4568. + blocksize);
  4569. + authsize = ctx->authsize + padsize;
  4570. + } else {
  4571. + authsize = ctx->authsize;
  4572. + }
  4573. +
  4574. + /* allocate space for base edesc, link tables and IV */
  4575. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  4576. + if (unlikely(!edesc)) {
  4577. + dev_err(dev, "could not allocate extended descriptor\n");
  4578. + return ERR_PTR(-ENOMEM);
  4579. + }
  4580. +
  4581. + if (likely(req->src == req->dst)) {
  4582. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  4583. + req->cryptlen +
  4584. + (encrypt ? authsize : 0));
  4585. + if (unlikely(src_nents < 0)) {
  4586. + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
  4587. + req->assoclen + req->cryptlen +
  4588. + (encrypt ? authsize : 0));
  4589. + qi_cache_free(edesc);
  4590. + return ERR_PTR(src_nents);
  4591. + }
  4592. +
  4593. + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  4594. + DMA_BIDIRECTIONAL);
  4595. + if (unlikely(!mapped_src_nents)) {
  4596. + dev_err(dev, "unable to map source\n");
  4597. + qi_cache_free(edesc);
  4598. + return ERR_PTR(-ENOMEM);
  4599. + }
  4600. + dst = req->dst;
  4601. + } else {
  4602. + src_nents = sg_nents_for_len(req->src, req->assoclen +
  4603. + req->cryptlen);
  4604. + if (unlikely(src_nents < 0)) {
  4605. + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
  4606. + req->assoclen + req->cryptlen);
  4607. + qi_cache_free(edesc);
  4608. + return ERR_PTR(src_nents);
  4609. + }
  4610. +
  4611. + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
  4612. + dst_nents = sg_nents_for_len(dst, req->cryptlen +
  4613. + (encrypt ? authsize : 0));
  4614. + if (unlikely(dst_nents < 0)) {
  4615. + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
  4616. + req->cryptlen +
  4617. + (encrypt ? authsize : 0));
  4618. + qi_cache_free(edesc);
  4619. + return ERR_PTR(dst_nents);
  4620. + }
  4621. +
  4622. + if (src_nents) {
  4623. + mapped_src_nents = dma_map_sg(dev, req->src,
  4624. + src_nents, DMA_TO_DEVICE);
  4625. + if (unlikely(!mapped_src_nents)) {
  4626. + dev_err(dev, "unable to map source\n");
  4627. + qi_cache_free(edesc);
  4628. + return ERR_PTR(-ENOMEM);
  4629. + }
  4630. + } else {
  4631. + mapped_src_nents = 0;
  4632. + }
  4633. +
  4634. + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
  4635. + DMA_FROM_DEVICE);
  4636. + if (unlikely(!mapped_dst_nents)) {
  4637. + dev_err(dev, "unable to map destination\n");
  4638. + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
  4639. + qi_cache_free(edesc);
  4640. + return ERR_PTR(-ENOMEM);
  4641. + }
  4642. + }
  4643. +
  4644. + /*
  4645. + * Create S/G table: IV, src, dst.
  4646. + * Input is not contiguous.
  4647. + */
  4648. + qm_sg_ents = 1 + mapped_src_nents +
  4649. + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
  4650. + sg_table = &edesc->sgt[0];
  4651. + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
  4652. +
  4653. + ivsize = crypto_aead_ivsize(tls);
  4654. + iv = (u8 *)(sg_table + qm_sg_ents);
  4655. + /* Make sure IV is located in a DMAable area */
  4656. + memcpy(iv, req->iv, ivsize);
  4657. + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
  4658. + if (dma_mapping_error(dev, iv_dma)) {
  4659. + dev_err(dev, "unable to map IV\n");
  4660. + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
  4661. + 0);
  4662. + qi_cache_free(edesc);
  4663. + return ERR_PTR(-ENOMEM);
  4664. + }
  4665. +
  4666. + edesc->src_nents = src_nents;
  4667. + edesc->dst_nents = dst_nents;
  4668. + edesc->dst = dst;
  4669. + edesc->iv_dma = iv_dma;
  4670. +
  4671. + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  4672. + qm_sg_index = 1;
  4673. +
  4674. + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
  4675. + qm_sg_index += mapped_src_nents;
  4676. +
  4677. + if (mapped_dst_nents > 1)
  4678. + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
  4679. + qm_sg_index, 0);
  4680. +
  4681. + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
  4682. + if (dma_mapping_error(dev, qm_sg_dma)) {
  4683. + dev_err(dev, "unable to map S/G table\n");
  4684. + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
  4685. + ivsize, 0, 0);
  4686. + qi_cache_free(edesc);
  4687. + return ERR_PTR(-ENOMEM);
  4688. + }
  4689. +
  4690. + edesc->qm_sg_dma = qm_sg_dma;
  4691. + edesc->qm_sg_bytes = qm_sg_bytes;
  4692. +
  4693. + out_len = req->cryptlen + (encrypt ? authsize : 0);
  4694. + in_len = ivsize + req->assoclen + req->cryptlen;
  4695. +
  4696. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  4697. + dpaa2_fl_set_final(in_fle, true);
  4698. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  4699. + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
  4700. + dpaa2_fl_set_len(in_fle, in_len);
  4701. +
  4702. + if (req->dst == req->src) {
  4703. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  4704. + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
  4705. + (sg_nents_for_len(req->src, req->assoclen) +
  4706. + 1) * sizeof(*sg_table));
  4707. + } else if (mapped_dst_nents == 1) {
  4708. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  4709. + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
  4710. + } else {
  4711. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  4712. + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
  4713. + sizeof(*sg_table));
  4714. + }
  4715. +
  4716. + dpaa2_fl_set_len(out_fle, out_len);
  4717. +
  4718. + return edesc;
  4719. +}
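+
+/*
+ * Worked example for the CBC padding computed above (informational): with
+ * AES-CBC (blocksize = 16), HMAC-SHA1 (ctx->authsize = 20) and
+ * req->cryptlen = 32, encryption yields
+ *
+ *   padsize  = 16 - ((32 + 20) % 16) = 12
+ *   authsize = 20 + 12 = 32
+ *   out_len  = 32 + 32 = 64
+ *
+ * i.e. payload + MAC + TLS padding rounds up to whole cipher blocks; when
+ * cryptlen + ctx->authsize is already a multiple of the block size, a full
+ * block of padding is emitted.
+ */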
  4720. +
  4721. +static int tls_set_sh_desc(struct crypto_aead *tls)
  4722. +{
  4723. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  4724. + unsigned int ivsize = crypto_aead_ivsize(tls);
  4725. + unsigned int blocksize = crypto_aead_blocksize(tls);
  4726. + struct device *dev = ctx->dev;
  4727. + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
  4728. + struct caam_flc *flc;
  4729. + u32 *desc;
   4730. + unsigned int assoclen = 13; /* always 13 bytes for TLS: 8-byte seq no, 1-byte type, 2-byte version, 2-byte length */
  4731. + unsigned int data_len[2];
  4732. + u32 inl_mask;
  4733. +
  4734. + if (!ctx->cdata.keylen || !ctx->authsize)
  4735. + return 0;
  4736. +
  4737. + /*
  4738. + * TLS 1.0 encrypt shared descriptor
  4739. + * Job Descriptor and Shared Descriptor
  4740. + * must fit into the 64-word Descriptor h/w Buffer
  4741. + */
  4742. + data_len[0] = ctx->adata.keylen_pad;
  4743. + data_len[1] = ctx->cdata.keylen;
  4744. +
  4745. + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
  4746. + &inl_mask, ARRAY_SIZE(data_len)) < 0)
  4747. + return -EINVAL;
  4748. +
  4749. + if (inl_mask & 1)
  4750. + ctx->adata.key_virt = ctx->key;
  4751. + else
  4752. + ctx->adata.key_dma = ctx->key_dma;
  4753. +
  4754. + if (inl_mask & 2)
  4755. + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  4756. + else
  4757. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  4758. +
  4759. + ctx->adata.key_inline = !!(inl_mask & 1);
  4760. + ctx->cdata.key_inline = !!(inl_mask & 2);
  4761. +
  4762. + flc = &ctx->flc[ENCRYPT];
  4763. + desc = flc->sh_desc;
  4764. + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
  4765. + assoclen, ivsize, ctx->authsize, blocksize,
  4766. + priv->sec_attr.era);
   4767. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4768. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  4769. + sizeof(flc->flc) + desc_bytes(desc),
  4770. + ctx->dir);
  4771. +
  4772. + /*
  4773. + * TLS 1.0 decrypt shared descriptor
  4774. + * Keys do not fit inline, regardless of algorithms used
  4775. + */
  4776. + ctx->adata.key_inline = false;
  4777. + ctx->adata.key_dma = ctx->key_dma;
  4778. + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  4779. +
  4780. + flc = &ctx->flc[DECRYPT];
  4781. + desc = flc->sh_desc;
  4782. + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
  4783. + ctx->authsize, blocksize, priv->sec_attr.era);
  4784. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4785. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  4786. + sizeof(flc->flc) + desc_bytes(desc),
  4787. + ctx->dir);
  4788. +
  4789. + return 0;
  4790. +}
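+
+/*
+ * Informational: desc_inline_query() decides, per key, whether it still fits
+ * inside the shared descriptor. Bit 0 of inl_mask corresponds to data_len[0]
+ * (the split authentication key) and bit 1 to data_len[1] (the cipher key);
+ * a set bit means the key is embedded through key_virt, a clear bit means it
+ * is referenced through key_dma instead.
+ */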
  4791. +
  4792. +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
  4793. + unsigned int keylen)
  4794. +{
  4795. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  4796. + struct device *dev = ctx->dev;
  4797. + struct crypto_authenc_keys keys;
  4798. +
  4799. + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  4800. + goto badkey;
  4801. +
  4802. +#ifdef DEBUG
  4803. + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
  4804. + keys.authkeylen + keys.enckeylen, keys.enckeylen,
  4805. + keys.authkeylen);
  4806. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  4807. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  4808. +#endif
  4809. +
  4810. + ctx->adata.keylen = keys.authkeylen;
  4811. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  4812. + OP_ALG_ALGSEL_MASK);
  4813. +
  4814. + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  4815. + goto badkey;
  4816. +
  4817. + memcpy(ctx->key, keys.authkey, keys.authkeylen);
  4818. + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  4819. + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
  4820. + keys.enckeylen, ctx->dir);
  4821. +#ifdef DEBUG
  4822. + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  4823. + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  4824. + ctx->adata.keylen_pad + keys.enckeylen, 1);
  4825. +#endif
  4826. +
  4827. + ctx->cdata.keylen = keys.enckeylen;
  4828. +
  4829. + return tls_set_sh_desc(tls);
  4830. +badkey:
  4831. + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
  4832. + return -EINVAL;
  4833. +}
  4834. +
  4835. +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
  4836. +{
  4837. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  4838. +
  4839. + ctx->authsize = authsize;
  4840. + tls_set_sh_desc(tls);
  4841. +
  4842. + return 0;
  4843. +}
  4844. +
  4845. +static int gcm_set_sh_desc(struct crypto_aead *aead)
  4846. +{
  4847. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4848. + struct device *dev = ctx->dev;
  4849. + unsigned int ivsize = crypto_aead_ivsize(aead);
  4850. + struct caam_flc *flc;
  4851. + u32 *desc;
  4852. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  4853. + ctx->cdata.keylen;
  4854. +
  4855. + if (!ctx->cdata.keylen || !ctx->authsize)
  4856. + return 0;
  4857. +
  4858. + /*
  4859. + * AES GCM encrypt shared descriptor
  4860. + * Job Descriptor and Shared Descriptor
  4861. + * must fit into the 64-word Descriptor h/w Buffer
  4862. + */
  4863. + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
  4864. + ctx->cdata.key_inline = true;
  4865. + ctx->cdata.key_virt = ctx->key;
  4866. + } else {
  4867. + ctx->cdata.key_inline = false;
  4868. + ctx->cdata.key_dma = ctx->key_dma;
  4869. + }
  4870. +
  4871. + flc = &ctx->flc[ENCRYPT];
  4872. + desc = flc->sh_desc;
  4873. + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
  4874. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4875. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  4876. + sizeof(flc->flc) + desc_bytes(desc),
  4877. + ctx->dir);
  4878. +
  4879. + /*
  4880. + * Job Descriptor and Shared Descriptors
  4881. + * must all fit into the 64-word Descriptor h/w Buffer
  4882. + */
  4883. + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
  4884. + ctx->cdata.key_inline = true;
  4885. + ctx->cdata.key_virt = ctx->key;
  4886. + } else {
  4887. + ctx->cdata.key_inline = false;
  4888. + ctx->cdata.key_dma = ctx->key_dma;
  4889. + }
  4890. +
  4891. + flc = &ctx->flc[DECRYPT];
  4892. + desc = flc->sh_desc;
  4893. + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
  4894. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4895. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  4896. + sizeof(flc->flc) + desc_bytes(desc),
  4897. + ctx->dir);
  4898. +
  4899. + return 0;
  4900. +}
  4901. +
  4902. +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  4903. +{
  4904. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  4905. +
  4906. + ctx->authsize = authsize;
  4907. + gcm_set_sh_desc(authenc);
  4908. +
  4909. + return 0;
  4910. +}
  4911. +
  4912. +static int gcm_setkey(struct crypto_aead *aead,
  4913. + const u8 *key, unsigned int keylen)
  4914. +{
  4915. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4916. + struct device *dev = ctx->dev;
  4917. +
  4918. +#ifdef DEBUG
  4919. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  4920. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  4921. +#endif
  4922. +
  4923. + memcpy(ctx->key, key, keylen);
  4924. + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
  4925. + ctx->cdata.keylen = keylen;
  4926. +
  4927. + return gcm_set_sh_desc(aead);
  4928. +}
  4929. +
  4930. +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
  4931. +{
  4932. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  4933. + struct device *dev = ctx->dev;
  4934. + unsigned int ivsize = crypto_aead_ivsize(aead);
  4935. + struct caam_flc *flc;
  4936. + u32 *desc;
  4937. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  4938. + ctx->cdata.keylen;
  4939. +
  4940. + if (!ctx->cdata.keylen || !ctx->authsize)
  4941. + return 0;
  4942. +
  4943. + ctx->cdata.key_virt = ctx->key;
  4944. +
  4945. + /*
  4946. + * RFC4106 encrypt shared descriptor
  4947. + * Job Descriptor and Shared Descriptor
  4948. + * must fit into the 64-word Descriptor h/w Buffer
  4949. + */
  4950. + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
  4951. + ctx->cdata.key_inline = true;
  4952. + } else {
  4953. + ctx->cdata.key_inline = false;
  4954. + ctx->cdata.key_dma = ctx->key_dma;
  4955. + }
  4956. +
  4957. + flc = &ctx->flc[ENCRYPT];
  4958. + desc = flc->sh_desc;
  4959. + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
  4960. + true);
  4961. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4962. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  4963. + sizeof(flc->flc) + desc_bytes(desc),
  4964. + ctx->dir);
  4965. +
  4966. + /*
  4967. + * Job Descriptor and Shared Descriptors
  4968. + * must all fit into the 64-word Descriptor h/w Buffer
  4969. + */
  4970. + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
  4971. + ctx->cdata.key_inline = true;
  4972. + } else {
  4973. + ctx->cdata.key_inline = false;
  4974. + ctx->cdata.key_dma = ctx->key_dma;
  4975. + }
  4976. +
  4977. + flc = &ctx->flc[DECRYPT];
  4978. + desc = flc->sh_desc;
  4979. + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
  4980. + true);
  4981. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  4982. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  4983. + sizeof(flc->flc) + desc_bytes(desc),
  4984. + ctx->dir);
  4985. +
  4986. + return 0;
  4987. +}
  4988. +
  4989. +static int rfc4106_setauthsize(struct crypto_aead *authenc,
  4990. + unsigned int authsize)
  4991. +{
  4992. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  4993. +
  4994. + ctx->authsize = authsize;
  4995. + rfc4106_set_sh_desc(authenc);
  4996. +
  4997. + return 0;
  4998. +}
  4999. +
  5000. +static int rfc4106_setkey(struct crypto_aead *aead,
  5001. + const u8 *key, unsigned int keylen)
  5002. +{
  5003. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5004. + struct device *dev = ctx->dev;
  5005. +
  5006. + if (keylen < 4)
  5007. + return -EINVAL;
  5008. +
  5009. +#ifdef DEBUG
  5010. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  5011. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  5012. +#endif
  5013. +
  5014. + memcpy(ctx->key, key, keylen);
  5015. + /*
  5016. + * The last four bytes of the key material are used as the salt value
  5017. + * in the nonce. Update the AES key length.
  5018. + */
  5019. + ctx->cdata.keylen = keylen - 4;
  5020. + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
  5021. + ctx->dir);
  5022. +
  5023. + return rfc4106_set_sh_desc(aead);
  5024. +}
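+
+/*
+ * Example (informational): for rfc4106(gcm(aes)) a 20-byte key supplied by
+ * the API is treated as 16 bytes of AES key material plus a 4-byte salt, so
+ * ctx->cdata.keylen becomes 16 while the full input stays in ctx->key and
+ * the trailing salt is picked up when the shared descriptors are constructed.
+ */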
  5025. +
  5026. +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
  5027. +{
  5028. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5029. + struct device *dev = ctx->dev;
  5030. + unsigned int ivsize = crypto_aead_ivsize(aead);
  5031. + struct caam_flc *flc;
  5032. + u32 *desc;
  5033. + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  5034. + ctx->cdata.keylen;
  5035. +
  5036. + if (!ctx->cdata.keylen || !ctx->authsize)
  5037. + return 0;
  5038. +
  5039. + ctx->cdata.key_virt = ctx->key;
  5040. +
  5041. + /*
  5042. + * RFC4543 encrypt shared descriptor
  5043. + * Job Descriptor and Shared Descriptor
  5044. + * must fit into the 64-word Descriptor h/w Buffer
  5045. + */
  5046. + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
  5047. + ctx->cdata.key_inline = true;
  5048. + } else {
  5049. + ctx->cdata.key_inline = false;
  5050. + ctx->cdata.key_dma = ctx->key_dma;
  5051. + }
  5052. +
  5053. + flc = &ctx->flc[ENCRYPT];
  5054. + desc = flc->sh_desc;
  5055. + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
  5056. + true);
  5057. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5058. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  5059. + sizeof(flc->flc) + desc_bytes(desc),
  5060. + ctx->dir);
  5061. +
  5062. + /*
  5063. + * Job Descriptor and Shared Descriptors
  5064. + * must all fit into the 64-word Descriptor h/w Buffer
  5065. + */
  5066. + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
  5067. + ctx->cdata.key_inline = true;
  5068. + } else {
  5069. + ctx->cdata.key_inline = false;
  5070. + ctx->cdata.key_dma = ctx->key_dma;
  5071. + }
  5072. +
  5073. + flc = &ctx->flc[DECRYPT];
  5074. + desc = flc->sh_desc;
  5075. + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
  5076. + true);
  5077. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5078. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  5079. + sizeof(flc->flc) + desc_bytes(desc),
  5080. + ctx->dir);
  5081. +
  5082. + return 0;
  5083. +}
  5084. +
  5085. +static int rfc4543_setauthsize(struct crypto_aead *authenc,
  5086. + unsigned int authsize)
  5087. +{
  5088. + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  5089. +
  5090. + ctx->authsize = authsize;
  5091. + rfc4543_set_sh_desc(authenc);
  5092. +
  5093. + return 0;
  5094. +}
  5095. +
  5096. +static int rfc4543_setkey(struct crypto_aead *aead,
  5097. + const u8 *key, unsigned int keylen)
  5098. +{
  5099. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5100. + struct device *dev = ctx->dev;
  5101. +
  5102. + if (keylen < 4)
  5103. + return -EINVAL;
  5104. +
  5105. +#ifdef DEBUG
  5106. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  5107. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  5108. +#endif
  5109. +
  5110. + memcpy(ctx->key, key, keylen);
  5111. + /*
  5112. + * The last four bytes of the key material are used as the salt value
  5113. + * in the nonce. Update the AES key length.
  5114. + */
  5115. + ctx->cdata.keylen = keylen - 4;
  5116. + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
  5117. + ctx->dir);
  5118. +
  5119. + return rfc4543_set_sh_desc(aead);
  5120. +}
  5121. +
  5122. +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  5123. + unsigned int keylen)
  5124. +{
  5125. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5126. + struct caam_skcipher_alg *alg =
  5127. + container_of(crypto_skcipher_alg(skcipher),
  5128. + struct caam_skcipher_alg, skcipher);
  5129. + struct device *dev = ctx->dev;
  5130. + struct caam_flc *flc;
  5131. + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
  5132. + u32 *desc;
  5133. + u32 ctx1_iv_off = 0;
  5134. + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  5135. + OP_ALG_AAI_CTR_MOD128);
  5136. + const bool is_rfc3686 = alg->caam.rfc3686;
  5137. +
  5138. +#ifdef DEBUG
  5139. + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  5140. + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  5141. +#endif
  5142. + /*
  5143. + * AES-CTR needs to load IV in CONTEXT1 reg
   5144. + * at an offset of 128 bits (16 bytes)
  5145. + * CONTEXT1[255:128] = IV
  5146. + */
  5147. + if (ctr_mode)
  5148. + ctx1_iv_off = 16;
  5149. +
  5150. + /*
  5151. + * RFC3686 specific:
  5152. + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  5153. + * | *key = {KEY, NONCE}
  5154. + */
  5155. + if (is_rfc3686) {
  5156. + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  5157. + keylen -= CTR_RFC3686_NONCE_SIZE;
  5158. + }
  5159. +
  5160. + ctx->cdata.keylen = keylen;
  5161. + ctx->cdata.key_virt = key;
  5162. + ctx->cdata.key_inline = true;
  5163. +
  5164. + /* skcipher_encrypt shared descriptor */
  5165. + flc = &ctx->flc[ENCRYPT];
  5166. + desc = flc->sh_desc;
  5167. + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
  5168. + is_rfc3686, ctx1_iv_off);
  5169. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5170. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  5171. + sizeof(flc->flc) + desc_bytes(desc),
  5172. + ctx->dir);
  5173. +
  5174. + /* skcipher_decrypt shared descriptor */
  5175. + flc = &ctx->flc[DECRYPT];
  5176. + desc = flc->sh_desc;
  5177. + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
  5178. + is_rfc3686, ctx1_iv_off);
  5179. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5180. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  5181. + sizeof(flc->flc) + desc_bytes(desc),
  5182. + ctx->dir);
  5183. +
  5184. + return 0;
  5185. +}
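+
+/*
+ * Informational summary of the CONTEXT1 IV offset selected above (assuming
+ * CTR_RFC3686_NONCE_SIZE == 4, per RFC 3686):
+ *
+ *   CBC and other modes:   ctx1_iv_off = 0
+ *   ctr(aes):              ctx1_iv_off = 16   (IV in CONTEXT1[255:128])
+ *   rfc3686(ctr(aes)):     ctx1_iv_off = 20   (nonce precedes the IV), and
+ *                          the 4-byte nonce is stripped from the key length
+ */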
  5186. +
  5187. +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  5188. + unsigned int keylen)
  5189. +{
  5190. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5191. + struct device *dev = ctx->dev;
  5192. + struct caam_flc *flc;
  5193. + u32 *desc;
  5194. +
  5195. + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
  5196. + dev_err(dev, "key size mismatch\n");
  5197. + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  5198. + return -EINVAL;
  5199. + }
  5200. +
  5201. + ctx->cdata.keylen = keylen;
  5202. + ctx->cdata.key_virt = key;
  5203. + ctx->cdata.key_inline = true;
  5204. +
  5205. + /* xts_skcipher_encrypt shared descriptor */
  5206. + flc = &ctx->flc[ENCRYPT];
  5207. + desc = flc->sh_desc;
  5208. + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
  5209. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5210. + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
  5211. + sizeof(flc->flc) + desc_bytes(desc),
  5212. + ctx->dir);
  5213. +
  5214. + /* xts_skcipher_decrypt shared descriptor */
  5215. + flc = &ctx->flc[DECRYPT];
  5216. + desc = flc->sh_desc;
  5217. + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
  5218. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  5219. + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
  5220. + sizeof(flc->flc) + desc_bytes(desc),
  5221. + ctx->dir);
  5222. +
  5223. + return 0;
  5224. +}
  5225. +
  5226. +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
  5227. +{
  5228. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5229. + struct caam_request *req_ctx = skcipher_request_ctx(req);
  5230. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  5231. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  5232. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5233. + struct device *dev = ctx->dev;
  5234. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  5235. + GFP_KERNEL : GFP_ATOMIC;
  5236. + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  5237. + struct skcipher_edesc *edesc;
  5238. + dma_addr_t iv_dma;
  5239. + u8 *iv;
  5240. + int ivsize = crypto_skcipher_ivsize(skcipher);
  5241. + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
  5242. + struct dpaa2_sg_entry *sg_table;
  5243. +
  5244. + src_nents = sg_nents_for_len(req->src, req->cryptlen);
  5245. + if (unlikely(src_nents < 0)) {
  5246. + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
  5247. + req->cryptlen);
  5248. + return ERR_PTR(src_nents);
  5249. + }
  5250. +
  5251. + if (unlikely(req->dst != req->src)) {
  5252. + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
  5253. + if (unlikely(dst_nents < 0)) {
  5254. + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
  5255. + req->cryptlen);
  5256. + return ERR_PTR(dst_nents);
  5257. + }
  5258. +
  5259. + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  5260. + DMA_TO_DEVICE);
  5261. + if (unlikely(!mapped_src_nents)) {
  5262. + dev_err(dev, "unable to map source\n");
  5263. + return ERR_PTR(-ENOMEM);
  5264. + }
  5265. +
  5266. + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
  5267. + DMA_FROM_DEVICE);
  5268. + if (unlikely(!mapped_dst_nents)) {
  5269. + dev_err(dev, "unable to map destination\n");
  5270. + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
  5271. + return ERR_PTR(-ENOMEM);
  5272. + }
  5273. + } else {
  5274. + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  5275. + DMA_BIDIRECTIONAL);
  5276. + if (unlikely(!mapped_src_nents)) {
  5277. + dev_err(dev, "unable to map source\n");
  5278. + return ERR_PTR(-ENOMEM);
  5279. + }
  5280. + }
  5281. +
  5282. + qm_sg_ents = 1 + mapped_src_nents;
  5283. + dst_sg_idx = qm_sg_ents;
  5284. +
  5285. + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
  5286. + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
  5287. + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
  5288. + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
  5289. + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
  5290. + qm_sg_ents, ivsize);
  5291. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
  5292. + 0, 0, 0);
  5293. + return ERR_PTR(-ENOMEM);
  5294. + }
  5295. +
  5296. + /* allocate space for base edesc, link tables and IV */
  5297. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  5298. + if (unlikely(!edesc)) {
  5299. + dev_err(dev, "could not allocate extended descriptor\n");
  5300. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
  5301. + 0, 0, 0);
  5302. + return ERR_PTR(-ENOMEM);
  5303. + }
  5304. +
  5305. + /* Make sure IV is located in a DMAable area */
  5306. + sg_table = &edesc->sgt[0];
  5307. + iv = (u8 *)(sg_table + qm_sg_ents);
  5308. + memcpy(iv, req->iv, ivsize);
  5309. +
  5310. + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
  5311. + if (dma_mapping_error(dev, iv_dma)) {
  5312. + dev_err(dev, "unable to map IV\n");
  5313. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
  5314. + 0, 0, 0);
  5315. + qi_cache_free(edesc);
  5316. + return ERR_PTR(-ENOMEM);
  5317. + }
  5318. +
  5319. + edesc->src_nents = src_nents;
  5320. + edesc->dst_nents = dst_nents;
  5321. + edesc->iv_dma = iv_dma;
  5322. + edesc->qm_sg_bytes = qm_sg_bytes;
  5323. +
  5324. + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  5325. + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
  5326. +
  5327. + if (mapped_dst_nents > 1)
  5328. + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  5329. + dst_sg_idx, 0);
  5330. +
  5331. + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
  5332. + DMA_TO_DEVICE);
  5333. + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
  5334. + dev_err(dev, "unable to map S/G table\n");
  5335. + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
  5336. + iv_dma, ivsize, 0, 0);
  5337. + qi_cache_free(edesc);
  5338. + return ERR_PTR(-ENOMEM);
  5339. + }
  5340. +
  5341. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  5342. + dpaa2_fl_set_final(in_fle, true);
  5343. + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
  5344. + dpaa2_fl_set_len(out_fle, req->cryptlen);
  5345. +
  5346. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  5347. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  5348. +
  5349. + if (req->src == req->dst) {
  5350. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  5351. + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
  5352. + sizeof(*sg_table));
  5353. + } else if (mapped_dst_nents > 1) {
  5354. + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
  5355. + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
  5356. + sizeof(*sg_table));
  5357. + } else {
  5358. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  5359. + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
  5360. + }
  5361. +
  5362. + return edesc;
  5363. +}
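+
+/*
+ * Informational sketch of the skcipher S/G table built above:
+ *
+ *   sg_table[0]            -> IV
+ *   sg_table[1..]          -> req->src segments
+ *   sg_table[dst_sg_idx..] -> req->dst segments (only when dst has > 1 segment)
+ *
+ * The input frame-list entry covers ivsize + cryptlen bytes; when src == dst
+ * the output entry simply starts one table entry further in, skipping the IV.
+ */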
  5364. +
  5365. +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
  5366. + struct aead_request *req)
  5367. +{
  5368. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  5369. + int ivsize = crypto_aead_ivsize(aead);
  5370. +
  5371. + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  5372. + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
  5373. + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  5374. +}
  5375. +
  5376. +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
  5377. + struct aead_request *req)
  5378. +{
  5379. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  5380. + int ivsize = crypto_aead_ivsize(tls);
  5381. +
  5382. + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
  5383. + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
  5384. + edesc->qm_sg_bytes);
  5385. +}
  5386. +
  5387. +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
  5388. + struct skcipher_request *req)
  5389. +{
  5390. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5391. + int ivsize = crypto_skcipher_ivsize(skcipher);
  5392. +
  5393. + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  5394. + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
  5395. +}
  5396. +
  5397. +static void aead_encrypt_done(void *cbk_ctx, u32 status)
  5398. +{
  5399. + struct crypto_async_request *areq = cbk_ctx;
  5400. + struct aead_request *req = container_of(areq, struct aead_request,
  5401. + base);
  5402. + struct caam_request *req_ctx = to_caam_req(areq);
  5403. + struct aead_edesc *edesc = req_ctx->edesc;
  5404. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  5405. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5406. + int ecode = 0;
  5407. +
  5408. +#ifdef DEBUG
  5409. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5410. +#endif
  5411. +
  5412. + if (unlikely(status)) {
  5413. + caam_qi2_strstatus(ctx->dev, status);
  5414. + ecode = -EIO;
  5415. + }
  5416. +
  5417. + aead_unmap(ctx->dev, edesc, req);
  5418. + qi_cache_free(edesc);
  5419. + aead_request_complete(req, ecode);
  5420. +}
  5421. +
  5422. +static void aead_decrypt_done(void *cbk_ctx, u32 status)
  5423. +{
  5424. + struct crypto_async_request *areq = cbk_ctx;
  5425. + struct aead_request *req = container_of(areq, struct aead_request,
  5426. + base);
  5427. + struct caam_request *req_ctx = to_caam_req(areq);
  5428. + struct aead_edesc *edesc = req_ctx->edesc;
  5429. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  5430. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5431. + int ecode = 0;
  5432. +
  5433. +#ifdef DEBUG
  5434. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5435. +#endif
  5436. +
  5437. + if (unlikely(status)) {
  5438. + caam_qi2_strstatus(ctx->dev, status);
  5439. + /*
   5440. + * Verify that the hardware ICV check passed; otherwise return -EBADMSG
  5441. + */
  5442. + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
  5443. + JRSTA_CCBERR_ERRID_ICVCHK)
  5444. + ecode = -EBADMSG;
  5445. + else
  5446. + ecode = -EIO;
  5447. + }
  5448. +
  5449. + aead_unmap(ctx->dev, edesc, req);
  5450. + qi_cache_free(edesc);
  5451. + aead_request_complete(req, ecode);
  5452. +}
  5453. +
  5454. +static int aead_encrypt(struct aead_request *req)
  5455. +{
  5456. + struct aead_edesc *edesc;
  5457. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  5458. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5459. + struct caam_request *caam_req = aead_request_ctx(req);
  5460. + int ret;
  5461. +
  5462. + /* allocate extended descriptor */
  5463. + edesc = aead_edesc_alloc(req, true);
  5464. + if (IS_ERR(edesc))
  5465. + return PTR_ERR(edesc);
  5466. +
  5467. + caam_req->flc = &ctx->flc[ENCRYPT];
  5468. + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
  5469. + caam_req->cbk = aead_encrypt_done;
  5470. + caam_req->ctx = &req->base;
  5471. + caam_req->edesc = edesc;
  5472. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5473. + if (ret != -EINPROGRESS &&
  5474. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5475. + aead_unmap(ctx->dev, edesc, req);
  5476. + qi_cache_free(edesc);
  5477. + }
  5478. +
  5479. + return ret;
  5480. +}
  5481. +
  5482. +static int aead_decrypt(struct aead_request *req)
  5483. +{
  5484. + struct aead_edesc *edesc;
  5485. + struct crypto_aead *aead = crypto_aead_reqtfm(req);
  5486. + struct caam_ctx *ctx = crypto_aead_ctx(aead);
  5487. + struct caam_request *caam_req = aead_request_ctx(req);
  5488. + int ret;
  5489. +
  5490. + /* allocate extended descriptor */
  5491. + edesc = aead_edesc_alloc(req, false);
  5492. + if (IS_ERR(edesc))
  5493. + return PTR_ERR(edesc);
  5494. +
  5495. + caam_req->flc = &ctx->flc[DECRYPT];
  5496. + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
  5497. + caam_req->cbk = aead_decrypt_done;
  5498. + caam_req->ctx = &req->base;
  5499. + caam_req->edesc = edesc;
  5500. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5501. + if (ret != -EINPROGRESS &&
  5502. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5503. + aead_unmap(ctx->dev, edesc, req);
  5504. + qi_cache_free(edesc);
  5505. + }
  5506. +
  5507. + return ret;
  5508. +}
  5509. +
  5510. +static void tls_encrypt_done(void *cbk_ctx, u32 status)
  5511. +{
  5512. + struct crypto_async_request *areq = cbk_ctx;
  5513. + struct aead_request *req = container_of(areq, struct aead_request,
  5514. + base);
  5515. + struct caam_request *req_ctx = to_caam_req(areq);
  5516. + struct tls_edesc *edesc = req_ctx->edesc;
  5517. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  5518. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  5519. + int ecode = 0;
  5520. +
  5521. +#ifdef DEBUG
  5522. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5523. +#endif
  5524. +
  5525. + if (unlikely(status)) {
  5526. + caam_qi2_strstatus(ctx->dev, status);
  5527. + ecode = -EIO;
  5528. + }
  5529. +
  5530. + tls_unmap(ctx->dev, edesc, req);
  5531. + qi_cache_free(edesc);
  5532. + aead_request_complete(req, ecode);
  5533. +}
  5534. +
  5535. +static void tls_decrypt_done(void *cbk_ctx, u32 status)
  5536. +{
  5537. + struct crypto_async_request *areq = cbk_ctx;
  5538. + struct aead_request *req = container_of(areq, struct aead_request,
  5539. + base);
  5540. + struct caam_request *req_ctx = to_caam_req(areq);
  5541. + struct tls_edesc *edesc = req_ctx->edesc;
  5542. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  5543. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  5544. + int ecode = 0;
  5545. +
  5546. +#ifdef DEBUG
  5547. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5548. +#endif
  5549. +
  5550. + if (unlikely(status)) {
  5551. + caam_qi2_strstatus(ctx->dev, status);
  5552. + /*
   5553. + * Verify that the hardware ICV check passed; otherwise return -EBADMSG
  5554. + */
  5555. + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
  5556. + JRSTA_CCBERR_ERRID_ICVCHK)
  5557. + ecode = -EBADMSG;
  5558. + else
  5559. + ecode = -EIO;
  5560. + }
  5561. +
  5562. + tls_unmap(ctx->dev, edesc, req);
  5563. + qi_cache_free(edesc);
  5564. + aead_request_complete(req, ecode);
  5565. +}
  5566. +
  5567. +static int tls_encrypt(struct aead_request *req)
  5568. +{
  5569. + struct tls_edesc *edesc;
  5570. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  5571. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  5572. + struct caam_request *caam_req = aead_request_ctx(req);
  5573. + int ret;
  5574. +
  5575. + /* allocate extended descriptor */
  5576. + edesc = tls_edesc_alloc(req, true);
  5577. + if (IS_ERR(edesc))
  5578. + return PTR_ERR(edesc);
  5579. +
  5580. + caam_req->flc = &ctx->flc[ENCRYPT];
  5581. + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
  5582. + caam_req->cbk = tls_encrypt_done;
  5583. + caam_req->ctx = &req->base;
  5584. + caam_req->edesc = edesc;
  5585. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5586. + if (ret != -EINPROGRESS &&
  5587. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5588. + tls_unmap(ctx->dev, edesc, req);
  5589. + qi_cache_free(edesc);
  5590. + }
  5591. +
  5592. + return ret;
  5593. +}
  5594. +
  5595. +static int tls_decrypt(struct aead_request *req)
  5596. +{
  5597. + struct tls_edesc *edesc;
  5598. + struct crypto_aead *tls = crypto_aead_reqtfm(req);
  5599. + struct caam_ctx *ctx = crypto_aead_ctx(tls);
  5600. + struct caam_request *caam_req = aead_request_ctx(req);
  5601. + int ret;
  5602. +
  5603. + /* allocate extended descriptor */
  5604. + edesc = tls_edesc_alloc(req, false);
  5605. + if (IS_ERR(edesc))
  5606. + return PTR_ERR(edesc);
  5607. +
  5608. + caam_req->flc = &ctx->flc[DECRYPT];
  5609. + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
  5610. + caam_req->cbk = tls_decrypt_done;
  5611. + caam_req->ctx = &req->base;
  5612. + caam_req->edesc = edesc;
  5613. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5614. + if (ret != -EINPROGRESS &&
  5615. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5616. + tls_unmap(ctx->dev, edesc, req);
  5617. + qi_cache_free(edesc);
  5618. + }
  5619. +
  5620. + return ret;
  5621. +}
  5622. +
  5623. +static int ipsec_gcm_encrypt(struct aead_request *req)
  5624. +{
  5625. + if (req->assoclen < 8)
  5626. + return -EINVAL;
  5627. +
  5628. + return aead_encrypt(req);
  5629. +}
  5630. +
  5631. +static int ipsec_gcm_decrypt(struct aead_request *req)
  5632. +{
  5633. + if (req->assoclen < 8)
  5634. + return -EINVAL;
  5635. +
  5636. + return aead_decrypt(req);
  5637. +}
  5638. +
  5639. +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
  5640. +{
  5641. + struct crypto_async_request *areq = cbk_ctx;
  5642. + struct skcipher_request *req = skcipher_request_cast(areq);
  5643. + struct caam_request *req_ctx = to_caam_req(areq);
  5644. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5645. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5646. + struct skcipher_edesc *edesc = req_ctx->edesc;
  5647. + int ecode = 0;
  5648. + int ivsize = crypto_skcipher_ivsize(skcipher);
  5649. +
  5650. +#ifdef DEBUG
  5651. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5652. +#endif
  5653. +
  5654. + if (unlikely(status)) {
  5655. + caam_qi2_strstatus(ctx->dev, status);
  5656. + ecode = -EIO;
  5657. + }
  5658. +
  5659. +#ifdef DEBUG
  5660. + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
  5661. + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  5662. + edesc->src_nents > 1 ? 100 : ivsize, 1);
  5663. + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
  5664. + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
  5665. + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
  5666. +#endif
  5667. +
  5668. + skcipher_unmap(ctx->dev, edesc, req);
  5669. +
  5670. + /*
  5671. + * The crypto API expects us to set the IV (req->iv) to the last
  5672. + * ciphertext block. This is used e.g. by the CTS mode.
  5673. + */
  5674. + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
  5675. + ivsize, 0);
  5676. +
  5677. + qi_cache_free(edesc);
  5678. + skcipher_request_complete(req, ecode);
  5679. +}
  5680. +
  5681. +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
  5682. +{
  5683. + struct crypto_async_request *areq = cbk_ctx;
  5684. + struct skcipher_request *req = skcipher_request_cast(areq);
  5685. + struct caam_request *req_ctx = to_caam_req(areq);
  5686. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5687. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5688. + struct skcipher_edesc *edesc = req_ctx->edesc;
  5689. + int ecode = 0;
  5690. +#ifdef DEBUG
  5691. + int ivsize = crypto_skcipher_ivsize(skcipher);
  5692. +
  5693. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  5694. +#endif
  5695. +
  5696. + if (unlikely(status)) {
  5697. + caam_qi2_strstatus(ctx->dev, status);
  5698. + ecode = -EIO;
  5699. + }
  5700. +
  5701. +#ifdef DEBUG
  5702. + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
  5703. + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  5704. + edesc->src_nents > 1 ? 100 : ivsize, 1);
  5705. + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
  5706. + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
  5707. + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
  5708. +#endif
  5709. +
  5710. + skcipher_unmap(ctx->dev, edesc, req);
  5711. + qi_cache_free(edesc);
  5712. + skcipher_request_complete(req, ecode);
  5713. +}
  5714. +
  5715. +static int skcipher_encrypt(struct skcipher_request *req)
  5716. +{
  5717. + struct skcipher_edesc *edesc;
  5718. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5719. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5720. + struct caam_request *caam_req = skcipher_request_ctx(req);
  5721. + int ret;
  5722. +
  5723. + /* allocate extended descriptor */
  5724. + edesc = skcipher_edesc_alloc(req);
  5725. + if (IS_ERR(edesc))
  5726. + return PTR_ERR(edesc);
  5727. +
  5728. + caam_req->flc = &ctx->flc[ENCRYPT];
  5729. + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
  5730. + caam_req->cbk = skcipher_encrypt_done;
  5731. + caam_req->ctx = &req->base;
  5732. + caam_req->edesc = edesc;
  5733. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5734. + if (ret != -EINPROGRESS &&
  5735. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5736. + skcipher_unmap(ctx->dev, edesc, req);
  5737. + qi_cache_free(edesc);
  5738. + }
  5739. +
  5740. + return ret;
  5741. +}
  5742. +
  5743. +static int skcipher_decrypt(struct skcipher_request *req)
  5744. +{
  5745. + struct skcipher_edesc *edesc;
  5746. + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  5747. + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  5748. + struct caam_request *caam_req = skcipher_request_ctx(req);
  5749. + int ivsize = crypto_skcipher_ivsize(skcipher);
  5750. + int ret;
  5751. +
  5752. + /* allocate extended descriptor */
  5753. + edesc = skcipher_edesc_alloc(req);
  5754. + if (IS_ERR(edesc))
  5755. + return PTR_ERR(edesc);
  5756. +
  5757. + /*
  5758. + * The crypto API expects us to set the IV (req->iv) to the last
  5759. + * ciphertext block.
  5760. + */
  5761. + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
  5762. + ivsize, 0);
  5763. +
  5764. + caam_req->flc = &ctx->flc[DECRYPT];
  5765. + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
  5766. + caam_req->cbk = skcipher_decrypt_done;
  5767. + caam_req->ctx = &req->base;
  5768. + caam_req->edesc = edesc;
  5769. + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
  5770. + if (ret != -EINPROGRESS &&
  5771. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  5772. + skcipher_unmap(ctx->dev, edesc, req);
  5773. + qi_cache_free(edesc);
  5774. + }
  5775. +
  5776. + return ret;
  5777. +}
  5778. +
  5779. +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
  5780. + bool uses_dkp)
  5781. +{
  5782. + dma_addr_t dma_addr;
  5783. + int i;
  5784. +
  5785. + /* copy descriptor header template value */
  5786. + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  5787. + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  5788. +
  5789. + ctx->dev = caam->dev;
  5790. + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
  5791. +
  5792. + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
  5793. + offsetof(struct caam_ctx, flc_dma),
  5794. + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
  5795. + if (dma_mapping_error(ctx->dev, dma_addr)) {
  5796. + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
  5797. + return -ENOMEM;
  5798. + }
  5799. +
  5800. + for (i = 0; i < NUM_OP; i++)
  5801. + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
  5802. + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
  5803. +
  5804. + return 0;
  5805. +}
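+
+/*
+ * Informational: the single mapping above is assumed to cover struct caam_ctx
+ * from flc[] up to (but not including) flc_dma[], i.e. the NUM_OP flow
+ * contexts immediately followed by ctx->key. The per-operation flc_dma[i]
+ * handles and key_dma are then derived as fixed offsets into that one DMA
+ * mapping, which is why the single unmap in caam_exit_common() suffices.
+ */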
  5806. +
  5807. +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
  5808. +{
  5809. + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
  5810. + struct caam_skcipher_alg *caam_alg =
  5811. + container_of(alg, typeof(*caam_alg), skcipher);
  5812. +
  5813. + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
  5814. + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
  5815. +}
  5816. +
  5817. +static int caam_cra_init_aead(struct crypto_aead *tfm)
  5818. +{
  5819. + struct aead_alg *alg = crypto_aead_alg(tfm);
  5820. + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
  5821. + aead);
  5822. +
  5823. + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
  5824. + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
  5825. + (alg->setkey == aead_setkey) ||
  5826. + (alg->setkey == tls_setkey));
  5827. +}
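+
+/*
+ * Informational: uses_dkp is true for the authenc and TLS setkey paths, where
+ * the accelerator's DKP (Derived Key Protocol) writes the split MAC key back
+ * into ctx->key; that is presumably why caam_cra_init() maps the context
+ * DMA_BIDIRECTIONAL in that case instead of DMA_TO_DEVICE.
+ */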
  5828. +
  5829. +static void caam_exit_common(struct caam_ctx *ctx)
  5830. +{
  5831. + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
  5832. + offsetof(struct caam_ctx, flc_dma), ctx->dir,
  5833. + DMA_ATTR_SKIP_CPU_SYNC);
  5834. +}
  5835. +
  5836. +static void caam_cra_exit(struct crypto_skcipher *tfm)
  5837. +{
  5838. + caam_exit_common(crypto_skcipher_ctx(tfm));
  5839. +}
  5840. +
  5841. +static void caam_cra_exit_aead(struct crypto_aead *tfm)
  5842. +{
  5843. + caam_exit_common(crypto_aead_ctx(tfm));
  5844. +}
  5845. +
  5846. +static struct caam_skcipher_alg driver_algs[] = {
  5847. + {
  5848. + .skcipher = {
  5849. + .base = {
  5850. + .cra_name = "cbc(aes)",
  5851. + .cra_driver_name = "cbc-aes-caam-qi2",
  5852. + .cra_blocksize = AES_BLOCK_SIZE,
  5853. + },
  5854. + .setkey = skcipher_setkey,
  5855. + .encrypt = skcipher_encrypt,
  5856. + .decrypt = skcipher_decrypt,
  5857. + .min_keysize = AES_MIN_KEY_SIZE,
  5858. + .max_keysize = AES_MAX_KEY_SIZE,
  5859. + .ivsize = AES_BLOCK_SIZE,
  5860. + },
  5861. + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  5862. + },
  5863. + {
  5864. + .skcipher = {
  5865. + .base = {
  5866. + .cra_name = "cbc(des3_ede)",
  5867. + .cra_driver_name = "cbc-3des-caam-qi2",
  5868. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  5869. + },
  5870. + .setkey = skcipher_setkey,
  5871. + .encrypt = skcipher_encrypt,
  5872. + .decrypt = skcipher_decrypt,
  5873. + .min_keysize = DES3_EDE_KEY_SIZE,
  5874. + .max_keysize = DES3_EDE_KEY_SIZE,
  5875. + .ivsize = DES3_EDE_BLOCK_SIZE,
  5876. + },
  5877. + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  5878. + },
  5879. + {
  5880. + .skcipher = {
  5881. + .base = {
  5882. + .cra_name = "cbc(des)",
  5883. + .cra_driver_name = "cbc-des-caam-qi2",
  5884. + .cra_blocksize = DES_BLOCK_SIZE,
  5885. + },
  5886. + .setkey = skcipher_setkey,
  5887. + .encrypt = skcipher_encrypt,
  5888. + .decrypt = skcipher_decrypt,
  5889. + .min_keysize = DES_KEY_SIZE,
  5890. + .max_keysize = DES_KEY_SIZE,
  5891. + .ivsize = DES_BLOCK_SIZE,
  5892. + },
  5893. + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  5894. + },
  5895. + {
  5896. + .skcipher = {
  5897. + .base = {
  5898. + .cra_name = "ctr(aes)",
  5899. + .cra_driver_name = "ctr-aes-caam-qi2",
  5900. + .cra_blocksize = 1,
  5901. + },
  5902. + .setkey = skcipher_setkey,
  5903. + .encrypt = skcipher_encrypt,
  5904. + .decrypt = skcipher_decrypt,
  5905. + .min_keysize = AES_MIN_KEY_SIZE,
  5906. + .max_keysize = AES_MAX_KEY_SIZE,
  5907. + .ivsize = AES_BLOCK_SIZE,
  5908. + .chunksize = AES_BLOCK_SIZE,
  5909. + },
  5910. + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
  5911. + OP_ALG_AAI_CTR_MOD128,
  5912. + },
  5913. + {
  5914. + .skcipher = {
  5915. + .base = {
  5916. + .cra_name = "rfc3686(ctr(aes))",
  5917. + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
  5918. + .cra_blocksize = 1,
  5919. + },
  5920. + .setkey = skcipher_setkey,
  5921. + .encrypt = skcipher_encrypt,
  5922. + .decrypt = skcipher_decrypt,
  5923. + .min_keysize = AES_MIN_KEY_SIZE +
  5924. + CTR_RFC3686_NONCE_SIZE,
  5925. + .max_keysize = AES_MAX_KEY_SIZE +
  5926. + CTR_RFC3686_NONCE_SIZE,
  5927. + .ivsize = CTR_RFC3686_IV_SIZE,
  5928. + .chunksize = AES_BLOCK_SIZE,
  5929. + },
  5930. + .caam = {
  5931. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  5932. + OP_ALG_AAI_CTR_MOD128,
  5933. + .rfc3686 = true,
  5934. + },
  5935. + },
  5936. + {
  5937. + .skcipher = {
  5938. + .base = {
  5939. + .cra_name = "xts(aes)",
  5940. + .cra_driver_name = "xts-aes-caam-qi2",
  5941. + .cra_blocksize = AES_BLOCK_SIZE,
  5942. + },
  5943. + .setkey = xts_skcipher_setkey,
  5944. + .encrypt = skcipher_encrypt,
  5945. + .decrypt = skcipher_decrypt,
  5946. + .min_keysize = 2 * AES_MIN_KEY_SIZE,
  5947. + .max_keysize = 2 * AES_MAX_KEY_SIZE,
  5948. + .ivsize = AES_BLOCK_SIZE,
  5949. + },
  5950. + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  5951. + }
  5952. +};
  5953. +
  5954. +static struct caam_aead_alg driver_aeads[] = {
  5955. + {
  5956. + .aead = {
  5957. + .base = {
  5958. + .cra_name = "rfc4106(gcm(aes))",
  5959. + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
  5960. + .cra_blocksize = 1,
  5961. + },
  5962. + .setkey = rfc4106_setkey,
  5963. + .setauthsize = rfc4106_setauthsize,
  5964. + .encrypt = ipsec_gcm_encrypt,
  5965. + .decrypt = ipsec_gcm_decrypt,
  5966. + .ivsize = 8,
  5967. + .maxauthsize = AES_BLOCK_SIZE,
  5968. + },
  5969. + .caam = {
  5970. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  5971. + },
  5972. + },
  5973. + {
  5974. + .aead = {
  5975. + .base = {
  5976. + .cra_name = "rfc4543(gcm(aes))",
  5977. + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
  5978. + .cra_blocksize = 1,
  5979. + },
  5980. + .setkey = rfc4543_setkey,
  5981. + .setauthsize = rfc4543_setauthsize,
  5982. + .encrypt = ipsec_gcm_encrypt,
  5983. + .decrypt = ipsec_gcm_decrypt,
  5984. + .ivsize = 8,
  5985. + .maxauthsize = AES_BLOCK_SIZE,
  5986. + },
  5987. + .caam = {
  5988. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  5989. + },
  5990. + },
  5991. + /* Galois Counter Mode */
  5992. + {
  5993. + .aead = {
  5994. + .base = {
  5995. + .cra_name = "gcm(aes)",
  5996. + .cra_driver_name = "gcm-aes-caam-qi2",
  5997. + .cra_blocksize = 1,
  5998. + },
  5999. + .setkey = gcm_setkey,
  6000. + .setauthsize = gcm_setauthsize,
  6001. + .encrypt = aead_encrypt,
  6002. + .decrypt = aead_decrypt,
  6003. + .ivsize = 12,
  6004. + .maxauthsize = AES_BLOCK_SIZE,
  6005. + },
  6006. + .caam = {
  6007. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  6008. + }
  6009. + },
  6010. + /* single-pass ipsec_esp descriptor */
  6011. + {
  6012. + .aead = {
  6013. + .base = {
  6014. + .cra_name = "authenc(hmac(md5),cbc(aes))",
  6015. + .cra_driver_name = "authenc-hmac-md5-"
  6016. + "cbc-aes-caam-qi2",
  6017. + .cra_blocksize = AES_BLOCK_SIZE,
  6018. + },
  6019. + .setkey = aead_setkey,
  6020. + .setauthsize = aead_setauthsize,
  6021. + .encrypt = aead_encrypt,
  6022. + .decrypt = aead_decrypt,
  6023. + .ivsize = AES_BLOCK_SIZE,
  6024. + .maxauthsize = MD5_DIGEST_SIZE,
  6025. + },
  6026. + .caam = {
  6027. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6028. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6029. + OP_ALG_AAI_HMAC_PRECOMP,
  6030. + }
  6031. + },
  6032. + {
  6033. + .aead = {
  6034. + .base = {
  6035. + .cra_name = "echainiv(authenc(hmac(md5),"
  6036. + "cbc(aes)))",
  6037. + .cra_driver_name = "echainiv-authenc-hmac-md5-"
  6038. + "cbc-aes-caam-qi2",
  6039. + .cra_blocksize = AES_BLOCK_SIZE,
  6040. + },
  6041. + .setkey = aead_setkey,
  6042. + .setauthsize = aead_setauthsize,
  6043. + .encrypt = aead_encrypt,
  6044. + .decrypt = aead_decrypt,
  6045. + .ivsize = AES_BLOCK_SIZE,
  6046. + .maxauthsize = MD5_DIGEST_SIZE,
  6047. + },
  6048. + .caam = {
  6049. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6050. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6051. + OP_ALG_AAI_HMAC_PRECOMP,
  6052. + .geniv = true,
  6053. + }
  6054. + },
  6055. + {
  6056. + .aead = {
  6057. + .base = {
  6058. + .cra_name = "authenc(hmac(sha1),cbc(aes))",
  6059. + .cra_driver_name = "authenc-hmac-sha1-"
  6060. + "cbc-aes-caam-qi2",
  6061. + .cra_blocksize = AES_BLOCK_SIZE,
  6062. + },
  6063. + .setkey = aead_setkey,
  6064. + .setauthsize = aead_setauthsize,
  6065. + .encrypt = aead_encrypt,
  6066. + .decrypt = aead_decrypt,
  6067. + .ivsize = AES_BLOCK_SIZE,
  6068. + .maxauthsize = SHA1_DIGEST_SIZE,
  6069. + },
  6070. + .caam = {
  6071. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6072. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6073. + OP_ALG_AAI_HMAC_PRECOMP,
  6074. + }
  6075. + },
  6076. + {
  6077. + .aead = {
  6078. + .base = {
  6079. + .cra_name = "echainiv(authenc(hmac(sha1),"
  6080. + "cbc(aes)))",
  6081. + .cra_driver_name = "echainiv-authenc-"
  6082. + "hmac-sha1-cbc-aes-caam-qi2",
  6083. + .cra_blocksize = AES_BLOCK_SIZE,
  6084. + },
  6085. + .setkey = aead_setkey,
  6086. + .setauthsize = aead_setauthsize,
  6087. + .encrypt = aead_encrypt,
  6088. + .decrypt = aead_decrypt,
  6089. + .ivsize = AES_BLOCK_SIZE,
  6090. + .maxauthsize = SHA1_DIGEST_SIZE,
  6091. + },
  6092. + .caam = {
  6093. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6094. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6095. + OP_ALG_AAI_HMAC_PRECOMP,
  6096. + .geniv = true,
  6097. + },
  6098. + },
  6099. + {
  6100. + .aead = {
  6101. + .base = {
  6102. + .cra_name = "authenc(hmac(sha224),cbc(aes))",
  6103. + .cra_driver_name = "authenc-hmac-sha224-"
  6104. + "cbc-aes-caam-qi2",
  6105. + .cra_blocksize = AES_BLOCK_SIZE,
  6106. + },
  6107. + .setkey = aead_setkey,
  6108. + .setauthsize = aead_setauthsize,
  6109. + .encrypt = aead_encrypt,
  6110. + .decrypt = aead_decrypt,
  6111. + .ivsize = AES_BLOCK_SIZE,
  6112. + .maxauthsize = SHA224_DIGEST_SIZE,
  6113. + },
  6114. + .caam = {
  6115. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6116. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6117. + OP_ALG_AAI_HMAC_PRECOMP,
  6118. + }
  6119. + },
  6120. + {
  6121. + .aead = {
  6122. + .base = {
  6123. + .cra_name = "echainiv(authenc(hmac(sha224),"
  6124. + "cbc(aes)))",
  6125. + .cra_driver_name = "echainiv-authenc-"
  6126. + "hmac-sha224-cbc-aes-caam-qi2",
  6127. + .cra_blocksize = AES_BLOCK_SIZE,
  6128. + },
  6129. + .setkey = aead_setkey,
  6130. + .setauthsize = aead_setauthsize,
  6131. + .encrypt = aead_encrypt,
  6132. + .decrypt = aead_decrypt,
  6133. + .ivsize = AES_BLOCK_SIZE,
  6134. + .maxauthsize = SHA224_DIGEST_SIZE,
  6135. + },
  6136. + .caam = {
  6137. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6138. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6139. + OP_ALG_AAI_HMAC_PRECOMP,
  6140. + .geniv = true,
  6141. + }
  6142. + },
  6143. + {
  6144. + .aead = {
  6145. + .base = {
  6146. + .cra_name = "authenc(hmac(sha256),cbc(aes))",
  6147. + .cra_driver_name = "authenc-hmac-sha256-"
  6148. + "cbc-aes-caam-qi2",
  6149. + .cra_blocksize = AES_BLOCK_SIZE,
  6150. + },
  6151. + .setkey = aead_setkey,
  6152. + .setauthsize = aead_setauthsize,
  6153. + .encrypt = aead_encrypt,
  6154. + .decrypt = aead_decrypt,
  6155. + .ivsize = AES_BLOCK_SIZE,
  6156. + .maxauthsize = SHA256_DIGEST_SIZE,
  6157. + },
  6158. + .caam = {
  6159. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6160. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6161. + OP_ALG_AAI_HMAC_PRECOMP,
  6162. + }
  6163. + },
  6164. + {
  6165. + .aead = {
  6166. + .base = {
  6167. + .cra_name = "echainiv(authenc(hmac(sha256),"
  6168. + "cbc(aes)))",
  6169. + .cra_driver_name = "echainiv-authenc-"
  6170. + "hmac-sha256-cbc-aes-"
  6171. + "caam-qi2",
  6172. + .cra_blocksize = AES_BLOCK_SIZE,
  6173. + },
  6174. + .setkey = aead_setkey,
  6175. + .setauthsize = aead_setauthsize,
  6176. + .encrypt = aead_encrypt,
  6177. + .decrypt = aead_decrypt,
  6178. + .ivsize = AES_BLOCK_SIZE,
  6179. + .maxauthsize = SHA256_DIGEST_SIZE,
  6180. + },
  6181. + .caam = {
  6182. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6183. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6184. + OP_ALG_AAI_HMAC_PRECOMP,
  6185. + .geniv = true,
  6186. + }
  6187. + },
  6188. + {
  6189. + .aead = {
  6190. + .base = {
  6191. + .cra_name = "authenc(hmac(sha384),cbc(aes))",
  6192. + .cra_driver_name = "authenc-hmac-sha384-"
  6193. + "cbc-aes-caam-qi2",
  6194. + .cra_blocksize = AES_BLOCK_SIZE,
  6195. + },
  6196. + .setkey = aead_setkey,
  6197. + .setauthsize = aead_setauthsize,
  6198. + .encrypt = aead_encrypt,
  6199. + .decrypt = aead_decrypt,
  6200. + .ivsize = AES_BLOCK_SIZE,
  6201. + .maxauthsize = SHA384_DIGEST_SIZE,
  6202. + },
  6203. + .caam = {
  6204. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6205. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6206. + OP_ALG_AAI_HMAC_PRECOMP,
  6207. + }
  6208. + },
  6209. + {
  6210. + .aead = {
  6211. + .base = {
  6212. + .cra_name = "echainiv(authenc(hmac(sha384),"
  6213. + "cbc(aes)))",
  6214. + .cra_driver_name = "echainiv-authenc-"
  6215. + "hmac-sha384-cbc-aes-"
  6216. + "caam-qi2",
  6217. + .cra_blocksize = AES_BLOCK_SIZE,
  6218. + },
  6219. + .setkey = aead_setkey,
  6220. + .setauthsize = aead_setauthsize,
  6221. + .encrypt = aead_encrypt,
  6222. + .decrypt = aead_decrypt,
  6223. + .ivsize = AES_BLOCK_SIZE,
  6224. + .maxauthsize = SHA384_DIGEST_SIZE,
  6225. + },
  6226. + .caam = {
  6227. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6228. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6229. + OP_ALG_AAI_HMAC_PRECOMP,
  6230. + .geniv = true,
  6231. + }
  6232. + },
  6233. + {
  6234. + .aead = {
  6235. + .base = {
  6236. + .cra_name = "authenc(hmac(sha512),cbc(aes))",
  6237. + .cra_driver_name = "authenc-hmac-sha512-"
  6238. + "cbc-aes-caam-qi2",
  6239. + .cra_blocksize = AES_BLOCK_SIZE,
  6240. + },
  6241. + .setkey = aead_setkey,
  6242. + .setauthsize = aead_setauthsize,
  6243. + .encrypt = aead_encrypt,
  6244. + .decrypt = aead_decrypt,
  6245. + .ivsize = AES_BLOCK_SIZE,
  6246. + .maxauthsize = SHA512_DIGEST_SIZE,
  6247. + },
  6248. + .caam = {
  6249. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6250. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6251. + OP_ALG_AAI_HMAC_PRECOMP,
  6252. + }
  6253. + },
  6254. + {
  6255. + .aead = {
  6256. + .base = {
  6257. + .cra_name = "echainiv(authenc(hmac(sha512),"
  6258. + "cbc(aes)))",
  6259. + .cra_driver_name = "echainiv-authenc-"
  6260. + "hmac-sha512-cbc-aes-"
  6261. + "caam-qi2",
  6262. + .cra_blocksize = AES_BLOCK_SIZE,
  6263. + },
  6264. + .setkey = aead_setkey,
  6265. + .setauthsize = aead_setauthsize,
  6266. + .encrypt = aead_encrypt,
  6267. + .decrypt = aead_decrypt,
  6268. + .ivsize = AES_BLOCK_SIZE,
  6269. + .maxauthsize = SHA512_DIGEST_SIZE,
  6270. + },
  6271. + .caam = {
  6272. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  6273. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6274. + OP_ALG_AAI_HMAC_PRECOMP,
  6275. + .geniv = true,
  6276. + }
  6277. + },
  6278. + {
  6279. + .aead = {
  6280. + .base = {
  6281. + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  6282. + .cra_driver_name = "authenc-hmac-md5-"
  6283. + "cbc-des3_ede-caam-qi2",
  6284. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6285. + },
  6286. + .setkey = aead_setkey,
  6287. + .setauthsize = aead_setauthsize,
  6288. + .encrypt = aead_encrypt,
  6289. + .decrypt = aead_decrypt,
  6290. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6291. + .maxauthsize = MD5_DIGEST_SIZE,
  6292. + },
  6293. + .caam = {
  6294. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6295. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6296. + OP_ALG_AAI_HMAC_PRECOMP,
  6297. + }
  6298. + },
  6299. + {
  6300. + .aead = {
  6301. + .base = {
  6302. + .cra_name = "echainiv(authenc(hmac(md5),"
  6303. + "cbc(des3_ede)))",
  6304. + .cra_driver_name = "echainiv-authenc-hmac-md5-"
  6305. + "cbc-des3_ede-caam-qi2",
  6306. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6307. + },
  6308. + .setkey = aead_setkey,
  6309. + .setauthsize = aead_setauthsize,
  6310. + .encrypt = aead_encrypt,
  6311. + .decrypt = aead_decrypt,
  6312. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6313. + .maxauthsize = MD5_DIGEST_SIZE,
  6314. + },
  6315. + .caam = {
  6316. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6317. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6318. + OP_ALG_AAI_HMAC_PRECOMP,
  6319. + .geniv = true,
  6320. + }
  6321. + },
  6322. + {
  6323. + .aead = {
  6324. + .base = {
  6325. + .cra_name = "authenc(hmac(sha1),"
  6326. + "cbc(des3_ede))",
  6327. + .cra_driver_name = "authenc-hmac-sha1-"
  6328. + "cbc-des3_ede-caam-qi2",
  6329. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6330. + },
  6331. + .setkey = aead_setkey,
  6332. + .setauthsize = aead_setauthsize,
  6333. + .encrypt = aead_encrypt,
  6334. + .decrypt = aead_decrypt,
  6335. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6336. + .maxauthsize = SHA1_DIGEST_SIZE,
  6337. + },
  6338. + .caam = {
  6339. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6340. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6341. + OP_ALG_AAI_HMAC_PRECOMP,
  6342. + },
  6343. + },
  6344. + {
  6345. + .aead = {
  6346. + .base = {
  6347. + .cra_name = "echainiv(authenc(hmac(sha1),"
  6348. + "cbc(des3_ede)))",
  6349. + .cra_driver_name = "echainiv-authenc-"
  6350. + "hmac-sha1-"
  6351. + "cbc-des3_ede-caam-qi2",
  6352. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6353. + },
  6354. + .setkey = aead_setkey,
  6355. + .setauthsize = aead_setauthsize,
  6356. + .encrypt = aead_encrypt,
  6357. + .decrypt = aead_decrypt,
  6358. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6359. + .maxauthsize = SHA1_DIGEST_SIZE,
  6360. + },
  6361. + .caam = {
  6362. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6363. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6364. + OP_ALG_AAI_HMAC_PRECOMP,
  6365. + .geniv = true,
  6366. + }
  6367. + },
  6368. + {
  6369. + .aead = {
  6370. + .base = {
  6371. + .cra_name = "authenc(hmac(sha224),"
  6372. + "cbc(des3_ede))",
  6373. + .cra_driver_name = "authenc-hmac-sha224-"
  6374. + "cbc-des3_ede-caam-qi2",
  6375. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6376. + },
  6377. + .setkey = aead_setkey,
  6378. + .setauthsize = aead_setauthsize,
  6379. + .encrypt = aead_encrypt,
  6380. + .decrypt = aead_decrypt,
  6381. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6382. + .maxauthsize = SHA224_DIGEST_SIZE,
  6383. + },
  6384. + .caam = {
  6385. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6386. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6387. + OP_ALG_AAI_HMAC_PRECOMP,
  6388. + },
  6389. + },
  6390. + {
  6391. + .aead = {
  6392. + .base = {
  6393. + .cra_name = "echainiv(authenc(hmac(sha224),"
  6394. + "cbc(des3_ede)))",
  6395. + .cra_driver_name = "echainiv-authenc-"
  6396. + "hmac-sha224-"
  6397. + "cbc-des3_ede-caam-qi2",
  6398. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6399. + },
  6400. + .setkey = aead_setkey,
  6401. + .setauthsize = aead_setauthsize,
  6402. + .encrypt = aead_encrypt,
  6403. + .decrypt = aead_decrypt,
  6404. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6405. + .maxauthsize = SHA224_DIGEST_SIZE,
  6406. + },
  6407. + .caam = {
  6408. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6409. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6410. + OP_ALG_AAI_HMAC_PRECOMP,
  6411. + .geniv = true,
  6412. + }
  6413. + },
  6414. + {
  6415. + .aead = {
  6416. + .base = {
  6417. + .cra_name = "authenc(hmac(sha256),"
  6418. + "cbc(des3_ede))",
  6419. + .cra_driver_name = "authenc-hmac-sha256-"
  6420. + "cbc-des3_ede-caam-qi2",
  6421. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6422. + },
  6423. + .setkey = aead_setkey,
  6424. + .setauthsize = aead_setauthsize,
  6425. + .encrypt = aead_encrypt,
  6426. + .decrypt = aead_decrypt,
  6427. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6428. + .maxauthsize = SHA256_DIGEST_SIZE,
  6429. + },
  6430. + .caam = {
  6431. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6432. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6433. + OP_ALG_AAI_HMAC_PRECOMP,
  6434. + },
  6435. + },
  6436. + {
  6437. + .aead = {
  6438. + .base = {
  6439. + .cra_name = "echainiv(authenc(hmac(sha256),"
  6440. + "cbc(des3_ede)))",
  6441. + .cra_driver_name = "echainiv-authenc-"
  6442. + "hmac-sha256-"
  6443. + "cbc-des3_ede-caam-qi2",
  6444. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6445. + },
  6446. + .setkey = aead_setkey,
  6447. + .setauthsize = aead_setauthsize,
  6448. + .encrypt = aead_encrypt,
  6449. + .decrypt = aead_decrypt,
  6450. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6451. + .maxauthsize = SHA256_DIGEST_SIZE,
  6452. + },
  6453. + .caam = {
  6454. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6455. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6456. + OP_ALG_AAI_HMAC_PRECOMP,
  6457. + .geniv = true,
  6458. + }
  6459. + },
  6460. + {
  6461. + .aead = {
  6462. + .base = {
  6463. + .cra_name = "authenc(hmac(sha384),"
  6464. + "cbc(des3_ede))",
  6465. + .cra_driver_name = "authenc-hmac-sha384-"
  6466. + "cbc-des3_ede-caam-qi2",
  6467. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6468. + },
  6469. + .setkey = aead_setkey,
  6470. + .setauthsize = aead_setauthsize,
  6471. + .encrypt = aead_encrypt,
  6472. + .decrypt = aead_decrypt,
  6473. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6474. + .maxauthsize = SHA384_DIGEST_SIZE,
  6475. + },
  6476. + .caam = {
  6477. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6478. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6479. + OP_ALG_AAI_HMAC_PRECOMP,
  6480. + },
  6481. + },
  6482. + {
  6483. + .aead = {
  6484. + .base = {
  6485. + .cra_name = "echainiv(authenc(hmac(sha384),"
  6486. + "cbc(des3_ede)))",
  6487. + .cra_driver_name = "echainiv-authenc-"
  6488. + "hmac-sha384-"
  6489. + "cbc-des3_ede-caam-qi2",
  6490. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6491. + },
  6492. + .setkey = aead_setkey,
  6493. + .setauthsize = aead_setauthsize,
  6494. + .encrypt = aead_encrypt,
  6495. + .decrypt = aead_decrypt,
  6496. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6497. + .maxauthsize = SHA384_DIGEST_SIZE,
  6498. + },
  6499. + .caam = {
  6500. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6501. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6502. + OP_ALG_AAI_HMAC_PRECOMP,
  6503. + .geniv = true,
  6504. + }
  6505. + },
  6506. + {
  6507. + .aead = {
  6508. + .base = {
  6509. + .cra_name = "authenc(hmac(sha512),"
  6510. + "cbc(des3_ede))",
  6511. + .cra_driver_name = "authenc-hmac-sha512-"
  6512. + "cbc-des3_ede-caam-qi2",
  6513. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6514. + },
  6515. + .setkey = aead_setkey,
  6516. + .setauthsize = aead_setauthsize,
  6517. + .encrypt = aead_encrypt,
  6518. + .decrypt = aead_decrypt,
  6519. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6520. + .maxauthsize = SHA512_DIGEST_SIZE,
  6521. + },
  6522. + .caam = {
  6523. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6524. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6525. + OP_ALG_AAI_HMAC_PRECOMP,
  6526. + },
  6527. + },
  6528. + {
  6529. + .aead = {
  6530. + .base = {
  6531. + .cra_name = "echainiv(authenc(hmac(sha512),"
  6532. + "cbc(des3_ede)))",
  6533. + .cra_driver_name = "echainiv-authenc-"
  6534. + "hmac-sha512-"
  6535. + "cbc-des3_ede-caam-qi2",
  6536. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  6537. + },
  6538. + .setkey = aead_setkey,
  6539. + .setauthsize = aead_setauthsize,
  6540. + .encrypt = aead_encrypt,
  6541. + .decrypt = aead_decrypt,
  6542. + .ivsize = DES3_EDE_BLOCK_SIZE,
  6543. + .maxauthsize = SHA512_DIGEST_SIZE,
  6544. + },
  6545. + .caam = {
  6546. + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  6547. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6548. + OP_ALG_AAI_HMAC_PRECOMP,
  6549. + .geniv = true,
  6550. + }
  6551. + },
  6552. + {
  6553. + .aead = {
  6554. + .base = {
  6555. + .cra_name = "authenc(hmac(md5),cbc(des))",
  6556. + .cra_driver_name = "authenc-hmac-md5-"
  6557. + "cbc-des-caam-qi2",
  6558. + .cra_blocksize = DES_BLOCK_SIZE,
  6559. + },
  6560. + .setkey = aead_setkey,
  6561. + .setauthsize = aead_setauthsize,
  6562. + .encrypt = aead_encrypt,
  6563. + .decrypt = aead_decrypt,
  6564. + .ivsize = DES_BLOCK_SIZE,
  6565. + .maxauthsize = MD5_DIGEST_SIZE,
  6566. + },
  6567. + .caam = {
  6568. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6569. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6570. + OP_ALG_AAI_HMAC_PRECOMP,
  6571. + },
  6572. + },
  6573. + {
  6574. + .aead = {
  6575. + .base = {
  6576. + .cra_name = "echainiv(authenc(hmac(md5),"
  6577. + "cbc(des)))",
  6578. + .cra_driver_name = "echainiv-authenc-hmac-md5-"
  6579. + "cbc-des-caam-qi2",
  6580. + .cra_blocksize = DES_BLOCK_SIZE,
  6581. + },
  6582. + .setkey = aead_setkey,
  6583. + .setauthsize = aead_setauthsize,
  6584. + .encrypt = aead_encrypt,
  6585. + .decrypt = aead_decrypt,
  6586. + .ivsize = DES_BLOCK_SIZE,
  6587. + .maxauthsize = MD5_DIGEST_SIZE,
  6588. + },
  6589. + .caam = {
  6590. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6591. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6592. + OP_ALG_AAI_HMAC_PRECOMP,
  6593. + .geniv = true,
  6594. + }
  6595. + },
  6596. + {
  6597. + .aead = {
  6598. + .base = {
  6599. + .cra_name = "authenc(hmac(sha1),cbc(des))",
  6600. + .cra_driver_name = "authenc-hmac-sha1-"
  6601. + "cbc-des-caam-qi2",
  6602. + .cra_blocksize = DES_BLOCK_SIZE,
  6603. + },
  6604. + .setkey = aead_setkey,
  6605. + .setauthsize = aead_setauthsize,
  6606. + .encrypt = aead_encrypt,
  6607. + .decrypt = aead_decrypt,
  6608. + .ivsize = DES_BLOCK_SIZE,
  6609. + .maxauthsize = SHA1_DIGEST_SIZE,
  6610. + },
  6611. + .caam = {
  6612. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6613. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6614. + OP_ALG_AAI_HMAC_PRECOMP,
  6615. + },
  6616. + },
  6617. + {
  6618. + .aead = {
  6619. + .base = {
  6620. + .cra_name = "echainiv(authenc(hmac(sha1),"
  6621. + "cbc(des)))",
  6622. + .cra_driver_name = "echainiv-authenc-"
  6623. + "hmac-sha1-cbc-des-caam-qi2",
  6624. + .cra_blocksize = DES_BLOCK_SIZE,
  6625. + },
  6626. + .setkey = aead_setkey,
  6627. + .setauthsize = aead_setauthsize,
  6628. + .encrypt = aead_encrypt,
  6629. + .decrypt = aead_decrypt,
  6630. + .ivsize = DES_BLOCK_SIZE,
  6631. + .maxauthsize = SHA1_DIGEST_SIZE,
  6632. + },
  6633. + .caam = {
  6634. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6635. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6636. + OP_ALG_AAI_HMAC_PRECOMP,
  6637. + .geniv = true,
  6638. + }
  6639. + },
  6640. + {
  6641. + .aead = {
  6642. + .base = {
  6643. + .cra_name = "authenc(hmac(sha224),cbc(des))",
  6644. + .cra_driver_name = "authenc-hmac-sha224-"
  6645. + "cbc-des-caam-qi2",
  6646. + .cra_blocksize = DES_BLOCK_SIZE,
  6647. + },
  6648. + .setkey = aead_setkey,
  6649. + .setauthsize = aead_setauthsize,
  6650. + .encrypt = aead_encrypt,
  6651. + .decrypt = aead_decrypt,
  6652. + .ivsize = DES_BLOCK_SIZE,
  6653. + .maxauthsize = SHA224_DIGEST_SIZE,
  6654. + },
  6655. + .caam = {
  6656. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6657. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6658. + OP_ALG_AAI_HMAC_PRECOMP,
  6659. + },
  6660. + },
  6661. + {
  6662. + .aead = {
  6663. + .base = {
  6664. + .cra_name = "echainiv(authenc(hmac(sha224),"
  6665. + "cbc(des)))",
  6666. + .cra_driver_name = "echainiv-authenc-"
  6667. + "hmac-sha224-cbc-des-"
  6668. + "caam-qi2",
  6669. + .cra_blocksize = DES_BLOCK_SIZE,
  6670. + },
  6671. + .setkey = aead_setkey,
  6672. + .setauthsize = aead_setauthsize,
  6673. + .encrypt = aead_encrypt,
  6674. + .decrypt = aead_decrypt,
  6675. + .ivsize = DES_BLOCK_SIZE,
  6676. + .maxauthsize = SHA224_DIGEST_SIZE,
  6677. + },
  6678. + .caam = {
  6679. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6680. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6681. + OP_ALG_AAI_HMAC_PRECOMP,
  6682. + .geniv = true,
  6683. + }
  6684. + },
  6685. + {
  6686. + .aead = {
  6687. + .base = {
  6688. + .cra_name = "authenc(hmac(sha256),cbc(des))",
  6689. + .cra_driver_name = "authenc-hmac-sha256-"
  6690. + "cbc-des-caam-qi2",
  6691. + .cra_blocksize = DES_BLOCK_SIZE,
  6692. + },
  6693. + .setkey = aead_setkey,
  6694. + .setauthsize = aead_setauthsize,
  6695. + .encrypt = aead_encrypt,
  6696. + .decrypt = aead_decrypt,
  6697. + .ivsize = DES_BLOCK_SIZE,
  6698. + .maxauthsize = SHA256_DIGEST_SIZE,
  6699. + },
  6700. + .caam = {
  6701. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6702. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6703. + OP_ALG_AAI_HMAC_PRECOMP,
  6704. + },
  6705. + },
  6706. + {
  6707. + .aead = {
  6708. + .base = {
  6709. + .cra_name = "echainiv(authenc(hmac(sha256),"
  6710. + "cbc(des)))",
  6711. + .cra_driver_name = "echainiv-authenc-"
  6712. + "hmac-sha256-cbc-desi-"
  6713. + "caam-qi2",
  6714. + .cra_blocksize = DES_BLOCK_SIZE,
  6715. + },
  6716. + .setkey = aead_setkey,
  6717. + .setauthsize = aead_setauthsize,
  6718. + .encrypt = aead_encrypt,
  6719. + .decrypt = aead_decrypt,
  6720. + .ivsize = DES_BLOCK_SIZE,
  6721. + .maxauthsize = SHA256_DIGEST_SIZE,
  6722. + },
  6723. + .caam = {
  6724. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6725. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6726. + OP_ALG_AAI_HMAC_PRECOMP,
  6727. + .geniv = true,
  6728. + },
  6729. + },
  6730. + {
  6731. + .aead = {
  6732. + .base = {
  6733. + .cra_name = "authenc(hmac(sha384),cbc(des))",
  6734. + .cra_driver_name = "authenc-hmac-sha384-"
  6735. + "cbc-des-caam-qi2",
  6736. + .cra_blocksize = DES_BLOCK_SIZE,
  6737. + },
  6738. + .setkey = aead_setkey,
  6739. + .setauthsize = aead_setauthsize,
  6740. + .encrypt = aead_encrypt,
  6741. + .decrypt = aead_decrypt,
  6742. + .ivsize = DES_BLOCK_SIZE,
  6743. + .maxauthsize = SHA384_DIGEST_SIZE,
  6744. + },
  6745. + .caam = {
  6746. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6747. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6748. + OP_ALG_AAI_HMAC_PRECOMP,
  6749. + },
  6750. + },
  6751. + {
  6752. + .aead = {
  6753. + .base = {
  6754. + .cra_name = "echainiv(authenc(hmac(sha384),"
  6755. + "cbc(des)))",
  6756. + .cra_driver_name = "echainiv-authenc-"
  6757. + "hmac-sha384-cbc-des-"
  6758. + "caam-qi2",
  6759. + .cra_blocksize = DES_BLOCK_SIZE,
  6760. + },
  6761. + .setkey = aead_setkey,
  6762. + .setauthsize = aead_setauthsize,
  6763. + .encrypt = aead_encrypt,
  6764. + .decrypt = aead_decrypt,
  6765. + .ivsize = DES_BLOCK_SIZE,
  6766. + .maxauthsize = SHA384_DIGEST_SIZE,
  6767. + },
  6768. + .caam = {
  6769. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6770. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  6771. + OP_ALG_AAI_HMAC_PRECOMP,
  6772. + .geniv = true,
  6773. + }
  6774. + },
  6775. + {
  6776. + .aead = {
  6777. + .base = {
  6778. + .cra_name = "authenc(hmac(sha512),cbc(des))",
  6779. + .cra_driver_name = "authenc-hmac-sha512-"
  6780. + "cbc-des-caam-qi2",
  6781. + .cra_blocksize = DES_BLOCK_SIZE,
  6782. + },
  6783. + .setkey = aead_setkey,
  6784. + .setauthsize = aead_setauthsize,
  6785. + .encrypt = aead_encrypt,
  6786. + .decrypt = aead_decrypt,
  6787. + .ivsize = DES_BLOCK_SIZE,
  6788. + .maxauthsize = SHA512_DIGEST_SIZE,
  6789. + },
  6790. + .caam = {
  6791. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6792. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6793. + OP_ALG_AAI_HMAC_PRECOMP,
  6794. + }
  6795. + },
  6796. + {
  6797. + .aead = {
  6798. + .base = {
  6799. + .cra_name = "echainiv(authenc(hmac(sha512),"
  6800. + "cbc(des)))",
  6801. + .cra_driver_name = "echainiv-authenc-"
  6802. + "hmac-sha512-cbc-des-"
  6803. + "caam-qi2",
  6804. + .cra_blocksize = DES_BLOCK_SIZE,
  6805. + },
  6806. + .setkey = aead_setkey,
  6807. + .setauthsize = aead_setauthsize,
  6808. + .encrypt = aead_encrypt,
  6809. + .decrypt = aead_decrypt,
  6810. + .ivsize = DES_BLOCK_SIZE,
  6811. + .maxauthsize = SHA512_DIGEST_SIZE,
  6812. + },
  6813. + .caam = {
  6814. + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  6815. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  6816. + OP_ALG_AAI_HMAC_PRECOMP,
  6817. + .geniv = true,
  6818. + }
  6819. + },
  6820. + {
  6821. + .aead = {
  6822. + .base = {
  6823. + .cra_name = "authenc(hmac(md5),"
  6824. + "rfc3686(ctr(aes)))",
  6825. + .cra_driver_name = "authenc-hmac-md5-"
  6826. + "rfc3686-ctr-aes-caam-qi2",
  6827. + .cra_blocksize = 1,
  6828. + },
  6829. + .setkey = aead_setkey,
  6830. + .setauthsize = aead_setauthsize,
  6831. + .encrypt = aead_encrypt,
  6832. + .decrypt = aead_decrypt,
  6833. + .ivsize = CTR_RFC3686_IV_SIZE,
  6834. + .maxauthsize = MD5_DIGEST_SIZE,
  6835. + },
  6836. + .caam = {
  6837. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6838. + OP_ALG_AAI_CTR_MOD128,
  6839. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6840. + OP_ALG_AAI_HMAC_PRECOMP,
  6841. + .rfc3686 = true,
  6842. + },
  6843. + },
  6844. + {
  6845. + .aead = {
  6846. + .base = {
  6847. + .cra_name = "seqiv(authenc("
  6848. + "hmac(md5),rfc3686(ctr(aes))))",
  6849. + .cra_driver_name = "seqiv-authenc-hmac-md5-"
  6850. + "rfc3686-ctr-aes-caam-qi2",
  6851. + .cra_blocksize = 1,
  6852. + },
  6853. + .setkey = aead_setkey,
  6854. + .setauthsize = aead_setauthsize,
  6855. + .encrypt = aead_encrypt,
  6856. + .decrypt = aead_decrypt,
  6857. + .ivsize = CTR_RFC3686_IV_SIZE,
  6858. + .maxauthsize = MD5_DIGEST_SIZE,
  6859. + },
  6860. + .caam = {
  6861. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6862. + OP_ALG_AAI_CTR_MOD128,
  6863. + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  6864. + OP_ALG_AAI_HMAC_PRECOMP,
  6865. + .rfc3686 = true,
  6866. + .geniv = true,
  6867. + },
  6868. + },
  6869. + {
  6870. + .aead = {
  6871. + .base = {
  6872. + .cra_name = "authenc(hmac(sha1),"
  6873. + "rfc3686(ctr(aes)))",
  6874. + .cra_driver_name = "authenc-hmac-sha1-"
  6875. + "rfc3686-ctr-aes-caam-qi2",
  6876. + .cra_blocksize = 1,
  6877. + },
  6878. + .setkey = aead_setkey,
  6879. + .setauthsize = aead_setauthsize,
  6880. + .encrypt = aead_encrypt,
  6881. + .decrypt = aead_decrypt,
  6882. + .ivsize = CTR_RFC3686_IV_SIZE,
  6883. + .maxauthsize = SHA1_DIGEST_SIZE,
  6884. + },
  6885. + .caam = {
  6886. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6887. + OP_ALG_AAI_CTR_MOD128,
  6888. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6889. + OP_ALG_AAI_HMAC_PRECOMP,
  6890. + .rfc3686 = true,
  6891. + },
  6892. + },
  6893. + {
  6894. + .aead = {
  6895. + .base = {
  6896. + .cra_name = "seqiv(authenc("
  6897. + "hmac(sha1),rfc3686(ctr(aes))))",
  6898. + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  6899. + "rfc3686-ctr-aes-caam-qi2",
  6900. + .cra_blocksize = 1,
  6901. + },
  6902. + .setkey = aead_setkey,
  6903. + .setauthsize = aead_setauthsize,
  6904. + .encrypt = aead_encrypt,
  6905. + .decrypt = aead_decrypt,
  6906. + .ivsize = CTR_RFC3686_IV_SIZE,
  6907. + .maxauthsize = SHA1_DIGEST_SIZE,
  6908. + },
  6909. + .caam = {
  6910. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6911. + OP_ALG_AAI_CTR_MOD128,
  6912. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  6913. + OP_ALG_AAI_HMAC_PRECOMP,
  6914. + .rfc3686 = true,
  6915. + .geniv = true,
  6916. + },
  6917. + },
  6918. + {
  6919. + .aead = {
  6920. + .base = {
  6921. + .cra_name = "authenc(hmac(sha224),"
  6922. + "rfc3686(ctr(aes)))",
  6923. + .cra_driver_name = "authenc-hmac-sha224-"
  6924. + "rfc3686-ctr-aes-caam-qi2",
  6925. + .cra_blocksize = 1,
  6926. + },
  6927. + .setkey = aead_setkey,
  6928. + .setauthsize = aead_setauthsize,
  6929. + .encrypt = aead_encrypt,
  6930. + .decrypt = aead_decrypt,
  6931. + .ivsize = CTR_RFC3686_IV_SIZE,
  6932. + .maxauthsize = SHA224_DIGEST_SIZE,
  6933. + },
  6934. + .caam = {
  6935. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6936. + OP_ALG_AAI_CTR_MOD128,
  6937. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6938. + OP_ALG_AAI_HMAC_PRECOMP,
  6939. + .rfc3686 = true,
  6940. + },
  6941. + },
  6942. + {
  6943. + .aead = {
  6944. + .base = {
  6945. + .cra_name = "seqiv(authenc("
  6946. + "hmac(sha224),rfc3686(ctr(aes))))",
  6947. + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
  6948. + "rfc3686-ctr-aes-caam-qi2",
  6949. + .cra_blocksize = 1,
  6950. + },
  6951. + .setkey = aead_setkey,
  6952. + .setauthsize = aead_setauthsize,
  6953. + .encrypt = aead_encrypt,
  6954. + .decrypt = aead_decrypt,
  6955. + .ivsize = CTR_RFC3686_IV_SIZE,
  6956. + .maxauthsize = SHA224_DIGEST_SIZE,
  6957. + },
  6958. + .caam = {
  6959. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6960. + OP_ALG_AAI_CTR_MOD128,
  6961. + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  6962. + OP_ALG_AAI_HMAC_PRECOMP,
  6963. + .rfc3686 = true,
  6964. + .geniv = true,
  6965. + },
  6966. + },
  6967. + {
  6968. + .aead = {
  6969. + .base = {
  6970. + .cra_name = "authenc(hmac(sha256),"
  6971. + "rfc3686(ctr(aes)))",
  6972. + .cra_driver_name = "authenc-hmac-sha256-"
  6973. + "rfc3686-ctr-aes-caam-qi2",
  6974. + .cra_blocksize = 1,
  6975. + },
  6976. + .setkey = aead_setkey,
  6977. + .setauthsize = aead_setauthsize,
  6978. + .encrypt = aead_encrypt,
  6979. + .decrypt = aead_decrypt,
  6980. + .ivsize = CTR_RFC3686_IV_SIZE,
  6981. + .maxauthsize = SHA256_DIGEST_SIZE,
  6982. + },
  6983. + .caam = {
  6984. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  6985. + OP_ALG_AAI_CTR_MOD128,
  6986. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  6987. + OP_ALG_AAI_HMAC_PRECOMP,
  6988. + .rfc3686 = true,
  6989. + },
  6990. + },
  6991. + {
  6992. + .aead = {
  6993. + .base = {
  6994. + .cra_name = "seqiv(authenc(hmac(sha256),"
  6995. + "rfc3686(ctr(aes))))",
  6996. + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
  6997. + "rfc3686-ctr-aes-caam-qi2",
  6998. + .cra_blocksize = 1,
  6999. + },
  7000. + .setkey = aead_setkey,
  7001. + .setauthsize = aead_setauthsize,
  7002. + .encrypt = aead_encrypt,
  7003. + .decrypt = aead_decrypt,
  7004. + .ivsize = CTR_RFC3686_IV_SIZE,
  7005. + .maxauthsize = SHA256_DIGEST_SIZE,
  7006. + },
  7007. + .caam = {
  7008. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  7009. + OP_ALG_AAI_CTR_MOD128,
  7010. + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  7011. + OP_ALG_AAI_HMAC_PRECOMP,
  7012. + .rfc3686 = true,
  7013. + .geniv = true,
  7014. + },
  7015. + },
  7016. + {
  7017. + .aead = {
  7018. + .base = {
  7019. + .cra_name = "authenc(hmac(sha384),"
  7020. + "rfc3686(ctr(aes)))",
  7021. + .cra_driver_name = "authenc-hmac-sha384-"
  7022. + "rfc3686-ctr-aes-caam-qi2",
  7023. + .cra_blocksize = 1,
  7024. + },
  7025. + .setkey = aead_setkey,
  7026. + .setauthsize = aead_setauthsize,
  7027. + .encrypt = aead_encrypt,
  7028. + .decrypt = aead_decrypt,
  7029. + .ivsize = CTR_RFC3686_IV_SIZE,
  7030. + .maxauthsize = SHA384_DIGEST_SIZE,
  7031. + },
  7032. + .caam = {
  7033. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  7034. + OP_ALG_AAI_CTR_MOD128,
  7035. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  7036. + OP_ALG_AAI_HMAC_PRECOMP,
  7037. + .rfc3686 = true,
  7038. + },
  7039. + },
  7040. + {
  7041. + .aead = {
  7042. + .base = {
  7043. + .cra_name = "seqiv(authenc(hmac(sha384),"
  7044. + "rfc3686(ctr(aes))))",
  7045. + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
  7046. + "rfc3686-ctr-aes-caam-qi2",
  7047. + .cra_blocksize = 1,
  7048. + },
  7049. + .setkey = aead_setkey,
  7050. + .setauthsize = aead_setauthsize,
  7051. + .encrypt = aead_encrypt,
  7052. + .decrypt = aead_decrypt,
  7053. + .ivsize = CTR_RFC3686_IV_SIZE,
  7054. + .maxauthsize = SHA384_DIGEST_SIZE,
  7055. + },
  7056. + .caam = {
  7057. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  7058. + OP_ALG_AAI_CTR_MOD128,
  7059. + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  7060. + OP_ALG_AAI_HMAC_PRECOMP,
  7061. + .rfc3686 = true,
  7062. + .geniv = true,
  7063. + },
  7064. + },
  7065. + {
  7066. + .aead = {
  7067. + .base = {
  7068. + .cra_name = "authenc(hmac(sha512),"
  7069. + "rfc3686(ctr(aes)))",
  7070. + .cra_driver_name = "authenc-hmac-sha512-"
  7071. + "rfc3686-ctr-aes-caam-qi2",
  7072. + .cra_blocksize = 1,
  7073. + },
  7074. + .setkey = aead_setkey,
  7075. + .setauthsize = aead_setauthsize,
  7076. + .encrypt = aead_encrypt,
  7077. + .decrypt = aead_decrypt,
  7078. + .ivsize = CTR_RFC3686_IV_SIZE,
  7079. + .maxauthsize = SHA512_DIGEST_SIZE,
  7080. + },
  7081. + .caam = {
  7082. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  7083. + OP_ALG_AAI_CTR_MOD128,
  7084. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  7085. + OP_ALG_AAI_HMAC_PRECOMP,
  7086. + .rfc3686 = true,
  7087. + },
  7088. + },
  7089. + {
  7090. + .aead = {
  7091. + .base = {
  7092. + .cra_name = "seqiv(authenc(hmac(sha512),"
  7093. + "rfc3686(ctr(aes))))",
  7094. + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
  7095. + "rfc3686-ctr-aes-caam-qi2",
  7096. + .cra_blocksize = 1,
  7097. + },
  7098. + .setkey = aead_setkey,
  7099. + .setauthsize = aead_setauthsize,
  7100. + .encrypt = aead_encrypt,
  7101. + .decrypt = aead_decrypt,
  7102. + .ivsize = CTR_RFC3686_IV_SIZE,
  7103. + .maxauthsize = SHA512_DIGEST_SIZE,
  7104. + },
  7105. + .caam = {
  7106. + .class1_alg_type = OP_ALG_ALGSEL_AES |
  7107. + OP_ALG_AAI_CTR_MOD128,
  7108. + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  7109. + OP_ALG_AAI_HMAC_PRECOMP,
  7110. + .rfc3686 = true,
  7111. + .geniv = true,
  7112. + },
  7113. + },
  7114. + {
  7115. + .aead = {
  7116. + .base = {
  7117. + .cra_name = "tls10(hmac(sha1),cbc(aes))",
  7118. + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
  7119. + .cra_blocksize = AES_BLOCK_SIZE,
  7120. + },
  7121. + .setkey = tls_setkey,
  7122. + .setauthsize = tls_setauthsize,
  7123. + .encrypt = tls_encrypt,
  7124. + .decrypt = tls_decrypt,
  7125. + .ivsize = AES_BLOCK_SIZE,
  7126. + .maxauthsize = SHA1_DIGEST_SIZE,
  7127. + },
  7128. + .caam = {
  7129. + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  7130. + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  7131. + OP_ALG_AAI_HMAC_PRECOMP,
  7132. + },
  7133. + },
  7134. +};
  7135. +
  7136. +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
  7137. +{
  7138. + struct skcipher_alg *alg = &t_alg->skcipher;
  7139. +
  7140. + alg->base.cra_module = THIS_MODULE;
  7141. + alg->base.cra_priority = CAAM_CRA_PRIORITY;
  7142. + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  7143. + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  7144. +
  7145. + alg->init = caam_cra_init_skcipher;
  7146. + alg->exit = caam_cra_exit;
  7147. +}
  7148. +
  7149. +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
  7150. +{
  7151. + struct aead_alg *alg = &t_alg->aead;
  7152. +
  7153. + alg->base.cra_module = THIS_MODULE;
  7154. + alg->base.cra_priority = CAAM_CRA_PRIORITY;
  7155. + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  7156. + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  7157. +
  7158. + alg->init = caam_cra_init_aead;
  7159. + alg->exit = caam_cra_exit_aead;
  7160. +}
  7161. +
  7162. +/* max hash key is max split key size */
  7163. +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
  7164. +
  7165. +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
  7166. +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
  7167. +
  7168. +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
  7169. + CAAM_MAX_HASH_KEY_SIZE)
  7170. +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
  7171. +
  7172. +/* caam context sizes for hashes: running digest + 8 */
  7173. +#define HASH_MSG_LEN 8
  7174. +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
  7175. +
  7176. +enum hash_optype {
  7177. + UPDATE = 0,
  7178. + UPDATE_FIRST,
  7179. + FINALIZE,
  7180. + DIGEST,
  7181. + HASH_NUM_OP
  7182. +};
  7183. +
  7184. +/**
  7185. + * caam_hash_ctx - ahash per-session context
  7186. + * @flc: Flow Contexts array
  7187. + * @flc_dma: I/O virtual addresses of the Flow Contexts
  7188. + * @key: virtual address of the authentication key
  7189. + * @dev: dpseci device
  7190. + * @ctx_len: size of Context Register
  7191. + * @adata: hashing algorithm details
  7192. + */
  7193. +struct caam_hash_ctx {
  7194. + struct caam_flc flc[HASH_NUM_OP];
  7195. + dma_addr_t flc_dma[HASH_NUM_OP];
  7196. + u8 key[CAAM_MAX_HASH_KEY_SIZE];
  7197. + struct device *dev;
  7198. + int ctx_len;
  7199. + struct alginfo adata;
  7200. +};
  7201. +
  7202. +/* ahash state */
  7203. +struct caam_hash_state {
  7204. + struct caam_request caam_req;
  7205. + dma_addr_t buf_dma;
  7206. + dma_addr_t ctx_dma;
  7207. + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
  7208. + int buflen_0;
  7209. + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
  7210. + int buflen_1;
  7211. + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
  7212. + int (*update)(struct ahash_request *req);
  7213. + int (*final)(struct ahash_request *req);
  7214. + int (*finup)(struct ahash_request *req);
  7215. + int current_buf;
  7216. +};
  7217. +
  7218. +struct caam_export_state {
  7219. + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
  7220. + u8 caam_ctx[MAX_CTX_LEN];
  7221. + int buflen;
  7222. + int (*update)(struct ahash_request *req);
  7223. + int (*final)(struct ahash_request *req);
  7224. + int (*finup)(struct ahash_request *req);
  7225. +};
  7226. +
  7227. +static inline void switch_buf(struct caam_hash_state *state)
  7228. +{
  7229. + state->current_buf ^= 1;
  7230. +}
  7231. +
  7232. +static inline u8 *current_buf(struct caam_hash_state *state)
  7233. +{
  7234. + return state->current_buf ? state->buf_1 : state->buf_0;
  7235. +}
  7236. +
  7237. +static inline u8 *alt_buf(struct caam_hash_state *state)
  7238. +{
  7239. + return state->current_buf ? state->buf_0 : state->buf_1;
  7240. +}
  7241. +
  7242. +static inline int *current_buflen(struct caam_hash_state *state)
  7243. +{
  7244. + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
  7245. +}
  7246. +
  7247. +static inline int *alt_buflen(struct caam_hash_state *state)
  7248. +{
  7249. + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
  7250. +}
  7251. +
  7252. +/* Map current buffer in state (if length > 0) and put it in link table */
  7253. +static inline int buf_map_to_qm_sg(struct device *dev,
  7254. + struct dpaa2_sg_entry *qm_sg,
  7255. + struct caam_hash_state *state)
  7256. +{
  7257. + int buflen = *current_buflen(state);
  7258. +
  7259. + if (!buflen)
  7260. + return 0;
  7261. +
  7262. + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
  7263. + DMA_TO_DEVICE);
  7264. + if (dma_mapping_error(dev, state->buf_dma)) {
  7265. + dev_err(dev, "unable to map buf\n");
  7266. + state->buf_dma = 0;
  7267. + return -ENOMEM;
  7268. + }
  7269. +
  7270. + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
  7271. +
  7272. + return 0;
  7273. +}
  7274. +
  7275. +/* Map state->caam_ctx, and add it to link table */
  7276. +static inline int ctx_map_to_qm_sg(struct device *dev,
  7277. + struct caam_hash_state *state, int ctx_len,
  7278. + struct dpaa2_sg_entry *qm_sg, u32 flag)
  7279. +{
  7280. + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
  7281. + if (dma_mapping_error(dev, state->ctx_dma)) {
  7282. + dev_err(dev, "unable to map ctx\n");
  7283. + state->ctx_dma = 0;
  7284. + return -ENOMEM;
  7285. + }
  7286. +
  7287. + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
  7288. +
  7289. + return 0;
  7290. +}
  7291. +
  7292. +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
  7293. +{
  7294. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7295. + int digestsize = crypto_ahash_digestsize(ahash);
  7296. + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
  7297. + struct caam_flc *flc;
  7298. + u32 *desc;
  7299. +
  7300. + ctx->adata.key_virt = ctx->key;
  7301. + ctx->adata.key_inline = true;
  7302. +
  7303. + /* ahash_update shared descriptor */
  7304. + flc = &ctx->flc[UPDATE];
  7305. + desc = flc->sh_desc;
  7306. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
  7307. + ctx->ctx_len, true, priv->sec_attr.era);
  7308. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  7309. + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
  7310. + desc_bytes(desc), DMA_BIDIRECTIONAL);
  7311. +#ifdef DEBUG
  7312. + print_hex_dump(KERN_ERR,
  7313. + "ahash update shdesc@" __stringify(__LINE__)": ",
  7314. + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  7315. +#endif
  7316. +
  7317. + /* ahash_update_first shared descriptor */
  7318. + flc = &ctx->flc[UPDATE_FIRST];
  7319. + desc = flc->sh_desc;
  7320. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
  7321. + ctx->ctx_len, false, priv->sec_attr.era);
  7322. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  7323. + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
  7324. + desc_bytes(desc), DMA_BIDIRECTIONAL);
  7325. +#ifdef DEBUG
  7326. + print_hex_dump(KERN_ERR,
  7327. + "ahash update first shdesc@" __stringify(__LINE__)": ",
  7328. + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  7329. +#endif
  7330. +
  7331. + /* ahash_final shared descriptor */
  7332. + flc = &ctx->flc[FINALIZE];
  7333. + desc = flc->sh_desc;
  7334. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
  7335. + ctx->ctx_len, true, priv->sec_attr.era);
  7336. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  7337. + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
  7338. + desc_bytes(desc), DMA_BIDIRECTIONAL);
  7339. +#ifdef DEBUG
  7340. + print_hex_dump(KERN_ERR,
  7341. + "ahash final shdesc@" __stringify(__LINE__)": ",
  7342. + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  7343. +#endif
  7344. +
  7345. + /* ahash_digest shared descriptor */
  7346. + flc = &ctx->flc[DIGEST];
  7347. + desc = flc->sh_desc;
  7348. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
  7349. + ctx->ctx_len, false, priv->sec_attr.era);
  7350. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  7351. + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
  7352. + desc_bytes(desc), DMA_BIDIRECTIONAL);
  7353. +#ifdef DEBUG
  7354. + print_hex_dump(KERN_ERR,
  7355. + "ahash digest shdesc@" __stringify(__LINE__)": ",
  7356. + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  7357. +#endif
  7358. +
  7359. + return 0;
  7360. +}
  7361. +
   7362. +/* Digest the key if it is longer than the algorithm's block size */
  7363. +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
  7364. + u32 *keylen, u8 *key_out, u32 digestsize)
  7365. +{
  7366. + struct caam_request *req_ctx;
  7367. + u32 *desc;
  7368. + struct split_key_sh_result result;
  7369. + dma_addr_t src_dma, dst_dma;
  7370. + struct caam_flc *flc;
  7371. + dma_addr_t flc_dma;
  7372. + int ret = -ENOMEM;
  7373. + struct dpaa2_fl_entry *in_fle, *out_fle;
  7374. +
  7375. + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
  7376. + if (!req_ctx)
  7377. + return -ENOMEM;
  7378. +
  7379. + in_fle = &req_ctx->fd_flt[1];
  7380. + out_fle = &req_ctx->fd_flt[0];
  7381. +
  7382. + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
  7383. + if (!flc)
  7384. + goto err_flc;
  7385. +
  7386. + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
  7387. + DMA_TO_DEVICE);
  7388. + if (dma_mapping_error(ctx->dev, src_dma)) {
  7389. + dev_err(ctx->dev, "unable to map key input memory\n");
  7390. + goto err_src_dma;
  7391. + }
  7392. + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
  7393. + DMA_FROM_DEVICE);
  7394. + if (dma_mapping_error(ctx->dev, dst_dma)) {
  7395. + dev_err(ctx->dev, "unable to map key output memory\n");
  7396. + goto err_dst_dma;
  7397. + }
  7398. +
  7399. + desc = flc->sh_desc;
  7400. +
  7401. + init_sh_desc(desc, 0);
  7402. +
  7403. + /* descriptor to perform unkeyed hash on key_in */
  7404. + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
  7405. + OP_ALG_AS_INITFINAL);
  7406. + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
  7407. + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
  7408. + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
  7409. + LDST_SRCDST_BYTE_CONTEXT);
  7410. +
  7411. + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
  7412. + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
  7413. + desc_bytes(desc), DMA_TO_DEVICE);
  7414. + if (dma_mapping_error(ctx->dev, flc_dma)) {
  7415. + dev_err(ctx->dev, "unable to map shared descriptor\n");
  7416. + goto err_flc_dma;
  7417. + }
  7418. +
  7419. + dpaa2_fl_set_final(in_fle, true);
  7420. + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
  7421. + dpaa2_fl_set_addr(in_fle, src_dma);
  7422. + dpaa2_fl_set_len(in_fle, *keylen);
  7423. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  7424. + dpaa2_fl_set_addr(out_fle, dst_dma);
  7425. + dpaa2_fl_set_len(out_fle, digestsize);
  7426. +
  7427. +#ifdef DEBUG
  7428. + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
  7429. + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
  7430. + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
  7431. + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  7432. +#endif
  7433. +
  7434. + result.err = 0;
  7435. + init_completion(&result.completion);
  7436. + result.dev = ctx->dev;
  7437. +
  7438. + req_ctx->flc = flc;
  7439. + req_ctx->flc_dma = flc_dma;
  7440. + req_ctx->cbk = split_key_sh_done;
  7441. + req_ctx->ctx = &result;
  7442. +
  7443. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  7444. + if (ret == -EINPROGRESS) {
  7445. + /* in progress */
  7446. + wait_for_completion(&result.completion);
  7447. + ret = result.err;
  7448. +#ifdef DEBUG
  7449. + print_hex_dump(KERN_ERR,
  7450. + "digested key@" __stringify(__LINE__)": ",
  7451. + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
  7452. + 1);
  7453. +#endif
  7454. + }
  7455. +
  7456. + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
  7457. + DMA_TO_DEVICE);
  7458. +err_flc_dma:
  7459. + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
  7460. +err_dst_dma:
  7461. + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
  7462. +err_src_dma:
  7463. + kfree(flc);
  7464. +err_flc:
  7465. + kfree(req_ctx);
  7466. +
  7467. + *keylen = digestsize;
  7468. +
  7469. + return ret;
  7470. +}
  7471. +
  7472. +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
  7473. + unsigned int keylen)
  7474. +{
  7475. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7476. + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
  7477. + unsigned int digestsize = crypto_ahash_digestsize(ahash);
  7478. + int ret;
  7479. + u8 *hashed_key = NULL;
  7480. +
  7481. +#ifdef DEBUG
  7482. + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
  7483. +#endif
  7484. +
  7485. + if (keylen > blocksize) {
  7486. + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
  7487. + GFP_KERNEL | GFP_DMA);
  7488. + if (!hashed_key)
  7489. + return -ENOMEM;
  7490. + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
  7491. + digestsize);
  7492. + if (ret)
  7493. + goto bad_free_key;
  7494. + key = hashed_key;
  7495. + }
  7496. +
  7497. + ctx->adata.keylen = keylen;
  7498. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  7499. + OP_ALG_ALGSEL_MASK);
  7500. + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
  7501. + goto bad_free_key;
  7502. +
  7503. + memcpy(ctx->key, key, keylen);
  7504. +
  7505. + kfree(hashed_key);
  7506. + return ahash_set_sh_desc(ahash);
  7507. +bad_free_key:
  7508. + kfree(hashed_key);
  7509. + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
  7510. + return -EINVAL;
  7511. +}
  7512. +
  7513. +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
  7514. + struct ahash_request *req, int dst_len)
  7515. +{
  7516. + struct caam_hash_state *state = ahash_request_ctx(req);
  7517. +
  7518. + if (edesc->src_nents)
  7519. + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
  7520. + if (edesc->dst_dma)
  7521. + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
  7522. +
  7523. + if (edesc->qm_sg_bytes)
  7524. + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
  7525. + DMA_TO_DEVICE);
  7526. +
  7527. + if (state->buf_dma) {
  7528. + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
  7529. + DMA_TO_DEVICE);
  7530. + state->buf_dma = 0;
  7531. + }
  7532. +}
  7533. +
  7534. +static inline void ahash_unmap_ctx(struct device *dev,
  7535. + struct ahash_edesc *edesc,
  7536. + struct ahash_request *req, int dst_len,
  7537. + u32 flag)
  7538. +{
  7539. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7540. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7541. + struct caam_hash_state *state = ahash_request_ctx(req);
  7542. +
  7543. + if (state->ctx_dma) {
  7544. + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
  7545. + state->ctx_dma = 0;
  7546. + }
  7547. + ahash_unmap(dev, edesc, req, dst_len);
  7548. +}
  7549. +
  7550. +static void ahash_done(void *cbk_ctx, u32 status)
  7551. +{
  7552. + struct crypto_async_request *areq = cbk_ctx;
  7553. + struct ahash_request *req = ahash_request_cast(areq);
  7554. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7555. + struct caam_hash_state *state = ahash_request_ctx(req);
  7556. + struct ahash_edesc *edesc = state->caam_req.edesc;
  7557. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7558. + int digestsize = crypto_ahash_digestsize(ahash);
  7559. + int ecode = 0;
  7560. +
  7561. +#ifdef DEBUG
  7562. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  7563. +#endif
  7564. +
  7565. + if (unlikely(status)) {
  7566. + caam_qi2_strstatus(ctx->dev, status);
  7567. + ecode = -EIO;
  7568. + }
  7569. +
  7570. + ahash_unmap(ctx->dev, edesc, req, digestsize);
  7571. + qi_cache_free(edesc);
  7572. +
  7573. +#ifdef DEBUG
  7574. + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
  7575. + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  7576. + ctx->ctx_len, 1);
  7577. + if (req->result)
  7578. + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
  7579. + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
  7580. + digestsize, 1);
  7581. +#endif
  7582. +
  7583. + req->base.complete(&req->base, ecode);
  7584. +}
  7585. +
  7586. +static void ahash_done_bi(void *cbk_ctx, u32 status)
  7587. +{
  7588. + struct crypto_async_request *areq = cbk_ctx;
  7589. + struct ahash_request *req = ahash_request_cast(areq);
  7590. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7591. + struct caam_hash_state *state = ahash_request_ctx(req);
  7592. + struct ahash_edesc *edesc = state->caam_req.edesc;
  7593. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7594. + int ecode = 0;
  7595. +#ifdef DEBUG
  7596. + int digestsize = crypto_ahash_digestsize(ahash);
  7597. +
  7598. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  7599. +#endif
  7600. +
  7601. + if (unlikely(status)) {
  7602. + caam_qi2_strstatus(ctx->dev, status);
  7603. + ecode = -EIO;
  7604. + }
  7605. +
  7606. + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
  7607. + switch_buf(state);
  7608. + qi_cache_free(edesc);
  7609. +
  7610. +#ifdef DEBUG
  7611. + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
  7612. + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  7613. + ctx->ctx_len, 1);
  7614. + if (req->result)
  7615. + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
  7616. + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
  7617. + digestsize, 1);
  7618. +#endif
  7619. +
  7620. + req->base.complete(&req->base, ecode);
  7621. +}
  7622. +
  7623. +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
  7624. +{
  7625. + struct crypto_async_request *areq = cbk_ctx;
  7626. + struct ahash_request *req = ahash_request_cast(areq);
  7627. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7628. + struct caam_hash_state *state = ahash_request_ctx(req);
  7629. + struct ahash_edesc *edesc = state->caam_req.edesc;
  7630. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7631. + int digestsize = crypto_ahash_digestsize(ahash);
  7632. + int ecode = 0;
  7633. +
  7634. +#ifdef DEBUG
  7635. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  7636. +#endif
  7637. +
  7638. + if (unlikely(status)) {
  7639. + caam_qi2_strstatus(ctx->dev, status);
  7640. + ecode = -EIO;
  7641. + }
  7642. +
  7643. + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
  7644. + qi_cache_free(edesc);
  7645. +
  7646. +#ifdef DEBUG
  7647. + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
  7648. + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  7649. + ctx->ctx_len, 1);
  7650. + if (req->result)
  7651. + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
  7652. + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
  7653. + digestsize, 1);
  7654. +#endif
  7655. +
  7656. + req->base.complete(&req->base, ecode);
  7657. +}
  7658. +
  7659. +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
  7660. +{
  7661. + struct crypto_async_request *areq = cbk_ctx;
  7662. + struct ahash_request *req = ahash_request_cast(areq);
  7663. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7664. + struct caam_hash_state *state = ahash_request_ctx(req);
  7665. + struct ahash_edesc *edesc = state->caam_req.edesc;
  7666. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7667. + int ecode = 0;
  7668. +#ifdef DEBUG
  7669. + int digestsize = crypto_ahash_digestsize(ahash);
  7670. +
  7671. + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
  7672. +#endif
  7673. +
  7674. + if (unlikely(status)) {
  7675. + caam_qi2_strstatus(ctx->dev, status);
  7676. + ecode = -EIO;
  7677. + }
  7678. +
  7679. + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
  7680. + switch_buf(state);
  7681. + qi_cache_free(edesc);
  7682. +
  7683. +#ifdef DEBUG
  7684. + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
  7685. + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  7686. + ctx->ctx_len, 1);
  7687. + if (req->result)
  7688. + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
  7689. + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
  7690. + digestsize, 1);
  7691. +#endif
  7692. +
  7693. + req->base.complete(&req->base, ecode);
  7694. +}
  7695. +
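The four completion callbacks above all finish by forwarding a status code through req->base.complete(). A minimal sketch of the caller side that consumes that status, assuming the generic DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers are available in the target kernel; the helper below is illustrative and not part of this patch.

#include <crypto/hash.h>
#include <linux/crypto.h>

static int sync_ahash_digest(struct crypto_ahash *tfm,
			     struct scatterlist *src, unsigned int len,
			     u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() records the ecode passed to ->complete() */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, src, out, len);

	/* turns -EINPROGRESS/-EBUSY into the final completion status */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return err;
}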
  7696. +static int ahash_update_ctx(struct ahash_request *req)
  7697. +{
  7698. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7699. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7700. + struct caam_hash_state *state = ahash_request_ctx(req);
  7701. + struct caam_request *req_ctx = &state->caam_req;
  7702. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  7703. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  7704. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  7705. + GFP_KERNEL : GFP_ATOMIC;
  7706. + u8 *buf = current_buf(state);
  7707. + int *buflen = current_buflen(state);
  7708. + u8 *next_buf = alt_buf(state);
  7709. + int *next_buflen = alt_buflen(state), last_buflen;
  7710. + int in_len = *buflen + req->nbytes, to_hash;
  7711. + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
  7712. + struct ahash_edesc *edesc;
  7713. + int ret = 0;
  7714. +
  7715. + last_buflen = *next_buflen;
  7716. + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
  7717. + to_hash = in_len - *next_buflen;
  7718. +
  7719. + if (to_hash) {
  7720. + struct dpaa2_sg_entry *sg_table;
  7721. +
  7722. + src_nents = sg_nents_for_len(req->src,
  7723. + req->nbytes - (*next_buflen));
  7724. + if (src_nents < 0) {
  7725. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  7726. + return src_nents;
  7727. + }
  7728. +
  7729. + if (src_nents) {
  7730. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  7731. + DMA_TO_DEVICE);
  7732. + if (!mapped_nents) {
  7733. + dev_err(ctx->dev, "unable to DMA map source\n");
  7734. + return -ENOMEM;
  7735. + }
  7736. + } else {
  7737. + mapped_nents = 0;
  7738. + }
  7739. +
  7740. + /* allocate space for base edesc and link tables */
  7741. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  7742. + if (!edesc) {
  7743. + dma_unmap_sg(ctx->dev, req->src, src_nents,
  7744. + DMA_TO_DEVICE);
  7745. + return -ENOMEM;
  7746. + }
  7747. +
  7748. + edesc->src_nents = src_nents;
  7749. + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
  7750. + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
  7751. + sizeof(*sg_table);
  7752. + sg_table = &edesc->sgt[0];
  7753. +
  7754. + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
  7755. + DMA_BIDIRECTIONAL);
  7756. + if (ret)
  7757. + goto unmap_ctx;
  7758. +
  7759. + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
  7760. + if (ret)
  7761. + goto unmap_ctx;
  7762. +
  7763. + if (mapped_nents) {
  7764. + sg_to_qm_sg_last(req->src, mapped_nents,
  7765. + sg_table + qm_sg_src_index, 0);
  7766. + if (*next_buflen)
  7767. + scatterwalk_map_and_copy(next_buf, req->src,
  7768. + to_hash - *buflen,
  7769. + *next_buflen, 0);
  7770. + } else {
  7771. + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
  7772. + true);
  7773. + }
  7774. +
  7775. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
  7776. + qm_sg_bytes, DMA_TO_DEVICE);
  7777. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  7778. + dev_err(ctx->dev, "unable to map S/G table\n");
  7779. + ret = -ENOMEM;
  7780. + goto unmap_ctx;
  7781. + }
  7782. + edesc->qm_sg_bytes = qm_sg_bytes;
  7783. +
  7784. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  7785. + dpaa2_fl_set_final(in_fle, true);
  7786. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  7787. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  7788. + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
  7789. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  7790. + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
  7791. + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
  7792. +
  7793. + req_ctx->flc = &ctx->flc[UPDATE];
  7794. + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
  7795. + req_ctx->cbk = ahash_done_bi;
  7796. + req_ctx->ctx = &req->base;
  7797. + req_ctx->edesc = edesc;
  7798. +
  7799. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  7800. + if (ret != -EINPROGRESS &&
  7801. + !(ret == -EBUSY &&
  7802. + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  7803. + goto unmap_ctx;
  7804. + } else if (*next_buflen) {
  7805. + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
  7806. + req->nbytes, 0);
  7807. + *buflen = *next_buflen;
  7808. + *next_buflen = last_buflen;
  7809. + }
  7810. +#ifdef DEBUG
  7811. + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
  7812. + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
  7813. + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
  7814. + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
  7815. + *next_buflen, 1);
  7816. +#endif
  7817. +
  7818. + return ret;
  7819. +unmap_ctx:
  7820. + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
  7821. + qi_cache_free(edesc);
  7822. + return ret;
  7823. +}
  7824. +
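ahash_update_ctx() only forwards whole blocks to the accelerator and parks the remainder in the driver's ping-pong buffer until a later call. A minimal sketch of that split, assuming a power-of-two block size (true for the MD5/SHA block sizes registered below); for example buflen = 20, nbytes = 100, blocksize = 64 gives to_hash = 64 and carry = 56.

/*
 * Not part of this patch: the block-boundary split used by
 * ahash_update_ctx() above. Bytes up to a multiple of the block size go
 * to CAAM ("to_hash"); the remainder is carried in buf_0/buf_1 until a
 * later update/finup/final call.
 */
static inline void split_update(unsigned int buflen, unsigned int nbytes,
				unsigned int blocksize,
				unsigned int *to_hash, unsigned int *carry)
{
	unsigned int in_len = buflen + nbytes;

	*carry = in_len & (blocksize - 1);	/* kept for the next call */
	*to_hash = in_len - *carry;		/* multiple of blocksize */
}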
  7825. +static int ahash_final_ctx(struct ahash_request *req)
  7826. +{
  7827. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7828. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7829. + struct caam_hash_state *state = ahash_request_ctx(req);
  7830. + struct caam_request *req_ctx = &state->caam_req;
  7831. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  7832. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  7833. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  7834. + GFP_KERNEL : GFP_ATOMIC;
  7835. + int buflen = *current_buflen(state);
  7836. + int qm_sg_bytes, qm_sg_src_index;
  7837. + int digestsize = crypto_ahash_digestsize(ahash);
  7838. + struct ahash_edesc *edesc;
  7839. + struct dpaa2_sg_entry *sg_table;
  7840. + int ret;
  7841. +
  7842. + /* allocate space for base edesc and link tables */
  7843. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  7844. + if (!edesc)
  7845. + return -ENOMEM;
  7846. +
  7847. + qm_sg_src_index = 1 + (buflen ? 1 : 0);
  7848. + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
  7849. + sg_table = &edesc->sgt[0];
  7850. +
  7851. + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
  7852. + DMA_TO_DEVICE);
  7853. + if (ret)
  7854. + goto unmap_ctx;
  7855. +
  7856. + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
  7857. + if (ret)
  7858. + goto unmap_ctx;
  7859. +
  7860. + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
  7861. +
  7862. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
  7863. + DMA_TO_DEVICE);
  7864. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  7865. + dev_err(ctx->dev, "unable to map S/G table\n");
  7866. + ret = -ENOMEM;
  7867. + goto unmap_ctx;
  7868. + }
  7869. + edesc->qm_sg_bytes = qm_sg_bytes;
  7870. +
  7871. + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
  7872. + DMA_FROM_DEVICE);
  7873. + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
  7874. + dev_err(ctx->dev, "unable to map dst\n");
  7875. + edesc->dst_dma = 0;
  7876. + ret = -ENOMEM;
  7877. + goto unmap_ctx;
  7878. + }
  7879. +
  7880. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  7881. + dpaa2_fl_set_final(in_fle, true);
  7882. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  7883. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  7884. + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
  7885. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  7886. + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
  7887. + dpaa2_fl_set_len(out_fle, digestsize);
  7888. +
  7889. + req_ctx->flc = &ctx->flc[FINALIZE];
  7890. + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
  7891. + req_ctx->cbk = ahash_done_ctx_src;
  7892. + req_ctx->ctx = &req->base;
  7893. + req_ctx->edesc = edesc;
  7894. +
  7895. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  7896. + if (ret == -EINPROGRESS ||
  7897. + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  7898. + return ret;
  7899. +
  7900. +unmap_ctx:
  7901. + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
  7902. + qi_cache_free(edesc);
  7903. + return ret;
  7904. +}
  7905. +
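A reading aid for the request builders above and below, restating the frame-list layout they all share:

/*
 * Frame-list layout shared by every request builder in this file:
 *   fd_flt[0] (out_fle) - what SEC writes: the running context or the
 *                         final digest
 *   fd_flt[1] (in_fle)  - what SEC reads: a single buffer or an S/G table
 * The input entry is the last of the two-entry list, hence the
 * dpaa2_fl_set_final(in_fle, true) on every path.
 */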
  7906. +static int ahash_finup_ctx(struct ahash_request *req)
  7907. +{
  7908. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  7909. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  7910. + struct caam_hash_state *state = ahash_request_ctx(req);
  7911. + struct caam_request *req_ctx = &state->caam_req;
  7912. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  7913. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  7914. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  7915. + GFP_KERNEL : GFP_ATOMIC;
  7916. + int buflen = *current_buflen(state);
  7917. + int qm_sg_bytes, qm_sg_src_index;
  7918. + int src_nents, mapped_nents;
  7919. + int digestsize = crypto_ahash_digestsize(ahash);
  7920. + struct ahash_edesc *edesc;
  7921. + struct dpaa2_sg_entry *sg_table;
  7922. + int ret;
  7923. +
  7924. + src_nents = sg_nents_for_len(req->src, req->nbytes);
  7925. + if (src_nents < 0) {
  7926. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  7927. + return src_nents;
  7928. + }
  7929. +
  7930. + if (src_nents) {
  7931. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  7932. + DMA_TO_DEVICE);
  7933. + if (!mapped_nents) {
  7934. + dev_err(ctx->dev, "unable to DMA map source\n");
  7935. + return -ENOMEM;
  7936. + }
  7937. + } else {
  7938. + mapped_nents = 0;
  7939. + }
  7940. +
  7941. + /* allocate space for base edesc and link tables */
  7942. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  7943. + if (!edesc) {
  7944. + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
  7945. + return -ENOMEM;
  7946. + }
  7947. +
  7948. + edesc->src_nents = src_nents;
  7949. + qm_sg_src_index = 1 + (buflen ? 1 : 0);
  7950. + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
  7951. + sg_table = &edesc->sgt[0];
  7952. +
  7953. + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
  7954. + DMA_TO_DEVICE);
  7955. + if (ret)
  7956. + goto unmap_ctx;
  7957. +
  7958. + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
  7959. + if (ret)
  7960. + goto unmap_ctx;
  7961. +
  7962. + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
  7963. +
  7964. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
  7965. + DMA_TO_DEVICE);
  7966. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  7967. + dev_err(ctx->dev, "unable to map S/G table\n");
  7968. + ret = -ENOMEM;
  7969. + goto unmap_ctx;
  7970. + }
  7971. + edesc->qm_sg_bytes = qm_sg_bytes;
  7972. +
  7973. + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
  7974. + DMA_FROM_DEVICE);
  7975. + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
  7976. + dev_err(ctx->dev, "unable to map dst\n");
  7977. + edesc->dst_dma = 0;
  7978. + ret = -ENOMEM;
  7979. + goto unmap_ctx;
  7980. + }
  7981. +
  7982. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  7983. + dpaa2_fl_set_final(in_fle, true);
  7984. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  7985. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  7986. + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
  7987. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  7988. + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
  7989. + dpaa2_fl_set_len(out_fle, digestsize);
  7990. +
  7991. + req_ctx->flc = &ctx->flc[FINALIZE];
  7992. + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
  7993. + req_ctx->cbk = ahash_done_ctx_src;
  7994. + req_ctx->ctx = &req->base;
  7995. + req_ctx->edesc = edesc;
  7996. +
  7997. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  7998. + if (ret == -EINPROGRESS ||
  7999. + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  8000. + return ret;
  8001. +
  8002. +unmap_ctx:
  8003. + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
  8004. + qi_cache_free(edesc);
  8005. + return ret;
  8006. +}
  8007. +
  8008. +static int ahash_digest(struct ahash_request *req)
  8009. +{
  8010. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  8011. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  8012. + struct caam_hash_state *state = ahash_request_ctx(req);
  8013. + struct caam_request *req_ctx = &state->caam_req;
  8014. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  8015. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  8016. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  8017. + GFP_KERNEL : GFP_ATOMIC;
  8018. + int digestsize = crypto_ahash_digestsize(ahash);
  8019. + int src_nents, mapped_nents;
  8020. + struct ahash_edesc *edesc;
  8021. + int ret = -ENOMEM;
  8022. +
  8023. + state->buf_dma = 0;
  8024. +
  8025. + src_nents = sg_nents_for_len(req->src, req->nbytes);
  8026. + if (src_nents < 0) {
  8027. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  8028. + return src_nents;
  8029. + }
  8030. +
  8031. + if (src_nents) {
  8032. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  8033. + DMA_TO_DEVICE);
  8034. + if (!mapped_nents) {
  8035. + dev_err(ctx->dev, "unable to map source for DMA\n");
  8036. + return ret;
  8037. + }
  8038. + } else {
  8039. + mapped_nents = 0;
  8040. + }
  8041. +
  8042. + /* allocate space for base edesc and link tables */
  8043. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  8044. + if (!edesc) {
  8045. + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
  8046. + return ret;
  8047. + }
  8048. +
  8049. + edesc->src_nents = src_nents;
  8050. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  8051. +
  8052. + if (mapped_nents > 1) {
  8053. + int qm_sg_bytes;
  8054. + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
  8055. +
  8056. + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
  8057. + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
  8058. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
  8059. + qm_sg_bytes, DMA_TO_DEVICE);
  8060. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  8061. + dev_err(ctx->dev, "unable to map S/G table\n");
  8062. + goto unmap;
  8063. + }
  8064. + edesc->qm_sg_bytes = qm_sg_bytes;
  8065. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  8066. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  8067. + } else {
  8068. + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
  8069. + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
  8070. + }
  8071. +
  8072. + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
  8073. + DMA_FROM_DEVICE);
  8074. + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
  8075. + dev_err(ctx->dev, "unable to map dst\n");
  8076. + edesc->dst_dma = 0;
  8077. + goto unmap;
  8078. + }
  8079. +
  8080. + dpaa2_fl_set_final(in_fle, true);
  8081. + dpaa2_fl_set_len(in_fle, req->nbytes);
  8082. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  8083. + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
  8084. + dpaa2_fl_set_len(out_fle, digestsize);
  8085. +
  8086. + req_ctx->flc = &ctx->flc[DIGEST];
  8087. + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
  8088. + req_ctx->cbk = ahash_done;
  8089. + req_ctx->ctx = &req->base;
  8090. + req_ctx->edesc = edesc;
  8091. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  8092. + if (ret == -EINPROGRESS ||
  8093. + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  8094. + return ret;
  8095. +
  8096. +unmap:
  8097. + ahash_unmap(ctx->dev, edesc, req, digestsize);
  8098. + qi_cache_free(edesc);
  8099. + return ret;
  8100. +}
  8101. +
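A minimal sketch of a kernel user reaching ahash_digest() through the generic API; with this driver registered, crypto_alloc_ahash("sha256", 0, 0) can resolve to "sha256-caam-qi2", priority permitting. It reuses the wait-based sync_ahash_digest() sketched after the completion callbacks; buffer handling is an assumption of this sketch, and the data must be DMA-able (not on the stack) when a hardware driver backs the transform.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_sha256(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* data must be DMA-able, i.e. not on the caller's stack */
	sg_init_one(&sg, data, len);

	/* wait-based helper sketched after the completion callbacks */
	err = sync_ahash_digest(tfm, &sg, len, digest);

	crypto_free_ahash(tfm);
	return err;
}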
  8102. +static int ahash_final_no_ctx(struct ahash_request *req)
  8103. +{
  8104. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  8105. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  8106. + struct caam_hash_state *state = ahash_request_ctx(req);
  8107. + struct caam_request *req_ctx = &state->caam_req;
  8108. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  8109. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  8110. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  8111. + GFP_KERNEL : GFP_ATOMIC;
  8112. + u8 *buf = current_buf(state);
  8113. + int buflen = *current_buflen(state);
  8114. + int digestsize = crypto_ahash_digestsize(ahash);
  8115. + struct ahash_edesc *edesc;
  8116. + int ret = -ENOMEM;
  8117. +
  8118. + /* allocate space for base edesc and link tables */
  8119. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  8120. + if (!edesc)
  8121. + return ret;
  8122. +
  8123. + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
  8124. + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
  8125. + dev_err(ctx->dev, "unable to map src\n");
  8126. + goto unmap;
  8127. + }
  8128. +
  8129. + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
  8130. + DMA_FROM_DEVICE);
  8131. + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
  8132. + dev_err(ctx->dev, "unable to map dst\n");
  8133. + edesc->dst_dma = 0;
  8134. + goto unmap;
  8135. + }
  8136. +
  8137. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  8138. + dpaa2_fl_set_final(in_fle, true);
  8139. + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
  8140. + dpaa2_fl_set_addr(in_fle, state->buf_dma);
  8141. + dpaa2_fl_set_len(in_fle, buflen);
  8142. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  8143. + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
  8144. + dpaa2_fl_set_len(out_fle, digestsize);
  8145. +
  8146. + req_ctx->flc = &ctx->flc[DIGEST];
  8147. + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
  8148. + req_ctx->cbk = ahash_done;
  8149. + req_ctx->ctx = &req->base;
  8150. + req_ctx->edesc = edesc;
  8151. +
  8152. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  8153. + if (ret == -EINPROGRESS ||
  8154. + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  8155. + return ret;
  8156. +
  8157. +unmap:
  8158. + ahash_unmap(ctx->dev, edesc, req, digestsize);
  8159. + qi_cache_free(edesc);
  8160. + return ret;
  8161. +}
  8162. +
  8163. +static int ahash_update_no_ctx(struct ahash_request *req)
  8164. +{
  8165. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  8166. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  8167. + struct caam_hash_state *state = ahash_request_ctx(req);
  8168. + struct caam_request *req_ctx = &state->caam_req;
  8169. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  8170. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  8171. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  8172. + GFP_KERNEL : GFP_ATOMIC;
  8173. + u8 *buf = current_buf(state);
  8174. + int *buflen = current_buflen(state);
  8175. + u8 *next_buf = alt_buf(state);
  8176. + int *next_buflen = alt_buflen(state);
  8177. + int in_len = *buflen + req->nbytes, to_hash;
  8178. + int qm_sg_bytes, src_nents, mapped_nents;
  8179. + struct ahash_edesc *edesc;
  8180. + int ret = 0;
  8181. +
  8182. + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
  8183. + to_hash = in_len - *next_buflen;
  8184. +
  8185. + if (to_hash) {
  8186. + struct dpaa2_sg_entry *sg_table;
  8187. +
  8188. + src_nents = sg_nents_for_len(req->src,
  8189. + req->nbytes - *next_buflen);
  8190. + if (src_nents < 0) {
  8191. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  8192. + return src_nents;
  8193. + }
  8194. +
  8195. + if (src_nents) {
  8196. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  8197. + DMA_TO_DEVICE);
  8198. + if (!mapped_nents) {
  8199. + dev_err(ctx->dev, "unable to DMA map source\n");
  8200. + return -ENOMEM;
  8201. + }
  8202. + } else {
  8203. + mapped_nents = 0;
  8204. + }
  8205. +
  8206. + /* allocate space for base edesc and link tables */
  8207. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  8208. + if (!edesc) {
  8209. + dma_unmap_sg(ctx->dev, req->src, src_nents,
  8210. + DMA_TO_DEVICE);
  8211. + return -ENOMEM;
  8212. + }
  8213. +
  8214. + edesc->src_nents = src_nents;
  8215. + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
  8216. + sg_table = &edesc->sgt[0];
  8217. +
  8218. + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
  8219. + if (ret)
  8220. + goto unmap_ctx;
  8221. +
  8222. + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
  8223. +
  8224. + if (*next_buflen)
  8225. + scatterwalk_map_and_copy(next_buf, req->src,
  8226. + to_hash - *buflen,
  8227. + *next_buflen, 0);
  8228. +
  8229. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
  8230. + qm_sg_bytes, DMA_TO_DEVICE);
  8231. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  8232. + dev_err(ctx->dev, "unable to map S/G table\n");
  8233. + ret = -ENOMEM;
  8234. + goto unmap_ctx;
  8235. + }
  8236. + edesc->qm_sg_bytes = qm_sg_bytes;
  8237. +
  8238. + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
  8239. + ctx->ctx_len, DMA_FROM_DEVICE);
  8240. + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
  8241. + dev_err(ctx->dev, "unable to map ctx\n");
  8242. + state->ctx_dma = 0;
  8243. + ret = -ENOMEM;
  8244. + goto unmap_ctx;
  8245. + }
  8246. +
  8247. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  8248. + dpaa2_fl_set_final(in_fle, true);
  8249. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  8250. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  8251. + dpaa2_fl_set_len(in_fle, to_hash);
  8252. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  8253. + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
  8254. + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
  8255. +
  8256. + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
  8257. + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
  8258. + req_ctx->cbk = ahash_done_ctx_dst;
  8259. + req_ctx->ctx = &req->base;
  8260. + req_ctx->edesc = edesc;
  8261. +
  8262. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  8263. + if (ret != -EINPROGRESS &&
  8264. + !(ret == -EBUSY &&
  8265. + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  8266. + goto unmap_ctx;
  8267. +
  8268. + state->update = ahash_update_ctx;
  8269. + state->finup = ahash_finup_ctx;
  8270. + state->final = ahash_final_ctx;
  8271. + } else if (*next_buflen) {
  8272. + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
  8273. + req->nbytes, 0);
  8274. + *buflen = *next_buflen;
  8275. + *next_buflen = 0;
  8276. + }
  8277. +#ifdef DEBUG
  8278. + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
  8279. + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
  8280. + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
  8281. + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
  8282. + *next_buflen, 1);
  8283. +#endif
  8284. +
  8285. + return ret;
  8286. +unmap_ctx:
  8287. + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
  8288. + qi_cache_free(edesc);
  8289. + return ret;
  8290. +}
  8291. +
  8292. +static int ahash_finup_no_ctx(struct ahash_request *req)
  8293. +{
  8294. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  8295. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  8296. + struct caam_hash_state *state = ahash_request_ctx(req);
  8297. + struct caam_request *req_ctx = &state->caam_req;
  8298. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  8299. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  8300. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  8301. + GFP_KERNEL : GFP_ATOMIC;
  8302. + int buflen = *current_buflen(state);
  8303. + int qm_sg_bytes, src_nents, mapped_nents;
  8304. + int digestsize = crypto_ahash_digestsize(ahash);
  8305. + struct ahash_edesc *edesc;
  8306. + struct dpaa2_sg_entry *sg_table;
  8307. + int ret;
  8308. +
  8309. + src_nents = sg_nents_for_len(req->src, req->nbytes);
  8310. + if (src_nents < 0) {
  8311. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  8312. + return src_nents;
  8313. + }
  8314. +
  8315. + if (src_nents) {
  8316. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  8317. + DMA_TO_DEVICE);
  8318. + if (!mapped_nents) {
  8319. + dev_err(ctx->dev, "unable to DMA map source\n");
  8320. + return -ENOMEM;
  8321. + }
  8322. + } else {
  8323. + mapped_nents = 0;
  8324. + }
  8325. +
  8326. + /* allocate space for base edesc and link tables */
  8327. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  8328. + if (!edesc) {
  8329. + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
  8330. + return -ENOMEM;
  8331. + }
  8332. +
  8333. + edesc->src_nents = src_nents;
  8334. + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
  8335. + sg_table = &edesc->sgt[0];
  8336. +
  8337. + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
  8338. + if (ret)
  8339. + goto unmap;
  8340. +
  8341. + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
  8342. +
  8343. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
  8344. + DMA_TO_DEVICE);
  8345. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  8346. + dev_err(ctx->dev, "unable to map S/G table\n");
  8347. + ret = -ENOMEM;
  8348. + goto unmap;
  8349. + }
  8350. + edesc->qm_sg_bytes = qm_sg_bytes;
  8351. +
  8352. + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
  8353. + DMA_FROM_DEVICE);
  8354. + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
  8355. + dev_err(ctx->dev, "unable to map dst\n");
  8356. + edesc->dst_dma = 0;
  8357. + ret = -ENOMEM;
  8358. + goto unmap;
  8359. + }
  8360. +
  8361. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  8362. + dpaa2_fl_set_final(in_fle, true);
  8363. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  8364. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  8365. + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
  8366. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  8367. + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
  8368. + dpaa2_fl_set_len(out_fle, digestsize);
  8369. +
  8370. + req_ctx->flc = &ctx->flc[DIGEST];
  8371. + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
  8372. + req_ctx->cbk = ahash_done;
  8373. + req_ctx->ctx = &req->base;
  8374. + req_ctx->edesc = edesc;
  8375. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  8376. + if (ret != -EINPROGRESS &&
  8377. + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  8378. + goto unmap;
  8379. +
  8380. + return ret;
  8381. +unmap:
  8382. + ahash_unmap(ctx->dev, edesc, req, digestsize);
  8383. + qi_cache_free(edesc);
  8384. + return -ENOMEM;
  8385. +}
  8386. +
  8387. +static int ahash_update_first(struct ahash_request *req)
  8388. +{
  8389. + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  8390. + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  8391. + struct caam_hash_state *state = ahash_request_ctx(req);
  8392. + struct caam_request *req_ctx = &state->caam_req;
  8393. + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
  8394. + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
  8395. + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  8396. + GFP_KERNEL : GFP_ATOMIC;
  8397. + u8 *next_buf = alt_buf(state);
  8398. + int *next_buflen = alt_buflen(state);
  8399. + int to_hash;
  8400. + int src_nents, mapped_nents;
  8401. + struct ahash_edesc *edesc;
  8402. + int ret = 0;
  8403. +
  8404. + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
  8405. + 1);
  8406. + to_hash = req->nbytes - *next_buflen;
  8407. +
  8408. + if (to_hash) {
  8409. + struct dpaa2_sg_entry *sg_table;
  8410. +
  8411. + src_nents = sg_nents_for_len(req->src,
  8412. + req->nbytes - (*next_buflen));
  8413. + if (src_nents < 0) {
  8414. + dev_err(ctx->dev, "Invalid number of src SG.\n");
  8415. + return src_nents;
  8416. + }
  8417. +
  8418. + if (src_nents) {
  8419. + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
  8420. + DMA_TO_DEVICE);
  8421. + if (!mapped_nents) {
  8422. + dev_err(ctx->dev, "unable to map source for DMA\n");
  8423. + return -ENOMEM;
  8424. + }
  8425. + } else {
  8426. + mapped_nents = 0;
  8427. + }
  8428. +
  8429. + /* allocate space for base edesc and link tables */
  8430. + edesc = qi_cache_zalloc(GFP_DMA | flags);
  8431. + if (!edesc) {
  8432. + dma_unmap_sg(ctx->dev, req->src, src_nents,
  8433. + DMA_TO_DEVICE);
  8434. + return -ENOMEM;
  8435. + }
  8436. +
  8437. + edesc->src_nents = src_nents;
  8438. + sg_table = &edesc->sgt[0];
  8439. +
  8440. + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
  8441. + dpaa2_fl_set_final(in_fle, true);
  8442. + dpaa2_fl_set_len(in_fle, to_hash);
  8443. +
  8444. + if (mapped_nents > 1) {
  8445. + int qm_sg_bytes;
  8446. +
  8447. + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
  8448. + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
  8449. + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
  8450. + qm_sg_bytes,
  8451. + DMA_TO_DEVICE);
  8452. + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
  8453. + dev_err(ctx->dev, "unable to map S/G table\n");
  8454. + ret = -ENOMEM;
  8455. + goto unmap_ctx;
  8456. + }
  8457. + edesc->qm_sg_bytes = qm_sg_bytes;
  8458. + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
  8459. + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
  8460. + } else {
  8461. + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
  8462. + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
  8463. + }
  8464. +
  8465. + if (*next_buflen)
  8466. + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
  8467. + *next_buflen, 0);
  8468. +
  8469. + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
  8470. + ctx->ctx_len, DMA_FROM_DEVICE);
  8471. + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
  8472. + dev_err(ctx->dev, "unable to map ctx\n");
  8473. + state->ctx_dma = 0;
  8474. + ret = -ENOMEM;
  8475. + goto unmap_ctx;
  8476. + }
  8477. +
  8478. + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
  8479. + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
  8480. + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
  8481. +
  8482. + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
  8483. + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
  8484. + req_ctx->cbk = ahash_done_ctx_dst;
  8485. + req_ctx->ctx = &req->base;
  8486. + req_ctx->edesc = edesc;
  8487. +
  8488. + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
  8489. + if (ret != -EINPROGRESS &&
  8490. + !(ret == -EBUSY && req->base.flags &
  8491. + CRYPTO_TFM_REQ_MAY_BACKLOG))
  8492. + goto unmap_ctx;
  8493. +
  8494. + state->update = ahash_update_ctx;
  8495. + state->finup = ahash_finup_ctx;
  8496. + state->final = ahash_final_ctx;
  8497. + } else if (*next_buflen) {
  8498. + state->update = ahash_update_no_ctx;
  8499. + state->finup = ahash_finup_no_ctx;
  8500. + state->final = ahash_final_no_ctx;
  8501. + scatterwalk_map_and_copy(next_buf, req->src, 0,
  8502. + req->nbytes, 0);
  8503. + switch_buf(state);
  8504. + }
  8505. +#ifdef DEBUG
  8506. + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
  8507. + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
  8508. +#endif
  8509. +
  8510. + return ret;
  8511. +unmap_ctx:
  8512. + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
  8513. + qi_cache_free(edesc);
  8514. + return ret;
  8515. +}
  8516. +
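A summary (not driver code) of the per-request handler dispatch built by ahash_init(), ahash_update_first() and ahash_update_no_ctx():

/*
 *   ahash_init():
 *       update = ahash_update_first, finup = ahash_finup_first,
 *       final  = ahash_final_no_ctx
 *   ahash_update_first(), data actually hashed (to_hash != 0):
 *       a running context now lives in state->caam_ctx, so
 *       update/finup/final become the *_ctx handlers
 *   ahash_update_first(), data only buffered:
 *       update/finup/final become the *_no_ctx handlers
 *   ahash_update_no_ctx(), data actually hashed:
 *       switches to the *_ctx handlers as well
 */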
  8517. +static int ahash_finup_first(struct ahash_request *req)
  8518. +{
  8519. + return ahash_digest(req);
  8520. +}
  8521. +
  8522. +static int ahash_init(struct ahash_request *req)
  8523. +{
  8524. + struct caam_hash_state *state = ahash_request_ctx(req);
  8525. +
  8526. + state->update = ahash_update_first;
  8527. + state->finup = ahash_finup_first;
  8528. + state->final = ahash_final_no_ctx;
  8529. +
  8530. + state->ctx_dma = 0;
  8531. + state->current_buf = 0;
  8532. + state->buf_dma = 0;
  8533. + state->buflen_0 = 0;
  8534. + state->buflen_1 = 0;
  8535. +
  8536. + return 0;
  8537. +}
  8538. +
  8539. +static int ahash_update(struct ahash_request *req)
  8540. +{
  8541. + struct caam_hash_state *state = ahash_request_ctx(req);
  8542. +
  8543. + return state->update(req);
  8544. +}
  8545. +
  8546. +static int ahash_finup(struct ahash_request *req)
  8547. +{
  8548. + struct caam_hash_state *state = ahash_request_ctx(req);
  8549. +
  8550. + return state->finup(req);
  8551. +}
  8552. +
  8553. +static int ahash_final(struct ahash_request *req)
  8554. +{
  8555. + struct caam_hash_state *state = ahash_request_ctx(req);
  8556. +
  8557. + return state->final(req);
  8558. +}
  8559. +
  8560. +static int ahash_export(struct ahash_request *req, void *out)
  8561. +{
  8562. + struct caam_hash_state *state = ahash_request_ctx(req);
  8563. + struct caam_export_state *export = out;
  8564. + int len;
  8565. + u8 *buf;
  8566. +
  8567. + if (state->current_buf) {
  8568. + buf = state->buf_1;
  8569. + len = state->buflen_1;
  8570. + } else {
  8571. + buf = state->buf_0;
  8572. + len = state->buflen_0;
  8573. + }
  8574. +
  8575. + memcpy(export->buf, buf, len);
  8576. + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
  8577. + export->buflen = len;
  8578. + export->update = state->update;
  8579. + export->final = state->final;
  8580. + export->finup = state->finup;
  8581. +
  8582. + return 0;
  8583. +}
  8584. +
  8585. +static int ahash_import(struct ahash_request *req, const void *in)
  8586. +{
  8587. + struct caam_hash_state *state = ahash_request_ctx(req);
  8588. + const struct caam_export_state *export = in;
  8589. +
  8590. + memset(state, 0, sizeof(*state));
  8591. + memcpy(state->buf_0, export->buf, export->buflen);
  8592. + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
  8593. + state->buflen_0 = export->buflen;
  8594. + state->update = export->update;
  8595. + state->final = export->final;
  8596. + state->finup = export->finup;
  8597. +
  8598. + return 0;
  8599. +}
  8600. +
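A minimal sketch of how a caller uses these two routines to checkpoint and resume a partial hash; the state buffer size comes from the .statesize advertised by the algorithm (sizeof(struct caam_export_state) here), and both requests are assumed to be backed by the same algorithm.

#include <crypto/hash.h>
#include <linux/slab.h>

static int example_export_import(struct ahash_request *req,
				 struct ahash_request *req2)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);	/* snapshot partial hash */
	if (!err)
		err = crypto_ahash_import(req2, state);	/* resume elsewhere */

	kfree(state);
	return err;
}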
  8601. +struct caam_hash_template {
  8602. + char name[CRYPTO_MAX_ALG_NAME];
  8603. + char driver_name[CRYPTO_MAX_ALG_NAME];
  8604. + char hmac_name[CRYPTO_MAX_ALG_NAME];
  8605. + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
  8606. + unsigned int blocksize;
  8607. + struct ahash_alg template_ahash;
  8608. + u32 alg_type;
  8609. +};
  8610. +
  8611. +/* ahash descriptors */
  8612. +static struct caam_hash_template driver_hash[] = {
  8613. + {
  8614. + .name = "sha1",
  8615. + .driver_name = "sha1-caam-qi2",
  8616. + .hmac_name = "hmac(sha1)",
  8617. + .hmac_driver_name = "hmac-sha1-caam-qi2",
  8618. + .blocksize = SHA1_BLOCK_SIZE,
  8619. + .template_ahash = {
  8620. + .init = ahash_init,
  8621. + .update = ahash_update,
  8622. + .final = ahash_final,
  8623. + .finup = ahash_finup,
  8624. + .digest = ahash_digest,
  8625. + .export = ahash_export,
  8626. + .import = ahash_import,
  8627. + .setkey = ahash_setkey,
  8628. + .halg = {
  8629. + .digestsize = SHA1_DIGEST_SIZE,
  8630. + .statesize = sizeof(struct caam_export_state),
  8631. + },
  8632. + },
  8633. + .alg_type = OP_ALG_ALGSEL_SHA1,
  8634. + }, {
  8635. + .name = "sha224",
  8636. + .driver_name = "sha224-caam-qi2",
  8637. + .hmac_name = "hmac(sha224)",
  8638. + .hmac_driver_name = "hmac-sha224-caam-qi2",
  8639. + .blocksize = SHA224_BLOCK_SIZE,
  8640. + .template_ahash = {
  8641. + .init = ahash_init,
  8642. + .update = ahash_update,
  8643. + .final = ahash_final,
  8644. + .finup = ahash_finup,
  8645. + .digest = ahash_digest,
  8646. + .export = ahash_export,
  8647. + .import = ahash_import,
  8648. + .setkey = ahash_setkey,
  8649. + .halg = {
  8650. + .digestsize = SHA224_DIGEST_SIZE,
  8651. + .statesize = sizeof(struct caam_export_state),
  8652. + },
  8653. + },
  8654. + .alg_type = OP_ALG_ALGSEL_SHA224,
  8655. + }, {
  8656. + .name = "sha256",
  8657. + .driver_name = "sha256-caam-qi2",
  8658. + .hmac_name = "hmac(sha256)",
  8659. + .hmac_driver_name = "hmac-sha256-caam-qi2",
  8660. + .blocksize = SHA256_BLOCK_SIZE,
  8661. + .template_ahash = {
  8662. + .init = ahash_init,
  8663. + .update = ahash_update,
  8664. + .final = ahash_final,
  8665. + .finup = ahash_finup,
  8666. + .digest = ahash_digest,
  8667. + .export = ahash_export,
  8668. + .import = ahash_import,
  8669. + .setkey = ahash_setkey,
  8670. + .halg = {
  8671. + .digestsize = SHA256_DIGEST_SIZE,
  8672. + .statesize = sizeof(struct caam_export_state),
  8673. + },
  8674. + },
  8675. + .alg_type = OP_ALG_ALGSEL_SHA256,
  8676. + }, {
  8677. + .name = "sha384",
  8678. + .driver_name = "sha384-caam-qi2",
  8679. + .hmac_name = "hmac(sha384)",
  8680. + .hmac_driver_name = "hmac-sha384-caam-qi2",
  8681. + .blocksize = SHA384_BLOCK_SIZE,
  8682. + .template_ahash = {
  8683. + .init = ahash_init,
  8684. + .update = ahash_update,
  8685. + .final = ahash_final,
  8686. + .finup = ahash_finup,
  8687. + .digest = ahash_digest,
  8688. + .export = ahash_export,
  8689. + .import = ahash_import,
  8690. + .setkey = ahash_setkey,
  8691. + .halg = {
  8692. + .digestsize = SHA384_DIGEST_SIZE,
  8693. + .statesize = sizeof(struct caam_export_state),
  8694. + },
  8695. + },
  8696. + .alg_type = OP_ALG_ALGSEL_SHA384,
  8697. + }, {
  8698. + .name = "sha512",
  8699. + .driver_name = "sha512-caam-qi2",
  8700. + .hmac_name = "hmac(sha512)",
  8701. + .hmac_driver_name = "hmac-sha512-caam-qi2",
  8702. + .blocksize = SHA512_BLOCK_SIZE,
  8703. + .template_ahash = {
  8704. + .init = ahash_init,
  8705. + .update = ahash_update,
  8706. + .final = ahash_final,
  8707. + .finup = ahash_finup,
  8708. + .digest = ahash_digest,
  8709. + .export = ahash_export,
  8710. + .import = ahash_import,
  8711. + .setkey = ahash_setkey,
  8712. + .halg = {
  8713. + .digestsize = SHA512_DIGEST_SIZE,
  8714. + .statesize = sizeof(struct caam_export_state),
  8715. + },
  8716. + },
  8717. + .alg_type = OP_ALG_ALGSEL_SHA512,
  8718. + }, {
  8719. + .name = "md5",
  8720. + .driver_name = "md5-caam-qi2",
  8721. + .hmac_name = "hmac(md5)",
  8722. + .hmac_driver_name = "hmac-md5-caam-qi2",
  8723. + .blocksize = MD5_BLOCK_WORDS * 4,
  8724. + .template_ahash = {
  8725. + .init = ahash_init,
  8726. + .update = ahash_update,
  8727. + .final = ahash_final,
  8728. + .finup = ahash_finup,
  8729. + .digest = ahash_digest,
  8730. + .export = ahash_export,
  8731. + .import = ahash_import,
  8732. + .setkey = ahash_setkey,
  8733. + .halg = {
  8734. + .digestsize = MD5_DIGEST_SIZE,
  8735. + .statesize = sizeof(struct caam_export_state),
  8736. + },
  8737. + },
  8738. + .alg_type = OP_ALG_ALGSEL_MD5,
  8739. + }
  8740. +};
  8741. +
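Each entry above registers both an unkeyed and an hmac() variant. A minimal sketch of a consumer requesting the keyed variant; the key buffer and length are assumptions of this sketch, and crypto_ahash_setkey() ends up in the ahash_setkey() wired into the table.

#include <crypto/hash.h>
#include <linux/err.h>

static struct crypto_ahash *example_hmac_tfm(const u8 *key,
					     unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* reaches the ahash_setkey() referenced by the table above */
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_ahash(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}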
  8742. +struct caam_hash_alg {
  8743. + struct list_head entry;
  8744. + struct device *dev;
  8745. + int alg_type;
  8746. + struct ahash_alg ahash_alg;
  8747. +};
  8748. +
  8749. +static int caam_hash_cra_init(struct crypto_tfm *tfm)
  8750. +{
  8751. + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
  8752. + struct crypto_alg *base = tfm->__crt_alg;
  8753. + struct hash_alg_common *halg =
  8754. + container_of(base, struct hash_alg_common, base);
  8755. + struct ahash_alg *alg =
  8756. + container_of(halg, struct ahash_alg, halg);
  8757. + struct caam_hash_alg *caam_hash =
  8758. + container_of(alg, struct caam_hash_alg, ahash_alg);
  8759. + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  8760. + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
  8761. + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
  8762. + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
  8763. + HASH_MSG_LEN + 32,
  8764. + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
  8765. + HASH_MSG_LEN + 64,
  8766. + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
  8767. + dma_addr_t dma_addr;
  8768. + int i;
  8769. +
  8770. + ctx->dev = caam_hash->dev;
  8771. +
  8772. + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
  8773. + DMA_BIDIRECTIONAL,
  8774. + DMA_ATTR_SKIP_CPU_SYNC);
  8775. + if (dma_mapping_error(ctx->dev, dma_addr)) {
  8776. + dev_err(ctx->dev, "unable to map shared descriptors\n");
  8777. + return -ENOMEM;
  8778. + }
  8779. +
  8780. + for (i = 0; i < HASH_NUM_OP; i++)
  8781. + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
  8782. +
  8783. + /* copy descriptor header template value */
  8784. + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
  8785. +
  8786. + ctx->ctx_len = runninglen[(ctx->adata.algtype &
  8787. + OP_ALG_ALGSEL_SUBMASK) >>
  8788. + OP_ALG_ALGSEL_SHIFT];
  8789. +
  8790. + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  8791. + sizeof(struct caam_hash_state));
  8792. +
  8793. + return ahash_set_sh_desc(ahash);
  8794. +}
  8795. +
  8796. +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
  8797. +{
  8798. + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  8799. +
  8800. + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
  8801. + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
  8802. +}
  8803. +
  8804. +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
  8805. + struct caam_hash_template *template, bool keyed)
  8806. +{
  8807. + struct caam_hash_alg *t_alg;
  8808. + struct ahash_alg *halg;
  8809. + struct crypto_alg *alg;
  8810. +
  8811. + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
  8812. + if (!t_alg)
  8813. + return ERR_PTR(-ENOMEM);
  8814. +
  8815. + t_alg->ahash_alg = template->template_ahash;
  8816. + halg = &t_alg->ahash_alg;
  8817. + alg = &halg->halg.base;
  8818. +
  8819. + if (keyed) {
  8820. + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
  8821. + template->hmac_name);
  8822. + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  8823. + template->hmac_driver_name);
  8824. + } else {
  8825. + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
  8826. + template->name);
  8827. + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  8828. + template->driver_name);
  8829. + t_alg->ahash_alg.setkey = NULL;
  8830. + }
  8831. + alg->cra_module = THIS_MODULE;
  8832. + alg->cra_init = caam_hash_cra_init;
  8833. + alg->cra_exit = caam_hash_cra_exit;
  8834. + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
  8835. + alg->cra_priority = CAAM_CRA_PRIORITY;
  8836. + alg->cra_blocksize = template->blocksize;
  8837. + alg->cra_alignmask = 0;
  8838. + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
  8839. + alg->cra_type = &crypto_ahash_type;
  8840. +
  8841. + t_alg->alg_type = template->alg_type;
  8842. + t_alg->dev = dev;
  8843. +
  8844. + return t_alg;
  8845. +}
  8846. +
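A minimal sketch of how such an allocation is typically registered; the authoritative loop lives in the probe routine later in this patch and walks driver_hash[] for both keyed and unkeyed variants, so the helper below is illustrative only (hash_list is the static list declared further down in this file).

static int example_register_one(struct device *dev,
				struct caam_hash_template *tmpl)
{
	struct caam_hash_alg *t_alg;
	int err;

	t_alg = caam_hash_alloc(dev, tmpl, true);	/* keyed, hmac(<hash>) */
	if (IS_ERR(t_alg))
		return PTR_ERR(t_alg);

	err = crypto_register_ahash(&t_alg->ahash_alg);
	if (err) {
		kfree(t_alg);
		return err;
	}

	/* hash_list is the static list declared later in this file */
	list_add_tail(&t_alg->entry, &hash_list);
	return 0;
}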
  8847. +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
  8848. +{
  8849. + struct dpaa2_caam_priv_per_cpu *ppriv;
  8850. +
  8851. + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
  8852. + napi_schedule_irqoff(&ppriv->napi);
  8853. +}
  8854. +
  8855. +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
  8856. +{
  8857. + struct device *dev = priv->dev;
  8858. + struct dpaa2_io_notification_ctx *nctx;
  8859. + struct dpaa2_caam_priv_per_cpu *ppriv;
  8860. + int err, i = 0, cpu;
  8861. +
  8862. + for_each_online_cpu(cpu) {
  8863. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  8864. + ppriv->priv = priv;
  8865. + nctx = &ppriv->nctx;
  8866. + nctx->is_cdan = 0;
  8867. + nctx->id = ppriv->rsp_fqid;
  8868. + nctx->desired_cpu = cpu;
  8869. + nctx->cb = dpaa2_caam_fqdan_cb;
  8870. +
  8871. + /* Register notification callbacks */
  8872. + err = dpaa2_io_service_register(NULL, nctx);
  8873. + if (unlikely(err)) {
  8874. + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
  8875. + nctx->cb = NULL;
  8876. + /*
  8877. + * If no affine DPIO for this core, there's probably
  8878. + * none available for next cores either. Signal we want
  8879. + * to retry later, in case the DPIO devices weren't
  8880. + * probed yet.
  8881. + */
  8882. + err = -EPROBE_DEFER;
  8883. + goto err;
  8884. + }
  8885. +
  8886. + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
  8887. + dev);
  8888. + if (unlikely(!ppriv->store)) {
  8889. + dev_err(dev, "dpaa2_io_store_create() failed\n");
  8890. + goto err;
  8891. + }
  8892. +
  8893. + if (++i == priv->num_pairs)
  8894. + break;
  8895. + }
  8896. +
  8897. + return 0;
  8898. +
  8899. +err:
  8900. + for_each_online_cpu(cpu) {
  8901. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  8902. + if (!ppriv->nctx.cb)
  8903. + break;
  8904. + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
  8905. + }
  8906. +
  8907. + for_each_online_cpu(cpu) {
  8908. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  8909. + if (!ppriv->store)
  8910. + break;
  8911. + dpaa2_io_store_destroy(ppriv->store);
  8912. + }
  8913. +
  8914. + return err;
  8915. +}
  8916. +
  8917. +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
  8918. +{
  8919. + struct dpaa2_caam_priv_per_cpu *ppriv;
  8920. + int i = 0, cpu;
  8921. +
  8922. + for_each_online_cpu(cpu) {
  8923. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  8924. + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
  8925. + dpaa2_io_store_destroy(ppriv->store);
  8926. +
  8927. + if (++i == priv->num_pairs)
  8928. + return;
  8929. + }
  8930. +}
  8931. +
  8932. +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
  8933. +{
  8934. + struct dpseci_rx_queue_cfg rx_queue_cfg;
  8935. + struct device *dev = priv->dev;
  8936. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  8937. + struct dpaa2_caam_priv_per_cpu *ppriv;
  8938. + int err = 0, i = 0, cpu;
  8939. +
  8940. + /* Configure Rx queues */
  8941. + for_each_online_cpu(cpu) {
  8942. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  8943. +
  8944. + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
  8945. + DPSECI_QUEUE_OPT_USER_CTX;
  8946. + rx_queue_cfg.order_preservation_en = 0;
  8947. + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
  8948. + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
  8949. + /*
  8950. + * Rx priority (WQ) doesn't really matter, since we use
  8951. + * pull mode, i.e. volatile dequeues from specific FQs
  8952. + */
  8953. + rx_queue_cfg.dest_cfg.priority = 0;
  8954. + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
  8955. +
  8956. + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
  8957. + &rx_queue_cfg);
  8958. + if (err) {
  8959. + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
  8960. + err);
  8961. + return err;
  8962. + }
  8963. +
  8964. + if (++i == priv->num_pairs)
  8965. + break;
  8966. + }
  8967. +
  8968. + return err;
  8969. +}
  8970. +
  8971. +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
  8972. +{
  8973. + struct device *dev = priv->dev;
  8974. +
  8975. + if (!priv->cscn_mem)
  8976. + return;
  8977. +
  8978. + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
  8979. + kfree(priv->cscn_mem);
  8980. +}
  8981. +
  8982. +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
  8983. +{
  8984. + struct device *dev = priv->dev;
  8985. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  8986. +
  8987. + dpaa2_dpseci_congestion_free(priv);
  8988. + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
  8989. +}
  8990. +
  8991. +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
  8992. + const struct dpaa2_fd *fd)
  8993. +{
  8994. + struct caam_request *req;
  8995. + u32 fd_err;
  8996. +
  8997. + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
  8998. + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
  8999. + return;
  9000. + }
  9001. +
  9002. + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
  9003. + if (unlikely(fd_err))
  9004. + dev_err(priv->dev, "FD error: %08x\n", fd_err);
  9005. +
  9006. + /*
  9007. + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
  9008. + * in FD[ERR] or FD[FRC].
  9009. + */
  9010. + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
  9011. + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
  9012. + DMA_BIDIRECTIONAL);
  9013. + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
  9014. +}
  9015. +
  9016. +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
  9017. +{
  9018. + int err;
  9019. +
  9020. + /* Retry while portal is busy */
  9021. + do {
  9022. + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
  9023. + ppriv->store);
  9024. + } while (err == -EBUSY);
  9025. +
  9026. + if (unlikely(err))
  9027. + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
  9028. +
  9029. + return err;
  9030. +}
  9031. +
  9032. +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
  9033. +{
  9034. + struct dpaa2_dq *dq;
  9035. + int cleaned = 0, is_last;
  9036. +
  9037. + do {
  9038. + dq = dpaa2_io_store_next(ppriv->store, &is_last);
  9039. + if (unlikely(!dq)) {
  9040. + if (unlikely(!is_last)) {
  9041. + dev_dbg(ppriv->priv->dev,
  9042. + "FQ %d returned no valid frames\n",
  9043. + ppriv->rsp_fqid);
  9044. + /*
  9045. + * MUST retry until we get some sort of
  9046. + * valid response token (be it "empty dequeue"
  9047. + * or a valid frame).
  9048. + */
  9049. + continue;
  9050. + }
  9051. + break;
  9052. + }
  9053. +
  9054. + /* Process FD */
  9055. + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
  9056. + cleaned++;
  9057. + } while (!is_last);
  9058. +
  9059. + return cleaned;
  9060. +}
  9061. +
  9062. +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
  9063. +{
  9064. + struct dpaa2_caam_priv_per_cpu *ppriv;
  9065. + struct dpaa2_caam_priv *priv;
  9066. + int err, cleaned = 0, store_cleaned;
  9067. +
  9068. + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
  9069. + priv = ppriv->priv;
  9070. +
  9071. + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
  9072. + return 0;
  9073. +
  9074. + do {
  9075. + store_cleaned = dpaa2_caam_store_consume(ppriv);
  9076. + cleaned += store_cleaned;
  9077. +
  9078. + if (store_cleaned == 0 ||
  9079. + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
  9080. + break;
  9081. +
  9082. + /* Try to dequeue some more */
  9083. + err = dpaa2_caam_pull_fq(ppriv);
  9084. + if (unlikely(err))
  9085. + break;
  9086. + } while (1);
  9087. +
  9088. + if (cleaned < budget) {
  9089. + napi_complete_done(napi, cleaned);
  9090. + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
  9091. + if (unlikely(err))
  9092. + dev_err(priv->dev, "Notification rearm failed: %d\n",
  9093. + err);
  9094. + }
  9095. +
  9096. + return cleaned;
  9097. +}
  9098. +
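A worked example of the budget arithmetic in the poll loop above, using illustrative values only (the real DPAA2_CAAM_STORE_SIZE is defined elsewhere in this patch):

/*
 * Suppose budget = 64 and DPAA2_CAAM_STORE_SIZE = 16. One volatile
 * dequeue returns at most 16 frames, so:
 *
 *   cleaned = 48: 48 <= 64 - 16, another full store cannot exceed the
 *                 budget, pull again
 *   cleaned = 49: 49 >  64 - 16, one more full store could reach 65,
 *                 stop pulling; napi_complete_done() then runs only if
 *                 cleaned < budget, otherwise NAPI stays scheduled
 */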
  9099. +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
  9100. + u16 token)
  9101. +{
  9102. + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
  9103. + struct device *dev = priv->dev;
  9104. + int err;
  9105. +
  9106. + /*
  9107. + * Congestion group feature supported starting with DPSECI API v5.1
  9108. + * and only when object has been created with this capability.
  9109. + */
  9110. + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
  9111. + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
  9112. + return 0;
  9113. +
  9114. + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
  9115. + GFP_KERNEL | GFP_DMA);
  9116. + if (!priv->cscn_mem)
  9117. + return -ENOMEM;
  9118. +
  9119. + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
  9120. + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
  9121. + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
  9122. + if (dma_mapping_error(dev, priv->cscn_dma)) {
  9123. + dev_err(dev, "Error mapping CSCN memory area\n");
  9124. + err = -ENOMEM;
  9125. + goto err_dma_map;
  9126. + }
  9127. +
  9128. + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
  9129. + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
  9130. + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
  9131. + cong_notif_cfg.message_ctx = (u64)priv;
  9132. + cong_notif_cfg.message_iova = priv->cscn_dma;
  9133. + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
  9134. + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
  9135. + DPSECI_CGN_MODE_COHERENT_WRITE;
  9136. +
  9137. + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
  9138. + &cong_notif_cfg);
  9139. + if (err) {
  9140. + dev_err(dev, "dpseci_set_congestion_notification failed\n");
  9141. + goto err_set_cong;
  9142. + }
  9143. +
  9144. + return 0;
  9145. +
  9146. +err_set_cong:
  9147. + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
  9148. +err_dma_map:
  9149. + kfree(priv->cscn_mem);
  9150. +
  9151. + return err;
  9152. +}
  9153. +
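The CSCN area mapped above is refreshed by hardware on congestion enter/exit so that the enqueue path can back-pressure callers. A hypothetical sketch of such a check; the helper name and CSCN layout below are placeholders, not taken from this patch.

static bool example_congested(struct dpaa2_caam_priv *priv)
{
	/*
	 * cscn_state_congested() is a placeholder: hardware rewrites the
	 * aligned CSCN snapshot on enter/exit per the notification_mode
	 * configured above, and the enqueue path consults it.
	 */
	return priv->cscn_mem && cscn_state_congested(priv->cscn_mem_aligned);
}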
  9154. +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
  9155. +{
  9156. + struct device *dev = &ls_dev->dev;
  9157. + struct dpaa2_caam_priv *priv;
  9158. + struct dpaa2_caam_priv_per_cpu *ppriv;
  9159. + int err, cpu;
  9160. + u8 i;
  9161. +
  9162. + priv = dev_get_drvdata(dev);
  9163. +
  9164. + priv->dev = dev;
  9165. + priv->dpsec_id = ls_dev->obj_desc.id;
  9166. +
  9167. + /* Get a handle for the DPSECI this interface is associated with */
  9168. + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
  9169. + if (err) {
  9170. + dev_err(dev, "dpsec_open() failed: %d\n", err);
  9171. + goto err_open;
  9172. + }
  9173. +
  9174. + dev_info(dev, "Opened dpseci object successfully\n");
  9175. +
  9176. + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
  9177. + &priv->minor_ver);
  9178. + if (err) {
  9179. + dev_err(dev, "dpseci_get_api_version() failed\n");
  9180. + goto err_get_vers;
  9181. + }
  9182. +
  9183. + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
  9184. + &priv->dpseci_attr);
  9185. + if (err) {
  9186. + dev_err(dev, "dpseci_get_attributes() failed\n");
  9187. + goto err_get_vers;
  9188. + }
  9189. +
  9190. + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
  9191. + &priv->sec_attr);
  9192. + if (err) {
  9193. + dev_err(dev, "dpseci_get_sec_attr() failed\n");
  9194. + goto err_get_vers;
  9195. + }
  9196. +
  9197. + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
  9198. + if (err) {
  9199. + dev_err(dev, "setup_congestion() failed\n");
  9200. + goto err_get_vers;
  9201. + }
  9202. +
  9203. + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
  9204. + priv->dpseci_attr.num_tx_queues);
  9205. + if (priv->num_pairs > num_online_cpus()) {
  9206. + dev_warn(dev, "%d queues won't be used\n",
  9207. + priv->num_pairs - num_online_cpus());
  9208. + priv->num_pairs = num_online_cpus();
  9209. + }
  9210. +
  9211. + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
  9212. + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
  9213. + &priv->rx_queue_attr[i]);
  9214. + if (err) {
  9215. + dev_err(dev, "dpseci_get_rx_queue() failed\n");
  9216. + goto err_get_rx_queue;
  9217. + }
  9218. + }
  9219. +
  9220. + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
  9221. + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
  9222. + &priv->tx_queue_attr[i]);
  9223. + if (err) {
  9224. + dev_err(dev, "dpseci_get_tx_queue() failed\n");
  9225. + goto err_get_rx_queue;
  9226. + }
  9227. + }
  9228. +
  9229. + i = 0;
  9230. + for_each_online_cpu(cpu) {
  9231. + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
  9232. + priv->rx_queue_attr[i].fqid,
  9233. + priv->tx_queue_attr[i].fqid);
  9234. +
  9235. + ppriv = per_cpu_ptr(priv->ppriv, cpu);
  9236. + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
  9237. + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
  9238. + ppriv->prio = i;
  9239. +
  9240. + ppriv->net_dev.dev = *dev;
  9241. + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
  9242. + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
  9243. + DPAA2_CAAM_NAPI_WEIGHT);
  9244. + if (++i == priv->num_pairs)
  9245. + break;
  9246. + }
  9247. +
  9248. + return 0;
  9249. +
  9250. +err_get_rx_queue:
  9251. + dpaa2_dpseci_congestion_free(priv);
  9252. +err_get_vers:
  9253. + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
  9254. +err_open:
  9255. + return err;
  9256. +}
  9257. +
  9258. +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
  9259. +{
  9260. + struct device *dev = priv->dev;
  9261. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  9262. + struct dpaa2_caam_priv_per_cpu *ppriv;
  9263. + int err, i;
  9264. +
  9265. + for (i = 0; i < priv->num_pairs; i++) {
  9266. + ppriv = per_cpu_ptr(priv->ppriv, i);
  9267. + napi_enable(&ppriv->napi);
  9268. + }
  9269. +
  9270. + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
  9271. + if (err) {
  9272. + dev_err(dev, "dpseci_enable() failed\n");
  9273. + return err;
  9274. + }
  9275. +
  9276. + dev_info(dev, "DPSECI version %d.%d\n",
  9277. + priv->major_ver,
  9278. + priv->minor_ver);
  9279. +
  9280. + return 0;
  9281. +}
  9282. +
  9283. +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
  9284. +{
  9285. + struct device *dev = priv->dev;
  9286. + struct dpaa2_caam_priv_per_cpu *ppriv;
  9287. + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
  9288. + int i, err = 0, enabled;
  9289. +
  9290. + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
  9291. + if (err) {
  9292. + dev_err(dev, "dpseci_disable() failed\n");
  9293. + return err;
  9294. + }
  9295. +
  9296. + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
  9297. + if (err) {
  9298. + dev_err(dev, "dpseci_is_enabled() failed\n");
  9299. + return err;
  9300. + }
  9301. +
  9302. + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
  9303. +
  9304. + for (i = 0; i < priv->num_pairs; i++) {
  9305. + ppriv = per_cpu_ptr(priv->ppriv, i);
  9306. + napi_disable(&ppriv->napi);
  9307. + netif_napi_del(&ppriv->napi);
  9308. + }
  9309. +
  9310. + return 0;
  9311. +}
  9312. +
  9313. +static struct list_head hash_list;
  9314. +
  9315. +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
  9316. +{
  9317. + struct device *dev;
  9318. + struct dpaa2_caam_priv *priv;
  9319. + int i, err = 0;
  9320. + bool registered = false;
  9321. +
  9322. + /*
  9323. + * There is no way to get CAAM endianness - there is no direct register
  9324. + * space access and MC f/w does not provide this attribute.
  9325. + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
  9326. + * property.
  9327. + */
  9328. + caam_little_end = true;
  9329. +
  9330. + caam_imx = false;
  9331. +
  9332. + dev = &dpseci_dev->dev;
  9333. +
  9334. + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  9335. + if (!priv)
  9336. + return -ENOMEM;
  9337. +
  9338. + dev_set_drvdata(dev, priv);
  9339. +
  9340. + priv->domain = iommu_get_domain_for_dev(dev);
  9341. +
  9342. + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
  9343. + 0, SLAB_CACHE_DMA, NULL);
  9344. + if (!qi_cache) {
  9345. + dev_err(dev, "Can't allocate SEC cache\n");
  9346. + err = -ENOMEM;
  9347. + goto err_qicache;
  9348. + }
  9349. +
  9350. + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
  9351. + if (err) {
  9352. + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
  9353. + goto err_dma_mask;
  9354. + }
  9355. +
  9356. + /* Obtain a MC portal */
  9357. + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
  9358. + if (err) {
  9359. + if (err == -ENXIO)
  9360. + err = -EPROBE_DEFER;
  9361. + else
  9362. + dev_err(dev, "MC portal allocation failed\n");
  9363. +
  9364. + goto err_dma_mask;
  9365. + }
  9366. +
  9367. + priv->ppriv = alloc_percpu(*priv->ppriv);
  9368. + if (!priv->ppriv) {
  9369. + dev_err(dev, "alloc_percpu() failed\n");
 + err = -ENOMEM;
  9370. + goto err_alloc_ppriv;
  9371. + }
  9372. +
  9373. + /* DPSECI initialization */
  9374. + err = dpaa2_dpseci_setup(dpseci_dev);
  9375. + if (err < 0) {
  9376. + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
  9377. + goto err_dpseci_setup;
  9378. + }
  9379. +
  9380. + /* DPIO */
  9381. + err = dpaa2_dpseci_dpio_setup(priv);
  9382. + if (err) {
  9383. + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
  9384. + goto err_dpio_setup;
  9385. + }
  9386. +
  9387. + /* DPSECI binding to DPIO */
  9388. + err = dpaa2_dpseci_bind(priv);
  9389. + if (err) {
  9390. + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
  9391. + goto err_bind;
  9392. + }
  9393. +
  9394. + /* DPSECI enable */
  9395. + err = dpaa2_dpseci_enable(priv);
  9396. + if (err) {
  9397. + dev_err(dev, "dpaa2_dpseci_enable() failed\n");
  9398. + goto err_bind;
  9399. + }
  9400. +
  9401. + /* register crypto algorithms the device supports */
  9402. + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  9403. + struct caam_skcipher_alg *t_alg = driver_algs + i;
  9404. + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
  9405. +
  9406. + /* Skip DES algorithms if not supported by device */
  9407. + if (!priv->sec_attr.des_acc_num &&
  9408. + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
  9409. + (alg_sel == OP_ALG_ALGSEL_DES)))
  9410. + continue;
  9411. +
  9412. + /* Skip AES algorithms if not supported by device */
  9413. + if (!priv->sec_attr.aes_acc_num &&
  9414. + (alg_sel == OP_ALG_ALGSEL_AES))
  9415. + continue;
  9416. +
  9417. + t_alg->caam.dev = dev;
  9418. + caam_skcipher_alg_init(t_alg);
  9419. +
  9420. + err = crypto_register_skcipher(&t_alg->skcipher);
  9421. + if (err) {
  9422. + dev_warn(dev, "%s alg registration failed: %d\n",
  9423. + t_alg->skcipher.base.cra_driver_name, err);
  9424. + continue;
  9425. + }
  9426. +
  9427. + t_alg->registered = true;
  9428. + registered = true;
  9429. + }
  9430. +
  9431. + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  9432. + struct caam_aead_alg *t_alg = driver_aeads + i;
  9433. + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
  9434. + OP_ALG_ALGSEL_MASK;
  9435. + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
  9436. + OP_ALG_ALGSEL_MASK;
  9437. +
  9438. + /* Skip DES algorithms if not supported by device */
  9439. + if (!priv->sec_attr.des_acc_num &&
  9440. + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
  9441. + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
  9442. + continue;
  9443. +
  9444. + /* Skip AES algorithms if not supported by device */
  9445. + if (!priv->sec_attr.aes_acc_num &&
  9446. + (c1_alg_sel == OP_ALG_ALGSEL_AES))
  9447. + continue;
  9448. +
  9449. + /*
  9450. + * Skip algorithms requiring message digests
  9451. + * if MD not supported by device.
  9452. + */
  9453. + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
  9454. + continue;
  9455. +
  9456. + t_alg->caam.dev = dev;
  9457. + caam_aead_alg_init(t_alg);
  9458. +
  9459. + err = crypto_register_aead(&t_alg->aead);
  9460. + if (err) {
  9461. + dev_warn(dev, "%s alg registration failed: %d\n",
  9462. + t_alg->aead.base.cra_driver_name, err);
  9463. + continue;
  9464. + }
  9465. +
  9466. + t_alg->registered = true;
  9467. + registered = true;
  9468. + }
  9469. + if (registered)
  9470. + dev_info(dev, "algorithms registered in /proc/crypto\n");
  9471. +
  9472. + /* register hash algorithms the device supports */
  9473. + INIT_LIST_HEAD(&hash_list);
  9474. +
  9475. + /*
  9476. + * Skip registration of any hashing algorithms if MD block
  9477. + * is not present.
  9478. + */
  9479. + if (!priv->sec_attr.md_acc_num)
  9480. + return 0;
  9481. +
  9482. + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
  9483. + struct caam_hash_alg *t_alg;
  9484. + struct caam_hash_template *alg = driver_hash + i;
  9485. +
  9486. + /* register hmac version */
  9487. + t_alg = caam_hash_alloc(dev, alg, true);
  9488. + if (IS_ERR(t_alg)) {
  9489. + err = PTR_ERR(t_alg);
  9490. + dev_warn(dev, "%s hash alg allocation failed: %d\n",
  9491. + alg->driver_name, err);
  9492. + continue;
  9493. + }
  9494. +
  9495. + err = crypto_register_ahash(&t_alg->ahash_alg);
  9496. + if (err) {
  9497. + dev_warn(dev, "%s alg registration failed: %d\n",
  9498. + t_alg->ahash_alg.halg.base.cra_driver_name,
  9499. + err);
  9500. + kfree(t_alg);
  9501. + } else {
  9502. + list_add_tail(&t_alg->entry, &hash_list);
  9503. + }
  9504. +
  9505. + /* register unkeyed version */
  9506. + t_alg = caam_hash_alloc(dev, alg, false);
  9507. + if (IS_ERR(t_alg)) {
  9508. + err = PTR_ERR(t_alg);
  9509. + dev_warn(dev, "%s alg allocation failed: %d\n",
  9510. + alg->driver_name, err);
  9511. + continue;
  9512. + }
  9513. +
  9514. + err = crypto_register_ahash(&t_alg->ahash_alg);
  9515. + if (err) {
  9516. + dev_warn(dev, "%s alg registration failed: %d\n",
  9517. + t_alg->ahash_alg.halg.base.cra_driver_name,
  9518. + err);
  9519. + kfree(t_alg);
  9520. + } else {
  9521. + list_add_tail(&t_alg->entry, &hash_list);
  9522. + }
  9523. + }
  9524. + if (!list_empty(&hash_list))
  9525. + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
  9526. +
  9527. + return err;
  9528. +
  9529. +err_bind:
  9530. + dpaa2_dpseci_dpio_free(priv);
  9531. +err_dpio_setup:
  9532. + dpaa2_dpseci_free(priv);
  9533. +err_dpseci_setup:
  9534. + free_percpu(priv->ppriv);
  9535. +err_alloc_ppriv:
  9536. + fsl_mc_portal_free(priv->mc_io);
  9537. +err_dma_mask:
  9538. + kmem_cache_destroy(qi_cache);
  9539. +err_qicache:
  9540. + dev_set_drvdata(dev, NULL);
  9541. +
  9542. + return err;
  9543. +}
  9544. +
  9545. +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
  9546. +{
  9547. + struct device *dev;
  9548. + struct dpaa2_caam_priv *priv;
  9549. + int i;
  9550. +
  9551. + dev = &ls_dev->dev;
  9552. + priv = dev_get_drvdata(dev);
  9553. +
  9554. + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  9555. + struct caam_aead_alg *t_alg = driver_aeads + i;
  9556. +
  9557. + if (t_alg->registered)
  9558. + crypto_unregister_aead(&t_alg->aead);
  9559. + }
  9560. +
  9561. + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  9562. + struct caam_skcipher_alg *t_alg = driver_algs + i;
  9563. +
  9564. + if (t_alg->registered)
  9565. + crypto_unregister_skcipher(&t_alg->skcipher);
  9566. + }
  9567. +
  9568. + if (hash_list.next) {
  9569. + struct caam_hash_alg *t_hash_alg, *p;
  9570. +
  9571. + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
  9572. + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
  9573. + list_del(&t_hash_alg->entry);
  9574. + kfree(t_hash_alg);
  9575. + }
  9576. + }
  9577. +
  9578. + dpaa2_dpseci_disable(priv);
  9579. + dpaa2_dpseci_dpio_free(priv);
  9580. + dpaa2_dpseci_free(priv);
  9581. + free_percpu(priv->ppriv);
  9582. + fsl_mc_portal_free(priv->mc_io);
  9583. + dev_set_drvdata(dev, NULL);
  9584. + kmem_cache_destroy(qi_cache);
  9585. +
  9586. + return 0;
  9587. +}
  9588. +
  9589. +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
  9590. +{
  9591. + struct dpaa2_fd fd;
  9592. + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
  9593. + int err = 0, i, id;
  9594. +
  9595. + if (IS_ERR(req))
  9596. + return PTR_ERR(req);
  9597. +
  9598. + if (priv->cscn_mem) {
  9599. + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
  9600. + DPAA2_CSCN_SIZE,
  9601. + DMA_FROM_DEVICE);
  9602. + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
  9603. + dev_dbg_ratelimited(dev, "Dropping request\n");
  9604. + return -EBUSY;
  9605. + }
  9606. + }
  9607. +
  9608. + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
  9609. +
  9610. + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
  9611. + DMA_BIDIRECTIONAL);
  9612. + if (dma_mapping_error(dev, req->fd_flt_dma)) {
  9613. + dev_err(dev, "DMA mapping error for QI enqueue request\n");
  9614. + goto err_out;
  9615. + }
  9616. +
  9617. + memset(&fd, 0, sizeof(fd));
  9618. + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
  9619. + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
  9620. + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
  9621. + dpaa2_fd_set_flc(&fd, req->flc_dma);
  9622. +
  9623. + /*
  9624. + * There is no guarantee that preemption is disabled here,
  9625. + * thus take action.
  9626. + */
  9627. + preempt_disable();
  9628. + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
  9629. + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
  9630. + err = dpaa2_io_service_enqueue_fq(NULL,
  9631. + priv->tx_queue_attr[id].fqid,
  9632. + &fd);
  9633. + if (err != -EBUSY)
  9634. + break;
  9635. + }
  9636. + preempt_enable();
  9637. +
  9638. + if (unlikely(err < 0)) {
  9639. + dev_err(dev, "Error enqueuing frame: %d\n", err);
  9640. + goto err_out;
  9641. + }
  9642. +
  9643. + return -EINPROGRESS;
  9644. +
  9645. +err_out:
  9646. + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
  9647. + DMA_BIDIRECTIONAL);
  9648. + return -EIO;
  9649. +}
  9650. +EXPORT_SYMBOL(dpaa2_caam_enqueue);
  9651. +
  9652. +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
  9653. + {
  9654. + .vendor = FSL_MC_VENDOR_FREESCALE,
  9655. + .obj_type = "dpseci",
  9656. + },
  9657. + { .vendor = 0x0 }
  9658. +};
  9659. +
  9660. +static struct fsl_mc_driver dpaa2_caam_driver = {
  9661. + .driver = {
  9662. + .name = KBUILD_MODNAME,
  9663. + .owner = THIS_MODULE,
  9664. + },
  9665. + .probe = dpaa2_caam_probe,
  9666. + .remove = dpaa2_caam_remove,
  9667. + .match_id_table = dpaa2_caam_match_id_table
  9668. +};
  9669. +
  9670. +MODULE_LICENSE("Dual BSD/GPL");
  9671. +MODULE_AUTHOR("Freescale Semiconductor, Inc");
  9672. +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
  9673. +
  9674. +module_fsl_mc_driver(dpaa2_caam_driver);
  9675. --- /dev/null
  9676. +++ b/drivers/crypto/caam/caamalg_qi2.h
  9677. @@ -0,0 +1,274 @@
  9678. +/*
  9679. + * Copyright 2015-2016 Freescale Semiconductor Inc.
  9680. + * Copyright 2017 NXP
  9681. + *
  9682. + * Redistribution and use in source and binary forms, with or without
  9683. + * modification, are permitted provided that the following conditions are met:
  9684. + * * Redistributions of source code must retain the above copyright
  9685. + * notice, this list of conditions and the following disclaimer.
  9686. + * * Redistributions in binary form must reproduce the above copyright
  9687. + * notice, this list of conditions and the following disclaimer in the
  9688. + * documentation and/or other materials provided with the distribution.
  9689. + * * Neither the names of the above-listed copyright holders nor the
  9690. + * names of any contributors may be used to endorse or promote products
  9691. + * derived from this software without specific prior written permission.
  9692. + *
  9693. + *
  9694. + * ALTERNATIVELY, this software may be distributed under the terms of the
  9695. + * GNU General Public License ("GPL") as published by the Free Software
  9696. + * Foundation, either version 2 of that License or (at your option) any
  9697. + * later version.
  9698. + *
  9699. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  9700. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  9701. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  9702. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  9703. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  9704. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  9705. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  9706. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  9707. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  9708. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  9709. + * POSSIBILITY OF SUCH DAMAGE.
  9710. + */
  9711. +
  9712. +#ifndef _CAAMALG_QI2_H_
  9713. +#define _CAAMALG_QI2_H_
  9714. +
  9715. +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
  9716. +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
  9717. +#include <linux/threads.h>
  9718. +#include "dpseci.h"
  9719. +#include "desc_constr.h"
  9720. +
  9721. +#define DPAA2_CAAM_STORE_SIZE 16
  9722. +/* NAPI weight *must* be a multiple of the store size. */
  9723. +#define DPAA2_CAAM_NAPI_WEIGHT 64
  9724. +
  9725. +/* The congestion entry threshold was chosen so that on LS2088
  9726. + * we support the maximum throughput for the available memory
  9727. + */
  9728. +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
  9729. +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
  9730. +
  9731. +/**
  9732. + * dpaa2_caam_priv - driver private data
  9733. + * @dpsec_id: DPSECI object unique ID
  9734. + * @major_ver: DPSECI major version
  9735. + * @minor_ver: DPSECI minor version
  9736. + * @dpseci_attr: DPSECI attributes
  9737. + * @sec_attr: SEC engine attributes
  9738. + * @rx_queue_attr: array of Rx queue attributes
  9739. + * @tx_queue_attr: array of Tx queue attributes
  9740. + * @cscn_mem: pointer to memory region containing the
  9741. + * dpaa2_cscn struct; its size is larger than
  9742. + * sizeof(struct dpaa2_cscn) to accommodate alignment
  9743. + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
  9744. + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
  9745. + * @cscn_dma: dma address used by the QMAN to write CSCN messages
  9746. + * @dev: device associated with the DPSECI object
  9747. + * @mc_io: pointer to MC portal's I/O object
  9748. + * @domain: IOMMU domain
  9749. + * @ppriv: per CPU pointers to private data
  9750. + */
  9751. +struct dpaa2_caam_priv {
  9752. + int dpsec_id;
  9753. +
  9754. + u16 major_ver;
  9755. + u16 minor_ver;
  9756. +
  9757. + struct dpseci_attr dpseci_attr;
  9758. + struct dpseci_sec_attr sec_attr;
  9759. + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
  9760. + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
  9761. + int num_pairs;
  9762. +
  9763. + /* congestion */
  9764. + void *cscn_mem;
  9765. + void *cscn_mem_aligned;
  9766. + dma_addr_t cscn_dma;
  9767. +
  9768. + struct device *dev;
  9769. + struct fsl_mc_io *mc_io;
  9770. + struct iommu_domain *domain;
  9771. +
  9772. + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
  9773. +};
  9774. +
  9775. +/**
  9776. + * dpaa2_caam_priv_per_cpu - per CPU private data
  9777. + * @napi: napi structure
  9778. + * @net_dev: netdev used by napi
  9779. + * @req_fqid: (virtual) request (Tx / enqueue) FQID
  9780. + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
  9781. + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
  9782. + * @nctx: notification context of response FQ
  9783. + * @store: where dequeued frames are stored
  9784. + * @priv: backpointer to dpaa2_caam_priv
  9785. + */
  9786. +struct dpaa2_caam_priv_per_cpu {
  9787. + struct napi_struct napi;
  9788. + struct net_device net_dev;
  9789. + int req_fqid;
  9790. + int rsp_fqid;
  9791. + int prio;
  9792. + struct dpaa2_io_notification_ctx nctx;
  9793. + struct dpaa2_io_store *store;
  9794. + struct dpaa2_caam_priv *priv;
  9795. +};
  9796. +
  9797. +/*
  9798. + * The CAAM QI hardware constructs a job descriptor which points
  9799. + * to the shared descriptor (whose address CAAM takes from the FQ's context_a).
  9800. + * When the job descriptor is executed by a DECO, the whole job
  9801. + * descriptor together with the shared descriptor gets loaded into the
  9802. + * DECO buffer, which is 64 words long (each word 32 bits).
  9803. + *
  9804. + * The job descriptor constructed by QI hardware has layout:
  9805. + *
  9806. + * HEADER (1 word)
  9807. + * Shdesc ptr (1 or 2 words)
  9808. + * SEQ_OUT_PTR (1 word)
  9809. + * Out ptr (1 or 2 words)
  9810. + * Out length (1 word)
  9811. + * SEQ_IN_PTR (1 word)
  9812. + * In ptr (1 or 2 words)
  9813. + * In length (1 word)
  9814. + *
  9815. + * The shdesc ptr is used to fetch shared descriptor contents
  9816. + * into deco buffer.
  9817. + *
  9818. + * Apart from the shdesc contents, the total number of words that
  9819. + * get loaded into the DECO buffer is 8 or 11. The remaining words
  9820. + * in the DECO buffer can be used for storing the shared descriptor.
  9821. + */
  9822. +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
  9823. +
  9824. +/* Length of a single buffer in the QI driver memory cache */
  9825. +#define CAAM_QI_MEMCACHE_SIZE 512
  9826. +
  9827. +/*
  9828. + * aead_edesc - s/w-extended aead descriptor
  9829. + * @src_nents: number of segments in input scatterlist
  9830. + * @dst_nents: number of segments in output scatterlist
  9831. + * @iv_dma: dma address of iv for checking continuity and link table
  9832. + * @qm_sg_bytes: length of dma mapped h/w link table
  9833. + * @qm_sg_dma: bus physical mapped address of h/w link table
  9834. + * @assoclen: associated data length, in CAAM endianness
  9835. + * @assoclen_dma: bus physical mapped address of req->assoclen
  9836. + * @sgt: the h/w link table, followed by IV
  9837. + */
  9838. +struct aead_edesc {
  9839. + int src_nents;
  9840. + int dst_nents;
  9841. + dma_addr_t iv_dma;
  9842. + int qm_sg_bytes;
  9843. + dma_addr_t qm_sg_dma;
  9844. + unsigned int assoclen;
  9845. + dma_addr_t assoclen_dma;
  9846. + struct dpaa2_sg_entry sgt[0];
  9847. +};
  9848. +
  9849. +/*
  9850. + * tls_edesc - s/w-extended tls descriptor
  9851. + * @src_nents: number of segments in input scatterlist
  9852. + * @dst_nents: number of segments in output scatterlist
  9853. + * @iv_dma: dma address of iv for checking continuity and link table
  9854. + * @qm_sg_bytes: length of dma mapped h/w link table
  9855. + * @qm_sg_dma: bus physical mapped address of h/w link table
  9856. + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
  9857. + * @dst: pointer to output scatterlist, useful for unmapping
  9858. + * @sgt: the h/w link table, followed by IV
  9859. + */
  9860. +struct tls_edesc {
  9861. + int src_nents;
  9862. + int dst_nents;
  9863. + dma_addr_t iv_dma;
  9864. + int qm_sg_bytes;
  9865. + dma_addr_t qm_sg_dma;
  9866. + struct scatterlist tmp[2];
  9867. + struct scatterlist *dst;
  9868. + struct dpaa2_sg_entry sgt[0];
  9869. +};
  9870. +
  9871. +/*
  9872. + * skcipher_edesc - s/w-extended skcipher descriptor
  9873. + * @src_nents: number of segments in input scatterlist
  9874. + * @dst_nents: number of segments in output scatterlist
  9875. + * @iv_dma: dma address of iv for checking continuity and link table
  9876. + * @qm_sg_bytes: length of dma mapped qm_sg space
  9877. + * @qm_sg_dma: I/O virtual address of h/w link table
  9878. + * @sgt: the h/w link table, followed by IV
  9879. + */
  9880. +struct skcipher_edesc {
  9881. + int src_nents;
  9882. + int dst_nents;
  9883. + dma_addr_t iv_dma;
  9884. + int qm_sg_bytes;
  9885. + dma_addr_t qm_sg_dma;
  9886. + struct dpaa2_sg_entry sgt[0];
  9887. +};
  9888. +
  9889. +/*
  9890. + * ahash_edesc - s/w-extended ahash descriptor
  9891. + * @dst_dma: I/O virtual address of req->result
  9892. + * @qm_sg_dma: I/O virtual address of h/w link table
  9893. + * @src_nents: number of segments in input scatterlist
  9894. + * @qm_sg_bytes: length of dma mapped qm_sg space
  9895. + * @sgt: pointer to h/w link table
  9896. + */
  9897. +struct ahash_edesc {
  9898. + dma_addr_t dst_dma;
  9899. + dma_addr_t qm_sg_dma;
  9900. + int src_nents;
  9901. + int qm_sg_bytes;
  9902. + struct dpaa2_sg_entry sgt[0];
  9903. +};
  9904. +
  9905. +/**
  9906. + * caam_flc - Flow Context (FLC)
  9907. + * @flc: Flow Context options
  9908. + * @sh_desc: Shared Descriptor
  9909. + */
  9910. +struct caam_flc {
  9911. + u32 flc[16];
  9912. + u32 sh_desc[MAX_SDLEN];
  9913. +} ____cacheline_aligned;
  9914. +
  9915. +enum optype {
  9916. + ENCRYPT = 0,
  9917. + DECRYPT,
  9918. + NUM_OP
  9919. +};
  9920. +
  9921. +/**
  9922. + * caam_request - the request structure the driver application should fill when
  9923. + * submitting a job to the driver.
  9924. + * @fd_flt: Frame list table defining input and output
  9925. + * fd_flt[0] - FLE pointing to output buffer
  9926. + * fd_flt[1] - FLE pointing to input buffer
  9927. + * @fd_flt_dma: DMA address for the frame list table
  9928. + * @flc: Flow Context
  9929. + * @flc_dma: I/O virtual address of Flow Context
  9930. + * @cbk: Callback function to invoke when job is completed
  9931. + * @ctx: arbitrary context attached to the request by the application
  9932. + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
  9933. + */
  9934. +struct caam_request {
  9935. + struct dpaa2_fl_entry fd_flt[2];
  9936. + dma_addr_t fd_flt_dma;
  9937. + struct caam_flc *flc;
  9938. + dma_addr_t flc_dma;
  9939. + void (*cbk)(void *ctx, u32 err);
  9940. + void *ctx;
  9941. + void *edesc;
  9942. +};
  9943. +
  9944. +/**
  9945. + * dpaa2_caam_enqueue() - enqueue a crypto request
  9946. + * @dev: device associated with the DPSECI object
  9947. + * @req: pointer to caam_request
  9948. + */
  9949. +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
  9950. +
  9951. +#endif /* _CAAMALG_QI2_H_ */
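
As a rough illustration of the contract documented above (not part of the patch; my_submit() and my_done_cb() are hypothetical names), a caller could drive dpaa2_caam_enqueue() along these lines:

/*
 * Hypothetical usage sketch: assumes "caamalg_qi2.h" is included and that
 * the flow context (flc) has already been built and DMA-mapped.
 */
static void my_done_cb(void *cb_ctx, u32 status)
{
	/* status is the CAAM completion code from the response FQ; 0 = OK */
}

static int my_submit(struct device *dev, struct caam_request *req,
		     struct caam_flc *flc, dma_addr_t flc_dma, void *cb_ctx)
{
	int ret;

	/*
	 * fd_flt[0] (output FLE) and fd_flt[1] (input FLE) are assumed to be
	 * filled by the caller; dpaa2_caam_enqueue() maps fd_flt itself and
	 * copies flc_dma into the input FLE and the frame descriptor.
	 */
	req->flc = flc;
	req->flc_dma = flc_dma;
	req->cbk = my_done_cb;
	req->ctx = cb_ctx;

	ret = dpaa2_caam_enqueue(dev, req);
	/* -EINPROGRESS: accepted; -EBUSY: congestion; -EIO: enqueue failed */
	return ret == -EINPROGRESS ? 0 : ret;
}
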
  9952. --- a/drivers/crypto/caam/caamhash.c
  9953. +++ b/drivers/crypto/caam/caamhash.c
  9954. @@ -62,6 +62,7 @@
  9955. #include "error.h"
  9956. #include "sg_sw_sec4.h"
  9957. #include "key_gen.h"
  9958. +#include "caamhash_desc.h"
  9959. #define CAAM_CRA_PRIORITY 3000
  9960. @@ -71,14 +72,6 @@
  9961. #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
  9962. #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
  9963. -/* length of descriptors text */
  9964. -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
  9965. -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
  9966. -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
  9967. -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
  9968. -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
  9969. -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
  9970. -
  9971. #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
  9972. CAAM_MAX_HASH_KEY_SIZE)
  9973. #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
  9974. @@ -107,6 +100,7 @@ struct caam_hash_ctx {
  9975. dma_addr_t sh_desc_update_first_dma;
  9976. dma_addr_t sh_desc_fin_dma;
  9977. dma_addr_t sh_desc_digest_dma;
  9978. + enum dma_data_direction dir;
  9979. struct device *jrdev;
  9980. u8 key[CAAM_MAX_HASH_KEY_SIZE];
  9981. int ctx_len;
  9982. @@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str
  9983. }
  9984. /* Map state->caam_ctx, and add it to link table */
  9985. -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
  9986. +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
  9987. struct caam_hash_state *state, int ctx_len,
  9988. struct sec4_sg_entry *sec4_sg, u32 flag)
  9989. {
  9990. @@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
  9991. return 0;
  9992. }
  9993. -/*
  9994. - * For ahash update, final and finup (import_ctx = true)
  9995. - * import context, read and write to seqout
  9996. - * For ahash firsts and digest (import_ctx = false)
  9997. - * read and write to seqout
  9998. - */
  9999. -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
  10000. - struct caam_hash_ctx *ctx, bool import_ctx)
  10001. -{
  10002. - u32 op = ctx->adata.algtype;
  10003. - u32 *skip_key_load;
  10004. -
  10005. - init_sh_desc(desc, HDR_SHARE_SERIAL);
  10006. -
  10007. - /* Append key if it has been set; ahash update excluded */
  10008. - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
  10009. - /* Skip key loading if already shared */
  10010. - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  10011. - JUMP_COND_SHRD);
  10012. -
  10013. - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
  10014. - ctx->adata.keylen, CLASS_2 |
  10015. - KEY_DEST_MDHA_SPLIT | KEY_ENC);
  10016. -
  10017. - set_jump_tgt_here(desc, skip_key_load);
  10018. -
  10019. - op |= OP_ALG_AAI_HMAC_PRECOMP;
  10020. - }
  10021. -
  10022. - /* If needed, import context from software */
  10023. - if (import_ctx)
  10024. - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
  10025. - LDST_SRCDST_BYTE_CONTEXT);
  10026. -
  10027. - /* Class 2 operation */
  10028. - append_operation(desc, op | state | OP_ALG_ENCRYPT);
  10029. -
  10030. - /*
  10031. - * Load from buf and/or src and write to req->result or state->context
  10032. - * Calculate remaining bytes to read
  10033. - */
  10034. - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  10035. - /* Read remaining bytes */
  10036. - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
  10037. - FIFOLD_TYPE_MSG | KEY_VLF);
  10038. - /* Store class2 context bytes */
  10039. - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
  10040. - LDST_SRCDST_BYTE_CONTEXT);
  10041. -}
  10042. -
  10043. static int ahash_set_sh_desc(struct crypto_ahash *ahash)
  10044. {
  10045. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  10046. int digestsize = crypto_ahash_digestsize(ahash);
  10047. struct device *jrdev = ctx->jrdev;
  10048. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  10049. u32 *desc;
  10050. + ctx->adata.key_virt = ctx->key;
  10051. +
  10052. /* ahash_update shared descriptor */
  10053. desc = ctx->sh_desc_update;
  10054. - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
  10055. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
  10056. + ctx->ctx_len, true, ctrlpriv->era);
  10057. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
  10058. - desc_bytes(desc), DMA_TO_DEVICE);
  10059. + desc_bytes(desc), ctx->dir);
  10060. #ifdef DEBUG
  10061. print_hex_dump(KERN_ERR,
  10062. "ahash update shdesc@"__stringify(__LINE__)": ",
  10063. @@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp
  10064. /* ahash_update_first shared descriptor */
  10065. desc = ctx->sh_desc_update_first;
  10066. - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
  10067. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
  10068. + ctx->ctx_len, false, ctrlpriv->era);
  10069. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
  10070. - desc_bytes(desc), DMA_TO_DEVICE);
  10071. + desc_bytes(desc), ctx->dir);
  10072. #ifdef DEBUG
  10073. print_hex_dump(KERN_ERR,
  10074. "ahash update first shdesc@"__stringify(__LINE__)": ",
  10075. @@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp
  10076. /* ahash_final shared descriptor */
  10077. desc = ctx->sh_desc_fin;
  10078. - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
  10079. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
  10080. + ctx->ctx_len, true, ctrlpriv->era);
  10081. dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
  10082. - desc_bytes(desc), DMA_TO_DEVICE);
  10083. + desc_bytes(desc), ctx->dir);
  10084. #ifdef DEBUG
  10085. print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
  10086. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  10087. @@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp
  10088. /* ahash_digest shared descriptor */
  10089. desc = ctx->sh_desc_digest;
  10090. - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
  10091. + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
  10092. + ctx->ctx_len, false, ctrlpriv->era);
  10093. dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
  10094. - desc_bytes(desc), DMA_TO_DEVICE);
  10095. + desc_bytes(desc), ctx->dir);
  10096. #ifdef DEBUG
  10097. print_hex_dump(KERN_ERR,
  10098. "ahash digest shdesc@"__stringify(__LINE__)": ",
  10099. @@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah
  10100. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  10101. int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
  10102. int digestsize = crypto_ahash_digestsize(ahash);
  10103. + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  10104. int ret;
  10105. u8 *hashed_key = NULL;
  10106. @@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah
  10107. key = hashed_key;
  10108. }
  10109. - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
  10110. - CAAM_MAX_HASH_KEY_SIZE);
  10111. - if (ret)
  10112. - goto bad_free_key;
  10113. + /*
  10114. + * If DKP is supported, use it in the shared descriptor to generate
  10115. + * the split key.
  10116. + */
  10117. + if (ctrlpriv->era >= 6) {
  10118. + ctx->adata.key_inline = true;
  10119. + ctx->adata.keylen = keylen;
  10120. + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  10121. + OP_ALG_ALGSEL_MASK);
  10122. -#ifdef DEBUG
  10123. - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
  10124. - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  10125. - ctx->adata.keylen_pad, 1);
  10126. -#endif
  10127. + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
  10128. + goto bad_free_key;
  10129. +
  10130. + memcpy(ctx->key, key, keylen);
  10131. + } else {
  10132. + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
  10133. + keylen, CAAM_MAX_HASH_KEY_SIZE);
  10134. + if (ret)
  10135. + goto bad_free_key;
  10136. + }
  10137. kfree(hashed_key);
  10138. return ahash_set_sh_desc(ahash);
  10139. @@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash
  10140. edesc->src_nents = src_nents;
  10141. edesc->sec4_sg_bytes = sec4_sg_bytes;
  10142. - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
  10143. + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  10144. edesc->sec4_sg, DMA_BIDIRECTIONAL);
  10145. if (ret)
  10146. goto unmap_ctx;
  10147. @@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_
  10148. desc = edesc->hw_desc;
  10149. edesc->sec4_sg_bytes = sec4_sg_bytes;
  10150. - edesc->src_nents = 0;
  10151. - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
  10152. + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  10153. edesc->sec4_sg, DMA_TO_DEVICE);
  10154. if (ret)
  10155. goto unmap_ctx;
  10156. @@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_
  10157. edesc->src_nents = src_nents;
  10158. - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
  10159. + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  10160. edesc->sec4_sg, DMA_TO_DEVICE);
  10161. if (ret)
  10162. goto unmap_ctx;
  10163. @@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha
  10164. dev_err(jrdev, "unable to map dst\n");
  10165. goto unmap;
  10166. }
  10167. - edesc->src_nents = 0;
  10168. #ifdef DEBUG
  10169. print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
  10170. @@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah
  10171. edesc->src_nents = src_nents;
  10172. edesc->sec4_sg_bytes = sec4_sg_bytes;
  10173. - edesc->dst_dma = 0;
  10174. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
  10175. if (ret)
  10176. @@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha
  10177. }
  10178. edesc->src_nents = src_nents;
  10179. - edesc->dst_dma = 0;
  10180. ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
  10181. to_hash);
  10182. @@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry
  10183. HASH_MSG_LEN + 64,
  10184. HASH_MSG_LEN + SHA512_DIGEST_SIZE };
  10185. dma_addr_t dma_addr;
  10186. + struct caam_drv_private *priv;
  10187. /*
  10188. * Get a Job ring from Job Ring driver to ensure in-order
  10189. @@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry
  10190. return PTR_ERR(ctx->jrdev);
  10191. }
  10192. + priv = dev_get_drvdata(ctx->jrdev->parent);
  10193. + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
  10194. +
  10195. dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
  10196. offsetof(struct caam_hash_ctx,
  10197. sh_desc_update_dma),
  10198. - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
  10199. + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
  10200. if (dma_mapping_error(ctx->jrdev, dma_addr)) {
  10201. dev_err(ctx->jrdev, "unable to map shared descriptors\n");
  10202. caam_jr_free(ctx->jrdev);
  10203. @@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr
  10204. dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
  10205. offsetof(struct caam_hash_ctx,
  10206. sh_desc_update_dma),
  10207. - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
  10208. + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
  10209. caam_jr_free(ctx->jrdev);
  10210. }
  10211. --- /dev/null
  10212. +++ b/drivers/crypto/caam/caamhash_desc.c
  10213. @@ -0,0 +1,108 @@
  10214. +/*
  10215. + * Shared descriptors for ahash algorithms
  10216. + *
  10217. + * Copyright 2017 NXP
  10218. + *
  10219. + * Redistribution and use in source and binary forms, with or without
  10220. + * modification, are permitted provided that the following conditions are met:
  10221. + * * Redistributions of source code must retain the above copyright
  10222. + * notice, this list of conditions and the following disclaimer.
  10223. + * * Redistributions in binary form must reproduce the above copyright
  10224. + * notice, this list of conditions and the following disclaimer in the
  10225. + * documentation and/or other materials provided with the distribution.
  10226. + * * Neither the names of the above-listed copyright holders nor the
  10227. + * names of any contributors may be used to endorse or promote products
  10228. + * derived from this software without specific prior written permission.
  10229. + *
  10230. + *
  10231. + * ALTERNATIVELY, this software may be distributed under the terms of the
  10232. + * GNU General Public License ("GPL") as published by the Free Software
  10233. + * Foundation, either version 2 of that License or (at your option) any
  10234. + * later version.
  10235. + *
  10236. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  10237. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  10238. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  10239. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  10240. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  10241. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  10242. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  10243. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  10244. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  10245. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  10246. + * POSSIBILITY OF SUCH DAMAGE.
  10247. + */
  10248. +
  10249. +#include "compat.h"
  10250. +#include "desc_constr.h"
  10251. +#include "caamhash_desc.h"
  10252. +
  10253. +/**
  10254. + * cnstr_shdsc_ahash - ahash shared descriptor
  10255. + * @desc: pointer to buffer used for descriptor construction
  10256. + * @adata: pointer to authentication transform definitions.
  10257. + * A split key is required for SEC Era < 6; the size of the split key
  10258. + * is specified in this case.
  10259. + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
  10260. + * SHA256, SHA384, SHA512}.
  10261. + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
  10262. + * @digestsize: algorithm's digest size
  10263. + * @ctx_len: size of Context Register
  10264. + * @import_ctx: true if previous Context Register needs to be restored
  10265. + * must be true for ahash update and final
  10266. + * must be false for ahash first and digest
  10267. + * @era: SEC Era
  10268. + */
  10269. +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
  10270. + int digestsize, int ctx_len, bool import_ctx, int era)
  10271. +{
  10272. + u32 op = adata->algtype;
  10273. +
  10274. + init_sh_desc(desc, HDR_SHARE_SERIAL);
  10275. +
  10276. + /* Append key if it has been set; ahash update excluded */
  10277. + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
  10278. + u32 *skip_key_load;
  10279. +
  10280. + /* Skip key loading if already shared */
  10281. + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  10282. + JUMP_COND_SHRD);
  10283. +
  10284. + if (era < 6)
  10285. + append_key_as_imm(desc, adata->key_virt,
  10286. + adata->keylen_pad,
  10287. + adata->keylen, CLASS_2 |
  10288. + KEY_DEST_MDHA_SPLIT | KEY_ENC);
  10289. + else
  10290. + append_proto_dkp(desc, adata);
  10291. +
  10292. + set_jump_tgt_here(desc, skip_key_load);
  10293. +
  10294. + op |= OP_ALG_AAI_HMAC_PRECOMP;
  10295. + }
  10296. +
  10297. + /* If needed, import context from software */
  10298. + if (import_ctx)
  10299. + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
  10300. + LDST_SRCDST_BYTE_CONTEXT);
  10301. +
  10302. + /* Class 2 operation */
  10303. + append_operation(desc, op | state | OP_ALG_ENCRYPT);
  10304. +
  10305. + /*
  10306. + * Load from buf and/or src and write to req->result or state->context
  10307. + * Calculate remaining bytes to read
  10308. + */
  10309. + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  10310. + /* Read remaining bytes */
  10311. + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
  10312. + FIFOLD_TYPE_MSG | KEY_VLF);
  10313. + /* Store class2 context bytes */
  10314. + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
  10315. + LDST_SRCDST_BYTE_CONTEXT);
  10316. +}
  10317. +EXPORT_SYMBOL(cnstr_shdsc_ahash);
  10318. +
  10319. +MODULE_LICENSE("Dual BSD/GPL");
  10320. +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
  10321. +MODULE_AUTHOR("NXP Semiconductors");
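
The caamhash.c hunk above already shows the intended call sites for this helper; condensed, with the same variable names as in ahash_set_sh_desc(), the four shared descriptors are built as:

	/* sketch, lifted from the ahash_set_sh_desc() changes above */
	cnstr_shdsc_ahash(ctx->sh_desc_update, &ctx->adata, OP_ALG_AS_UPDATE,
			  ctx->ctx_len, ctx->ctx_len, true, ctrlpriv->era);
	cnstr_shdsc_ahash(ctx->sh_desc_update_first, &ctx->adata, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx->ctx_len, false, ctrlpriv->era);
	cnstr_shdsc_ahash(ctx->sh_desc_fin, &ctx->adata, OP_ALG_AS_FINALIZE,
			  digestsize, ctx->ctx_len, true, ctrlpriv->era);
	cnstr_shdsc_ahash(ctx->sh_desc_digest, &ctx->adata, OP_ALG_AS_INITFINAL,
			  digestsize, ctx->ctx_len, false, ctrlpriv->era);

ctx->adata.key_virt must point at the key material before these calls (ahash_set_sh_desc() sets it to ctx->key), and each descriptor is then synced to the device with dma_sync_single_for_device().
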
  10322. --- /dev/null
  10323. +++ b/drivers/crypto/caam/caamhash_desc.h
  10324. @@ -0,0 +1,49 @@
  10325. +/*
  10326. + * Shared descriptors for ahash algorithms
  10327. + *
  10328. + * Copyright 2017 NXP
  10329. + *
  10330. + * Redistribution and use in source and binary forms, with or without
  10331. + * modification, are permitted provided that the following conditions are met:
  10332. + * * Redistributions of source code must retain the above copyright
  10333. + * notice, this list of conditions and the following disclaimer.
  10334. + * * Redistributions in binary form must reproduce the above copyright
  10335. + * notice, this list of conditions and the following disclaimer in the
  10336. + * documentation and/or other materials provided with the distribution.
  10337. + * * Neither the names of the above-listed copyright holders nor the
  10338. + * names of any contributors may be used to endorse or promote products
  10339. + * derived from this software without specific prior written permission.
  10340. + *
  10341. + *
  10342. + * ALTERNATIVELY, this software may be distributed under the terms of the
  10343. + * GNU General Public License ("GPL") as published by the Free Software
  10344. + * Foundation, either version 2 of that License or (at your option) any
  10345. + * later version.
  10346. + *
  10347. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  10348. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  10349. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  10350. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  10351. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  10352. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  10353. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  10354. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  10355. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  10356. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  10357. + * POSSIBILITY OF SUCH DAMAGE.
  10358. + */
  10359. +
  10360. +#ifndef _CAAMHASH_DESC_H_
  10361. +#define _CAAMHASH_DESC_H_
  10362. +
  10363. +/* length of descriptors text */
  10364. +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
  10365. +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
  10366. +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
  10367. +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
  10368. +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
  10369. +
  10370. +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
  10371. + int digestsize, int ctx_len, bool import_ctx, int era);
  10372. +
  10373. +#endif /* _CAAMHASH_DESC_H_ */
  10374. --- a/drivers/crypto/caam/compat.h
  10375. +++ b/drivers/crypto/caam/compat.h
  10376. @@ -17,6 +17,7 @@
  10377. #include <linux/of_platform.h>
  10378. #include <linux/dma-mapping.h>
  10379. #include <linux/io.h>
  10380. +#include <linux/iommu.h>
  10381. #include <linux/spinlock.h>
  10382. #include <linux/rtnetlink.h>
  10383. #include <linux/in.h>
  10384. @@ -38,6 +39,7 @@
  10385. #include <crypto/authenc.h>
  10386. #include <crypto/akcipher.h>
  10387. #include <crypto/scatterwalk.h>
  10388. +#include <crypto/skcipher.h>
  10389. #include <crypto/internal/skcipher.h>
  10390. #include <crypto/internal/hash.h>
  10391. #include <crypto/internal/rsa.h>
  10392. --- a/drivers/crypto/caam/ctrl.c
  10393. +++ b/drivers/crypto/caam/ctrl.c
  10394. @@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx);
  10395. #include "qi.h"
  10396. #endif
  10397. +static struct platform_device *caam_dma_dev;
  10398. +
  10399. /*
  10400. * i.MX targets tend to have clock control subsystems that can
  10401. * enable/disable clocking to our device.
  10402. @@ -332,6 +334,9 @@ static int caam_remove(struct platform_d
  10403. debugfs_remove_recursive(ctrlpriv->dfs_root);
  10404. #endif
  10405. + if (caam_dma_dev)
  10406. + platform_device_unregister(caam_dma_dev);
  10407. +
  10408. /* Unmap controller region */
  10409. iounmap(ctrl);
  10410. @@ -433,6 +438,10 @@ static int caam_probe(struct platform_de
  10411. {.family = "Freescale i.MX"},
  10412. {},
  10413. };
  10414. + static struct platform_device_info caam_dma_pdev_info = {
  10415. + .name = "caam-dma",
  10416. + .id = PLATFORM_DEVID_NONE
  10417. + };
  10418. struct device *dev;
  10419. struct device_node *nprop, *np;
  10420. struct caam_ctrl __iomem *ctrl;
  10421. @@ -615,6 +624,8 @@ static int caam_probe(struct platform_de
  10422. goto iounmap_ctrl;
  10423. }
  10424. + ctrlpriv->era = caam_get_era();
  10425. +
  10426. ret = of_platform_populate(nprop, caam_match, NULL, dev);
  10427. if (ret) {
  10428. dev_err(dev, "JR platform devices creation error\n");
  10429. @@ -671,6 +682,16 @@ static int caam_probe(struct platform_de
  10430. goto caam_remove;
  10431. }
  10432. + caam_dma_pdev_info.parent = dev;
  10433. + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
  10434. + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
  10435. + if (IS_ERR(caam_dma_dev)) {
  10436. + dev_err(dev, "Unable to create and register caam-dma dev\n");
  10437. + caam_dma_dev = NULL;
  10438. + } else {
  10439. + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
  10440. + }
  10441. +
  10442. cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
  10443. /*
  10444. @@ -746,7 +767,7 @@ static int caam_probe(struct platform_de
  10445. /* Report "alive" for developer to see */
  10446. dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
  10447. - caam_get_era());
  10448. + ctrlpriv->era);
  10449. dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
  10450. ctrlpriv->total_jobrs, ctrlpriv->qi_present,
  10451. caam_dpaa2 ? "yes" : "no");
  10452. --- a/drivers/crypto/caam/desc.h
  10453. +++ b/drivers/crypto/caam/desc.h
  10454. @@ -42,6 +42,7 @@
  10455. #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
  10456. #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
  10457. #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
  10458. +#define CMD_MOVEB (0x07 << CMD_SHIFT)
  10459. #define CMD_STORE (0x0a << CMD_SHIFT)
  10460. #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
  10461. #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
  10462. @@ -355,6 +356,7 @@
  10463. #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
  10464. #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
  10465. #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
  10466. +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
  10467. /* Other types. Need to OR in last/flush bits as desired */
  10468. #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
  10469. @@ -408,6 +410,7 @@
  10470. #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
  10471. #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
  10472. #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
  10473. +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
  10474. #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
  10475. /*
  10476. @@ -444,6 +447,18 @@
  10477. #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
  10478. #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
  10479. #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
  10480. +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
  10481. +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
  10482. +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
  10483. +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
  10484. +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
  10485. +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
  10486. +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
  10487. +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
  10488. +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
  10489. +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
  10490. +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
  10491. +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
  10492. /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
  10493. #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
  10494. @@ -1093,6 +1108,22 @@
  10495. /* MacSec protinfos */
  10496. #define OP_PCL_MACSEC 0x0001
  10497. +/* Derived Key Protocol (DKP) Protinfo */
  10498. +#define OP_PCL_DKP_SRC_SHIFT 14
  10499. +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
  10500. +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
  10501. +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
  10502. +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
  10503. +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
  10504. +#define OP_PCL_DKP_DST_SHIFT 12
  10505. +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
  10506. +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
  10507. +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
  10508. +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
  10509. +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
  10510. +#define OP_PCL_DKP_KEY_SHIFT 0
  10511. +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
  10512. +
  10513. /* PKI unidirectional protocol protinfo bits */
  10514. #define OP_PCL_PKPROT_TEST 0x0008
  10515. #define OP_PCL_PKPROT_DECRYPT 0x0004
  10516. @@ -1440,10 +1471,11 @@
  10517. #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
  10518. #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
  10519. #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
  10520. -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
  10521. +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
  10522. #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
  10523. #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
  10524. #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
  10525. +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
  10526. /* Destination selectors */
  10527. #define MATH_DEST_SHIFT 8
  10528. @@ -1452,6 +1484,7 @@
  10529. #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
  10530. #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
  10531. #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
  10532. +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
  10533. #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
  10534. #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
  10535. #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
  10536. @@ -1624,4 +1657,31 @@
  10537. /* Frame Descriptor Command for Replacement Job Descriptor */
  10538. #define FD_CMD_REPLACE_JOB_DESC 0x20000000
  10539. +/* CHA Control Register bits */
  10540. +#define CCTRL_RESET_CHA_ALL 0x1
  10541. +#define CCTRL_RESET_CHA_AESA 0x2
  10542. +#define CCTRL_RESET_CHA_DESA 0x4
  10543. +#define CCTRL_RESET_CHA_AFHA 0x8
  10544. +#define CCTRL_RESET_CHA_KFHA 0x10
  10545. +#define CCTRL_RESET_CHA_SF8A 0x20
  10546. +#define CCTRL_RESET_CHA_PKHA 0x40
  10547. +#define CCTRL_RESET_CHA_MDHA 0x80
  10548. +#define CCTRL_RESET_CHA_CRCA 0x100
  10549. +#define CCTRL_RESET_CHA_RNG 0x200
  10550. +#define CCTRL_RESET_CHA_SF9A 0x400
  10551. +#define CCTRL_RESET_CHA_ZUCE 0x800
  10552. +#define CCTRL_RESET_CHA_ZUCA 0x1000
  10553. +#define CCTRL_UNLOAD_PK_A0 0x10000
  10554. +#define CCTRL_UNLOAD_PK_A1 0x20000
  10555. +#define CCTRL_UNLOAD_PK_A2 0x40000
  10556. +#define CCTRL_UNLOAD_PK_A3 0x80000
  10557. +#define CCTRL_UNLOAD_PK_B0 0x100000
  10558. +#define CCTRL_UNLOAD_PK_B1 0x200000
  10559. +#define CCTRL_UNLOAD_PK_B2 0x400000
  10560. +#define CCTRL_UNLOAD_PK_B3 0x800000
  10561. +#define CCTRL_UNLOAD_PK_N 0x1000000
  10562. +#define CCTRL_UNLOAD_PK_A 0x4000000
  10563. +#define CCTRL_UNLOAD_PK_B 0x8000000
  10564. +#define CCTRL_UNLOAD_SBOX 0x10000000
  10565. +
  10566. #endif /* DESC_H */
  10567. --- a/drivers/crypto/caam/desc_constr.h
  10568. +++ b/drivers/crypto/caam/desc_constr.h
  10569. @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
  10570. append_ptr(desc, ptr);
  10571. }
  10572. -static inline void append_data(u32 * const desc, void *data, int len)
  10573. +static inline void append_data(u32 * const desc, const void *data, int len)
  10574. {
  10575. u32 *offset = desc_end(desc);
  10576. @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
  10577. append_cmd(desc, len);
  10578. }
  10579. -static inline void append_cmd_data(u32 * const desc, void *data, int len,
  10580. +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
  10581. u32 command)
  10582. {
  10583. append_cmd(desc, command | IMMEDIATE | len);
  10584. @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co
  10585. }
  10586. APPEND_CMD_RET(jump, JUMP)
  10587. APPEND_CMD_RET(move, MOVE)
  10588. +APPEND_CMD_RET(moveb, MOVEB)
  10589. static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
  10590. {
  10591. @@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
  10592. APPEND_SEQ_PTR_INTLEN(out, OUT)
  10593. #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
  10594. -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
  10595. +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
  10596. unsigned int len, u32 options) \
  10597. { \
  10598. PRINT_POS; \
  10599. @@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
  10600. * from length of immediate data provided, e.g., split keys
  10601. */
  10602. #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
  10603. -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
  10604. +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
  10605. unsigned int data_len, \
  10606. unsigned int len, u32 options) \
  10607. { \
  10608. @@ -452,7 +453,7 @@ struct alginfo {
  10609. unsigned int keylen_pad;
  10610. union {
  10611. dma_addr_t key_dma;
  10612. - void *key_virt;
  10613. + const void *key_virt;
  10614. };
  10615. bool key_inline;
  10616. };
  10617. @@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi
  10618. return (rem_bytes >= 0) ? 0 : -1;
  10619. }
  10620. +/**
  10621. + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
  10622. + * @desc: pointer to buffer used for descriptor construction
  10623. + * @adata: pointer to authentication transform definitions.
  10624. + * keylen should be the length of initial key, while keylen_pad
  10625. + * the length of the derived (split) key.
  10626. + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
  10627. + * SHA256, SHA384, SHA512}.
  10628. + */
  10629. +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
  10630. +{
  10631. + u32 protid;
  10632. +
  10633. + /*
  10634. + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
  10635. + * to OP_PCLID_DKP_{MD5, SHA*}
  10636. + */
  10637. + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
  10638. + (0x20 << OP_ALG_ALGSEL_SHIFT);
  10639. +
  10640. + if (adata->key_inline) {
  10641. + int words;
  10642. +
  10643. + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
  10644. + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
  10645. + adata->keylen);
  10646. + append_data(desc, adata->key_virt, adata->keylen);
  10647. +
  10648. + /* Reserve space in descriptor buffer for the derived key */
  10649. + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
  10650. + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
  10651. + if (words)
  10652. + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
  10653. + } else {
  10654. + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
  10655. + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
  10656. + adata->keylen);
  10657. + append_ptr(desc, adata->key_dma);
  10658. + }
  10659. +}
  10660. +
  10661. #endif /* DESC_CONSTR_H */
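For context, a minimal sketch of how a caller might drive the new append_proto_dkp() helper is shown below. It is illustrative only: the shared-descriptor header initialization, the SHA-256 algorithm selection and the padded key length are assumptions, not requirements stated by this patch.

/*
 * Illustrative sketch: build a shared descriptor that converts a plain
 * HMAC key into a CAAM split key via the DKP protocol. Assumes a
 * DMA-able descriptor buffer 'desc', init_sh_desc()/HDR_SHARE_SERIAL
 * from desc_constr.h/desc.h, and OP_ALG_ALGSEL_SHA256 from desc.h.
 */
static void example_build_dkp_desc(u32 *desc, const u8 *key, unsigned int keylen)
{
	struct alginfo adata = {
		.algtype    = OP_ALG_ALGSEL_SHA256,	/* hash behind the split key */
		.keylen     = keylen,			/* original key length */
		.keylen_pad = 64,			/* derived/split key length (assumed) */
		.key_virt   = key,
		.key_inline = true,			/* embed the key in the descriptor */
	};

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	append_proto_dkp(desc, &adata);
}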
  10662. --- /dev/null
  10663. +++ b/drivers/crypto/caam/dpseci.c
  10664. @@ -0,0 +1,865 @@
  10665. +/*
  10666. + * Copyright 2013-2016 Freescale Semiconductor Inc.
  10667. + * Copyright 2017 NXP
  10668. + *
  10669. + * Redistribution and use in source and binary forms, with or without
  10670. + * modification, are permitted provided that the following conditions are met:
  10671. + * * Redistributions of source code must retain the above copyright
  10672. + * notice, this list of conditions and the following disclaimer.
  10673. + * * Redistributions in binary form must reproduce the above copyright
  10674. + * notice, this list of conditions and the following disclaimer in the
  10675. + * documentation and/or other materials provided with the distribution.
  10676. + * * Neither the names of the above-listed copyright holders nor the
  10677. + * names of any contributors may be used to endorse or promote products
  10678. + * derived from this software without specific prior written permission.
  10679. + *
  10680. + *
  10681. + * ALTERNATIVELY, this software may be distributed under the terms of the
  10682. + * GNU General Public License ("GPL") as published by the Free Software
  10683. + * Foundation, either version 2 of that License or (at your option) any
  10684. + * later version.
  10685. + *
  10686. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  10687. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  10688. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  10689. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  10690. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  10691. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  10692. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  10693. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  10694. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  10695. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  10696. + * POSSIBILITY OF SUCH DAMAGE.
  10697. + */
  10698. +
  10699. +#include <linux/fsl/mc.h>
  10700. +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
  10701. +#include "dpseci.h"
  10702. +#include "dpseci_cmd.h"
  10703. +
  10704. +/**
  10705. + * dpseci_open() - Open a control session for the specified object
  10706. + * @mc_io: Pointer to MC portal's I/O object
  10707. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10708. + * @dpseci_id: DPSECI unique ID
  10709. + * @token: Returned token; use in subsequent API calls
  10710. + *
  10711. + * This function can be used to open a control session for an already created
  10712. + * object; an object may have been declared in the DPL or by calling the
  10713. + * dpseci_create() function.
  10714. + * This function returns a unique authentication token, associated with the
  10715. + * specific object ID and the specific MC portal; this token must be used in all
  10716. + * subsequent commands for this specific object.
  10717. + *
  10718. + * Return: '0' on success, error code otherwise
  10719. + */
  10720. +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
  10721. + u16 *token)
  10722. +{
  10723. + struct fsl_mc_command cmd = { 0 };
  10724. + struct dpseci_cmd_open *cmd_params;
  10725. + int err;
  10726. +
  10727. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
  10728. + cmd_flags,
  10729. + 0);
  10730. + cmd_params = (struct dpseci_cmd_open *)cmd.params;
  10731. + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
  10732. + err = mc_send_command(mc_io, &cmd);
  10733. + if (err)
  10734. + return err;
  10735. +
  10736. + *token = mc_cmd_hdr_read_token(&cmd);
  10737. +
  10738. + return 0;
  10739. +}
  10740. +
  10741. +/**
  10742. + * dpseci_close() - Close the control session of the object
  10743. + * @mc_io: Pointer to MC portal's I/O object
  10744. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10745. + * @token: Token of DPSECI object
  10746. + *
  10747. + * After this function is called, no further operations are allowed on the
  10748. + * object without opening a new control session.
  10749. + *
  10750. + * Return: '0' on success, error code otherwise
  10751. + */
  10752. +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
  10753. +{
  10754. + struct fsl_mc_command cmd = { 0 };
  10755. +
  10756. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
  10757. + cmd_flags,
  10758. + token);
  10759. + return mc_send_command(mc_io, &cmd);
  10760. +}
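To illustrate the open/close session model described above (not part of the patch), a caller obtains an MC portal, opens the DPSECI object to get an authentication token, uses that token for subsequent commands, and closes the session when done. The mc_io portal and the dpseci_id are assumed to come from the fsl-mc bus (e.g. via fsl_mc_portal_allocate()).

/*
 * Illustrative sketch only: query a DPSECI object's attributes inside an
 * open/close session. 'mc_io' and 'dpseci_id' are assumptions.
 */
static int example_query_dpseci(struct fsl_mc_io *mc_io, int dpseci_id)
{
	struct dpseci_attr attr;
	u16 token;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		pr_info("dpseci.%d: %d tx / %d rx queues\n",
			attr.id, attr.num_tx_queues, attr.num_rx_queues);

	dpseci_close(mc_io, 0, token);
	return err;
}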
  10761. +
  10762. +/**
  10763. + * dpseci_create() - Create the DPSECI object
  10764. + * @mc_io: Pointer to MC portal's I/O object
  10765. + * @dprc_token: Parent container token; '0' for default container
  10766. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10767. + * @cfg: Configuration structure
  10768. + * @obj_id: returned object id
  10769. + *
  10770. + * Create the DPSECI object, allocate required resources and perform required
  10771. + * initialization.
  10772. + *
  10773. + * The object can be created either by declaring it in the DPL file, or by
  10774. + * calling this function.
  10775. + *
  10776. + * The function accepts an authentication token of a parent container that this
  10777. + * object should be assigned to. The token can be '0' so the object will be
  10778. + * assigned to the default container.
  10779. + * The newly created object can be opened with the returned object id and using
  10780. + * the container's associated tokens and MC portals.
  10781. + *
  10782. + * Return: '0' on success, error code otherwise
  10783. + */
  10784. +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
  10785. + const struct dpseci_cfg *cfg, u32 *obj_id)
  10786. +{
  10787. + struct fsl_mc_command cmd = { 0 };
  10788. + struct dpseci_cmd_create *cmd_params;
  10789. + int i, err;
  10790. +
  10791. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
  10792. + cmd_flags,
  10793. + dprc_token);
  10794. + cmd_params = (struct dpseci_cmd_create *)cmd.params;
  10795. + for (i = 0; i < 8; i++)
  10796. + cmd_params->priorities[i] = cfg->priorities[i];
  10797. + for (i = 0; i < 8; i++)
  10798. + cmd_params->priorities2[i] = cfg->priorities[8 + i];
  10799. + cmd_params->num_tx_queues = cfg->num_tx_queues;
  10800. + cmd_params->num_rx_queues = cfg->num_rx_queues;
  10801. + cmd_params->options = cpu_to_le32(cfg->options);
  10802. + err = mc_send_command(mc_io, &cmd);
  10803. + if (err)
  10804. + return err;
  10805. +
  10806. + *obj_id = mc_cmd_read_object_id(&cmd);
  10807. +
  10808. + return 0;
  10809. +}
  10810. +
  10811. +/**
  10812. + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
  10813. + * @mc_io: Pointer to MC portal's I/O object
  10814. + * @dprc_token: Parent container token; '0' for default container
  10815. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10816. + * @object_id: The object id; it must be a valid id within the container that
  10817. + * created this object
  10818. + *
  10819. + * The function accepts the authentication token of the parent container that
  10820. + * created the object (not the one that currently owns the object). The object
  10821. + * is searched within parent using the provided 'object_id'.
  10822. + * All tokens to the object must be closed before calling destroy.
  10823. + *
  10824. + * Return: '0' on success, error code otherwise
  10825. + */
  10826. +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
  10827. + u32 object_id)
  10828. +{
  10829. + struct fsl_mc_command cmd = { 0 };
  10830. + struct dpseci_cmd_destroy *cmd_params;
  10831. +
  10832. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
  10833. + cmd_flags,
  10834. + dprc_token);
  10835. + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
  10836. + cmd_params->object_id = cpu_to_le32(object_id);
  10837. +
  10838. + return mc_send_command(mc_io, &cmd);
  10839. +}
  10840. +
  10841. +/**
  10842. + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
  10843. + * @mc_io: Pointer to MC portal's I/O object
  10844. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10845. + * @token: Token of DPSECI object
  10846. + *
  10847. + * Return: '0' on success, error code otherwise
  10848. + */
  10849. +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
  10850. +{
  10851. + struct fsl_mc_command cmd = { 0 };
  10852. +
  10853. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
  10854. + cmd_flags,
  10855. + token);
  10856. + return mc_send_command(mc_io, &cmd);
  10857. +}
  10858. +
  10859. +/**
  10860. + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
  10861. + * @mc_io: Pointer to MC portal's I/O object
  10862. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10863. + * @token: Token of DPSECI object
  10864. + *
  10865. + * Return: '0' on success, error code otherwise
  10866. + */
  10867. +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
  10868. +{
  10869. + struct fsl_mc_command cmd = { 0 };
  10870. +
  10871. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
  10872. + cmd_flags,
  10873. + token);
  10874. +
  10875. + return mc_send_command(mc_io, &cmd);
  10876. +}
  10877. +
  10878. +/**
  10879. + * dpseci_is_enabled() - Check if the DPSECI is enabled.
  10880. + * @mc_io: Pointer to MC portal's I/O object
  10881. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10882. + * @token: Token of DPSECI object
  10883. + * @en: Returns '1' if object is enabled; '0' otherwise
  10884. + *
  10885. + * Return: '0' on success, error code otherwise
  10886. + */
  10887. +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  10888. + int *en)
  10889. +{
  10890. + struct fsl_mc_command cmd = { 0 };
  10891. + struct dpseci_rsp_is_enabled *rsp_params;
  10892. + int err;
  10893. +
  10894. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
  10895. + cmd_flags,
  10896. + token);
  10897. + err = mc_send_command(mc_io, &cmd);
  10898. + if (err)
  10899. + return err;
  10900. +
  10901. + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
  10902. + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
  10903. +
  10904. + return 0;
  10905. +}
  10906. +
  10907. +/**
  10908. + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
  10909. + * @mc_io: Pointer to MC portal's I/O object
  10910. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10911. + * @token: Token of DPSECI object
  10912. + *
  10913. + * Return: '0' on success, error code otherwise
  10914. + */
  10915. +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
  10916. +{
  10917. + struct fsl_mc_command cmd = { 0 };
  10918. +
  10919. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
  10920. + cmd_flags,
  10921. + token);
  10922. +
  10923. + return mc_send_command(mc_io, &cmd);
  10924. +}
  10925. +
  10926. +/**
  10927. + * dpseci_get_irq_enable() - Get overall interrupt state
  10928. + * @mc_io: Pointer to MC portal's I/O object
  10929. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10930. + * @token: Token of DPSECI object
  10931. + * @irq_index: The interrupt index to configure
  10932. + * @en: Returned Interrupt state - enable = 1, disable = 0
  10933. + *
  10934. + * Return: '0' on success, error code otherwise
  10935. + */
  10936. +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  10937. + u8 irq_index, u8 *en)
  10938. +{
  10939. + struct fsl_mc_command cmd = { 0 };
  10940. + struct dpseci_cmd_irq_enable *cmd_params;
  10941. + struct dpseci_rsp_get_irq_enable *rsp_params;
  10942. + int err;
  10943. +
  10944. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
  10945. + cmd_flags,
  10946. + token);
  10947. + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
  10948. + cmd_params->irq_index = irq_index;
  10949. + err = mc_send_command(mc_io, &cmd);
  10950. + if (err)
  10951. + return err;
  10952. +
  10953. + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
  10954. + *en = rsp_params->enable_state;
  10955. +
  10956. + return 0;
  10957. +}
  10958. +
  10959. +/**
  10960. + * dpseci_set_irq_enable() - Set overall interrupt state.
  10961. + * @mc_io: Pointer to MC portal's I/O object
  10962. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10963. + * @token: Token of DPSECI object
  10964. + * @irq_index: The interrupt index to configure
  10965. + * @en: Interrupt state - enable = 1, disable = 0
  10966. + *
  10967. + * Allows GPP software to control when interrupts are generated.
10968. + * Each interrupt can have up to 32 causes. The enable/disable setting controls
10969. + * the overall interrupt state: if the interrupt is disabled, none of the causes
10970. + * can generate an interrupt.
  10971. + *
  10972. + * Return: '0' on success, error code otherwise
  10973. + */
  10974. +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  10975. + u8 irq_index, u8 en)
  10976. +{
  10977. + struct fsl_mc_command cmd = { 0 };
  10978. + struct dpseci_cmd_irq_enable *cmd_params;
  10979. +
  10980. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
  10981. + cmd_flags,
  10982. + token);
  10983. + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
  10984. + cmd_params->irq_index = irq_index;
  10985. + cmd_params->enable_state = en;
  10986. +
  10987. + return mc_send_command(mc_io, &cmd);
  10988. +}
  10989. +
  10990. +/**
  10991. + * dpseci_get_irq_mask() - Get interrupt mask.
  10992. + * @mc_io: Pointer to MC portal's I/O object
  10993. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  10994. + * @token: Token of DPSECI object
  10995. + * @irq_index: The interrupt index to configure
  10996. + * @mask: Returned event mask to trigger interrupt
  10997. + *
  10998. + * Every interrupt can have up to 32 causes and the interrupt model supports
  10999. + * masking/unmasking each cause independently.
  11000. + *
  11001. + * Return: '0' on success, error code otherwise
  11002. + */
  11003. +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11004. + u8 irq_index, u32 *mask)
  11005. +{
  11006. + struct fsl_mc_command cmd = { 0 };
  11007. + struct dpseci_cmd_irq_mask *cmd_params;
  11008. + int err;
  11009. +
  11010. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
  11011. + cmd_flags,
  11012. + token);
  11013. + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
  11014. + cmd_params->irq_index = irq_index;
  11015. + err = mc_send_command(mc_io, &cmd);
  11016. + if (err)
  11017. + return err;
  11018. +
  11019. + *mask = le32_to_cpu(cmd_params->mask);
  11020. +
  11021. + return 0;
  11022. +}
  11023. +
  11024. +/**
  11025. + * dpseci_set_irq_mask() - Set interrupt mask.
  11026. + * @mc_io: Pointer to MC portal's I/O object
  11027. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11028. + * @token: Token of DPSECI object
  11029. + * @irq_index: The interrupt index to configure
  11030. + * @mask: event mask to trigger interrupt;
  11031. + * each bit:
  11032. + * 0 = ignore event
  11033. + * 1 = consider event for asserting IRQ
  11034. + *
  11035. + * Every interrupt can have up to 32 causes and the interrupt model supports
11036. + * masking/unmasking each cause independently.
  11037. + *
  11038. + * Return: '0' on success, error code otherwise
  11039. + */
  11040. +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11041. + u8 irq_index, u32 mask)
  11042. +{
  11043. + struct fsl_mc_command cmd = { 0 };
  11044. + struct dpseci_cmd_irq_mask *cmd_params;
  11045. +
  11046. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
  11047. + cmd_flags,
  11048. + token);
  11049. + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
  11050. + cmd_params->mask = cpu_to_le32(mask);
  11051. + cmd_params->irq_index = irq_index;
  11052. +
  11053. + return mc_send_command(mc_io, &cmd);
  11054. +}
  11055. +
  11056. +/**
  11057. + * dpseci_get_irq_status() - Get the current status of any pending interrupts
  11058. + * @mc_io: Pointer to MC portal's I/O object
  11059. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11060. + * @token: Token of DPSECI object
  11061. + * @irq_index: The interrupt index to configure
  11062. + * @status: Returned interrupts status - one bit per cause:
  11063. + * 0 = no interrupt pending
  11064. + * 1 = interrupt pending
  11065. + *
  11066. + * Return: '0' on success, error code otherwise
  11067. + */
  11068. +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11069. + u8 irq_index, u32 *status)
  11070. +{
  11071. + struct fsl_mc_command cmd = { 0 };
  11072. + struct dpseci_cmd_irq_status *cmd_params;
  11073. + int err;
  11074. +
  11075. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
  11076. + cmd_flags,
  11077. + token);
  11078. + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
  11079. + cmd_params->status = cpu_to_le32(*status);
  11080. + cmd_params->irq_index = irq_index;
  11081. + err = mc_send_command(mc_io, &cmd);
  11082. + if (err)
  11083. + return err;
  11084. +
  11085. + *status = le32_to_cpu(cmd_params->status);
  11086. +
  11087. + return 0;
  11088. +}
  11089. +
  11090. +/**
  11091. + * dpseci_clear_irq_status() - Clear a pending interrupt's status
  11092. + * @mc_io: Pointer to MC portal's I/O object
  11093. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11094. + * @token: Token of DPSECI object
  11095. + * @irq_index: The interrupt index to configure
  11096. + * @status: bits to clear (W1C) - one bit per cause:
  11097. + * 0 = don't change
  11098. + * 1 = clear status bit
  11099. + *
  11100. + * Return: '0' on success, error code otherwise
  11101. + */
  11102. +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11103. + u8 irq_index, u32 status)
  11104. +{
  11105. + struct fsl_mc_command cmd = { 0 };
  11106. + struct dpseci_cmd_irq_status *cmd_params;
  11107. +
  11108. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
  11109. + cmd_flags,
  11110. + token);
  11111. + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
  11112. + cmd_params->status = cpu_to_le32(status);
  11113. + cmd_params->irq_index = irq_index;
  11114. +
  11115. + return mc_send_command(mc_io, &cmd);
  11116. +}
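A hedged sketch of the interrupt flow implied by the four IRQ calls above (set the cause mask, enable the interrupt, then read and W1C-clear the status when it fires). The irq_index value 0 and the all-causes mask are assumptions for illustration.

/*
 * Illustrative sketch only: enable all 32 causes on irq index 0, then
 * service a pending interrupt by reading and clearing its status.
 */
static int example_dpseci_irq(struct fsl_mc_io *mc_io, u16 token)
{
	u32 status = 0;
	int err;

	err = dpseci_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
	if (err)
		return err;

	/* later, typically from a threaded interrupt handler: */
	err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
	if (err)
		return err;

	return dpseci_clear_irq_status(mc_io, 0, token, 0, status);
}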
  11117. +
  11118. +/**
  11119. + * dpseci_get_attributes() - Retrieve DPSECI attributes
  11120. + * @mc_io: Pointer to MC portal's I/O object
  11121. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11122. + * @token: Token of DPSECI object
  11123. + * @attr: Returned object's attributes
  11124. + *
  11125. + * Return: '0' on success, error code otherwise
  11126. + */
  11127. +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11128. + struct dpseci_attr *attr)
  11129. +{
  11130. + struct fsl_mc_command cmd = { 0 };
  11131. + struct dpseci_rsp_get_attributes *rsp_params;
  11132. + int err;
  11133. +
  11134. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
  11135. + cmd_flags,
  11136. + token);
  11137. + err = mc_send_command(mc_io, &cmd);
  11138. + if (err)
  11139. + return err;
  11140. +
  11141. + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
  11142. + attr->id = le32_to_cpu(rsp_params->id);
  11143. + attr->num_tx_queues = rsp_params->num_tx_queues;
  11144. + attr->num_rx_queues = rsp_params->num_rx_queues;
  11145. + attr->options = le32_to_cpu(rsp_params->options);
  11146. +
  11147. + return 0;
  11148. +}
  11149. +
  11150. +/**
  11151. + * dpseci_set_rx_queue() - Set Rx queue configuration
  11152. + * @mc_io: Pointer to MC portal's I/O object
  11153. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11154. + * @token: Token of DPSECI object
  11155. + * @queue: Select the queue relative to number of priorities configured at
  11156. + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
  11157. + * Rx queues identically.
  11158. + * @cfg: Rx queue configuration
  11159. + *
  11160. + * Return: '0' on success, error code otherwise
  11161. + */
  11162. +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11163. + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
  11164. +{
  11165. + struct fsl_mc_command cmd = { 0 };
  11166. + struct dpseci_cmd_queue *cmd_params;
  11167. +
  11168. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
  11169. + cmd_flags,
  11170. + token);
  11171. + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
  11172. + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
  11173. + cmd_params->priority = cfg->dest_cfg.priority;
  11174. + cmd_params->queue = queue;
  11175. + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
  11176. + cfg->dest_cfg.dest_type);
  11177. + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
  11178. + cmd_params->options = cpu_to_le32(cfg->options);
  11179. + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
  11180. + cfg->order_preservation_en);
  11181. +
  11182. + return mc_send_command(mc_io, &cmd);
  11183. +}
  11184. +
  11185. +/**
  11186. + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
  11187. + * @mc_io: Pointer to MC portal's I/O object
  11188. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11189. + * @token: Token of DPSECI object
  11190. + * @queue: Select the queue relative to number of priorities configured at
  11191. + * DPSECI creation
  11192. + * @attr: Returned Rx queue attributes
  11193. + *
  11194. + * Return: '0' on success, error code otherwise
  11195. + */
  11196. +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11197. + u8 queue, struct dpseci_rx_queue_attr *attr)
  11198. +{
  11199. + struct fsl_mc_command cmd = { 0 };
  11200. + struct dpseci_cmd_queue *cmd_params;
  11201. + int err;
  11202. +
  11203. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
  11204. + cmd_flags,
  11205. + token);
  11206. + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
  11207. + cmd_params->queue = queue;
  11208. + err = mc_send_command(mc_io, &cmd);
  11209. + if (err)
  11210. + return err;
  11211. +
  11212. + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
  11213. + attr->dest_cfg.priority = cmd_params->priority;
  11214. + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
  11215. + DEST_TYPE);
  11216. + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
  11217. + attr->fqid = le32_to_cpu(cmd_params->fqid);
  11218. + attr->order_preservation_en =
  11219. + dpseci_get_field(cmd_params->order_preservation_en,
  11220. + ORDER_PRESERVATION);
  11221. +
  11222. + return 0;
  11223. +}
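As an illustration of the Rx queue programming model used by the two calls above (not part of the patch), the sketch below points every Rx queue at a DPIO channel so dequeues are driven by FQDAN notifications. The dpio_id and priority values are assumptions.

/*
 * Illustrative sketch only: direct all Rx queues to one DPIO destination.
 * 'dpio_id' is assumed to come from the fsl-mc bus.
 */
static int example_set_rx_queues(struct fsl_mc_io *mc_io, u16 token, int dpio_id)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = 0,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}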
  11224. +
  11225. +/**
  11226. + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
  11227. + * @mc_io: Pointer to MC portal's I/O object
  11228. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11229. + * @token: Token of DPSECI object
  11230. + * @queue: Select the queue relative to number of priorities configured at
  11231. + * DPSECI creation
  11232. + * @attr: Returned Tx queue attributes
  11233. + *
  11234. + * Return: '0' on success, error code otherwise
  11235. + */
  11236. +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11237. + u8 queue, struct dpseci_tx_queue_attr *attr)
  11238. +{
  11239. + struct fsl_mc_command cmd = { 0 };
  11240. + struct dpseci_cmd_queue *cmd_params;
  11241. + struct dpseci_rsp_get_tx_queue *rsp_params;
  11242. + int err;
  11243. +
  11244. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
  11245. + cmd_flags,
  11246. + token);
  11247. + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
  11248. + cmd_params->queue = queue;
  11249. + err = mc_send_command(mc_io, &cmd);
  11250. + if (err)
  11251. + return err;
  11252. +
  11253. + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
  11254. + attr->fqid = le32_to_cpu(rsp_params->fqid);
  11255. + attr->priority = rsp_params->priority;
  11256. +
  11257. + return 0;
  11258. +}
  11259. +
  11260. +/**
  11261. + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
  11262. + * @mc_io: Pointer to MC portal's I/O object
  11263. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11264. + * @token: Token of DPSECI object
  11265. + * @attr: Returned SEC attributes
  11266. + *
  11267. + * Return: '0' on success, error code otherwise
  11268. + */
  11269. +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11270. + struct dpseci_sec_attr *attr)
  11271. +{
  11272. + struct fsl_mc_command cmd = { 0 };
  11273. + struct dpseci_rsp_get_sec_attr *rsp_params;
  11274. + int err;
  11275. +
  11276. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
  11277. + cmd_flags,
  11278. + token);
  11279. + err = mc_send_command(mc_io, &cmd);
  11280. + if (err)
  11281. + return err;
  11282. +
  11283. + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
  11284. + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
  11285. + attr->major_rev = rsp_params->major_rev;
  11286. + attr->minor_rev = rsp_params->minor_rev;
  11287. + attr->era = rsp_params->era;
  11288. + attr->deco_num = rsp_params->deco_num;
  11289. + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
  11290. + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
  11291. + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
  11292. + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
  11293. + attr->crc_acc_num = rsp_params->crc_acc_num;
  11294. + attr->pk_acc_num = rsp_params->pk_acc_num;
  11295. + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
  11296. + attr->rng_acc_num = rsp_params->rng_acc_num;
  11297. + attr->md_acc_num = rsp_params->md_acc_num;
  11298. + attr->arc4_acc_num = rsp_params->arc4_acc_num;
  11299. + attr->des_acc_num = rsp_params->des_acc_num;
  11300. + attr->aes_acc_num = rsp_params->aes_acc_num;
  11301. + attr->ccha_acc_num = rsp_params->ccha_acc_num;
  11302. + attr->ptha_acc_num = rsp_params->ptha_acc_num;
  11303. +
  11304. + return 0;
  11305. +}
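A brief, purely illustrative use of the returned SEC attributes, e.g. to log the block's ID, revision and Era after a successful query:

/* Illustrative sketch only: report the SEC block version. */
static void example_log_sec_version(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpseci_sec_attr attr;

	if (dpseci_get_sec_attr(mc_io, 0, token, &attr))
		return;

	pr_info("SEC ip id 0x%04x, rev %d.%d, Era %d, %d DECOs\n",
		attr.ip_id, attr.major_rev, attr.minor_rev, attr.era,
		attr.deco_num);
}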
  11306. +
  11307. +/**
  11308. + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
  11309. + * @mc_io: Pointer to MC portal's I/O object
  11310. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11311. + * @token: Token of DPSECI object
  11312. + * @counters: Returned SEC counters
  11313. + *
  11314. + * Return: '0' on success, error code otherwise
  11315. + */
  11316. +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11317. + struct dpseci_sec_counters *counters)
  11318. +{
  11319. + struct fsl_mc_command cmd = { 0 };
  11320. + struct dpseci_rsp_get_sec_counters *rsp_params;
  11321. + int err;
  11322. +
  11323. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
  11324. + cmd_flags,
  11325. + token);
  11326. + err = mc_send_command(mc_io, &cmd);
  11327. + if (err)
  11328. + return err;
  11329. +
  11330. + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
  11331. + counters->dequeued_requests =
  11332. + le64_to_cpu(rsp_params->dequeued_requests);
  11333. + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
  11334. + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
  11335. + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
  11336. + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
  11337. + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
  11338. + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
  11339. +
  11340. + return 0;
  11341. +}
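Similarly, the global counters can be sampled at any time; a hedged sketch (the debugfs or sysfs plumbing a real driver would use is omitted):

/* Illustrative sketch only: sample the global SEC counters once. */
static void example_dump_sec_counters(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpseci_sec_counters c;

	if (dpseci_get_sec_counters(mc_io, 0, token, &c))
		return;

	pr_info("dequeued %llu, ob enc %llu, ib dec %llu requests\n",
		c.dequeued_requests, c.ob_enc_requests, c.ib_dec_requests);
}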
  11342. +
  11343. +/**
  11344. + * dpseci_get_api_version() - Get Data Path SEC Interface API version
  11345. + * @mc_io: Pointer to MC portal's I/O object
  11346. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11347. + * @major_ver: Major version of data path sec API
  11348. + * @minor_ver: Minor version of data path sec API
  11349. + *
  11350. + * Return: '0' on success, error code otherwise
  11351. + */
  11352. +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11353. + u16 *major_ver, u16 *minor_ver)
  11354. +{
  11355. + struct fsl_mc_command cmd = { 0 };
  11356. + struct dpseci_rsp_get_api_version *rsp_params;
  11357. + int err;
  11358. +
  11359. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
  11360. + cmd_flags, 0);
  11361. + err = mc_send_command(mc_io, &cmd);
  11362. + if (err)
  11363. + return err;
  11364. +
  11365. + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
  11366. + *major_ver = le16_to_cpu(rsp_params->major);
  11367. + *minor_ver = le16_to_cpu(rsp_params->minor);
  11368. +
  11369. + return 0;
  11370. +}
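Callers typically gate feature use on a minimum MC firmware API version; a minimal sketch, where the 5.1 threshold is an assumed example rather than a documented requirement:

/* Illustrative sketch only: require at least an assumed DPSECI API version 5.1. */
static int example_check_api_version(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < 5 || (major == 5 && minor < 1))
		return -EOPNOTSUPP;

	return 0;
}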
  11371. +
  11372. +/**
  11373. + * dpseci_set_opr() - Set Order Restoration configuration
  11374. + * @mc_io: Pointer to MC portal's I/O object
  11375. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11376. + * @token: Token of DPSECI object
  11377. + * @index: The queue index
  11378. + * @options: Configuration mode options; can be OPR_OPT_CREATE or
  11379. + * OPR_OPT_RETIRE
  11380. + * @cfg: Configuration options for the OPR
  11381. + *
  11382. + * Return: '0' on success, error code otherwise
  11383. + */
  11384. +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
  11385. + u8 options, struct opr_cfg *cfg)
  11386. +{
  11387. + struct fsl_mc_command cmd = { 0 };
  11388. + struct dpseci_cmd_opr *cmd_params;
  11389. +
  11390. + cmd.header = mc_encode_cmd_header(
  11391. + DPSECI_CMDID_SET_OPR,
  11392. + cmd_flags,
  11393. + token);
  11394. + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
  11395. + cmd_params->index = index;
  11396. + cmd_params->options = options;
  11397. + cmd_params->oloe = cfg->oloe;
  11398. + cmd_params->oeane = cfg->oeane;
  11399. + cmd_params->olws = cfg->olws;
  11400. + cmd_params->oa = cfg->oa;
  11401. + cmd_params->oprrws = cfg->oprrws;
  11402. +
  11403. + return mc_send_command(mc_io, &cmd);
  11404. +}
  11405. +
  11406. +/**
  11407. + * dpseci_get_opr() - Retrieve Order Restoration config and query
  11408. + * @mc_io: Pointer to MC portal's I/O object
  11409. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11410. + * @token: Token of DPSECI object
  11411. + * @index: The queue index
  11412. + * @cfg: Returned OPR configuration
  11413. + * @qry: Returned OPR query
  11414. + *
  11415. + * Return: '0' on success, error code otherwise
  11416. + */
  11417. +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
  11418. + struct opr_cfg *cfg, struct opr_qry *qry)
  11419. +{
  11420. + struct fsl_mc_command cmd = { 0 };
  11421. + struct dpseci_cmd_opr *cmd_params;
  11422. + struct dpseci_rsp_get_opr *rsp_params;
  11423. + int err;
  11424. +
  11425. + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
  11426. + cmd_flags,
  11427. + token);
  11428. + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
  11429. + cmd_params->index = index;
  11430. + err = mc_send_command(mc_io, &cmd);
  11431. + if (err)
  11432. + return err;
  11433. +
  11434. + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
  11435. + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
  11436. + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
  11437. + cfg->oloe = rsp_params->oloe;
  11438. + cfg->oeane = rsp_params->oeane;
  11439. + cfg->olws = rsp_params->olws;
  11440. + cfg->oa = rsp_params->oa;
  11441. + cfg->oprrws = rsp_params->oprrws;
  11442. + qry->nesn = le16_to_cpu(rsp_params->nesn);
  11443. + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
  11444. + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
  11445. + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
  11446. + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
  11447. + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
  11448. + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
  11449. + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
  11450. + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
  11451. + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
  11452. +
  11453. + return 0;
  11454. +}
  11455. +
  11456. +/**
  11457. + * dpseci_set_congestion_notification() - Set congestion group
  11458. + * notification configuration
  11459. + * @mc_io: Pointer to MC portal's I/O object
  11460. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11461. + * @token: Token of DPSECI object
  11462. + * @cfg: congestion notification configuration
  11463. + *
  11464. + * Return: '0' on success, error code otherwise
  11465. + */
  11466. +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11467. + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
  11468. +{
  11469. + struct fsl_mc_command cmd = { 0 };
  11470. + struct dpseci_cmd_congestion_notification *cmd_params;
  11471. +
  11472. + cmd.header = mc_encode_cmd_header(
  11473. + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
  11474. + cmd_flags,
  11475. + token);
  11476. + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
  11477. + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
  11478. + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
  11479. + cmd_params->priority = cfg->dest_cfg.priority;
  11480. + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
  11481. + cfg->dest_cfg.dest_type);
  11482. + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
  11483. + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
  11484. + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
  11485. + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
  11486. + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
  11487. +
  11488. + return mc_send_command(mc_io, &cmd);
  11489. +}
  11490. +
  11491. +/**
  11492. + * dpseci_get_congestion_notification() - Get congestion group notification
  11493. + * configuration
  11494. + * @mc_io: Pointer to MC portal's I/O object
  11495. + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  11496. + * @token: Token of DPSECI object
  11497. + * @cfg: congestion notification configuration
  11498. + *
  11499. + * Return: '0' on success, error code otherwise
  11500. + */
  11501. +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11502. + u16 token, struct dpseci_congestion_notification_cfg *cfg)
  11503. +{
  11504. + struct fsl_mc_command cmd = { 0 };
  11505. + struct dpseci_cmd_congestion_notification *rsp_params;
  11506. + int err;
  11507. +
  11508. + cmd.header = mc_encode_cmd_header(
  11509. + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
  11510. + cmd_flags,
  11511. + token);
  11512. + err = mc_send_command(mc_io, &cmd);
  11513. + if (err)
  11514. + return err;
  11515. +
  11516. + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
  11517. + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
  11518. + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
  11519. + cfg->dest_cfg.priority = rsp_params->priority;
  11520. + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
  11521. + CGN_DEST_TYPE);
  11522. + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
  11523. + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
  11524. + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
  11525. + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
  11526. + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
  11527. +
  11528. + return 0;
  11529. +}
  11530. --- /dev/null
  11531. +++ b/drivers/crypto/caam/dpseci.h
  11532. @@ -0,0 +1,433 @@
  11533. +/*
  11534. + * Copyright 2013-2016 Freescale Semiconductor Inc.
  11535. + * Copyright 2017 NXP
  11536. + *
  11537. + * Redistribution and use in source and binary forms, with or without
  11538. + * modification, are permitted provided that the following conditions are met:
  11539. + * * Redistributions of source code must retain the above copyright
  11540. + * notice, this list of conditions and the following disclaimer.
  11541. + * * Redistributions in binary form must reproduce the above copyright
  11542. + * notice, this list of conditions and the following disclaimer in the
  11543. + * documentation and/or other materials provided with the distribution.
  11544. + * * Neither the names of the above-listed copyright holders nor the
  11545. + * names of any contributors may be used to endorse or promote products
  11546. + * derived from this software without specific prior written permission.
  11547. + *
  11548. + *
  11549. + * ALTERNATIVELY, this software may be distributed under the terms of the
  11550. + * GNU General Public License ("GPL") as published by the Free Software
  11551. + * Foundation, either version 2 of that License or (at your option) any
  11552. + * later version.
  11553. + *
  11554. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  11555. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  11556. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  11557. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  11558. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  11559. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  11560. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  11561. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  11562. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  11563. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  11564. + * POSSIBILITY OF SUCH DAMAGE.
  11565. + */
  11566. +#ifndef _DPSECI_H_
  11567. +#define _DPSECI_H_
  11568. +
  11569. +/*
  11570. + * Data Path SEC Interface API
  11571. + * Contains initialization APIs and runtime control APIs for DPSECI
  11572. + */
  11573. +
  11574. +struct fsl_mc_io;
  11575. +struct opr_cfg;
  11576. +struct opr_qry;
  11577. +
  11578. +/**
  11579. + * General DPSECI macros
  11580. + */
  11581. +
  11582. +/**
  11583. + * Maximum number of Tx/Rx queues per DPSECI object
  11584. + */
  11585. +#define DPSECI_MAX_QUEUE_NUM 16
  11586. +
  11587. +/**
  11588. + * All queues considered; see dpseci_set_rx_queue()
  11589. + */
  11590. +#define DPSECI_ALL_QUEUES (u8)(-1)
  11591. +
  11592. +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
  11593. + u16 *token);
  11594. +
  11595. +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
  11596. +
  11597. +/**
  11598. + * Enable the Congestion Group support
  11599. + */
  11600. +#define DPSECI_OPT_HAS_CG 0x000020
  11601. +
  11602. +/**
  11603. + * Enable the Order Restoration support
  11604. + */
  11605. +#define DPSECI_OPT_HAS_OPR 0x000040
  11606. +
  11607. +/**
  11608. + * Order Point Records are shared for the entire DPSECI
  11609. + */
  11610. +#define DPSECI_OPT_OPR_SHARED 0x000080
  11611. +
  11612. +/**
  11613. + * struct dpseci_cfg - Structure representing DPSECI configuration
  11614. + * @options: Any combination of the following options:
  11615. + * DPSECI_OPT_HAS_CG
  11616. + * DPSECI_OPT_HAS_OPR
  11617. + * DPSECI_OPT_OPR_SHARED
  11618. + * @num_tx_queues: num of queues towards the SEC
  11619. + * @num_rx_queues: num of queues back from the SEC
  11620. + * @priorities: Priorities for the SEC hardware processing;
  11621. + * each place in the array is the priority of the tx queue
  11622. + * towards the SEC;
11623. + * valid priority values are 1-8
  11624. + */
  11625. +struct dpseci_cfg {
  11626. + u32 options;
  11627. + u8 num_tx_queues;
  11628. + u8 num_rx_queues;
  11629. + u8 priorities[DPSECI_MAX_QUEUE_NUM];
  11630. +};
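To make the priorities layout concrete, a hedged example of filling struct dpseci_cfg for an object with two queue pairs; all values are illustrative, not mandated by the API:

/* Illustrative sketch only: a 2-queue DPSECI with congestion-group support. */
static void example_fill_cfg(struct dpseci_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->options = DPSECI_OPT_HAS_CG;
	cfg->num_tx_queues = 2;
	cfg->num_rx_queues = 2;
	cfg->priorities[0] = 1;	/* highest priority towards the SEC */
	cfg->priorities[1] = 2;
}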
  11631. +
  11632. +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
  11633. + const struct dpseci_cfg *cfg, u32 *obj_id);
  11634. +
  11635. +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
  11636. + u32 object_id);
  11637. +
  11638. +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
  11639. +
  11640. +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
  11641. +
  11642. +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11643. + int *en);
  11644. +
  11645. +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
  11646. +
  11647. +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11648. + u8 irq_index, u8 *en);
  11649. +
  11650. +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11651. + u8 irq_index, u8 en);
  11652. +
  11653. +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11654. + u8 irq_index, u32 *mask);
  11655. +
  11656. +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11657. + u8 irq_index, u32 mask);
  11658. +
  11659. +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11660. + u8 irq_index, u32 *status);
  11661. +
  11662. +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11663. + u8 irq_index, u32 status);
  11664. +
  11665. +/**
  11666. + * struct dpseci_attr - Structure representing DPSECI attributes
  11667. + * @id: DPSECI object ID
  11668. + * @num_tx_queues: number of queues towards the SEC
  11669. + * @num_rx_queues: number of queues back from the SEC
  11670. + * @options: any combination of the following options:
  11671. + * DPSECI_OPT_HAS_CG
  11672. + * DPSECI_OPT_HAS_OPR
  11673. + * DPSECI_OPT_OPR_SHARED
  11674. + */
  11675. +struct dpseci_attr {
  11676. + int id;
  11677. + u8 num_tx_queues;
  11678. + u8 num_rx_queues;
  11679. + u32 options;
  11680. +};
  11681. +
  11682. +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11683. + struct dpseci_attr *attr);
  11684. +
  11685. +/**
  11686. + * enum dpseci_dest - DPSECI destination types
  11687. + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
  11688. + * and does not generate FQDAN notifications; user is expected to dequeue
  11689. + * from the queue based on polling or other user-defined method
  11690. + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
  11691. + * notifications to the specified DPIO; user is expected to dequeue from
  11692. + * the queue only after notification is received
  11693. + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
  11694. + * FQDAN notifications, but is connected to the specified DPCON object;
  11695. + * user is expected to dequeue from the DPCON channel
  11696. + */
  11697. +enum dpseci_dest {
  11698. + DPSECI_DEST_NONE = 0,
  11699. + DPSECI_DEST_DPIO,
  11700. + DPSECI_DEST_DPCON
  11701. +};
  11702. +
  11703. +/**
  11704. + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
  11705. + * @dest_type: Destination type
  11706. + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
  11707. + * @priority: Priority selection within the DPIO or DPCON channel; valid values
  11708. + * are 0-1 or 0-7, depending on the number of priorities in that channel;
  11709. + * not relevant for 'DPSECI_DEST_NONE' option
  11710. + */
  11711. +struct dpseci_dest_cfg {
  11712. + enum dpseci_dest dest_type;
  11713. + int dest_id;
  11714. + u8 priority;
  11715. +};
  11716. +
  11717. +/**
  11718. + * DPSECI queue modification options
  11719. + */
  11720. +
  11721. +/**
  11722. + * Select to modify the user's context associated with the queue
  11723. + */
  11724. +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
  11725. +
  11726. +/**
  11727. + * Select to modify the queue's destination
  11728. + */
  11729. +#define DPSECI_QUEUE_OPT_DEST 0x00000002
  11730. +
  11731. +/**
  11732. + * Select to modify the queue's order preservation
  11733. + */
  11734. +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
  11735. +
  11736. +/**
  11737. + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
  11738. + * @options: Flags representing the suggested modifications to the queue;
  11739. + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
11740. + * @order_preservation_en: order preservation configuration for the rx queue;
  11741. + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
  11742. + * @user_ctx: User context value provided in the frame descriptor of each
  11743. + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
  11744. + * in 'options'
  11745. + * @dest_cfg: Queue destination parameters; valid only if
  11746. + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
  11747. + */
  11748. +struct dpseci_rx_queue_cfg {
  11749. + u32 options;
  11750. + int order_preservation_en;
  11751. + u64 user_ctx;
  11752. + struct dpseci_dest_cfg dest_cfg;
  11753. +};
  11754. +
  11755. +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11756. + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
  11757. +
  11758. +/**
  11759. + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
  11760. + * @user_ctx: User context value provided in the frame descriptor of each
  11761. + * dequeued frame
  11762. + * @order_preservation_en: Status of the order preservation configuration on the
  11763. + * queue
  11764. + * @dest_cfg: Queue destination configuration
  11765. + * @fqid: Virtual FQID value to be used for dequeue operations
  11766. + */
  11767. +struct dpseci_rx_queue_attr {
  11768. + u64 user_ctx;
  11769. + int order_preservation_en;
  11770. + struct dpseci_dest_cfg dest_cfg;
  11771. + u32 fqid;
  11772. +};
  11773. +
  11774. +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11775. + u8 queue, struct dpseci_rx_queue_attr *attr);
  11776. +
  11777. +/**
  11778. + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
  11779. + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
  11780. + * @priority: SEC hardware processing priority for the queue
  11781. + */
  11782. +struct dpseci_tx_queue_attr {
  11783. + u32 fqid;
  11784. + u8 priority;
  11785. +};
  11786. +
  11787. +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11788. + u8 queue, struct dpseci_tx_queue_attr *attr);
  11789. +
  11790. +/**
  11791. + * struct dpseci_sec_attr - Structure representing attributes of the SEC
  11792. + * hardware accelerator
  11793. + * @ip_id: ID for SEC
  11794. + * @major_rev: Major revision number for SEC
  11795. + * @minor_rev: Minor revision number for SEC
  11796. + * @era: SEC Era
  11797. + * @deco_num: The number of copies of the DECO that are implemented in this
  11798. + * version of SEC
  11799. + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
  11800. + * version of SEC
  11801. + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
  11802. + * version of SEC
  11803. + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
  11804. + * implemented in this version of SEC
  11805. + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
  11806. + * implemented in this version of SEC
  11807. + * @crc_acc_num: The number of copies of the CRC module that are implemented in
  11808. + * this version of SEC
  11809. + * @pk_acc_num: The number of copies of the Public Key module that are
  11810. + * implemented in this version of SEC
  11811. + * @kasumi_acc_num: The number of copies of the Kasumi module that are
  11812. + * implemented in this version of SEC
  11813. + * @rng_acc_num: The number of copies of the Random Number Generator that are
  11814. + * implemented in this version of SEC
  11815. + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
  11816. + * implemented in this version of SEC
  11817. + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
  11818. + * in this version of SEC
  11819. + * @des_acc_num: The number of copies of the DES module that are implemented in
  11820. + * this version of SEC
  11821. + * @aes_acc_num: The number of copies of the AES module that are implemented in
  11822. + * this version of SEC
  11823. + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
  11824. + * implemented in this version of SEC.
  11825. + * @ptha_acc_num: The number of copies of the Poly1305 module that are
  11826. + * implemented in this version of SEC.
  11827. + **/
  11828. +struct dpseci_sec_attr {
  11829. + u16 ip_id;
  11830. + u8 major_rev;
  11831. + u8 minor_rev;
  11832. + u8 era;
  11833. + u8 deco_num;
  11834. + u8 zuc_auth_acc_num;
  11835. + u8 zuc_enc_acc_num;
  11836. + u8 snow_f8_acc_num;
  11837. + u8 snow_f9_acc_num;
  11838. + u8 crc_acc_num;
  11839. + u8 pk_acc_num;
  11840. + u8 kasumi_acc_num;
  11841. + u8 rng_acc_num;
  11842. + u8 md_acc_num;
  11843. + u8 arc4_acc_num;
  11844. + u8 des_acc_num;
  11845. + u8 aes_acc_num;
  11846. + u8 ccha_acc_num;
  11847. + u8 ptha_acc_num;
  11848. +};
  11849. +
  11850. +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11851. + struct dpseci_sec_attr *attr);
  11852. +
  11853. +/**
11854. + * struct dpseci_sec_counters - Structure representing global SEC counters
11855. + * (not per-DPSECI counters)
  11856. + * @dequeued_requests: Number of Requests Dequeued
  11857. + * @ob_enc_requests: Number of Outbound Encrypt Requests
  11858. + * @ib_dec_requests: Number of Inbound Decrypt Requests
  11859. + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
  11860. + * @ob_prot_bytes: Number of Outbound Bytes Protected
  11861. + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
  11862. + * @ib_valid_bytes: Number of Inbound Bytes Validated
  11863. + */
  11864. +struct dpseci_sec_counters {
  11865. + u64 dequeued_requests;
  11866. + u64 ob_enc_requests;
  11867. + u64 ib_dec_requests;
  11868. + u64 ob_enc_bytes;
  11869. + u64 ob_prot_bytes;
  11870. + u64 ib_dec_bytes;
  11871. + u64 ib_valid_bytes;
  11872. +};
  11873. +
  11874. +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
  11875. + struct dpseci_sec_counters *counters);
  11876. +
  11877. +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11878. + u16 *major_ver, u16 *minor_ver);
  11879. +
  11880. +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
  11881. + u8 options, struct opr_cfg *cfg);
  11882. +
  11883. +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
  11884. + struct opr_cfg *cfg, struct opr_qry *qry);
  11885. +
  11886. +/**
  11887. + * enum dpseci_congestion_unit - DPSECI congestion units
  11888. + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
  11889. + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
  11890. + */
  11891. +enum dpseci_congestion_unit {
  11892. + DPSECI_CONGESTION_UNIT_BYTES = 0,
  11893. + DPSECI_CONGESTION_UNIT_FRAMES
  11894. +};
  11895. +
  11896. +/**
  11897. + * CSCN message is written to message_iova once entering a
  11898. + * congestion state (see 'threshold_entry')
  11899. + */
  11900. +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
  11901. +
  11902. +/**
  11903. + * CSCN message is written to message_iova once exiting a
  11904. + * congestion state (see 'threshold_exit')
  11905. + */
  11906. +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
  11907. +
  11908. +/**
  11909. + * CSCN write will attempt to allocate into a cache (coherent write);
  11910. + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
  11911. + */
  11912. +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
  11913. +
  11914. +/**
  11915. + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
  11916. + * DPIO/DPCON's WQ channel once entering a congestion state
  11917. + * (see 'threshold_entry')
  11918. + */
  11919. +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
  11920. +
  11921. +/**
  11922. + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
  11923. + * DPIO/DPCON's WQ channel once exiting a congestion state
  11924. + * (see 'threshold_exit')
  11925. + */
  11926. +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
  11927. +
  11928. +/**
  11929. + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
  11930. + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
  11931. + * (if enabled)
  11932. + */
  11933. +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
  11934. +
  11935. +/**
  11936. + * struct dpseci_congestion_notification_cfg - congestion notification
  11937. + * configuration
  11938. + * @units: units type
  11939. + * @threshold_entry: above this threshold we enter a congestion state.
  11940. + * set it to '0' to disable it
  11941. + * @threshold_exit: below this threshold we exit the congestion state.
  11942. + * @message_ctx: The context that will be part of the CSCN message
  11943. + * @message_iova: I/O virtual address (must be in DMA-able memory),
  11944. + * must be 16B aligned;
  11945. + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
  11946. + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
  11947. + * values
  11948. + */
  11949. +struct dpseci_congestion_notification_cfg {
  11950. + enum dpseci_congestion_unit units;
  11951. + u32 threshold_entry;
  11952. + u32 threshold_exit;
  11953. + u64 message_ctx;
  11954. + u64 message_iova;
  11955. + struct dpseci_dest_cfg dest_cfg;
  11956. + u16 notification_mode;
  11957. +};
  11958. +
  11959. +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11960. + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
  11961. +
  11962. +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
  11963. + u16 token, struct dpseci_congestion_notification_cfg *cfg);
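A hedged example of programming the notification, assuming the caller has already DMA-mapped a 16B-aligned CSCN buffer to cscn_iova and holds an open token; the threshold values and the dpseci_setup_congestion() wrapper are illustrative only:

	static int dpseci_setup_congestion(struct fsl_mc_io *mc_io, u16 token,
					   dma_addr_t cscn_iova)
	{
		struct dpseci_congestion_notification_cfg cfg = {
			.units = DPSECI_CONGESTION_UNIT_FRAMES,
			.threshold_entry = 512,	/* enter congestion above 512 frames */
			.threshold_exit = 384,	/* leave congestion below 384 frames */
			.message_iova = cscn_iova,	/* 16B aligned, DMA-able */
			.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
		};

		/* dest_cfg left zeroed: notify via memory writes only, no WQ channel */
		return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
	}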
  11964. +
  11965. +#endif /* _DPSECI_H_ */
  11966. --- /dev/null
  11967. +++ b/drivers/crypto/caam/dpseci_cmd.h
  11968. @@ -0,0 +1,287 @@
  11969. +/*
  11970. + * Copyright 2013-2016 Freescale Semiconductor Inc.
  11971. + * Copyright 2017 NXP
  11972. + *
  11973. + * Redistribution and use in source and binary forms, with or without
  11974. + * modification, are permitted provided that the following conditions are met:
  11975. + * * Redistributions of source code must retain the above copyright
  11976. + * notice, this list of conditions and the following disclaimer.
  11977. + * * Redistributions in binary form must reproduce the above copyright
  11978. + * notice, this list of conditions and the following disclaimer in the
  11979. + * documentation and/or other materials provided with the distribution.
  11980. + * * Neither the names of the above-listed copyright holders nor the
  11981. + * names of any contributors may be used to endorse or promote products
  11982. + * derived from this software without specific prior written permission.
  11983. + *
  11984. + *
  11985. + * ALTERNATIVELY, this software may be distributed under the terms of the
  11986. + * GNU General Public License ("GPL") as published by the Free Software
  11987. + * Foundation, either version 2 of that License or (at your option) any
  11988. + * later version.
  11989. + *
  11990. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  11991. + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  11992. + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  11993. + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
  11994. + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  11995. + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  11996. + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  11997. + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  11998. + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  11999. + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  12000. + * POSSIBILITY OF SUCH DAMAGE.
  12001. + */
  12002. +
  12003. +#ifndef _DPSECI_CMD_H_
  12004. +#define _DPSECI_CMD_H_
  12005. +
  12006. +/* DPSECI Version */
  12007. +#define DPSECI_VER_MAJOR 5
  12008. +#define DPSECI_VER_MINOR 3
  12009. +
  12010. +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
  12011. +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
  12012. +
  12013. +/* Command versioning */
  12014. +#define DPSECI_CMD_BASE_VERSION 1
  12015. +#define DPSECI_CMD_BASE_VERSION_V2 2
  12016. +#define DPSECI_CMD_BASE_VERSION_V3 3
  12017. +#define DPSECI_CMD_ID_OFFSET 4
  12018. +
  12019. +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
  12020. + DPSECI_CMD_BASE_VERSION)
  12021. +
  12022. +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
  12023. + DPSECI_CMD_BASE_VERSION_V2)
  12024. +
  12025. +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
  12026. + DPSECI_CMD_BASE_VERSION_V3)
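A short expansion of the versioning macros above, using one of the command IDs defined below:

	/* e.g. DPSECI_CMDID_GET_SEC_ATTR == DPSECI_CMD_V2(0x198)
	 *                               == (0x198 << DPSECI_CMD_ID_OFFSET) | 2
	 *                               == 0x1982
	 * upper bits carry the command ID, the low nibble its payload version
	 */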
  12027. +
  12028. +/* Command IDs */
  12029. +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
  12030. +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
  12031. +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
  12032. +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
  12033. +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
  12034. +
  12035. +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
  12036. +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
  12037. +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
  12038. +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
  12039. +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
  12040. +
  12041. +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
  12042. +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
  12043. +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
  12044. +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
  12045. +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
  12046. +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
  12047. +
  12048. +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
  12049. +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
  12050. +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
  12051. +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
  12052. +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
  12053. +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
  12054. +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
  12055. +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
  12056. +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
  12057. +
  12058. +/* Macros for accessing command fields smaller than 1 byte */
  12059. +#define DPSECI_MASK(field) \
  12060. + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
  12061. + DPSECI_##field##_SHIFT)
  12062. +
  12063. +#define dpseci_set_field(var, field, val) \
  12064. + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
  12065. +
  12066. +#define dpseci_get_field(var, field) \
  12067. + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
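A small illustration of the field helpers, using the DPSECI_DEST_TYPE_SHIFT/_SIZE pair defined further down; the local variables are hypothetical and DPSECI_DEST_DPIO is assumed to come from the destination enum in dpseci.h:

	u8 dest_type = 0;
	bool is_dpio;

	/* pack a 4-bit destination type into bits 3..0 of the byte */
	dpseci_set_field(dest_type, DEST_TYPE, DPSECI_DEST_DPIO);

	/* unpack it again; DPSECI_MASK(DEST_TYPE) expands to GENMASK(3, 0) */
	is_dpio = dpseci_get_field(dest_type, DEST_TYPE) == DPSECI_DEST_DPIO;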
  12068. +
  12069. +struct dpseci_cmd_open {
  12070. + __le32 dpseci_id;
  12071. +};
  12072. +
  12073. +struct dpseci_cmd_create {
  12074. + u8 priorities[8];
  12075. + u8 num_tx_queues;
  12076. + u8 num_rx_queues;
  12077. + u8 pad0[6];
  12078. + __le32 options;
  12079. + __le32 pad1;
  12080. + u8 priorities2[8];
  12081. +};
  12082. +
  12083. +struct dpseci_cmd_destroy {
  12084. + __le32 object_id;
  12085. +};
  12086. +
  12087. +#define DPSECI_ENABLE_SHIFT 0
  12088. +#define DPSECI_ENABLE_SIZE 1
  12089. +
  12090. +struct dpseci_rsp_is_enabled {
  12091. + u8 is_enabled;
  12092. +};
  12093. +
  12094. +struct dpseci_cmd_irq_enable {
  12095. + u8 enable_state;
  12096. + u8 pad[3];
  12097. + u8 irq_index;
  12098. +};
  12099. +
  12100. +struct dpseci_rsp_get_irq_enable {
  12101. + u8 enable_state;
  12102. +};
  12103. +
  12104. +struct dpseci_cmd_irq_mask {
  12105. + __le32 mask;
  12106. + u8 irq_index;
  12107. +};
  12108. +
  12109. +struct dpseci_cmd_irq_status {
  12110. + __le32 status;
  12111. + u8 irq_index;
  12112. +};
  12113. +
  12114. +struct dpseci_rsp_get_attributes {
  12115. + __le32 id;
  12116. + __le32 pad0;
  12117. + u8 num_tx_queues;
  12118. + u8 num_rx_queues;
  12119. + u8 pad1[6];
  12120. + __le32 options;
  12121. +};
  12122. +
  12123. +#define DPSECI_DEST_TYPE_SHIFT 0
  12124. +#define DPSECI_DEST_TYPE_SIZE 4
  12125. +
  12126. +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
  12127. +#define DPSECI_ORDER_PRESERVATION_SIZE 1
  12128. +
  12129. +struct dpseci_cmd_queue {
  12130. + __le32 dest_id;
  12131. + u8 priority;
  12132. + u8 queue;
  12133. + u8 dest_type;
  12134. + u8 pad;
  12135. + __le64 user_ctx;
  12136. + union {
  12137. + __le32 options;
  12138. + __le32 fqid;
  12139. + };
  12140. + u8 order_preservation_en;
  12141. +};
  12142. +
  12143. +struct dpseci_rsp_get_tx_queue {
  12144. + __le32 pad;
  12145. + __le32 fqid;
  12146. + u8 priority;
  12147. +};
  12148. +
  12149. +struct dpseci_rsp_get_sec_attr {
  12150. + __le16 ip_id;
  12151. + u8 major_rev;
  12152. + u8 minor_rev;
  12153. + u8 era;
  12154. + u8 pad0[3];
  12155. + u8 deco_num;
  12156. + u8 zuc_auth_acc_num;
  12157. + u8 zuc_enc_acc_num;
  12158. + u8 pad1;
  12159. + u8 snow_f8_acc_num;
  12160. + u8 snow_f9_acc_num;
  12161. + u8 crc_acc_num;
  12162. + u8 pad2;
  12163. + u8 pk_acc_num;
  12164. + u8 kasumi_acc_num;
  12165. + u8 rng_acc_num;
  12166. + u8 pad3;
  12167. + u8 md_acc_num;
  12168. + u8 arc4_acc_num;
  12169. + u8 des_acc_num;
  12170. + u8 aes_acc_num;
  12171. + u8 ccha_acc_num;
  12172. + u8 ptha_acc_num;
  12173. +};
  12174. +
  12175. +struct dpseci_rsp_get_sec_counters {
  12176. + __le64 dequeued_requests;
  12177. + __le64 ob_enc_requests;
  12178. + __le64 ib_dec_requests;
  12179. + __le64 ob_enc_bytes;
  12180. + __le64 ob_prot_bytes;
  12181. + __le64 ib_dec_bytes;
  12182. + __le64 ib_valid_bytes;
  12183. +};
  12184. +
  12185. +struct dpseci_rsp_get_api_version {
  12186. + __le16 major;
  12187. + __le16 minor;
  12188. +};
  12189. +
  12190. +struct dpseci_cmd_opr {
  12191. + __le16 pad;
  12192. + u8 index;
  12193. + u8 options;
  12194. + u8 pad1[7];
  12195. + u8 oloe;
  12196. + u8 oeane;
  12197. + u8 olws;
  12198. + u8 oa;
  12199. + u8 oprrws;
  12200. +};
  12201. +
  12202. +#define DPSECI_OPR_RIP_SHIFT 0
  12203. +#define DPSECI_OPR_RIP_SIZE 1
  12204. +#define DPSECI_OPR_ENABLE_SHIFT 1
  12205. +#define DPSECI_OPR_ENABLE_SIZE 1
  12206. +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
  12207. +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
  12208. +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
  12209. +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
  12210. +
  12211. +struct dpseci_rsp_get_opr {
  12212. + __le64 pad;
  12213. + u8 flags;
  12214. + u8 pad0[2];
  12215. + u8 oloe;
  12216. + u8 oeane;
  12217. + u8 olws;
  12218. + u8 oa;
  12219. + u8 oprrws;
  12220. + __le16 nesn;
  12221. + __le16 pad1;
  12222. + __le16 ndsn;
  12223. + __le16 pad2;
  12224. + __le16 ea_tseq;
  12225. + u8 tseq_nlis;
  12226. + u8 pad3;
  12227. + __le16 ea_hseq;
  12228. + u8 hseq_nlis;
  12229. + u8 pad4;
  12230. + __le16 ea_hptr;
  12231. + __le16 pad5;
  12232. + __le16 ea_tptr;
  12233. + __le16 pad6;
  12234. + __le16 opr_vid;
  12235. + __le16 pad7;
  12236. + __le16 opr_id;
  12237. +};
  12238. +
  12239. +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
  12240. +#define DPSECI_CGN_DEST_TYPE_SIZE 4
  12241. +#define DPSECI_CGN_UNITS_SHIFT 4
  12242. +#define DPSECI_CGN_UNITS_SIZE 2
  12243. +
  12244. +struct dpseci_cmd_congestion_notification {
  12245. + __le32 dest_id;
  12246. + __le16 notification_mode;
  12247. + u8 priority;
  12248. + u8 options;
  12249. + __le64 message_iova;
  12250. + __le64 message_ctx;
  12251. + __le32 threshold_entry;
  12252. + __le32 threshold_exit;
  12253. +};
  12254. +
  12255. +#endif /* _DPSECI_CMD_H_ */
  12256. --- a/drivers/crypto/caam/error.c
  12257. +++ b/drivers/crypto/caam/error.c
  12258. @@ -108,6 +108,54 @@ static const struct {
  12259. { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
  12260. };
  12261. +static const struct {
  12262. + u8 value;
  12263. + const char *error_text;
  12264. +} qi_error_list[] = {
  12265. + { 0x1F, "Job terminated by FQ or ICID flush" },
  12266. + { 0x20, "FD format error"},
  12267. + { 0x21, "FD command format error"},
  12268. + { 0x23, "FL format error"},
  12269. + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
  12270. + { 0x30, "Max. buffer size too small"},
  12271. + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
  12272. + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
  12273. + { 0x33, "Size over/underflow (allocate mode)"},
  12274. + { 0x34, "Size over/underflow (reuse mode)"},
  12275. + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
  12276. + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
  12277. + { 0x41, "SBC frame format not supported (allocate mode)"},
  12278. + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
  12279. + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
  12280. + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
  12281. + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
  12282. + { 0x46, "Annotation length exceeds offset (reuse mode)"},
  12283. + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
  12284. + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
  12285. + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
  12286. + { 0x51, "Unsupported IF reuse mode"},
  12287. + { 0x52, "Unsupported FL use mode"},
  12288. + { 0x53, "Unsupported RJD use mode"},
  12289. + { 0x54, "Unsupported inline descriptor use mode"},
  12290. + { 0xC0, "Table buffer pool 0 depletion"},
  12291. + { 0xC1, "Table buffer pool 1 depletion"},
  12292. + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
  12293. + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
  12294. + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
  12295. + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
  12296. + { 0xD0, "FLC read error"},
  12297. + { 0xD1, "FL read error"},
  12298. + { 0xD2, "FL write error"},
  12299. + { 0xD3, "OF SGT write error"},
  12300. + { 0xD4, "PTA read error"},
  12301. + { 0xD5, "PTA write error"},
  12302. + { 0xD6, "OF SGT F-bit write error"},
  12303. + { 0xD7, "ASA write error"},
  12304. + { 0xE1, "FLC[ICR]=0 ICID error"},
  12305. + { 0xE2, "FLC[ICR]=1 ICID error"},
  12306. + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
  12307. +};
  12308. +
  12309. static const char * const cha_id_list[] = {
  12310. "",
  12311. "AES",
  12312. @@ -236,6 +284,27 @@ static void report_deco_status(struct de
  12313. status, error, idx_str, idx, err_str, err_err_code);
  12314. }
  12315. +static void report_qi_status(struct device *qidev, const u32 status,
  12316. + const char *error)
  12317. +{
  12318. + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
  12319. + const char *err_str = "unidentified error value 0x";
  12320. + char err_err_code[3] = { 0 };
  12321. + int i;
  12322. +
  12323. + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
  12324. + if (qi_error_list[i].value == err_id)
  12325. + break;
  12326. +
  12327. + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
  12328. + err_str = qi_error_list[i].error_text;
  12329. + else
  12330. + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
  12331. +
  12332. + dev_err(qidev, "%08x: %s: %s%s\n",
  12333. + status, error, err_str, err_err_code);
  12334. +}
  12335. +
  12336. static void report_jr_status(struct device *jrdev, const u32 status,
  12337. const char *error)
  12338. {
  12339. @@ -250,7 +319,7 @@ static void report_cond_code_status(stru
  12340. status, error, __func__);
  12341. }
  12342. -void caam_jr_strstatus(struct device *jrdev, u32 status)
  12343. +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
  12344. {
  12345. static const struct stat_src {
  12346. void (*report_ssed)(struct device *jrdev, const u32 status,
  12347. @@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr
  12348. { report_ccb_status, "CCB" },
  12349. { report_jump_status, "Jump" },
  12350. { report_deco_status, "DECO" },
  12351. - { NULL, "Queue Manager Interface" },
  12352. + { report_qi_status, "Queue Manager Interface" },
  12353. { report_jr_status, "Job Ring" },
  12354. { report_cond_code_status, "Condition Code" },
  12355. { NULL, NULL },
  12356. @@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr
  12357. else
  12358. dev_err(jrdev, "%d: unknown error source\n", ssrc);
  12359. }
  12360. -EXPORT_SYMBOL(caam_jr_strstatus);
  12361. +EXPORT_SYMBOL(caam_strstatus);
  12362. --- a/drivers/crypto/caam/error.h
  12363. +++ b/drivers/crypto/caam/error.h
  12364. @@ -8,7 +8,11 @@
  12365. #ifndef CAAM_ERROR_H
  12366. #define CAAM_ERROR_H
  12367. #define CAAM_ERROR_STR_MAX 302
  12368. -void caam_jr_strstatus(struct device *jrdev, u32 status);
  12369. +
  12370. +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
  12371. +
  12372. +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
  12373. +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
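Both wrappers funnel into caam_strstatus(); a hedged usage sketch, where jrdev/qidev/status stand for whatever the calling completion path already has:

	/* job ring completion callback (legacy / QI v1 paths) */
	caam_jr_strstatus(jrdev, status);

	/* DPAA2 (QI v2) frame dequeue path */
	caam_qi2_strstatus(qidev, status);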
  12374. void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
  12375. int rowsize, int groupsize, struct scatterlist *sg,
  12376. --- a/drivers/crypto/caam/intern.h
  12377. +++ b/drivers/crypto/caam/intern.h
  12378. @@ -84,6 +84,7 @@ struct caam_drv_private {
  12379. u8 qi_present; /* Nonzero if QI present in device */
  12380. int secvio_irq; /* Security violation interrupt number */
  12381. int virt_en; /* Virtualization enabled in CAAM */
  12382. + int era; /* CAAM Era (internal HW revision) */
  12383. #define RNG4_MAX_HANDLES 2
  12384. /* RNG4 block */
  12385. --- a/drivers/crypto/caam/jr.c
  12386. +++ b/drivers/crypto/caam/jr.c
  12387. @@ -23,6 +23,14 @@ struct jr_driver_data {
  12388. static struct jr_driver_data driver_data;
  12389. +static int jr_driver_probed;
  12390. +
  12391. +int caam_jr_driver_probed(void)
  12392. +{
  12393. + return jr_driver_probed;
  12394. +}
  12395. +EXPORT_SYMBOL(caam_jr_driver_probed);
  12396. +
  12397. static int caam_reset_hw_jr(struct device *dev)
  12398. {
  12399. struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
  12400. @@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor
  12401. dev_err(jrdev, "Failed to shut down job ring\n");
  12402. irq_dispose_mapping(jrpriv->irq);
  12403. + jr_driver_probed--;
  12404. +
  12405. return ret;
  12406. }
  12407. @@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void)
  12408. EXPORT_SYMBOL(caam_jr_alloc);
  12409. /**
  12410. + * caam_jridx_alloc() - Allocate a specific job ring based on its index.
  12411. + *
  12412. + * Return: pointer to the newly allocated physical job ring device
  12413. + * if successful, or an error pointer otherwise.
  12414. + **/
  12415. +struct device *caam_jridx_alloc(int idx)
  12416. +{
  12417. + struct caam_drv_private_jr *jrpriv;
  12418. + struct device *dev = ERR_PTR(-ENODEV);
  12419. +
  12420. + spin_lock(&driver_data.jr_alloc_lock);
  12421. +
  12422. + if (list_empty(&driver_data.jr_list))
  12423. + goto end;
  12424. +
  12425. + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
  12426. + if (jrpriv->ridx == idx) {
  12427. + atomic_inc(&jrpriv->tfm_count);
  12428. + dev = jrpriv->dev;
  12429. + break;
  12430. + }
  12431. + }
  12432. +
  12433. +end:
  12434. + spin_unlock(&driver_data.jr_alloc_lock);
  12435. + return dev;
  12436. +}
  12437. +EXPORT_SYMBOL(caam_jridx_alloc);
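A hedged sketch of a consumer that needs one specific ring; caam_jr_driver_probed() and caam_jridx_alloc() are the helpers added here, caam_jr_enqueue()/caam_jr_free() are the existing job ring API, and the use_ring0() wrapper is hypothetical:

	static int use_ring0(void)
	{
		struct device *jrdev;

		if (!caam_jr_driver_probed())
			return -ENODEV;		/* no job ring registered yet */

		jrdev = caam_jridx_alloc(0);	/* ring with index 0 */
		if (IS_ERR(jrdev))
			return PTR_ERR(jrdev);

		/* submit descriptors with caam_jr_enqueue(jrdev, ...), then: */
		caam_jr_free(jrdev);
		return 0;
	}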
  12438. +
  12439. +/**
  12440. * caam_jr_free() - Free the Job Ring
  12441. * @rdev - points to the dev that identifies the Job ring to
  12442. * be released.
  12443. @@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform
  12444. atomic_set(&jrpriv->tfm_count, 0);
  12445. + jr_driver_probed++;
  12446. +
  12447. return 0;
  12448. }
  12449. --- a/drivers/crypto/caam/jr.h
  12450. +++ b/drivers/crypto/caam/jr.h
  12451. @@ -9,7 +9,9 @@
  12452. #define JR_H
  12453. /* Prototypes for backend-level services exposed to APIs */
  12454. +int caam_jr_driver_probed(void);
  12455. struct device *caam_jr_alloc(void);
  12456. +struct device *caam_jridx_alloc(int idx);
  12457. void caam_jr_free(struct device *rdev);
  12458. int caam_jr_enqueue(struct device *dev, u32 *desc,
  12459. void (*cbk)(struct device *dev, u32 *desc, u32 status,
  12460. --- a/drivers/crypto/caam/key_gen.c
  12461. +++ b/drivers/crypto/caam/key_gen.c
  12462. @@ -11,36 +11,6 @@
  12463. #include "desc_constr.h"
  12464. #include "key_gen.h"
  12465. -/**
  12466. - * split_key_len - Compute MDHA split key length for a given algorithm
  12467. - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
  12468. - * SHA224, SHA384, SHA512.
  12469. - *
  12470. - * Return: MDHA split key length
  12471. - */
  12472. -static inline u32 split_key_len(u32 hash)
  12473. -{
  12474. - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
  12475. - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
  12476. - u32 idx;
  12477. -
  12478. - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
  12479. -
  12480. - return (u32)(mdpadlen[idx] * 2);
  12481. -}
  12482. -
  12483. -/**
  12484. - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
  12485. - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
  12486. - * SHA224, SHA384, SHA512.
  12487. - *
  12488. - * Return: MDHA split key pad length
  12489. - */
  12490. -static inline u32 split_key_pad_len(u32 hash)
  12491. -{
  12492. - return ALIGN(split_key_len(hash), 16);
  12493. -}
  12494. -
  12495. void split_key_done(struct device *dev, u32 *desc, u32 err,
  12496. void *context)
  12497. {
  12498. --- a/drivers/crypto/caam/key_gen.h
  12499. +++ b/drivers/crypto/caam/key_gen.h
  12500. @@ -6,6 +6,36 @@
  12501. *
  12502. */
  12503. +/**
  12504. + * split_key_len - Compute MDHA split key length for a given algorithm
  12505. + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
  12506. + * SHA224, SHA384, SHA512.
  12507. + *
  12508. + * Return: MDHA split key length
  12509. + */
  12510. +static inline u32 split_key_len(u32 hash)
  12511. +{
  12512. + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
  12513. + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
  12514. + u32 idx;
  12515. +
  12516. + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
  12517. +
  12518. + return (u32)(mdpadlen[idx] * 2);
  12519. +}
  12520. +
  12521. +/**
  12522. + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
  12523. + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
  12524. + * SHA224, SHA384, SHA512.
  12525. + *
  12526. + * Return: MDHA split key pad length
  12527. + */
  12528. +static inline u32 split_key_pad_len(u32 hash)
  12529. +{
  12530. + return ALIGN(split_key_len(hash), 16);
  12531. +}
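A quick worked example of the two helpers, assuming the OP_ALG_ALGSEL_* sub-field maps MD5/SHA1/SHA224/SHA256/SHA384/SHA512 to indices 0..5 of mdpadlen[]:

	split_key_len(OP_ALG_ALGSEL_SHA256);     /* 32 * 2 = 64 bytes */
	split_key_pad_len(OP_ALG_ALGSEL_SHA256); /* ALIGN(64, 16) = 64 */
	split_key_len(OP_ALG_ALGSEL_MD5);        /* 16 * 2 = 32 bytes */
	split_key_pad_len(OP_ALG_ALGSEL_MD5);    /* ALIGN(32, 16) = 32 */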
  12532. +
  12533. struct split_key_result {
  12534. struct completion completion;
  12535. int err;
  12536. --- a/drivers/crypto/caam/qi.c
  12537. +++ b/drivers/crypto/caam/qi.c
  12538. @@ -9,7 +9,7 @@
  12539. #include <linux/cpumask.h>
  12540. #include <linux/kthread.h>
  12541. -#include <soc/fsl/qman.h>
  12542. +#include <linux/fsl_qman.h>
  12543. #include "regs.h"
  12544. #include "qi.h"
  12545. @@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache;
  12546. int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
  12547. {
  12548. struct qm_fd fd;
  12549. - dma_addr_t addr;
  12550. int ret;
  12551. int num_retries = 0;
  12552. - qm_fd_clear_fd(&fd);
  12553. - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
  12554. -
  12555. - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
  12556. + fd.cmd = 0;
  12557. + fd.format = qm_fd_compound;
  12558. + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
  12559. + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
  12560. DMA_BIDIRECTIONAL);
  12561. - if (dma_mapping_error(qidev, addr)) {
  12562. + if (dma_mapping_error(qidev, fd.addr)) {
  12563. dev_err(qidev, "DMA mapping error for QI enqueue request\n");
  12564. return -EIO;
  12565. }
  12566. - qm_fd_addr_set64(&fd, addr);
  12567. do {
  12568. - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
  12569. + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
  12570. if (likely(!ret))
  12571. return 0;
  12572. @@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev
  12573. EXPORT_SYMBOL(caam_qi_enqueue);
  12574. static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
  12575. - const union qm_mr_entry *msg)
  12576. + const struct qm_mr_entry *msg)
  12577. {
  12578. const struct qm_fd *fd;
  12579. struct caam_drv_req *drv_req;
  12580. @@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p
  12581. fd = &msg->ern.fd;
  12582. - if (qm_fd_get_format(fd) != qm_fd_compound) {
  12583. + if (fd->format != qm_fd_compound) {
  12584. dev_err(qidev, "Non-compound FD from CAAM\n");
  12585. return;
  12586. }
  12587. @@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f
  12588. req_fq->cb.fqs = NULL;
  12589. ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
  12590. - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
  12591. + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
  12592. + req_fq);
  12593. if (ret) {
  12594. dev_err(qidev, "Failed to create session req FQ\n");
  12595. goto create_req_fq_fail;
  12596. }
  12597. - memset(&opts, 0, sizeof(opts));
  12598. - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
  12599. - QM_INITFQ_WE_CONTEXTB |
  12600. - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
  12601. - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
  12602. - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
  12603. - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
  12604. - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
  12605. + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
  12606. + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
  12607. + QM_INITFQ_WE_CGID;
  12608. + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
  12609. + opts.fqd.dest.channel = qm_channel_caam;
  12610. + opts.fqd.dest.wq = 2;
  12611. + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
  12612. + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
  12613. + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
  12614. opts.fqd.cgid = qipriv.cgr.cgrid;
  12615. ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
  12616. @@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f
  12617. return req_fq;
  12618. init_req_fq_fail:
  12619. - qman_destroy_fq(req_fq);
  12620. + qman_destroy_fq(req_fq, 0);
  12621. create_req_fq_fail:
  12622. kfree(req_fq);
  12623. return ERR_PTR(ret);
  12624. @@ -275,7 +275,7 @@ empty_fq:
  12625. if (ret)
  12626. dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
  12627. - qman_destroy_fq(fq);
  12628. + qman_destroy_fq(fq, 0);
  12629. kfree(fq);
  12630. return ret;
  12631. @@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq
  12632. if (ret)
  12633. return ret;
  12634. - if (!qm_mcr_np_get(&np, frm_cnt))
  12635. + if (!np.frm_cnt)
  12636. break;
  12637. msleep(20);
  12638. @@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp
  12639. struct caam_drv_req *drv_req;
  12640. const struct qm_fd *fd;
  12641. struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
  12642. - u32 status;
  12643. if (caam_qi_napi_schedule(p, caam_napi))
  12644. return qman_cb_dqrr_stop;
  12645. fd = &dqrr->fd;
  12646. - status = be32_to_cpu(fd->status);
  12647. - if (unlikely(status))
  12648. - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
  12649. + if (unlikely(fd->status)) {
  12650. + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
  12651. + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
  12652. +
  12653. + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
  12654. + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
  12655. + dev_err(qidev, "Error: %#x in CAAM response FD\n",
  12656. + fd->status);
  12657. + }
  12658. - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
  12659. + if (unlikely(fd->format != qm_fd_compound)) {
  12660. dev_err(qidev, "Non-compound FD from CAAM\n");
  12661. return qman_cb_dqrr_consume;
  12662. }
  12663. - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
  12664. + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
  12665. if (unlikely(!drv_req)) {
  12666. dev_err(qidev,
  12667. "Can't find original request for caam response\n");
  12668. @@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp
  12669. dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
  12670. sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
  12671. - drv_req->cbk(drv_req, status);
  12672. + drv_req->cbk(drv_req, fd->status);
  12673. return qman_cb_dqrr_consume;
  12674. }
  12675. @@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic
  12676. return -ENODEV;
  12677. }
  12678. - memset(&opts, 0, sizeof(opts));
  12679. - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
  12680. - QM_INITFQ_WE_CONTEXTB |
  12681. - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
  12682. - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
  12683. - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
  12684. - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
  12685. + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
  12686. + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
  12687. + QM_INITFQ_WE_CGID;
  12688. + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
  12689. + QM_FQCTRL_CGE;
  12690. + opts.fqd.dest.channel = qman_affine_channel(cpu);
  12691. + opts.fqd.dest.wq = 3;
  12692. opts.fqd.cgid = qipriv.cgr.cgrid;
  12693. opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
  12694. QM_STASHING_EXCL_DATA;
  12695. - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
  12696. + opts.fqd.context_a.stashing.data_cl = 1;
  12697. + opts.fqd.context_a.stashing.context_cl = 1;
  12698. ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
  12699. if (ret) {
  12700. @@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev
  12701. qipriv.cgr.cb = cgr_cb;
  12702. memset(&opts, 0, sizeof(opts));
  12703. - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
  12704. - QM_CGR_WE_MODE);
  12705. + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
  12706. opts.cgr.cscn_en = QM_CGR_EN;
  12707. opts.cgr.mode = QMAN_CGR_MODE_FRAME;
  12708. qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
  12709. --- a/drivers/crypto/caam/qi.h
  12710. +++ b/drivers/crypto/caam/qi.h
  12711. @@ -9,7 +9,7 @@
  12712. #ifndef __QI_H__
  12713. #define __QI_H__
  12714. -#include <soc/fsl/qman.h>
  12715. +#include <linux/fsl_qman.h>
  12716. #include "compat.h"
  12717. #include "desc.h"
  12718. #include "desc_constr.h"
  12719. --- a/drivers/crypto/caam/regs.h
  12720. +++ b/drivers/crypto/caam/regs.h
  12721. @@ -627,6 +627,8 @@ struct caam_job_ring {
  12722. #define JRSTA_DECOERR_INVSIGN 0x86
  12723. #define JRSTA_DECOERR_DSASIGN 0x87
  12724. +#define JRSTA_QIERR_ERROR_MASK 0x00ff
  12725. +
  12726. #define JRSTA_CCBERR_JUMP 0x08000000
  12727. #define JRSTA_CCBERR_INDEX_MASK 0xff00
  12728. #define JRSTA_CCBERR_INDEX_SHIFT 8
  12729. --- a/drivers/crypto/caam/sg_sw_qm.h
  12730. +++ b/drivers/crypto/caam/sg_sw_qm.h
  12731. @@ -34,46 +34,61 @@
  12732. #ifndef __SG_SW_QM_H
  12733. #define __SG_SW_QM_H
  12734. -#include <soc/fsl/qman.h>
  12735. +#include <linux/fsl_qman.h>
  12736. #include "regs.h"
  12737. +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
  12738. +{
  12739. + dma_addr_t addr = qm_sg_ptr->opaque;
  12740. +
  12741. + qm_sg_ptr->opaque = cpu_to_caam64(addr);
  12742. + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
  12743. +}
  12744. +
  12745. static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
  12746. - u16 offset)
  12747. + u32 len, u16 offset)
  12748. {
  12749. - qm_sg_entry_set64(qm_sg_ptr, dma);
  12750. + qm_sg_ptr->addr = dma;
  12751. + qm_sg_ptr->length = len;
  12752. qm_sg_ptr->__reserved2 = 0;
  12753. qm_sg_ptr->bpid = 0;
  12754. - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
  12755. + qm_sg_ptr->__reserved3 = 0;
  12756. + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
  12757. +
  12758. + cpu_to_hw_sg(qm_sg_ptr);
  12759. }
  12760. static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
  12761. dma_addr_t dma, u32 len, u16 offset)
  12762. {
  12763. - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
  12764. - qm_sg_entry_set_len(qm_sg_ptr, len);
  12765. + qm_sg_ptr->extension = 0;
  12766. + qm_sg_ptr->final = 0;
  12767. + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
  12768. }
  12769. static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
  12770. dma_addr_t dma, u32 len, u16 offset)
  12771. {
  12772. - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
  12773. - qm_sg_entry_set_f(qm_sg_ptr, len);
  12774. + qm_sg_ptr->extension = 0;
  12775. + qm_sg_ptr->final = 1;
  12776. + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
  12777. }
  12778. static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
  12779. dma_addr_t dma, u32 len, u16 offset)
  12780. {
  12781. - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
  12782. - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
  12783. + qm_sg_ptr->extension = 1;
  12784. + qm_sg_ptr->final = 0;
  12785. + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
  12786. }
  12787. static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
  12788. dma_addr_t dma, u32 len,
  12789. u16 offset)
  12790. {
  12791. - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
  12792. - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
  12793. - (len & QM_SG_LEN_MASK));
  12794. + qm_sg_ptr->extension = 1;
  12795. + qm_sg_ptr->final = 1;
  12796. + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
  12797. }
  12798. /*
  12799. @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
  12800. struct qm_sg_entry *qm_sg_ptr, u16 offset)
  12801. {
  12802. qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
  12803. - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
  12804. +
  12805. + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
  12806. + qm_sg_ptr->final = 1;
  12807. + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
  12808. }
  12809. #endif /* __SG_SW_QM_H */
  12810. --- a/drivers/crypto/talitos.c
  12811. +++ b/drivers/crypto/talitos.c
  12812. @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
  12813. ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
  12814. sg_count, areq->assoclen, tbl_off, elen);
  12815. + /*
  12816. + * In case of SEC 2.x+, the cipher-in length must include only the
  12817. + * ciphertext, while the extent field carries the ICV length.
  12818. + */
  12819. + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
  12820. + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
  12821. + desc->ptr[4].len = cpu_to_be16(cryptlen);
  12822. +
  12823. if (ret > 1) {
  12824. tbl_off += ret;
  12825. sync_needed = true;