From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
From: Biwen Li <[email protected]>
Date: Tue, 30 Oct 2018 18:28:03 +0800
Subject: [PATCH 37/40] sec: support layerscape

This is an integrated patch adding SEC (crypto accelerator) support for Layerscape.
Signed-off-by: Alex Porosanu <[email protected]>
Signed-off-by: Cristian Stoica <[email protected]>
Signed-off-by: Guanhua Gao <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Horia Geantă <[email protected]>
Signed-off-by: Radu Alexe <[email protected]>
Signed-off-by: Tudor Ambarus <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
Signed-off-by: Zhao Qiang <[email protected]>
Signed-off-by: Biwen Li <[email protected]>
---
 crypto/Kconfig                      |   20 +
 crypto/Makefile                     |    1 +
 crypto/tcrypt.c                     |   27 +-
 crypto/testmgr.c                    |  244 ++
 crypto/testmgr.h                    |  219 ++
 crypto/tls.c                        |  607 +++
 drivers/crypto/Makefile             |    2 +-
 drivers/crypto/caam/Kconfig         |   57 +-
 drivers/crypto/caam/Makefile        |   10 +-
 drivers/crypto/caam/caamalg.c       |  131 +-
 drivers/crypto/caam/caamalg_desc.c  |  761 +++-
 drivers/crypto/caam/caamalg_desc.h  |   47 +-
 drivers/crypto/caam/caamalg_qi.c    |  927 ++++-
 drivers/crypto/caam/caamalg_qi2.c   | 5691 +++++++++++++++++++++++++++
 drivers/crypto/caam/caamalg_qi2.h   |  274 ++
 drivers/crypto/caam/caamhash.c      |  132 +-
 drivers/crypto/caam/caamhash_desc.c |  108 +
 drivers/crypto/caam/caamhash_desc.h |   49 +
 drivers/crypto/caam/compat.h        |    2 +
 drivers/crypto/caam/ctrl.c          |   23 +-
 drivers/crypto/caam/desc.h          |   62 +-
 drivers/crypto/caam/desc_constr.h   |   52 +-
 drivers/crypto/caam/dpseci.c        |  865 ++++
 drivers/crypto/caam/dpseci.h        |  433 ++
 drivers/crypto/caam/dpseci_cmd.h    |  287 ++
 drivers/crypto/caam/error.c         |   75 +-
 drivers/crypto/caam/error.h         |    6 +-
 drivers/crypto/caam/intern.h        |    1 +
 drivers/crypto/caam/jr.c            |   42 +
 drivers/crypto/caam/jr.h            |    2 +
 drivers/crypto/caam/key_gen.c       |   30 -
 drivers/crypto/caam/key_gen.h       |   30 +
 drivers/crypto/caam/qi.c            |   85 +-
 drivers/crypto/caam/qi.h            |    2 +-
 drivers/crypto/caam/regs.h          |    2 +
 drivers/crypto/caam/sg_sw_qm.h      |   46 +-
 drivers/crypto/talitos.c            |    8 +
 37 files changed, 11006 insertions(+), 354 deletions(-)
 create mode 100644 crypto/tls.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
 create mode 100644 drivers/crypto/caam/caamhash_desc.c
 create mode 100644 drivers/crypto/caam/caamhash_desc.h
 create mode 100644 drivers/crypto/caam/dpseci.c
 create mode 100644 drivers/crypto/caam/dpseci.h
 create mode 100644 drivers/crypto/caam/dpseci_cmd.h

--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
 	  a sequence number xored with a salt. This is the default
 	  algorithm for CBC.

+config CRYPTO_TLS
+	tristate "TLS support"
+	select CRYPTO_AEAD
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_HASH
+	select CRYPTO_NULL
+	select CRYPTO_AUTHENC
+	help
- + Support for TLS 1.0 record encryption and decryption.
- +
- + This module adds support for encryption/decryption of TLS 1.0 frames
- + using block cipher algorithms. The name of the resulting algorithm is
- + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
- + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
- + accelerated versions will be used automatically if available.
- +
- + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
- + operations through the AF_ALG or cryptodev interfaces.
- +
- comment "Block modes"
-
- config CRYPTO_CBC
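
The AF_ALG route mentioned in the help text can be exercised from userspace once the template is registered. A minimal sketch, assuming the algorithm name built by this template is available (error handling trimmed; the key blob must use the authenc layout shown with the test vectors further below):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",
                    .salg_name   = "tls10(hmac(sha1),cbc(aes))",
            };
            int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

            if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa))) {
                    perror("tls10 algorithm not available");
                    return 1;
            }
            /* setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, ...) and accept()
             * would follow for an actual encryption request.
             */
            close(tfmfd);
            return 0;
    }
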
- --- a/crypto/Makefile
- +++ b/crypto/Makefile
- @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
- obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
- obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
- obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
- +obj-$(CONFIG_CRYPTO_TLS) += tls.o
- obj-$(CONFIG_CRYPTO_LZO) += lzo.o
- obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
- obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
- --- a/crypto/tcrypt.c
- +++ b/crypto/tcrypt.c
- @@ -76,7 +76,7 @@ static char *check[] = {
- "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
- "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
- - NULL
- + "rsa", NULL
- };
-
- struct tcrypt_result {
- @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
- iv);
- aead_request_set_ad(req, aad_size);
-
- - if (secs)
- + if (secs) {
- ret = test_aead_jiffies(req, enc, *b_size,
- secs);
- - else
- + cond_resched();
- + } else {
- ret = test_aead_cycles(req, enc, *b_size);
- + }
-
- if (ret) {
- pr_err("%s() failed return code=%d\n", e, ret);
- @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
-
- ahash_request_set_crypt(req, sg, output, speed[i].plen);
-
- - if (secs)
- + if (secs) {
- ret = test_ahash_jiffies(req, speed[i].blen,
- speed[i].plen, output, secs);
- - else
- + cond_resched();
- + } else {
- ret = test_ahash_cycles(req, speed[i].blen,
- speed[i].plen, output);
- + }
-
- if (ret) {
- pr_err("hashing failed ret=%d\n", ret);
- @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
-
- skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
-
- - if (secs)
- + if (secs) {
- ret = test_acipher_jiffies(req, enc,
- *b_size, secs);
- - else
- + cond_resched();
- + } else {
- ret = test_acipher_cycles(req, enc,
- *b_size);
- + }
-
- if (ret) {
- pr_err("%s() failed flags=%x\n", e,
- @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
- ret += tcrypt_test("hmac(sha3-512)");
- break;
-
- + case 115:
- + ret += tcrypt_test("rsa");
- + break;
- +
- case 150:
- ret += tcrypt_test("ansi_cprng");
- break;
- @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
- case 190:
- ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
- break;
- + case 191:
- + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
- + break;
- case 200:
- test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
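
With the tcrypt hook above, the new vectors can be exercised from the shell once the patch is applied, assuming tcrypt is built as a module:

    modprobe tcrypt mode=191

tcrypt deliberately fails to stay loaded after running its tests, so an error from modprobe is expected; the pass/fail results land in the kernel log.
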
- --- a/crypto/testmgr.c
- +++ b/crypto/testmgr.c
- @@ -117,6 +117,13 @@ struct drbg_test_suite {
- unsigned int count;
- };
-
- +struct tls_test_suite {
- + struct {
- + struct tls_testvec *vecs;
- + unsigned int count;
- + } enc, dec;
- +};
- +
- struct akcipher_test_suite {
- const struct akcipher_testvec *vecs;
- unsigned int count;
- @@ -140,6 +147,7 @@ struct alg_test_desc {
- struct hash_test_suite hash;
- struct cprng_test_suite cprng;
- struct drbg_test_suite drbg;
- + struct tls_test_suite tls;
- struct akcipher_test_suite akcipher;
- struct kpp_test_suite kpp;
- } suite;
- @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
- return 0;
- }
-
- +static int __test_tls(struct crypto_aead *tfm, int enc,
- + struct tls_testvec *template, unsigned int tcount,
- + const bool diff_dst)
- +{
- + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
- + unsigned int i, k, authsize;
- + char *q;
- + struct aead_request *req;
- + struct scatterlist *sg;
- + struct scatterlist *sgout;
- + const char *e, *d;
- + struct tcrypt_result result;
- + void *input;
- + void *output;
- + void *assoc;
- + char *iv;
- + char *key;
- + char *xbuf[XBUFSIZE];
- + char *xoutbuf[XBUFSIZE];
- + char *axbuf[XBUFSIZE];
- + int ret = -ENOMEM;
- +
- + if (testmgr_alloc_buf(xbuf))
- + goto out_noxbuf;
- +
- + if (diff_dst && testmgr_alloc_buf(xoutbuf))
- + goto out_nooutbuf;
- +
- + if (testmgr_alloc_buf(axbuf))
- + goto out_noaxbuf;
- +
- + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
- + if (!iv)
- + goto out_noiv;
- +
- + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
- + if (!key)
- + goto out_nokey;
- +
- + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
- + if (!sg)
- + goto out_nosg;
- +
- + sgout = sg + 8;
- +
- + d = diff_dst ? "-ddst" : "";
- + e = enc ? "encryption" : "decryption";
- +
- + init_completion(&result.completion);
- +
- + req = aead_request_alloc(tfm, GFP_KERNEL);
- + if (!req) {
- + pr_err("alg: tls%s: Failed to allocate request for %s\n",
- + d, algo);
- + goto out;
- + }
- +
- + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- + tcrypt_complete, &result);
- +
- + for (i = 0; i < tcount; i++) {
- + input = xbuf[0];
- + assoc = axbuf[0];
- +
- + ret = -EINVAL;
- + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
- + template[i].alen > PAGE_SIZE))
- + goto out;
- +
- + memcpy(assoc, template[i].assoc, template[i].alen);
- + memcpy(input, template[i].input, template[i].ilen);
- +
- + if (template[i].iv)
- + memcpy(iv, template[i].iv, MAX_IVLEN);
- + else
- + memset(iv, 0, MAX_IVLEN);
- +
- + crypto_aead_clear_flags(tfm, ~0);
- +
- + if (template[i].klen > MAX_KEYLEN) {
- + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
- + d, i, algo, template[i].klen, MAX_KEYLEN);
- + ret = -EINVAL;
- + goto out;
- + }
- + memcpy(key, template[i].key, template[i].klen);
- +
- + ret = crypto_aead_setkey(tfm, key, template[i].klen);
- + if (!ret == template[i].fail) {
- + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
- + d, i, algo, crypto_aead_get_flags(tfm));
- + goto out;
- + } else if (ret)
- + continue;
- +
- + authsize = 20;
- + ret = crypto_aead_setauthsize(tfm, authsize);
- + if (ret) {
- + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
- + d, authsize, i, algo);
- + goto out;
- + }
- +
- + k = !!template[i].alen;
- + sg_init_table(sg, k + 1);
- + sg_set_buf(&sg[0], assoc, template[i].alen);
- + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
- + template[i].ilen));
- + output = input;
- +
- + if (diff_dst) {
- + sg_init_table(sgout, k + 1);
- + sg_set_buf(&sgout[0], assoc, template[i].alen);
- +
- + output = xoutbuf[0];
- + sg_set_buf(&sgout[k], output,
- + (enc ? template[i].rlen : template[i].ilen));
- + }
- +
- + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- + template[i].ilen, iv);
- +
- + aead_request_set_ad(req, template[i].alen);
- +
- + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
- +
- + switch (ret) {
- + case 0:
- + if (template[i].novrfy) {
- + /* verification was supposed to fail */
- + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
- + d, e, i, algo);
- + /* so really, we got a bad message */
- + ret = -EBADMSG;
- + goto out;
- + }
- + break;
- + case -EINPROGRESS:
- + case -EBUSY:
- + wait_for_completion(&result.completion);
- + reinit_completion(&result.completion);
- + ret = result.err;
- + if (!ret)
- + break;
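- + /* fall through */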
- + case -EBADMSG:
- + /* verification failure was expected */
- + if (template[i].novrfy)
- + continue;
- + /* fall through */
- + default:
- + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
- + d, e, i, algo, -ret);
- + goto out;
- + }
- +
- + q = output;
- + if (memcmp(q, template[i].result, template[i].rlen)) {
- + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
- + d, i, e, algo);
- + hexdump(q, template[i].rlen);
- + pr_err("should be:\n");
- + hexdump(template[i].result, template[i].rlen);
- + ret = -EINVAL;
- + goto out;
- + }
- + }
- +
- +out:
- + aead_request_free(req);
- +
- + kfree(sg);
- +out_nosg:
- + kfree(key);
- +out_nokey:
- + kfree(iv);
- +out_noiv:
- + testmgr_free_buf(axbuf);
- +out_noaxbuf:
- + if (diff_dst)
- + testmgr_free_buf(xoutbuf);
- +out_nooutbuf:
- + testmgr_free_buf(xbuf);
- +out_noxbuf:
- + return ret;
- +}
- +
- +static int test_tls(struct crypto_aead *tfm, int enc,
- + struct tls_testvec *template, unsigned int tcount)
- +{
- + int ret;
- + /* test 'dst == src' case */
- + ret = __test_tls(tfm, enc, template, tcount, false);
- + if (ret)
- + return ret;
- + /* test 'dst != src' case */
- + return __test_tls(tfm, enc, template, tcount, true);
- +}
- +
- +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
- + u32 type, u32 mask)
- +{
- + struct crypto_aead *tfm;
- + int err = 0;
- +
- + tfm = crypto_alloc_aead(driver, type, mask);
- + if (IS_ERR(tfm)) {
- + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
- + driver, PTR_ERR(tfm));
- + return PTR_ERR(tfm);
- + }
- +
- + if (desc->suite.tls.enc.vecs) {
- + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
- + desc->suite.tls.enc.count);
- + if (err)
- + goto out;
- + }
- +
- + if (!err && desc->suite.tls.dec.vecs)
- + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
- + desc->suite.tls.dec.count);
- +
- +out:
- + crypto_free_aead(tfm);
- + return err;
- +}
- +
- static int test_cipher(struct crypto_cipher *tfm, int enc,
- const struct cipher_testvec *template,
- unsigned int tcount)
- @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
- .hash = __VECS(tgr192_tv_template)
- }
- }, {
- + .alg = "tls10(hmac(sha1),cbc(aes))",
- + .test = alg_test_tls,
- + .suite = {
- + .tls = {
- + .enc = __VECS(tls_enc_tv_template),
- + .dec = __VECS(tls_dec_tv_template)
- + }
- + }
- + }, {
- .alg = "vmac(aes)",
- .test = alg_test_hash,
- .suite = {
- --- a/crypto/testmgr.h
- +++ b/crypto/testmgr.h
- @@ -125,6 +125,20 @@ struct drbg_testvec {
- size_t expectedlen;
- };
-
- +struct tls_testvec {
- + char *key; /* wrapped keys for encryption and authentication */
- + char *iv; /* initialization vector */
- + char *input; /* input data */
- + char *assoc; /* associated data: seq num, type, version, input len */
- + char *result; /* result data */
- + unsigned char fail; /* the test failure is expected */
- + unsigned char novrfy; /* dec verification failure expected */
- + unsigned char klen; /* key length */
- + unsigned short ilen; /* input data length */
- + unsigned short alen; /* associated data length */
- + unsigned short rlen; /* result length */
- +};
- +
- struct akcipher_testvec {
- const unsigned char *key;
- const unsigned char *m;
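
For orientation, the 13-byte assoc buffers in the TLS vectors added below follow the TLS 1.0 MAC pseudo-header layout (decoded from the vectors themselves):

    bytes 0-7    sequence number   00 01 02 03 04 05 06 07
    byte  8      record type       00 in these synthetic vectors
                                   (real application data uses 0x17)
    bytes 9-10   version           03 01 (TLS 1.0)
    bytes 11-12  length            payload length on encryption;
                                   ciphertext length on decryption,
                                   rewritten before the MAC check
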
- @@ -153,6 +167,211 @@ struct kpp_testvec {
- static const char zeroed_string[48];
-
- /*
- + * TLS1.0 synthetic test vectors
- + */
- +static struct tls_testvec tls_enc_tv_template[] = {
- + {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "Single block msg",
- + .ilen = 16,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x00\x10",
- + .alen = 13,
- + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
- + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
- + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
- + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
- + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
- + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
- + .rlen = 16 + 20 + 12,
- + }, {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "",
- + .ilen = 0,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x00\x00",
- + .alen = 13,
- + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
- + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
- + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
- + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
- + .rlen = 20 + 12,
- + }, {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext285"
- + " bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext285"
- + " bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext285"
- + " bytes plaintext285 bytes plaintext",
- + .ilen = 285,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x01\x1d",
- + .alen = 13,
- + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
- + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
- + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
- + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
- + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
- + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
- + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
- + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
- + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
- + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
- + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
- + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
- + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
- + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
- + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
- + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
- + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
- + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
- + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
- + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
- + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
- + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
- + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
- + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
- + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
- + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
- + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
- + .rlen = 285 + 20 + 15,
- + }
- +};
- +
- +static struct tls_testvec tls_dec_tv_template[] = {
- + {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
- + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
- + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
- + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
- + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
- + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
- + .ilen = 16 + 20 + 12,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x00\x30",
- + .alen = 13,
- + .result = "Single block msg",
- + .rlen = 16,
- + }, {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
- + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
- + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
- + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
- + .ilen = 20 + 12,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x00\x20",
- + .alen = 13,
- + .result = "",
- + .rlen = 0,
- + }, {
- +#ifdef __LITTLE_ENDIAN
- + .key = "\x08\x00" /* rta length */
- + "\x01\x00" /* rta type */
- +#else
- + .key = "\x00\x08" /* rta length */
- + "\x00\x01" /* rta type */
- +#endif
- + "\x00\x00\x00\x10" /* enc key length */
- + "authenticationkey20benckeyis16_bytes",
- + .klen = 8 + 20 + 16,
- + .iv = "iv0123456789abcd",
- + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
- + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
- + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
- + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
- + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
- + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
- + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
- + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
- + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
- + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
- + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
- + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
- + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
- + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
- + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
- + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
- + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
- + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
- + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
- + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
- + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
- + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
- + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
- + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
- + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
- + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
- + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
- +
- + .ilen = 285 + 20 + 15,
- + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- + "\x00\x03\x01\x01\x40",
- + .alen = 13,
- + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext285"
- + " bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext285"
- + " bytes plaintext285 bytes plaintext285 bytes"
- + " plaintext285 bytes plaintext285 bytes plaintext",
- + .rlen = 285,
- + }
- +};
- +
- +/*
- * RSA test vectors. Borrowed from openSSL.
- */
- static const struct akcipher_testvec rsa_tv_template[] = {
- --- /dev/null
- +++ b/crypto/tls.c
- @@ -0,0 +1,607 @@
- +/*
- + * Copyright 2013 Freescale Semiconductor, Inc.
- + * Copyright 2017 NXP Semiconductor, Inc.
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the Free
- + * Software Foundation; either version 2 of the License, or (at your option)
- + * any later version.
- + *
- + */
- +
- +#include <crypto/internal/aead.h>
- +#include <crypto/internal/hash.h>
- +#include <crypto/internal/skcipher.h>
- +#include <crypto/authenc.h>
- +#include <crypto/null.h>
- +#include <crypto/scatterwalk.h>
- +#include <linux/err.h>
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/rtnetlink.h>
- +
- +struct tls_instance_ctx {
- + struct crypto_ahash_spawn auth;
- + struct crypto_skcipher_spawn enc;
- +};
- +
- +struct crypto_tls_ctx {
- + unsigned int reqoff;
- + struct crypto_ahash *auth;
- + struct crypto_skcipher *enc;
- + struct crypto_skcipher *null;
- +};
- +
- +struct tls_request_ctx {
- + /*
- + * cryptlen holds the payload length in the case of encryption or
- + * payload_len + icv_len + padding_len in case of decryption
- + */
- + unsigned int cryptlen;
- + /* working space for partial results */
- + struct scatterlist tmp[2];
- + struct scatterlist cipher[2];
- + struct scatterlist dst[2];
- + char tail[];
- +};
- +
- +struct async_op {
- + struct completion completion;
- + int err;
- +};
- +
- +static void tls_async_op_done(struct crypto_async_request *req, int err)
- +{
- + struct async_op *areq = req->data;
- +
- + if (err == -EINPROGRESS)
- + return;
- +
- + areq->err = err;
- + complete(&areq->completion);
- +}
- +
- +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
- + unsigned int keylen)
- +{
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
- + struct crypto_ahash *auth = ctx->auth;
- + struct crypto_skcipher *enc = ctx->enc;
- + struct crypto_authenc_keys keys;
- + int err = -EINVAL;
- +
- + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- + goto badkey;
- +
- + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
- + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
- + CRYPTO_TFM_REQ_MASK);
- + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
- + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
- + CRYPTO_TFM_RES_MASK);
- +
- + if (err)
- + goto out;
- +
- + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
- + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
- + CRYPTO_TFM_REQ_MASK);
- + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
- + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
- + CRYPTO_TFM_RES_MASK);
- +
- +out:
- + return err;
- +
- +badkey:
- + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + goto out;
- +}
- +
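
crypto_authenc_extractkeys() above expects the key blob in the authenc layout also used by the testmgr vectors: an rtattr header, a big-endian encryption-key length, then the authentication key followed by the encryption key. A userspace sketch of packing such a blob (the helper name is illustrative, not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <linux/rtnetlink.h>

    struct crypto_authenc_key_param {
            uint32_t enckeylen;             /* stored big endian */
    };

    static size_t pack_authenc_key(uint8_t *buf,
                                   const uint8_t *authkey, size_t authlen,
                                   const uint8_t *enckey, size_t enclen)
    {
            struct rtattr *rta = (struct rtattr *)buf;
            struct crypto_authenc_key_param *param;

            rta->rta_type = 1;              /* CRYPTO_AUTHENC_KEYA_PARAM */
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = htonl(enclen);

            /* auth key and enc key are simply concatenated after the header */
            memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authlen);
            memcpy(buf + RTA_SPACE(sizeof(*param)) + authlen, enckey, enclen);

            return RTA_SPACE(sizeof(*param)) + authlen + enclen;
    }

For a 20-byte HMAC-SHA1 key and a 16-byte AES-128 key this reproduces the 8 + 20 + 16 byte klen of the vectors in testmgr.h.
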
- +/**
- + * crypto_tls_genicv - Calculate hmac digest for a TLS record
- + * @hash: (output) buffer to save the digest into
- + * @src: (input) scatterlist with the assoc and payload data
- + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
- + * @req: (input) aead request
- + */
- +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
- + unsigned int srclen, struct aead_request *req)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
- + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
- + struct async_op ahash_op;
- + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
- + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- + int err = -EBADMSG;
- +
- + /* Bail out if the request assoc len is 0 */
- + if (!req->assoclen)
- + return err;
- +
- + init_completion(&ahash_op.completion);
- +
- + /* the hash transform to be executed comes from the original request */
- + ahash_request_set_tfm(ahreq, ctx->auth);
- + /* prepare the hash request with input data and result pointer */
- + ahash_request_set_crypt(ahreq, src, hash, srclen);
- + /* set the notifier for when the async hash function returns */
- + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
- + tls_async_op_done, &ahash_op);
- +
- + /* Calculate the digest on the given data. The result is put in hash */
- + err = crypto_ahash_digest(ahreq);
- + if (err == -EINPROGRESS) {
- + err = wait_for_completion_interruptible(&ahash_op.completion);
- + if (!err)
- + err = ahash_op.err;
- + }
- +
- + return err;
- +}
- +
- +/**
- + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
- + * @hash: (output) buffer to save the digest and padding into
- + * @phashlen: (output) the size of digest + padding
- + * @req: (input) aead request
- + */
- +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
- + struct aead_request *req)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + unsigned int hash_size = crypto_aead_authsize(tls);
- + unsigned int block_size = crypto_aead_blocksize(tls);
- + unsigned int srclen = req->cryptlen + hash_size;
- + unsigned int icvlen = req->cryptlen + req->assoclen;
- + unsigned int padlen;
- + int err;
- +
- + err = crypto_tls_genicv(hash, req->src, icvlen, req);
- + if (err)
- + goto out;
- +
- + /* add padding after digest */
- + padlen = block_size - (srclen % block_size);
- + memset(hash + hash_size, padlen - 1, padlen);
- +
- + *phashlen = hash_size + padlen;
- +out:
- + return err;
- +}
- +
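
Worked through for the first encryption vector in testmgr.h: a 16-byte payload with a 20-byte SHA-1 digest gives srclen = 36, so padlen = 16 - (36 % 16) = 12, and twelve pad bytes of value 11 (padlen - 1) are written after the digest. The record grows to 16 + 20 + 12 = 48 bytes, matching that vector's rlen.
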
- +static int crypto_tls_copy_data(struct aead_request *req,
- + struct scatterlist *src,
- + struct scatterlist *dst,
- + unsigned int len)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
- + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- +
- + skcipher_request_set_tfm(skreq, ctx->null);
- + skcipher_request_set_callback(skreq, aead_request_flags(req),
- + NULL, NULL);
- + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
- +
- + return crypto_skcipher_encrypt(skreq);
- +}
- +
- +static int crypto_tls_encrypt(struct aead_request *req)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
- + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
- + struct skcipher_request *skreq;
- + struct scatterlist *cipher = treq_ctx->cipher;
- + struct scatterlist *tmp = treq_ctx->tmp;
- + struct scatterlist *sg, *src, *dst;
- + unsigned int cryptlen, phashlen;
- + u8 *hash = treq_ctx->tail;
- + int err;
- +
- + /*
- + * The hash result is saved at the beginning of the tls request ctx
- + * and is aligned as required by the hash transform. Enough space was
- + * allocated in crypto_tls_init_tfm to accommodate the difference. The
- + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
- + * the result is not overwritten by the second (cipher) request.
- + */
- + hash = (u8 *)ALIGN((unsigned long)hash +
- + crypto_ahash_alignmask(ctx->auth),
- + crypto_ahash_alignmask(ctx->auth) + 1);
- +
- + /*
- + * STEP 1: create ICV together with necessary padding
- + */
- + err = crypto_tls_gen_padicv(hash, &phashlen, req);
- + if (err)
- + return err;
- +
- + /*
- + * STEP 2: Hash and padding are combined with the payload
- + * depending on the form it arrives. Scatter tables must have at least
- + * one page of data before chaining with another table and can't have
- + * an empty data page. The following code addresses these requirements.
- + *
- + * If the payload is empty, only the hash is encrypted, otherwise the
- + * payload scatterlist is merged with the hash. A special merging case
- + * is when the payload has only one page of data. In that case the
- + * payload page is moved to another scatterlist and prepared there for
- + * encryption.
- + */
- + if (req->cryptlen) {
- + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
- +
- + sg_init_table(cipher, 2);
- + sg_set_buf(cipher + 1, hash, phashlen);
- +
- + if (sg_is_last(src)) {
- + sg_set_page(cipher, sg_page(src), req->cryptlen,
- + src->offset);
- + src = cipher;
- + } else {
- + unsigned int rem_len = req->cryptlen;
- +
- + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
- + rem_len -= min(rem_len, sg->length);
- +
- + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
- + sg_chain(sg, 1, cipher);
- + }
- + } else {
- + sg_init_one(cipher, hash, phashlen);
- + src = cipher;
- + }
- +
- + /*
- + * If src != dst, copy the associated data from source to destination.
- + * In both cases, fast-forward past the associated data in the dest.
- + */
- + if (req->src != req->dst) {
- + err = crypto_tls_copy_data(req, req->src, req->dst,
- + req->assoclen);
- + if (err)
- + return err;
- + }
- + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
- +
- + /*
- + * STEP 3: encrypt the frame and return the result
- + */
- + cryptlen = req->cryptlen + phashlen;
- +
- + /*
- + * The hash and the cipher are applied at different times and their
- + * requests can use the same memory space without interference
- + */
- + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
- + skcipher_request_set_tfm(skreq, ctx->enc);
- + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
- + skcipher_request_set_callback(skreq, aead_request_flags(req),
- + req->base.complete, req->base.data);
- + /*
- + * Apply the cipher transform. The result will be in req->dst when the
- + * asynchronous call terminates
- + */
- + err = crypto_skcipher_encrypt(skreq);
- +
- + return err;
- +}
- +
- +static int crypto_tls_decrypt(struct aead_request *req)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
- + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
- + unsigned int cryptlen = req->cryptlen;
- + unsigned int hash_size = crypto_aead_authsize(tls);
- + unsigned int block_size = crypto_aead_blocksize(tls);
- + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
- + struct scatterlist *tmp = treq_ctx->tmp;
- + struct scatterlist *src, *dst;
- +
- + u8 padding[255]; /* padding can be 0-255 bytes */
- + u8 pad_size;
- + u16 *len_field;
- + u8 *ihash, *hash = treq_ctx->tail;
- +
- + int paderr = 0;
- + int err = -EINVAL;
- + int i;
- + struct async_op ciph_op;
- +
- + /*
- + * Rule out bad packets. The input length must be larger than the
- + * hash size and a multiple of the cipher block size
- + */
- + if (cryptlen <= hash_size || cryptlen % block_size)
- + goto out;
- +
- + /*
- + * Step 1 - Decrypt the source. Fast-forward past the associated data
- + * to the encrypted data. The result will be overwritten in place so
- + * that the decrypted data will be adjacent to the associated data. The
- + * last step (computing the hash) will have its input data already
- + * prepared and ready to be accessed at req->src.
- + */
- + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
- + dst = src;
- +
- + init_completion(&ciph_op.completion);
- + skcipher_request_set_tfm(skreq, ctx->enc);
- + skcipher_request_set_callback(skreq, aead_request_flags(req),
- + tls_async_op_done, &ciph_op);
- + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
- + err = crypto_skcipher_decrypt(skreq);
- + if (err == -EINPROGRESS) {
- + err = wait_for_completion_interruptible(&ciph_op.completion);
- + if (!err)
- + err = ciph_op.err;
- + }
- + if (err)
- + goto out;
- +
- + /*
- + * Step 2 - Verify padding
- + * Retrieve the last byte of the payload; this is the padding size.
- + */
- + cryptlen -= 1;
- + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
- +
- + /*
- + * Impossible padding size: per the RFC's recommendation, continue
- + * with a zero-length pad and record the error, so a padding failure
- + * is not distinguishable from a MAC failure.
- + */
- + if (cryptlen < pad_size + hash_size) {
- + pad_size = 0;
- + paderr = -EBADMSG;
- + }
- + cryptlen -= pad_size;
- + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
- +
- + /* Every padding byte must equal pad_size; verify all of them */
- + for (i = 0; i < pad_size; i++)
- + if (padding[i] != pad_size)
- + paderr = -EBADMSG;
- +
- + /*
- + * Step 3 - Verify hash
- + * Align the digest result as required by the hash transform. Enough
- + * space was allocated in crypto_tls_init_tfm
- + */
- + hash = (u8 *)ALIGN((unsigned long)hash +
- + crypto_ahash_alignmask(ctx->auth),
- + crypto_ahash_alignmask(ctx->auth) + 1);
- + /*
- + * Two bytes at the end of the associated data make the length field.
- + * It must be updated with the length of the cleartext message before
- + * the hash is calculated.
- + */
- + len_field = sg_virt(req->src) + req->assoclen - 2;
- + cryptlen -= hash_size;
- + *len_field = htons(cryptlen);
- +
- + /* This is the hash from the decrypted packet. Save it for later */
- + ihash = hash + hash_size;
- + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
- +
- + /* Now compute and compare our ICV with the one from the packet */
- + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
- + if (!err)
- + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
- +
- + if (req->src != req->dst) {
- + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
- + req->assoclen);
- + if (err)
- + goto out;
- + }
- +
- + /* return the first found error */
- + if (paderr)
- + err = paderr;
- +
- +out:
- + aead_request_complete(req, err);
- + return err;
- +}
- +
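
Tracing the first decryption vector above through this path: the 48-byte input decrypts in place to payload || MAC || padding. The final byte is 11, so pad_size = 11 and cryptlen drops from 47 to 36; the eleven preceding pad bytes all equal 11, so paderr stays 0. Subtracting hash_size leaves cryptlen = 16, the two-byte length field in the associated data is rewritten to 16, and the HMAC computed over the assoc plus the 16-byte payload is compared against the 20 bytes lifted from the packet.
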
- +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
- +{
- + struct aead_instance *inst = aead_alg_instance(tfm);
- + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
- + struct crypto_ahash *auth;
- + struct crypto_skcipher *enc;
- + struct crypto_skcipher *null;
- + int err;
- +
- + auth = crypto_spawn_ahash(&ictx->auth);
- + if (IS_ERR(auth))
- + return PTR_ERR(auth);
- +
- + enc = crypto_spawn_skcipher(&ictx->enc);
- + err = PTR_ERR(enc);
- + if (IS_ERR(enc))
- + goto err_free_ahash;
- +
- + null = crypto_get_default_null_skcipher2();
- + err = PTR_ERR(null);
- + if (IS_ERR(null))
- + goto err_free_skcipher;
- +
- + ctx->auth = auth;
- + ctx->enc = enc;
- + ctx->null = null;
- +
- + /*
- + * Allow enough space for two digests. The two digests will be compared
- + * during the decryption phase. One will come from the decrypted packet
- + * and the other will be calculated. For encryption, one digest is
- + * padded (up to a cipher blocksize) and chained with the payload
- + */
- + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
- + crypto_ahash_alignmask(auth),
- + crypto_ahash_alignmask(auth) + 1) +
- + max(crypto_ahash_digestsize(auth),
- + crypto_skcipher_blocksize(enc));
- +
- + crypto_aead_set_reqsize(tfm,
- + sizeof(struct tls_request_ctx) +
- + ctx->reqoff +
- + max_t(unsigned int,
- + crypto_ahash_reqsize(auth) +
- + sizeof(struct ahash_request),
- + crypto_skcipher_reqsize(enc) +
- + sizeof(struct skcipher_request)));
- +
- + return 0;
- +
- +err_free_skcipher:
- + crypto_free_skcipher(enc);
- +err_free_ahash:
- + crypto_free_ahash(auth);
- + return err;
- +}
- +
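
The request-context layout implied by the reqoff arithmetic above can be pictured as:

    treq_ctx->tail
      [ alignment slack (up to alignmask)        ]
      [ hash: one digest, hash-aligned           ]
      [ max(digestsize, blocksize) bytes:        ]
      [   encrypt: padding up to a full block    ]
      [   decrypt: ihash copied from the packet  ]
    treq_ctx->tail + ctx->reqoff
      [ ahash or skcipher sub-request            ]
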
- +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
- +{
- + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
- +
- + crypto_free_ahash(ctx->auth);
- + crypto_free_skcipher(ctx->enc);
- + crypto_put_default_null_skcipher2();
- +}
- +
- +static void crypto_tls_free(struct aead_instance *inst)
- +{
- + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
- +
- + crypto_drop_skcipher(&ctx->enc);
- + crypto_drop_ahash(&ctx->auth);
- + kfree(inst);
- +}
- +
- +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
- +{
- + struct crypto_attr_type *algt;
- + struct aead_instance *inst;
- + struct hash_alg_common *auth;
- + struct crypto_alg *auth_base;
- + struct skcipher_alg *enc;
- + struct tls_instance_ctx *ctx;
- + const char *enc_name;
- + int err;
- +
- + algt = crypto_get_attr_type(tb);
- + if (IS_ERR(algt))
- + return PTR_ERR(algt);
- +
- + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- + return -EINVAL;
- +
- + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- + CRYPTO_ALG_TYPE_AHASH_MASK |
- + crypto_requires_sync(algt->type, algt->mask));
- + if (IS_ERR(auth))
- + return PTR_ERR(auth);
- +
- + auth_base = &auth->base;
- +
- + enc_name = crypto_attr_alg_name(tb[2]);
- + err = PTR_ERR(enc_name);
- + if (IS_ERR(enc_name))
- + goto out_put_auth;
- +
- + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- + err = -ENOMEM;
- + if (!inst)
- + goto out_put_auth;
- +
- + ctx = aead_instance_ctx(inst);
- +
- + err = crypto_init_ahash_spawn(&ctx->auth, auth,
- + aead_crypto_instance(inst));
- + if (err)
- + goto err_free_inst;
- +
- + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
- + crypto_requires_sync(algt->type,
- + algt->mask));
- + if (err)
- + goto err_drop_auth;
- +
- + enc = crypto_spawn_skcipher_alg(&ctx->enc);
- +
- + err = -ENAMETOOLONG;
- + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- + "tls10(%s,%s)", auth_base->cra_name,
- + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
- + goto err_drop_enc;
- +
- + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- + "tls10(%s,%s)", auth_base->cra_driver_name,
- + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- + goto err_drop_enc;
- +
- + inst->alg.base.cra_flags = (auth_base->cra_flags |
- + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
- + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
- + auth_base->cra_priority;
- + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
- + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- + enc->base.cra_alignmask;
- + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
- +
- + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
- + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
- + inst->alg.maxauthsize = auth->digestsize;
- +
- + inst->alg.init = crypto_tls_init_tfm;
- + inst->alg.exit = crypto_tls_exit_tfm;
- +
- + inst->alg.setkey = crypto_tls_setkey;
- + inst->alg.encrypt = crypto_tls_encrypt;
- + inst->alg.decrypt = crypto_tls_decrypt;
- +
- + inst->free = crypto_tls_free;
- +
- + err = aead_register_instance(tmpl, inst);
- + if (err)
- + goto err_drop_enc;
- +
- +out:
- + crypto_mod_put(auth_base);
- + return err;
- +
- +err_drop_enc:
- + crypto_drop_skcipher(&ctx->enc);
- +err_drop_auth:
- + crypto_drop_ahash(&ctx->auth);
- +err_free_inst:
- + kfree(inst);
- +out_put_auth:
- + goto out;
- +}
- +
- +static struct crypto_template crypto_tls_tmpl = {
- + .name = "tls10",
- + .create = crypto_tls_create,
- + .module = THIS_MODULE,
- +};
- +
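
In-kernel users reach the template through the usual AEAD API; a minimal sketch under that assumption (the function name is illustrative):

    #include <crypto/aead.h>
    #include <linux/err.h>

    static int tls10_example(const u8 *key, unsigned int keylen)
    {
            struct crypto_aead *tfm;
            int err;

            tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (!err)
                    err = crypto_aead_setauthsize(tfm, 20); /* SHA-1 ICV */

            /* a real user would allocate an aead_request and call
             * crypto_aead_encrypt()/crypto_aead_decrypt() here
             */
            crypto_free_aead(tfm);
            return err;
    }
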
- +static int __init crypto_tls_module_init(void)
- +{
- + return crypto_register_template(&crypto_tls_tmpl);
- +}
- +
- +static void __exit crypto_tls_module_exit(void)
- +{
- + crypto_unregister_template(&crypto_tls_tmpl);
- +}
- +
- +module_init(crypto_tls_module_init);
- +module_exit(crypto_tls_module_exit);
- +
- +MODULE_LICENSE("GPL");
- +MODULE_DESCRIPTION("TLS 1.0 record encryption");
- --- a/drivers/crypto/Makefile
- +++ b/drivers/crypto/Makefile
- @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
- obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
- obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
- obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
- -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
- +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
- obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
- obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
- obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
- --- a/drivers/crypto/caam/Kconfig
- +++ b/drivers/crypto/caam/Kconfig
- @@ -1,7 +1,11 @@
- +config CRYPTO_DEV_FSL_CAAM_COMMON
- + tristate
- +
- config CRYPTO_DEV_FSL_CAAM
- - tristate "Freescale CAAM-Multicore driver backend"
- + tristate "Freescale CAAM-Multicore platform driver backend"
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
- select SOC_BUS
- + select CRYPTO_DEV_FSL_CAAM_COMMON
- help
- Enables the driver module for Freescale's Cryptographic Accelerator
- and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
- @@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
- To compile this driver as a module, choose M here: the module
- will be called caam.
-
- +if CRYPTO_DEV_FSL_CAAM
- +
- +config CRYPTO_DEV_FSL_CAAM_DEBUG
- + bool "Enable debug output in CAAM driver"
- + help
- + Selecting this will enable printing of various debug
- + information in the CAAM driver.
- +
- config CRYPTO_DEV_FSL_CAAM_JR
- tristate "Freescale CAAM Job Ring driver backend"
- - depends on CRYPTO_DEV_FSL_CAAM
- default y
- help
- Enables the driver module for Job Rings which are part of
- @@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
- To compile this driver as a module, choose M here: the module
- will be called caam_jr.
-
- +if CRYPTO_DEV_FSL_CAAM_JR
- +
- config CRYPTO_DEV_FSL_CAAM_RINGSIZE
- int "Job Ring size"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- range 2 9
- default "9"
- help
- @@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
-
- config CRYPTO_DEV_FSL_CAAM_INTC
- bool "Job Ring interrupt coalescing"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- help
- Enable the Job Ring's interrupt coalescing feature.
-
- @@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- default y
- select CRYPTO_AEAD
- select CRYPTO_AUTHENC
- @@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
- - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
- + depends on FSL_SDK_DPA && NET
- default y
- select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
- @@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
-
- config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- default y
- select CRYPTO_HASH
- help
- @@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
-
- config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- default y
- select CRYPTO_RSA
- help
- @@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
-
- config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
- - depends on CRYPTO_DEV_FSL_CAAM_JR
- default y
- select CRYPTO_RNG
- select HW_RANDOM
- @@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
- To compile this as a module, choose M here: the module
- will be called caamrng.
-
- -config CRYPTO_DEV_FSL_CAAM_DEBUG
- - bool "Enable debug output in CAAM driver"
- - depends on CRYPTO_DEV_FSL_CAAM
- - help
- - Selecting this will enable printing of various debug
- - information in the CAAM driver.
- +endif # CRYPTO_DEV_FSL_CAAM_JR
- +
- +endif # CRYPTO_DEV_FSL_CAAM
- +
- +config CRYPTO_DEV_FSL_DPAA2_CAAM
- + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
- + depends on FSL_MC_DPIO
- + select CRYPTO_DEV_FSL_CAAM_COMMON
- + select CRYPTO_BLKCIPHER
- + select CRYPTO_AUTHENC
- + select CRYPTO_AEAD
- + select CRYPTO_HASH
- + ---help---
- + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
- + It handles DPSECI DPAA2 objects that sit on the Management Complex
- + (MC) fsl-mc bus.
- +
- + To compile this as a module, choose M here: the module
- + will be called dpaa2_caam.
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
- + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
- + CRYPTO_DEV_FSL_DPAA2_CAAM)
- +
- +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
- + CRYPTO_DEV_FSL_DPAA2_CAAM)
- --- a/drivers/crypto/caam/Makefile
- +++ b/drivers/crypto/caam/Makefile
- @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
- ccflags-y := -DDEBUG
- endif
-
- +ccflags-y += -DVERSION=\"\"
- +
- +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
- +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
-
- caam-objs := ctrl.o
- -caam_jr-objs := jr.o key_gen.o error.o
- +caam_jr-objs := jr.o key_gen.o
- caam_pkc-y := caampkc.o pkc_desc.o
- ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
- endif
- +
- +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
- +
- +dpaa2_caam-y := caamalg_qi2.o dpseci.o
- --- a/drivers/crypto/caam/caamalg.c
- +++ b/drivers/crypto/caam/caamalg.c
- @@ -108,6 +108,7 @@ struct caam_ctx {
- dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
- dma_addr_t key_dma;
- + enum dma_data_direction dir;
- struct device *jrdev;
- struct alginfo adata;
- struct alginfo cdata;
- @@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
- ctx->adata.keylen_pad;
- @@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
- - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
- + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
- + ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
- @@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
- + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
- + ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
- unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 ctx1_iv_off = 0;
- u32 *desc, *nonce = NULL;
- u32 inl_mask;
- @@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- - false);
- + false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- skip_enc:
- /*
- @@ -266,9 +271,9 @@ skip_enc:
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, alg->caam.geniv, is_rfc3686,
- - nonce, ctx1_iv_off, false);
- + nonce, ctx1_iv_off, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- if (!alg->caam.geniv)
- goto skip_givenc;
- @@ -300,9 +305,9 @@ skip_enc:
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, is_rfc3686, nonce,
- - ctx1_iv_off, false);
- + ctx1_iv_off, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- skip_givenc:
- return 0;
- @@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
- @@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
- }
-
- desc = ctx->sh_desc_enc;
- - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
- @@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
- }
-
- desc = ctx->sh_desc_dec;
- - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
- @@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_enc;
- - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
- @@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_dec;
- - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
- @@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_enc;
- - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
- @@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_dec;
- - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
- + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- struct crypto_authenc_keys keys;
- int ret = 0;
-
- @@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-
- + /*
- + * If DKP is supported, use it in the shared descriptor to generate
- + * the split key.
- + */
- + if (ctrlpriv->era >= 6) {
- + ctx->adata.keylen = keys.authkeylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- +
- + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- + goto badkey;
- +
- + memcpy(ctx->key, keys.authkey, keys.authkeylen);
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
- + keys.enckeylen);
- + dma_sync_single_for_device(jrdev, ctx->key_dma,
- + ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- + goto skip_split_key;
- + }
- +
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
- keys.authkeylen, CAAM_MAX_KEY_SIZE -
- keys.enckeylen);
- @@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
- /* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- - keys.enckeylen, DMA_TO_DEVICE);
- + keys.enckeylen, ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
- #endif
- +
- +skip_split_key:
- ctx->cdata.keylen = keys.enckeylen;
- return aead_set_sh_desc(aead);
- badkey:
- @@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
- #endif
-
- memcpy(ctx->key, key, keylen);
- - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
- + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
- ctx->cdata.keylen = keylen;
-
- return gcm_set_sh_desc(aead);
- @@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
- */
- ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- - DMA_TO_DEVICE);
- + ctx->dir);
- return rfc4106_set_sh_desc(aead);
- }
-
- @@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
- */
- ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- - DMA_TO_DEVICE);
- + ctx->dir);
- return rfc4543_set_sh_desc(aead);
- }
-
- @@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- /* xts_ablkcipher_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
-
- return 0;
- }
- @@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
- append_seq_out_ptr(desc, dst_dma,
- req->assoclen + req->cryptlen - authsize,
- out_options);
- -
- - /* REG3 = assoclen */
- - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
- }
-
- static void init_gcm_job(struct aead_request *req,
- @@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
- unsigned int last;
-
- init_aead_job(req, edesc, all_contig, encrypt);
- + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
-
- /* BUG This should not be specific to generic GCM. */
- last = 0;
- @@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
- struct caam_aead_alg, aead);
- unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = alg->caam.rfc3686;
- @@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
-
- init_aead_job(req, edesc, all_contig, encrypt);
-
- + /*
- + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
- + * having DPOVRD as destination.
- + */
- + if (ctrlpriv->era < 3)
- + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
- + else
- + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
- +
- if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
- append_load_as_imm(desc, req->iv, ivsize,
- LDST_CLASS_1_CCB |
- @@ -3204,9 +3248,11 @@ struct caam_crypto_alg {
- struct caam_alg_entry caam;
- };
-
- -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
- +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
- + bool uses_dkp)
- {
- dma_addr_t dma_addr;
- + struct caam_drv_private *priv;
-
- ctx->jrdev = caam_jr_alloc();
- if (IS_ERR(ctx->jrdev)) {
- @@ -3214,10 +3260,16 @@ static int caam_init_common(struct caam_
- return PTR_ERR(ctx->jrdev);
- }
-
- + priv = dev_get_drvdata(ctx->jrdev->parent);
- + if (priv->era >= 6 && uses_dkp)
- + ctx->dir = DMA_BIDIRECTIONAL;
- + else
- + ctx->dir = DMA_TO_DEVICE;
- +
- dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
- offsetof(struct caam_ctx,
- sh_desc_enc_dma),
- - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(ctx->jrdev, dma_addr)) {
- dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
- caam_jr_free(ctx->jrdev);
- @@ -3245,7 +3297,7 @@ static int caam_cra_init(struct crypto_t
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-
- - return caam_init_common(ctx, &caam_alg->caam);
- + return caam_init_common(ctx, &caam_alg->caam, false);
- }
-
- static int caam_aead_init(struct crypto_aead *tfm)
- @@ -3255,14 +3307,15 @@ static int caam_aead_init(struct crypto_
- container_of(alg, struct caam_aead_alg, aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
-
- - return caam_init_common(ctx, &caam_alg->caam);
- + return caam_init_common(ctx, &caam_alg->caam,
- + alg->setkey == aead_setkey);
- }
-
- static void caam_exit_common(struct caam_ctx *ctx)
- {
- dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
- offsetof(struct caam_ctx, sh_desc_enc_dma),
- - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- caam_jr_free(ctx->jrdev);
- }
-
- --- a/drivers/crypto/caam/caamalg_desc.c
- +++ b/drivers/crypto/caam/caamalg_desc.c
- @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
- * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
- * (non-protocol) with no (null) encryption.
- * @desc: pointer to buffer used for descriptor construction
- - * @adata: pointer to authentication transform definitions. Note that since a
- - * split key is to be used, the size of the split key itself is
- - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - one of
- + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
- + * with OP_ALG_AAI_HMAC_PRECOMP.
- * @icvsize: integrity check value (ICV) size (truncated or full)
- - *
- - * Note: Requires an MDHA split key.
- + * @era: SEC Era
- */
- void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- - unsigned int icvsize)
- + unsigned int icvsize, int era)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
- @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- - if (adata->key_inline)
- - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- - KEY_ENC);
- - else
- - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
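- + /*
- + * SEC Eras earlier than 6 must load a pre-computed MDHA split key;
- + * Era 6 onwards can derive it in-descriptor via the DKP protocol.
- + */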
- + if (era < 6) {
- + if (adata->key_inline)
- + append_key_as_imm(desc, adata->key_virt,
- + adata->keylen_pad, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT |
- + KEY_ENC);
- + else
- + append_key(desc, adata->key_dma, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + } else {
- + append_proto_dkp(desc, adata);
- + }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
- * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
- * (non-protocol) with no (null) decryption.
- * @desc: pointer to buffer used for descriptor construction
- - * @adata: pointer to authentication transform definitions. Note that since a
- - * split key is to be used, the size of the split key itself is
- - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - one of
- + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
- + * with OP_ALG_AAI_HMAC_PRECOMP.
- * @icvsize: integrity check value (ICV) size (truncated or full)
- - *
- - * Note: Requires an MDHA split key.
- + * @era: SEC Era
- */
- void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- - unsigned int icvsize)
- + unsigned int icvsize, int era)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
-
- @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- - if (adata->key_inline)
- - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- - adata->keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
- - else
- - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + if (era < 6) {
- + if (adata->key_inline)
- + append_key_as_imm(desc, adata->key_virt,
- + adata->keylen_pad, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT |
- + KEY_ENC);
- + else
- + append_key(desc, adata->key_dma, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + } else {
- + append_proto_dkp(desc, adata);
- + }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
- static void init_sh_desc_key_aead(u32 * const desc,
- struct alginfo * const cdata,
- struct alginfo * const adata,
- - const bool is_rfc3686, u32 *nonce)
- + const bool is_rfc3686, u32 *nonce, int era)
- {
- u32 *key_jump_cmd;
- unsigned int enckeylen = cdata->keylen;
- @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- - if (adata->key_inline)
- - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- - adata->keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
- - else
- - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + if (era < 6) {
- + if (adata->key_inline)
- + append_key_as_imm(desc, adata->key_virt,
- + adata->keylen_pad, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT |
- + KEY_ENC);
- + else
- + append_key(desc, adata->key_dma, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + } else {
- + append_proto_dkp(desc, adata);
- + }
-
- if (cdata->key_inline)
- append_key_as_imm(desc, cdata->key_virt, enckeylen,
- @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- - * @adata: pointer to authentication transform definitions. Note that since a
- - * split key is to be used, the size of the split key itself is
- - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - one of
- + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
- + * with OP_ALG_AAI_HMAC_PRECOMP.
- * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
- - *
- - * Note: Requires an MDHA split key.
- + * @era: SEC Era
- */
- void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
- + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
- + int era)
- {
- /* Note: Context registers are saved. */
- - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
- + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- /* Class 2 operation */
- append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
- @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
- }
-
- /* Read and write assoclen bytes */
- - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- + if (is_qi || era < 3) {
- + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- + } else {
- + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
- + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
- + }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
- @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- - * @adata: pointer to authentication transform definitions. Note that since a
- - * split key is to be used, the size of the split key itself is
- - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - one of
- + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
- + * with OP_ALG_AAI_HMAC_PRECOMP.
- * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
- - *
- - * Note: Requires an MDHA split key.
- + * @era: SEC Era
- */
- void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool geniv,
- const bool is_rfc3686, u32 *nonce,
- - const u32 ctx1_iv_off, const bool is_qi)
- + const u32 ctx1_iv_off, const bool is_qi, int era)
- {
- /* Note: Context registers are saved. */
- - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
- + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- /* Class 2 operation */
- append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
- @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
- }
-
- /* Read and write assoclen bytes */
- - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- - if (geniv)
- - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- - else
- - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- + if (is_qi || era < 3) {
- + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- + if (geniv)
- + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
- + ivsize);
- + else
- + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
- + CAAM_CMD_SZ);
- + } else {
- + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
- + if (geniv)
- + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
- + ivsize);
- + else
- + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
- + CAAM_CMD_SZ);
- + }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
- @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- - * @adata: pointer to authentication transform definitions. Note that since a
- - * split key is to be used, the size of the split key itself is
- - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - one of
- + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
- + * with OP_ALG_AAI_HMAC_PRECOMP.
- * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
- - *
- - * Note: Requires an MDHA split key.
- + * @era: SEC Era
- */
- void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
- - const bool is_qi)
- + const bool is_qi, int era)
- {
- u32 geniv, moveiv;
-
- /* Note: Context registers are saved. */
- - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
- + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- if (is_qi) {
- u32 *wait_load_cmd;
- @@ -528,8 +561,13 @@ copy_iv:
- OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- + if (is_qi || era < 3) {
- + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- + } else {
- + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
- + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
- + }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
- @@ -583,14 +621,431 @@ copy_iv:
- EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
-
- /**
- + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
- + * @desc: pointer to buffer used for descriptor construction
- + * @cdata: pointer to block cipher transform definitions
- + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
- + * with OP_ALG_AAI_CBC
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
- + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @assoclen: associated data length
- + * @ivsize: initialization vector size
- + * @authsize: authentication data size
- + * @blocksize: cipher block size
- + * @era: SEC Era
- + */
- +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
- + struct alginfo *adata, unsigned int assoclen,
- + unsigned int ivsize, unsigned int authsize,
- + unsigned int blocksize, int era)
- +{
- + u32 *key_jump_cmd, *zero_payload_jump_cmd;
- + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
- +
- + /*
- + * Compute the byte index of the LOAD targeting the Class 1 Data Size
- + * Register and of the LOAD that generates the padding
- + */
- + if (adata->key_inline) {
- + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
- + cdata->keylen - 4 * CAAM_CMD_SZ;
- + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
- + cdata->keylen - 2 * CAAM_CMD_SZ;
- + } else {
- + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
- + 4 * CAAM_CMD_SZ;
- + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
- + 2 * CAAM_CMD_SZ;
- + }
- +
- + stidx = 1 << HDR_START_IDX_SHIFT;
- + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
- +
- + /* skip key loading if keys are already loaded due to sharing */
- + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_SHRD);
- +
- + if (era < 6) {
- + if (adata->key_inline)
- + append_key_as_imm(desc, adata->key_virt,
- + adata->keylen_pad, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT |
- + KEY_ENC);
- + else
- + append_key(desc, adata->key_dma, adata->keylen,
- + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + } else {
- + append_proto_dkp(desc, adata);
- + }
- +
- + if (cdata->key_inline)
- + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
- + else
- + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
- + KEY_DEST_CLASS_REG);
- +
- + set_jump_tgt_here(desc, key_jump_cmd);
- +
- + /* class 2 operation */
- + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
- + OP_ALG_ENCRYPT);
- + /* class 1 operation */
- + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- + OP_ALG_ENCRYPT);
- +
- + /* payloadlen = input data length - (assoclen + ivlen) */
- + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
- +
- + /* math1 = payloadlen + icvlen */
- + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
- +
- + /* padlen = block_size - math1 % block_size */
- + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
- + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
- +
- + /* cryptlen = payloadlen + icvlen + padlen */
- + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
- +
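- + /*
- + * i.e. padlen = blocksize - ((payloadlen + icvlen) % blocksize),
- + * always 1..blocksize bytes: e.g. a 37-byte payload with a 20-byte
- + * SHA-1 ICV (57 bytes) gets 7 bytes of padding, for a 64-byte,
- + * block-aligned ciphertext. The AND above assumes a power-of-two
- + * block size.
- + */
- +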
- + /*
- + * update the immediate data of the LOAD into the Class 1 Data Size
- + * Register with the computed padding length.
- + */
- + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
- + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
- + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
- +
- + /* overwrite PL field for the padding info FIFO entry */
- + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
- + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
- + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
- +
- + /* store encrypted payload, icv and padding */
- + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
- +
- + /* if payload length is zero, jump to zero-payload commands */
- + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
- + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- + JUMP_COND_MATH_Z);
- +
- + /* load iv in context1 */
- + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
- + LDST_CLASS_1_CCB | ivsize);
- +
- + /* read assoc for authentication */
- + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
- + FIFOLD_TYPE_MSG);
- + /* insnoop payload */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
- + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
- +
- + /* jump over the zero-payload commands */
- + append_jump(desc, JUMP_TEST_ALL | 3);
- +
- + /* zero-payload commands */
- + set_jump_tgt_here(desc, zero_payload_jump_cmd);
- +
- + /* load iv in context1 */
- + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
- + LDST_CLASS_1_CCB | ivsize);
- +
- + /* assoc data is the only data for authentication */
- + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
- + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
- +
- + /* send icv to encryption */
- + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
- + authsize);
- +
- + /* update class 1 data size register with padding length */
- + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
- + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
- +
- + /* generate padding and send it to encryption */
- + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
- + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
- + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
- + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc,
- + desc_bytes(desc), 1);
- +#endif
- +}
- +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
- +
- +/**
- + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
- + * @desc: pointer to buffer used for descriptor construction
- + * @cdata: pointer to block cipher transform definitions
- + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
- + * with OP_ALG_AAI_CBC
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
- + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
- + * @assoclen: associated data length
- + * @ivsize: initialization vector size
- + * @authsize: authentication data size
- + * @blocksize: cipher block size
- + * @era: SEC Era
- + */
- +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
- + struct alginfo *adata, unsigned int assoclen,
- + unsigned int ivsize, unsigned int authsize,
- + unsigned int blocksize, int era)
- +{
- + u32 stidx, jumpback;
- + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
- + /*
- + * Pointer Size bool determines the size of address pointers.
- + * false - Pointers fit in one 32-bit word.
- + * true - Pointers fit in two 32-bit words.
- + */
- + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
- +
- + stidx = 1 << HDR_START_IDX_SHIFT;
- + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
- +
- + /* skip key loading if keys are already loaded due to sharing */
- + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_SHRD);
- +
- + if (era < 6)
- + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- + KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + else
- + append_proto_dkp(desc, adata);
- +
- + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
- + KEY_DEST_CLASS_REG);
- +
- + set_jump_tgt_here(desc, key_jump_cmd);
- +
- + /* class 2 operation */
- + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
- + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- + /* class 1 operation */
- + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- + OP_ALG_DECRYPT);
- +
- + /* VSIL = input data length - 2 * block_size */
- + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
- + blocksize);
- +
- + /*
- + * payloadlen + icvlen + padlen = input data length - (assoclen +
- + * ivsize)
- + */
- + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
- +
- + /* skip data up to the last-but-one cipher block */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
- +
- + /* load iv for the last cipher block */
- + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
- + LDST_CLASS_1_CCB | ivsize);
- +
- + /* read last cipher block */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
- + FIFOLD_TYPE_LAST1 | blocksize);
- +
- + /* move decrypted block into math0 and math1 */
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
- + blocksize);
- +
- + /* reset AES CHA */
- + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
- + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
- +
- + /* rewind input sequence */
- + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
- +
- + /* key1 is in decryption form */
- + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
- + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- +
- + /* load iv in context1 */
- + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
- + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
- +
- + /* read sequence number */
- + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
- + /* load Type, Version and Len fields in math0 */
- + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
- + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
- +
- + /* compute (padlen - 1) */
- + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
- +
- + /* math2 = icvlen + (padlen - 1) + 1 */
- + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
- +
- + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
- +
- + /* VSOL = payloadlen + icvlen + padlen */
- + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
- +
- + if (caam_little_end)
- + append_moveb(desc, MOVE_WAITCOMP |
- + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
- +
- + /* update Len field */
- + append_math_sub(desc, REG0, REG0, REG2, 8);
- +
- + /* store decrypted payload, icv and padding */
- + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
- +
- + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
- + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
- +
- + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- + JUMP_COND_MATH_Z);
- +
- + /* send Type, Version and Len(pre ICV) fields to authentication */
- + append_move(desc, MOVE_WAITCOMP |
- + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
- + (3 << MOVE_OFFSET_SHIFT) | 5);
- +
- + /* outsnooping payload */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
- + FIFOLDST_VLF);
- + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
- +
- + set_jump_tgt_here(desc, zero_payload_jump_cmd);
- + /* send Type, Version and Len(pre ICV) fields to authentication */
- + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
- + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
- + (3 << MOVE_OFFSET_SHIFT) | 5);
- +
- + set_jump_tgt_here(desc, skip_zero_jump_cmd);
- + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
- +
- + /* load icvlen and padlen */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
- + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
- +
- + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
- + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
- +
- + /*
- + * Start a new input sequence using the SEQ OUT PTR command options,
- + * pointer and length used when the current output sequence was defined.
- + */
- + if (ps) {
- + /*
- + * Move the lower 32 bits of Shared Descriptor address, the
- + * SEQ OUT PTR command, Output Pointer (2 words) and
- + * Output Length into math registers.
- + */
- + if (caam_little_end)
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
- + MOVE_DEST_MATH0 |
- + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
- + else
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
- + MOVE_DEST_MATH0 |
- + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
- +
- + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
- + append_math_and_imm_u32(desc, REG0, REG0, IMM,
- + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
- + /* Append a JUMP command after the copied fields */
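- + /*
- + * (char)-9 encodes a backward jump of 9 command words in the 8-bit
- + * relative offset field; this relies on char being unsigned (as on
- + * the arm/powerpc targets) so the OR below yields 0xf7 rather than
- + * a sign-extended value.
- + */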
- + jumpback = CMD_JUMP | (char)-9;
- + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
- + LDST_SRCDST_WORD_DECO_MATH2 |
- + (4 << LDST_OFFSET_SHIFT));
- + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
- + /* Move the updated fields back to the Job Descriptor */
- + if (caam_little_end)
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
- + MOVE_DEST_DESCBUF |
- + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
- + else
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
- + MOVE_DEST_DESCBUF |
- + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
- +
- + /*
- + * Read the new SEQ IN PTR command, Input Pointer, Input Length
- + * and then jump back to the next command from the
- + * Shared Descriptor.
- + */
- + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
- + } else {
- + /*
- + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
- + * Output Length into math registers.
- + */
- + if (caam_little_end)
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
- + MOVE_DEST_MATH0 |
- + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
- + else
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
- + MOVE_DEST_MATH0 |
- + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
- +
- + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
- + append_math_and_imm_u64(desc, REG0, REG0, IMM,
- + ~(((u64)(CMD_SEQ_IN_PTR ^
- + CMD_SEQ_OUT_PTR)) << 32));
- + /* Append a JUMP command after the copied fields */
- + jumpback = CMD_JUMP | (char)-7;
- + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
- + LDST_SRCDST_WORD_DECO_MATH1 |
- + (4 << LDST_OFFSET_SHIFT));
- + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
- + /* Move the updated fields back to the Job Descriptor */
- + if (caam_little_end)
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
- + MOVE_DEST_DESCBUF |
- + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
- + else
- + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
- + MOVE_DEST_DESCBUF |
- + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
- +
- + /*
- + * Read the new SEQ IN PTR command, Input Pointer, Input Length
- + * and then jump back to the next command from the
- + * Shared Descriptor.
- + */
- + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
- + }
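- +
- + /*
- + * Note: the SEQ OUT PTR -> SEQ IN PTR rewrite works because the two
- + * opcodes differ in a single bit (0x1e << 27 vs 0x1f << 27), so
- + *
- + * cmd &= ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR);
- + *
- + * clears that bit while preserving the pointer and length words. The
- + * one-word-pointer branch shifts the mask left by 32 because there
- + * the MOVE lands the command word in the upper half of MATH0.
- + */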
- +
- + /* skip payload */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
- + /* check icv */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
- + FIFOLD_TYPE_LAST2 | authsize);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc,
- + desc_bytes(desc), 1);
- +#endif
- +}
- +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
- +
- +/**
- * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
- *zero_assoc_jump_cmd2;
- @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- + if (is_qi) {
- + u32 *wait_load_cmd;
- +
- + /* REG3 = assoclen */
- + append_seq_load(desc, 4, LDST_CLASS_DECO |
- + LDST_SRCDST_WORD_DECO_MATH3 |
- + (4 << LDST_OFFSET_SHIFT));
- +
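- + /*
- + * Wait for the DECO LOAD of assoclen above to complete before
- + * any further processing (jump-to-next under the calm / no
- + * pending-input conditions is the usual CAAM wait idiom).
- + */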
- + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_CALM | JUMP_COND_NCP |
- + JUMP_COND_NOP | JUMP_COND_NIP |
- + JUMP_COND_NIFP);
- + set_jump_tgt_here(desc, wait_load_cmd);
- +
- + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
- + ivsize);
- + } else {
- + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
- + CAAM_CMD_SZ);
- + }
- +
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- + if (is_qi)
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- +
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- - /* jump the zero-payload commands */
- - append_jump(desc, JUMP_TEST_ALL | 2);
- + /* jump to ICV writing */
- + if (is_qi)
- + append_jump(desc, JUMP_TEST_ALL | 4);
- + else
- + append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
- @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
- + if (is_qi)
- + /* jump to ICV writing */
- + append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- + if (is_qi)
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
- + FIFOLD_TYPE_LAST1);
- +
- /* write ICV */
- append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
- @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
-
- @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- + if (is_qi) {
- + u32 *wait_load_cmd;
- +
- + /* REG3 = assoclen */
- + append_seq_load(desc, 4, LDST_CLASS_DECO |
- + LDST_SRCDST_WORD_DECO_MATH3 |
- + (4 << LDST_OFFSET_SHIFT));
- +
- + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_CALM | JUMP_COND_NCP |
- + JUMP_COND_NOP | JUMP_COND_NIP |
- + JUMP_COND_NIFP);
- + set_jump_tgt_here(desc, wait_load_cmd);
- +
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- + }
- +
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd;
-
- @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- + if (is_qi) {
- + u32 *wait_load_cmd;
- +
- + /* REG3 = assoclen */
- + append_seq_load(desc, 4, LDST_CLASS_DECO |
- + LDST_SRCDST_WORD_DECO_MATH3 |
- + (4 << LDST_OFFSET_SHIFT));
- +
- + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_CALM | JUMP_COND_NCP |
- + JUMP_COND_NOP | JUMP_COND_NIP |
- + JUMP_COND_NIFP);
- + set_jump_tgt_here(desc, wait_load_cmd);
- +
- + /* Read salt and IV */
- + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
- + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- + }
- +
- + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd;
-
- @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- + if (is_qi) {
- + u32 *wait_load_cmd;
- +
- + /* REG3 = assoclen */
- + append_seq_load(desc, 4, LDST_CLASS_DECO |
- + LDST_SRCDST_WORD_DECO_MATH3 |
- + (4 << LDST_OFFSET_SHIFT));
- +
- + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_CALM | JUMP_COND_NCP |
- + JUMP_COND_NOP | JUMP_COND_NIP |
- + JUMP_COND_NIFP);
- + set_jump_tgt_here(desc, wait_load_cmd);
- +
- + /* Read salt and IV */
- + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
- + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- + }
- +
- + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
- @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
- @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- + if (is_qi) {
- + /* assoclen is not needed, skip it */
- + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
- +
- + /* Read salt and IV */
- + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
- + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- + }
- +
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
- + * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- + * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize)
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
- @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- + if (is_qi) {
- + /* assoclen is not needed, skip it */
- + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
- +
- + /* Read salt and IV */
- + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
- + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV);
- + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
- + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
- + }
- +
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- @@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- - u8 *nonce = cdata->key_virt + cdata->keylen;
- + const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- @@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- - u8 *nonce = cdata->key_virt + cdata->keylen;
- + const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- @@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- - u8 *nonce = cdata->key_virt + cdata->keylen;
- + const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- --- a/drivers/crypto/caam/caamalg_desc.h
- +++ b/drivers/crypto/caam/caamalg_desc.h
- @@ -17,6 +17,9 @@
- #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
- #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
-
- +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
- +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
- +
- /* Note: Nonce is counted in cdata.keylen */
- #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
- @@ -27,14 +30,20 @@
- #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
- #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
- #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
- +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
- +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
-
- #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
- #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
- #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
- +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
- +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
-
- #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
- #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
- #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
- +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
- +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-
- #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
- #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- @@ -43,46 +52,62 @@
- 15 * CAAM_CMD_SZ)
-
- void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- - unsigned int icvsize);
- + unsigned int icvsize, int era);
-
- void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- - unsigned int icvsize);
- + unsigned int icvsize, int era);
-
- void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
- - const bool is_qi);
- + const bool is_qi, int era);
-
- void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool geniv,
- const bool is_rfc3686, u32 *nonce,
- - const u32 ctx1_iv_off, const bool is_qi);
- + const u32 ctx1_iv_off, const bool is_qi, int era);
-
- void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
- - const bool is_qi);
- + const bool is_qi, int era);
- +
- +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
- + struct alginfo *adata, unsigned int assoclen,
- + unsigned int ivsize, unsigned int authsize,
- + unsigned int blocksize, int era);
- +
- +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
- + struct alginfo *adata, unsigned int assoclen,
- + unsigned int ivsize, unsigned int authsize,
- + unsigned int blocksize, int era);
-
- void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- - unsigned int icvsize);
- + unsigned int ivsize, unsigned int icvsize,
- + const bool is_qi);
-
- void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- --- a/drivers/crypto/caam/caamalg_qi.c
- +++ b/drivers/crypto/caam/caamalg_qi.c
- @@ -7,7 +7,7 @@
- */
-
- #include "compat.h"
- -
- +#include "ctrl.h"
- #include "regs.h"
- #include "intern.h"
- #include "desc_constr.h"
- @@ -53,6 +53,7 @@ struct caam_ctx {
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
- u8 key[CAAM_MAX_KEY_SIZE];
- dma_addr_t key_dma;
- + enum dma_data_direction dir;
- struct alginfo adata;
- struct alginfo cdata;
- unsigned int authsize;
- @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = alg->caam.rfc3686;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
-
- if (!ctx->cdata.keylen || !ctx->authsize)
- return 0;
- @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
-
- cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, is_rfc3686, nonce,
- - ctx1_iv_off, true);
- + ctx1_iv_off, true, ctrlpriv->era);
-
- skip_enc:
- /* aead_decrypt shared descriptor */
- @@ -149,7 +151,8 @@ skip_enc:
-
- cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, alg->caam.geniv,
- - is_rfc3686, nonce, ctx1_iv_off, true);
- + is_rfc3686, nonce, ctx1_iv_off, true,
- + ctrlpriv->era);
-
- if (!alg->caam.geniv)
- goto skip_givenc;
- @@ -176,7 +179,7 @@ skip_enc:
-
- cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, is_rfc3686, nonce,
- - ctx1_iv_off, true);
- + ctx1_iv_off, true, ctrlpriv->era);
-
- skip_givenc:
- return 0;
- @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- struct crypto_authenc_keys keys;
- int ret = 0;
-
- @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-
- + /*
- + * If DKP is supported, use it in the shared descriptor to generate
- + * the split key.
- + */
- + if (ctrlpriv->era >= 6) {
- + ctx->adata.keylen = keys.authkeylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- +
- + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- + goto badkey;
- +
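- + /*
- + * ctx->key layout: raw authentication key, padded out to the
- + * split key size (keylen_pad), followed by the encryption key;
- + * reserving keylen_pad up front leaves room for DKP to expand
- + * the raw key into the split key in place.
- + */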
- + memcpy(ctx->key, keys.authkey, keys.authkeylen);
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
- + keys.enckeylen);
- + dma_sync_single_for_device(jrdev, ctx->key_dma,
- + ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- + goto skip_split_key;
- + }
- +
- ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
- keys.authkeylen, CAAM_MAX_KEY_SIZE -
- keys.enckeylen);
- @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
- /* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- - keys.enckeylen, DMA_TO_DEVICE);
- + keys.enckeylen, ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
- #endif
-
- +skip_split_key:
- ctx->cdata.keylen = keys.enckeylen;
-
- ret = aead_set_sh_desc(aead);
- @@ -258,6 +284,468 @@ badkey:
- return -EINVAL;
- }
-
- +static int tls_set_sh_desc(struct crypto_aead *tls)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + unsigned int ivsize = crypto_aead_ivsize(tls);
- + unsigned int blocksize = crypto_aead_blocksize(tls);
- + unsigned int assoclen = 13; /* always 13 bytes for TLS */
- + unsigned int data_len[2];
- + u32 inl_mask;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + /*
- + * TLS 1.0 encrypt shared descriptor
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + data_len[0] = ctx->adata.keylen_pad;
- + data_len[1] = ctx->cdata.keylen;
- +
- + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
- + &inl_mask, ARRAY_SIZE(data_len)) < 0)
- + return -EINVAL;
- +
- + if (inl_mask & 1)
- + ctx->adata.key_virt = ctx->key;
- + else
- + ctx->adata.key_dma = ctx->key_dma;
- +
- + if (inl_mask & 2)
- + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- + else
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + ctx->adata.key_inline = !!(inl_mask & 1);
- + ctx->cdata.key_inline = !!(inl_mask & 2);
- +
- + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
- + assoclen, ivsize, ctx->authsize, blocksize,
- + ctrlpriv->era);
- +
- + /*
- + * TLS 1.0 decrypt shared descriptor
- + * Keys do not fit inline, regardless of algorithms used
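- + * (the TLS 1.0 decap descriptor nearly fills the 64-word buffer
- + * by itself)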
- + */
- + ctx->adata.key_inline = false;
- + ctx->adata.key_dma = ctx->key_dma;
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
- + assoclen, ivsize, ctx->authsize, blocksize,
- + ctrlpriv->era);
- +
- + return 0;
- +}
- +
- +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- +
- + ctx->authsize = authsize;
- + tls_set_sh_desc(tls);
- +
- + return 0;
- +}
- +
- +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- + struct crypto_authenc_keys keys;
- + int ret = 0;
- +
- + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- + goto badkey;
- +
- +#ifdef DEBUG
- + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
- + keys.authkeylen + keys.enckeylen, keys.enckeylen,
- + keys.authkeylen);
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + /*
- + * If DKP is supported, use it in the shared descriptor to generate
- + * the split key.
- + */
- + if (ctrlpriv->era >= 6) {
- + ctx->adata.keylen = keys.authkeylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- +
- + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- + goto badkey;
- +
- + memcpy(ctx->key, keys.authkey, keys.authkeylen);
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
- + keys.enckeylen);
- + dma_sync_single_for_device(jrdev, ctx->key_dma,
- + ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- + goto skip_split_key;
- + }
- +
- + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
- + keys.authkeylen, CAAM_MAX_KEY_SIZE -
- + keys.enckeylen);
- + if (ret)
- + goto badkey;
- +
- + /* postpend encryption key to auth split key */
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- +
- +#ifdef DEBUG
- + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
- + ctx->adata.keylen, ctx->adata.keylen_pad);
- + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- + ctx->adata.keylen_pad + keys.enckeylen, 1);
- +#endif
- +
- +skip_split_key:
- + ctx->cdata.keylen = keys.enckeylen;
- +
- + ret = tls_set_sh_desc(tls);
- + if (ret)
- + goto badkey;
- +
- + /* Now update the driver contexts with the new shared descriptor */
- + if (ctx->drv_ctx[ENCRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
- + ctx->sh_desc_enc);
- + if (ret) {
- + dev_err(jrdev, "driver enc context update failed\n");
- + goto badkey;
- + }
- + }
- +
- + if (ctx->drv_ctx[DECRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
- + ctx->sh_desc_dec);
- + if (ret) {
- + dev_err(jrdev, "driver dec context update failed\n");
- + goto badkey;
- + }
- + }
- +
- + return ret;
- +badkey:
- + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + return -EINVAL;
- +}
- +
- +static int gcm_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
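- + /*
- + * e.g. AES-256-GCM, assuming the usual 256-byte (64-word) limit
- + * and a 64-bit DESC_JOB_IO_LEN of 44: rem_bytes = 256 - 44 - 32 =
- + * 180, above DESC_QI_GCM_ENC_LEN = (3 + 16 + 6) * CAAM_CMD_SZ =
- + * 100, so the key is inlined.
- + */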
- + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + ctx->cdata.key_virt = ctx->key;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + ctx->cdata.key_virt = ctx->key;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + return 0;
- +}
- +
- +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + gcm_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int gcm_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *jrdev = ctx->jrdev;
- + int ret;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
- + ctx->cdata.keylen = keylen;
- +
- + ret = gcm_set_sh_desc(aead);
- + if (ret)
- + return ret;
- +
- + /* Now update the driver contexts with the new shared descriptor */
- + if (ctx->drv_ctx[ENCRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
- + ctx->sh_desc_enc);
- + if (ret) {
- + dev_err(jrdev, "driver enc context update failed\n");
- + return ret;
- + }
- + }
- +
- + if (ctx->drv_ctx[DECRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
- + ctx->sh_desc_dec);
- + if (ret) {
- + dev_err(jrdev, "driver dec context update failed\n");
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
- +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + ctx->cdata.key_virt = ctx->key;
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + return 0;
- +}
- +
- +static int rfc4106_setauthsize(struct crypto_aead *authenc,
- + unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + rfc4106_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int rfc4106_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *jrdev = ctx->jrdev;
- + int ret;
- +
- + if (keylen < 4)
- + return -EINVAL;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + /*
- + * The last four bytes of the key material are used as the salt value
- + * in the nonce. Update the AES key length.
- + */
- + ctx->cdata.keylen = keylen - 4;
- + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- + ctx->dir);
- +
- + ret = rfc4106_set_sh_desc(aead);
- + if (ret)
- + return ret;
- +
- + /* Now update the driver contexts with the new shared descriptor */
- + if (ctx->drv_ctx[ENCRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
- + ctx->sh_desc_enc);
- + if (ret) {
- + dev_err(jrdev, "driver enc context update failed\n");
- + return ret;
- + }
- + }
- +
- + if (ctx->drv_ctx[DECRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
- + ctx->sh_desc_dec);
- + if (ret) {
- + dev_err(jrdev, "driver dec context update failed\n");
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
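As the salt comment in rfc4106_setkey() notes (the same layout is handled again in rfc4543_setkey() below), the key blob passed to setkey carries a trailing 4-byte salt, per RFC 4106 the last four bytes of the keying material; schematically:

    /*
     * setkey() input for rfc4106(gcm(aes)) and rfc4543(gcm(aes)):
     *
     *   key[0 .. keylen-5]         AES key (16, 24 or 32 bytes)
     *   key[keylen-4 .. keylen-1]  salt, used as the nonce prefix
     *
     * hence ctx->cdata.keylen = keylen - 4, while the full blob stays in
     * ctx->key so the descriptor can load both key and salt.
     */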
- +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + ctx->cdata.key_virt = ctx->key;
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + /*
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- + ctx->authsize, true);
- +
- + return 0;
- +}
- +
- +static int rfc4543_setauthsize(struct crypto_aead *authenc,
- + unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + rfc4543_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int rfc4543_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *jrdev = ctx->jrdev;
- + int ret;
- +
- + if (keylen < 4)
- + return -EINVAL;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + /*
- + * The last four bytes of the key material are used as the salt value
- + * in the nonce. Update the AES key length.
- + */
- + ctx->cdata.keylen = keylen - 4;
- + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- + ctx->dir);
- +
- + ret = rfc4543_set_sh_desc(aead);
- + if (ret)
- + return ret;
- +
- + /* Now update the driver contexts with the new shared descriptor */
- + if (ctx->drv_ctx[ENCRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
- + ctx->sh_desc_enc);
- + if (ret) {
- + dev_err(jrdev, "driver enc context update failed\n");
- + return ret;
- + }
- + }
- +
- + if (ctx->drv_ctx[DECRYPT]) {
- + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
- + ctx->sh_desc_dec);
- + if (ret) {
- + dev_err(jrdev, "driver dec context update failed\n");
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
- static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
- {
- @@ -414,6 +902,29 @@ struct aead_edesc {
- };
-
- /*
- + * tls_edesc - s/w-extended tls descriptor
- + * @src_nents: number of segments in input scatterlist
- + * @dst_nents: number of segments in output scatterlist
- + * @iv_dma: dma address of iv for checking continuity and link table
- + * @qm_sg_bytes: length of dma mapped h/w link table
- + * @qm_sg_dma: bus physical mapped address of h/w link table
- + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
- + * @dst: pointer to output scatterlist: req->dst itself, or req->dst
- + *	advanced past the associated data when src and dst differ
- + * @drv_req: driver-specific request structure
- + * @sgt: the h/w link table, followed by IV
- +struct tls_edesc {
- + int src_nents;
- + int dst_nents;
- + dma_addr_t iv_dma;
- + int qm_sg_bytes;
- + dma_addr_t qm_sg_dma;
- + struct scatterlist tmp[2];
- + struct scatterlist *dst;
- + struct caam_drv_req drv_req;
- + struct qm_sg_entry sgt[0];
- +};
- +
- +/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- }
-
- +static void tls_unmap(struct device *dev,
- + struct tls_edesc *edesc,
- + struct aead_request *req)
- +{
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + int ivsize = crypto_aead_ivsize(aead);
- +
- + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
- + edesc->dst_nents, edesc->iv_dma, ivsize,
- + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
- + edesc->qm_sg_bytes);
- +}
- +
- static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
- @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
- qidev = caam_ctx->qidev;
-
- if (unlikely(status)) {
- + u32 ssrc = status & JRSTA_SSRC_MASK;
- + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
- +
- caam_jr_strstatus(qidev, status);
- - ecode = -EIO;
- + /*
- +  * Verify that the h/w ICV (auth) check passed; if it failed,
- +  * report -EBADMSG instead of a generic -EIO.
- +  */
- + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
- + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
- + ecode = -EBADMSG;
- + else
- + ecode = -EIO;
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
- @@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
- return aead_crypt(req, false);
- }
-
- +static int ipsec_gcm_encrypt(struct aead_request *req)
- +{
- + if (req->assoclen < 8)
- + return -EINVAL;
- +
- + return aead_crypt(req, true);
- +}
- +
- +static int ipsec_gcm_decrypt(struct aead_request *req)
- +{
- + if (req->assoclen < 8)
- + return -EINVAL;
- +
- + return aead_crypt(req, false);
- +}
- +
- +static void tls_done(struct caam_drv_req *drv_req, u32 status)
- +{
- + struct device *qidev;
- + struct tls_edesc *edesc;
- + struct aead_request *aead_req = drv_req->app_ctx;
- + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
- + int ecode = 0;
- +
- + qidev = caam_ctx->qidev;
- +
- + if (unlikely(status)) {
- + caam_jr_strstatus(qidev, status);
- + ecode = -EIO;
- + }
- +
- + edesc = container_of(drv_req, typeof(*edesc), drv_req);
- + tls_unmap(qidev, edesc, aead_req);
- +
- + aead_request_complete(aead_req, ecode);
- + qi_cache_free(edesc);
- +}
- +
- +/*
- + * allocate and map the tls extended descriptor
- + */
- +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
- +{
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + unsigned int blocksize = crypto_aead_blocksize(aead);
- + unsigned int padsize, authsize;
- + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- + typeof(*alg), aead);
- + struct device *qidev = ctx->qidev;
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- + struct tls_edesc *edesc;
- + dma_addr_t qm_sg_dma, iv_dma = 0;
- + int ivsize = 0;
- + u8 *iv;
- + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
- + int in_len, out_len;
- + struct qm_sg_entry *sg_table, *fd_sgt;
- + struct caam_drv_ctx *drv_ctx;
- + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- + struct scatterlist *dst;
- +
- + if (encrypt) {
- + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
- + blocksize);
- + authsize = ctx->authsize + padsize;
- + } else {
- + authsize = ctx->authsize;
- + }
- +
- + drv_ctx = get_drv_ctx(ctx, op_type);
- + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- + return (struct tls_edesc *)drv_ctx;
- +
- + /* allocate space for base edesc, link tables and IV */
- + edesc = qi_cache_alloc(GFP_DMA | flags);
- + if (unlikely(!edesc)) {
- + dev_err(qidev, "could not allocate extended descriptor\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + if (likely(req->src == req->dst)) {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen +
- + (encrypt ? authsize : 0));
- + if (unlikely(src_nents < 0)) {
- + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen +
- + (encrypt ? authsize : 0));
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(qidev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + dst = req->dst;
- + } else {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen);
- + if (unlikely(src_nents < 0)) {
- + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen);
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
- + dst_nents = sg_nents_for_len(dst, req->cryptlen +
- + (encrypt ? authsize : 0));
- + if (unlikely(dst_nents < 0)) {
- + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- + req->cryptlen +
- + (encrypt ? authsize : 0));
- + qi_cache_free(edesc);
- + return ERR_PTR(dst_nents);
- + }
- +
- + if (src_nents) {
- + mapped_src_nents = dma_map_sg(qidev, req->src,
- + src_nents, DMA_TO_DEVICE);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(qidev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + } else {
- + mapped_src_nents = 0;
- + }
- +
- + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
- + DMA_FROM_DEVICE);
- + if (unlikely(!mapped_dst_nents)) {
- + dev_err(qidev, "unable to map destination\n");
- + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- +
- + /*
- + * Create S/G table: IV, src, dst.
- + * Input is not contiguous.
- + */
- + qm_sg_ents = 1 + mapped_src_nents +
- + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- + sg_table = &edesc->sgt[0];
- + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
- +
- + ivsize = crypto_aead_ivsize(aead);
- + iv = (u8 *)(sg_table + qm_sg_ents);
- + /* Make sure IV is located in a DMAable area */
- + memcpy(iv, req->iv, ivsize);
- + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
- + if (dma_mapping_error(qidev, iv_dma)) {
- + dev_err(qidev, "unable to map IV\n");
- + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
- + 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->src_nents = src_nents;
- + edesc->dst_nents = dst_nents;
- + edesc->dst = dst;
- + edesc->iv_dma = iv_dma;
- + edesc->drv_req.app_ctx = req;
- + edesc->drv_req.cbk = tls_done;
- + edesc->drv_req.drv_ctx = drv_ctx;
- +
- + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- + qm_sg_index = 1;
- +
- + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
- + qm_sg_index += mapped_src_nents;
- +
- + if (mapped_dst_nents > 1)
- + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
- + qm_sg_index, 0);
- +
- + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(qidev, qm_sg_dma)) {
- + dev_err(qidev, "unable to map S/G table\n");
- + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
- + ivsize, op_type, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->qm_sg_dma = qm_sg_dma;
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + out_len = req->cryptlen + (encrypt ? authsize : 0);
- + in_len = ivsize + req->assoclen + req->cryptlen;
- +
- + fd_sgt = &edesc->drv_req.fd_sgt[0];
- +
- + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
- +
- + if (req->dst == req->src)
- + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
- + (sg_nents_for_len(req->src, req->assoclen) +
- + 1) * sizeof(*sg_table), out_len, 0);
- + else if (mapped_dst_nents == 1)
- + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
- + else
- + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
- + qm_sg_index, out_len, 0);
- +
- + return edesc;
- +}
- +
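To make the padsize computation in tls_edesc_alloc() concrete, a worked example with illustrative sizes (AES block size 16, SHA-1 ICV 20 bytes; not taken from a real request):

    unsigned int blocksize = 16, cryptlen = 32, icv = 20;
    unsigned int padsize = blocksize - ((cryptlen + icv) % blocksize);

    /* (32 + 20) % 16 = 4, so padsize = 12 and the record body becomes
     * 32 + 20 + 12 = 64 bytes, an exact multiple of the block size.
     * padsize always lands in 1..blocksize: when cryptlen + icv is
     * already aligned, a full block of padding is added, which matches
     * TLS 1.0 CBC padding (at least the pad-length byte is present).
     */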
- +static int tls_crypt(struct aead_request *req, bool encrypt)
- +{
- + struct tls_edesc *edesc;
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + int ret;
- +
- + if (unlikely(caam_congested))
- + return -EAGAIN;
- +
- + edesc = tls_edesc_alloc(req, encrypt);
- + if (IS_ERR_OR_NULL(edesc))
- + return PTR_ERR(edesc);
- +
- + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- + if (!ret) {
- + ret = -EINPROGRESS;
- + } else {
- + tls_unmap(ctx->qidev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
- +
- +static int tls_encrypt(struct aead_request *req)
- +{
- + return tls_crypt(req, true);
- +}
- +
- +static int tls_decrypt(struct aead_request *req)
- +{
- + return tls_crypt(req, false);
- +}
- +
- static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct ablkcipher_edesc *edesc;
- @@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
- };
-
- static struct caam_aead_alg driver_aeads[] = {
- + {
- + .aead = {
- + .base = {
- + .cra_name = "rfc4106(gcm(aes))",
- + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
- + .cra_blocksize = 1,
- + },
- + .setkey = rfc4106_setkey,
- + .setauthsize = rfc4106_setauthsize,
- + .encrypt = ipsec_gcm_encrypt,
- + .decrypt = ipsec_gcm_decrypt,
- + .ivsize = 8,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "rfc4543(gcm(aes))",
- + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
- + .cra_blocksize = 1,
- + },
- + .setkey = rfc4543_setkey,
- + .setauthsize = rfc4543_setauthsize,
- + .encrypt = ipsec_gcm_encrypt,
- + .decrypt = ipsec_gcm_decrypt,
- + .ivsize = 8,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + },
- + },
- + /* Galois Counter Mode */
- + {
- + .aead = {
- + .base = {
- + .cra_name = "gcm(aes)",
- + .cra_driver_name = "gcm-aes-caam-qi",
- + .cra_blocksize = 1,
- + },
- + .setkey = gcm_setkey,
- + .setauthsize = gcm_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = 12,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + }
- + },
- /* single-pass ipsec_esp descriptor */
- {
- .aead = {
- @@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
- .geniv = true,
- }
- },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "tls10(hmac(sha1),cbc(aes))",
- + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = tls_setkey,
- + .setauthsize = tls_setauthsize,
- + .encrypt = tls_encrypt,
- + .decrypt = tls_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + }
- };
-
- struct caam_crypto_alg {
- @@ -2126,9 +2989,20 @@ struct caam_crypto_alg {
- struct caam_alg_entry caam;
- };
-
- -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
- +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
- + bool uses_dkp)
- {
- struct caam_drv_private *priv;
- + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
- + static const u8 digest_size[] = {
- + MD5_DIGEST_SIZE,
- + SHA1_DIGEST_SIZE,
- + SHA224_DIGEST_SIZE,
- + SHA256_DIGEST_SIZE,
- + SHA384_DIGEST_SIZE,
- + SHA512_DIGEST_SIZE
- + };
- + u8 op_id;
-
- /*
- * distribute tfms across job rings to ensure in-order
- @@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_
- return PTR_ERR(ctx->jrdev);
- }
-
- + priv = dev_get_drvdata(ctx->jrdev->parent);
- + if (priv->era >= 6 && uses_dkp)
- + ctx->dir = DMA_BIDIRECTIONAL;
- + else
- + ctx->dir = DMA_TO_DEVICE;
- +
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- - DMA_TO_DEVICE);
- + ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
- caam_jr_free(ctx->jrdev);
- @@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_
- ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
-
- - priv = dev_get_drvdata(ctx->jrdev->parent);
- + if (ctx->adata.algtype) {
- + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
- + >> OP_ALG_ALGSEL_SHIFT;
- + if (op_id < ARRAY_SIZE(digest_size)) {
- + ctx->authsize = digest_size[op_id];
- + } else {
- + dev_err(ctx->jrdev,
- + "incorrect op_id %d; must be less than %zu\n",
- + op_id, ARRAY_SIZE(digest_size));
- + caam_jr_free(ctx->jrdev);
- + return -EINVAL;
- + }
- + } else {
- + ctx->authsize = 0;
- + }
- +
- ctx->qidev = priv->qidev;
-
- spin_lock_init(&ctx->lock);
- @@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-
- - return caam_init_common(ctx, &caam_alg->caam);
- + return caam_init_common(ctx, &caam_alg->caam, false);
- }
-
- static int caam_aead_init(struct crypto_aead *tfm)
- @@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_
- aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
-
- - return caam_init_common(ctx, &caam_alg->caam);
- + return caam_init_common(ctx, &caam_alg->caam,
- + (alg->setkey == aead_setkey) ||
- + (alg->setkey == tls_setkey));
- }
-
- static void caam_exit_common(struct caam_ctx *ctx)
- @@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam
- caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
-
- - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- - DMA_TO_DEVICE);
- + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
-
- caam_jr_free(ctx->jrdev);
- }
- @@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo
- if (!priv || !priv->qi_present)
- return -ENODEV;
-
- + if (caam_dpaa2) {
- + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- + return -ENODEV;
- + }
- +
- INIT_LIST_HEAD(&alg_list);
-
- /*
- --- /dev/null
- +++ b/drivers/crypto/caam/caamalg_qi2.c
- @@ -0,0 +1,5691 @@
- +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- +/*
- + * Copyright 2015-2016 Freescale Semiconductor Inc.
- + * Copyright 2017-2018 NXP
- + */
- +
- +#include <linux/fsl/mc.h>
- +#include "compat.h"
- +#include "regs.h"
- +#include "caamalg_qi2.h"
- +#include "dpseci_cmd.h"
- +#include "desc_constr.h"
- +#include "error.h"
- +#include "sg_sw_sec4.h"
- +#include "sg_sw_qm2.h"
- +#include "key_gen.h"
- +#include "caamalg_desc.h"
- +#include "caamhash_desc.h"
- +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
- +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
- +
- +#define CAAM_CRA_PRIORITY 2000
- +
- +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
- +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
- + SHA512_DIGEST_SIZE * 2)
- +
- +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
- +bool caam_little_end;
- +EXPORT_SYMBOL(caam_little_end);
- +bool caam_imx;
- +EXPORT_SYMBOL(caam_imx);
- +#endif
- +
- +/*
- + * This is a cache of buffers from which users of the CAAM QI driver
- + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
- + * NOTE: A more elegant solution would be to have some headroom in the frames
- + * being processed. This can be added by the dpaa2-eth driver. This would
- + * pose a problem for userspace application processing which cannot
- + * know of this limitation. So for now, this will work.
- + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
- + */
- +static struct kmem_cache *qi_cache;
- +
- +struct caam_alg_entry {
- + struct device *dev;
- + int class1_alg_type;
- + int class2_alg_type;
- + bool rfc3686;
- + bool geniv;
- +};
- +
- +struct caam_aead_alg {
- + struct aead_alg aead;
- + struct caam_alg_entry caam;
- + bool registered;
- +};
- +
- +struct caam_skcipher_alg {
- + struct skcipher_alg skcipher;
- + struct caam_alg_entry caam;
- + bool registered;
- +};
- +
- +/**
- + * struct caam_ctx - per-session context
- + * @flc: Flow Contexts array
- + * @key: virtual address of the key(s): [authentication key], encryption key
- + * @flc_dma: I/O virtual addresses of the Flow Contexts
- + * @key_dma: I/O virtual address of the key
- + * @dir: DMA direction for mapping key and Flow Contexts
- + * @dev: dpseci device
- + * @adata: authentication algorithm details
- + * @cdata: encryption algorithm details
- + * @authsize: authentication tag (a.k.a. ICV / MAC) size
- + */
- +struct caam_ctx {
- + struct caam_flc flc[NUM_OP];
- + u8 key[CAAM_MAX_KEY_SIZE];
- + dma_addr_t flc_dma[NUM_OP];
- + dma_addr_t key_dma;
- + enum dma_data_direction dir;
- + struct device *dev;
- + struct alginfo adata;
- + struct alginfo cdata;
- + unsigned int authsize;
- +};
- +
- +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
- + dma_addr_t iova_addr)
- +{
- + phys_addr_t phys_addr;
- +
- + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
- + iova_addr;
- +
- + return phys_to_virt(phys_addr);
- +}
- +
- +/*
- + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
- + *
- + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
- + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
- + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
- + * hosting 16 SG entries.
- + *
- + * @flags - flags that would be used for the equivalent kmalloc(..) call
- + *
- + * Returns a pointer to a retrieved buffer on success or NULL on failure.
- + */
- +static inline void *qi_cache_zalloc(gfp_t flags)
- +{
- + return kmem_cache_zalloc(qi_cache, flags);
- +}
- +
- +/*
- + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
- + *
- + * @obj - buffer previously allocated by qi_cache_zalloc
- + *
- + * No checking is done; the call is a pass-through to
- + * kmem_cache_free(...)
- + */
- +static inline void qi_cache_free(void *obj)
- +{
- + kmem_cache_free(qi_cache, obj);
- +}
- +
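A hedged sketch of the allocation pattern the *_edesc_alloc() helpers below follow when drawing from this cache (my_edesc is a made-up stand-in for the real edesc structs):

    static struct my_edesc *example_edesc_alloc(void)
    {
            struct my_edesc *edesc;

            /* GFP_DMA | GFP_ATOMIC mirrors the non-sleeping hotpath case */
            edesc = qi_cache_zalloc(GFP_DMA | GFP_ATOMIC);
            if (unlikely(!edesc))
                    return ERR_PTR(-ENOMEM);

            /* ... build the h/w link table inside the cached buffer ... */
            return edesc;   /* released with qi_cache_free() on completion */
    }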
- +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
- +{
- + switch (crypto_tfm_alg_type(areq->tfm)) {
- + case CRYPTO_ALG_TYPE_SKCIPHER:
- + return skcipher_request_ctx(skcipher_request_cast(areq));
- + case CRYPTO_ALG_TYPE_AEAD:
- + return aead_request_ctx(container_of(areq, struct aead_request,
- + base));
- + case CRYPTO_ALG_TYPE_AHASH:
- + return ahash_request_ctx(ahash_request_cast(areq));
- + default:
- + return ERR_PTR(-EINVAL);
- + }
- +}
- +
- +static void caam_unmap(struct device *dev, struct scatterlist *src,
- + struct scatterlist *dst, int src_nents,
- + int dst_nents, dma_addr_t iv_dma, int ivsize,
- + dma_addr_t qm_sg_dma, int qm_sg_bytes)
- +{
- + if (dst != src) {
- + if (src_nents)
- + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
- + } else {
- + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
- + }
- +
- + if (iv_dma)
- + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- +
- + if (qm_sg_bytes)
- + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
- +}
- +
- +static int aead_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- + typeof(*alg), aead);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + struct device *dev = ctx->dev;
- + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
- + struct caam_flc *flc;
- + u32 *desc;
- + u32 ctx1_iv_off = 0;
- + u32 *nonce = NULL;
- + unsigned int data_len[2];
- + u32 inl_mask;
- + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- + OP_ALG_AAI_CTR_MOD128);
- + const bool is_rfc3686 = alg->caam.rfc3686;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + /*
- + * AES-CTR needs to load IV in CONTEXT1 reg
- + * at an offset of 128 bits (16 bytes)
- + * CONTEXT1[255:128] = IV
- + */
- + if (ctr_mode)
- + ctx1_iv_off = 16;
- +
- + /*
- + * RFC3686 specific:
- + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- + */
- + if (is_rfc3686) {
- + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
- + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
- + }
- +
- + data_len[0] = ctx->adata.keylen_pad;
- + data_len[1] = ctx->cdata.keylen;
- +
- + /* aead_encrypt shared descriptor */
- + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
- + DESC_QI_AEAD_ENC_LEN) +
- + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
- + DESC_JOB_IO_LEN, data_len, &inl_mask,
- + ARRAY_SIZE(data_len)) < 0)
- + return -EINVAL;
- +
- + if (inl_mask & 1)
- + ctx->adata.key_virt = ctx->key;
- + else
- + ctx->adata.key_dma = ctx->key_dma;
- +
- + if (inl_mask & 2)
- + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- + else
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + ctx->adata.key_inline = !!(inl_mask & 1);
- + ctx->cdata.key_inline = !!(inl_mask & 2);
- +
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- +
- + if (alg->caam.geniv)
- + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
- + ivsize, ctx->authsize, is_rfc3686,
- + nonce, ctx1_iv_off, true,
- + priv->sec_attr.era);
- + else
- + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
- + ivsize, ctx->authsize, is_rfc3686, nonce,
- + ctx1_iv_off, true, priv->sec_attr.era);
- +
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /* aead_decrypt shared descriptor */
- + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
- + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
- + DESC_JOB_IO_LEN, data_len, &inl_mask,
- + ARRAY_SIZE(data_len)) < 0)
- + return -EINVAL;
- +
- + if (inl_mask & 1)
- + ctx->adata.key_virt = ctx->key;
- + else
- + ctx->adata.key_dma = ctx->key_dma;
- +
- + if (inl_mask & 2)
- + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- + else
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + ctx->adata.key_inline = !!(inl_mask & 1);
- + ctx->cdata.key_inline = !!(inl_mask & 2);
- +
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
- + ivsize, ctx->authsize, alg->caam.geniv,
- + is_rfc3686, nonce, ctx1_iv_off, true,
- + priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
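For readability, the contract assumed of desc_inline_query() by the code above, where bit i of inl_mask reports whether data_len[i] still fits inline:

    /*
     * data_len[0] = split authentication key  ->  inl_mask & 1
     * data_len[1] = cipher key                ->  inl_mask & 2
     *
     * set bit:   use key_virt (key material inlined in the descriptor)
     * clear bit: use key_dma  (key material referenced by pointer)
     */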
- +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + aead_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +struct split_key_sh_result {
- + struct completion completion;
- + int err;
- + struct device *dev;
- +};
- +
- +static void split_key_sh_done(void *cbk_ctx, u32 err)
- +{
- + struct split_key_sh_result *res = cbk_ctx;
- +
- +#ifdef DEBUG
- + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- +#endif
- +
- + if (err)
- + caam_qi2_strstatus(res->dev, err);
- +
- + res->err = err;
- + complete(&res->completion);
- +}
- +
- +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- + struct crypto_authenc_keys keys;
- +
- + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- + goto badkey;
- +
- +#ifdef DEBUG
- + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
- + keys.authkeylen + keys.enckeylen, keys.enckeylen,
- + keys.authkeylen);
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + ctx->adata.keylen = keys.authkeylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- +
- + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- + goto badkey;
- +
- + memcpy(ctx->key, keys.authkey, keys.authkeylen);
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- + ctx->adata.keylen_pad + keys.enckeylen, 1);
- +#endif
- +
- + ctx->cdata.keylen = keys.enckeylen;
- +
- + return aead_set_sh_desc(aead);
- +badkey:
- + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + return -EINVAL;
- +}
- +
- +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- + bool encrypt)
- +{
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_request *req_ctx = aead_request_ctx(req);
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- + typeof(*alg), aead);
- + struct device *dev = ctx->dev;
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- + struct aead_edesc *edesc;
- + dma_addr_t qm_sg_dma, iv_dma = 0;
- + int ivsize = 0;
- + unsigned int authsize = ctx->authsize;
- + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
- + int in_len, out_len;
- + struct dpaa2_sg_entry *sg_table;
- +
- + /* allocate space for base edesc, link tables and IV */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (unlikely(!edesc)) {
- + dev_err(dev, "could not allocate extended descriptor\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + if (unlikely(req->dst != req->src)) {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen);
- + if (unlikely(src_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen);
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- + req->cryptlen +
- + (encrypt ? authsize :
- + (-authsize)));
- + if (unlikely(dst_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- + req->assoclen + req->cryptlen +
- + (encrypt ? authsize : (-authsize)));
- + qi_cache_free(edesc);
- + return ERR_PTR(dst_nents);
- + }
- +
- + if (src_nents) {
- + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + } else {
- + mapped_src_nents = 0;
- + }
- +
- + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
- + DMA_FROM_DEVICE);
- + if (unlikely(!mapped_dst_nents)) {
- + dev_err(dev, "unable to map destination\n");
- + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + } else {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen +
- + (encrypt ? authsize : 0));
- + if (unlikely(src_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen +
- + (encrypt ? authsize : 0));
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- +
- + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
- + ivsize = crypto_aead_ivsize(aead);
- +
- + /*
- + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
- + * Input is not contiguous.
- + */
- + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
- + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- + sg_table = &edesc->sgt[0];
- + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
- + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
- + CAAM_QI_MEMCACHE_SIZE)) {
- + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
- + qm_sg_nents, ivsize);
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- + 0, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + if (ivsize) {
- + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
- +
- + /* Make sure IV is located in a DMAable area */
- + memcpy(iv, req->iv, ivsize);
- +
- + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, iv_dma)) {
- + dev_err(dev, "unable to map IV\n");
- + caam_unmap(dev, req->src, req->dst, src_nents,
- + dst_nents, 0, 0, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- +
- + edesc->src_nents = src_nents;
- + edesc->dst_nents = dst_nents;
- + edesc->iv_dma = iv_dma;
- +
- + edesc->assoclen = cpu_to_caam32(req->assoclen);
- + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
- + dev_err(dev, "unable to map assoclen\n");
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- + iv_dma, ivsize, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
- + qm_sg_index++;
- + if (ivsize) {
- + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
- + qm_sg_index++;
- + }
- + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
- + qm_sg_index += mapped_src_nents;
- +
- + if (mapped_dst_nents > 1)
- + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- + qm_sg_index, 0);
- +
- + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, qm_sg_dma)) {
- + dev_err(dev, "unable to map S/G table\n");
- + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- + iv_dma, ivsize, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->qm_sg_dma = qm_sg_dma;
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + out_len = req->assoclen + req->cryptlen +
- + (encrypt ? ctx->authsize : (-ctx->authsize));
- + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, in_len);
- +
- + if (req->dst == req->src) {
- + if (mapped_src_nents == 1) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
- + } else {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
- + (1 + !!ivsize) * sizeof(*sg_table));
- + }
- + } else if (mapped_dst_nents == 1) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- + } else {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
- + sizeof(*sg_table));
- + }
- +
- + dpaa2_fl_set_len(out_fle, out_len);
- +
- + return edesc;
- +}
- +
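As a reading aid (not driver code), the shape of the dpaa2 S/G table that aead_edesc_alloc() assembles:

    /*
     * sg_table[0]              32-bit assoclen word (separately DMA-mapped)
     * sg_table[1]              IV, present only when ivsize != 0
     * sg_table[next ..]        req->src segments (last entry marked final)
     * sg_table[after src ..]   req->dst segments, only when dst is scattered
     *
     * in_fle covers sg_table[0] for 4 + ivsize + assoclen + cryptlen bytes;
     * out_fle either starts past the assoclen/IV entries (in-place case)
     * or points at req->dst directly when it is a single mapped segment.
     */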
- +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
- + bool encrypt)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + unsigned int blocksize = crypto_aead_blocksize(tls);
- + unsigned int padsize, authsize;
- + struct caam_request *req_ctx = aead_request_ctx(req);
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
- + typeof(*alg), aead);
- + struct device *dev = ctx->dev;
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- + struct tls_edesc *edesc;
- + dma_addr_t qm_sg_dma, iv_dma = 0;
- + int ivsize = 0;
- + u8 *iv;
- + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
- + int in_len, out_len;
- + struct dpaa2_sg_entry *sg_table;
- + struct scatterlist *dst;
- +
- + if (encrypt) {
- + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
- + blocksize);
- + authsize = ctx->authsize + padsize;
- + } else {
- + authsize = ctx->authsize;
- + }
- +
- + /* allocate space for base edesc, link tables and IV */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (unlikely(!edesc)) {
- + dev_err(dev, "could not allocate extended descriptor\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + if (likely(req->src == req->dst)) {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen +
- + (encrypt ? authsize : 0));
- + if (unlikely(src_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen +
- + (encrypt ? authsize : 0));
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + dst = req->dst;
- + } else {
- + src_nents = sg_nents_for_len(req->src, req->assoclen +
- + req->cryptlen);
- + if (unlikely(src_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- + req->assoclen + req->cryptlen);
- + qi_cache_free(edesc);
- + return ERR_PTR(src_nents);
- + }
- +
- + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
- + dst_nents = sg_nents_for_len(dst, req->cryptlen +
- + (encrypt ? authsize : 0));
- + if (unlikely(dst_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- + req->cryptlen +
- + (encrypt ? authsize : 0));
- + qi_cache_free(edesc);
- + return ERR_PTR(dst_nents);
- + }
- +
- + if (src_nents) {
- + mapped_src_nents = dma_map_sg(dev, req->src,
- + src_nents, DMA_TO_DEVICE);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + } else {
- + mapped_src_nents = 0;
- + }
- +
- + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
- + DMA_FROM_DEVICE);
- + if (unlikely(!mapped_dst_nents)) {
- + dev_err(dev, "unable to map destination\n");
- + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- +
- + /*
- + * Create S/G table: IV, src, dst.
- + * Input is not contiguous.
- + */
- + qm_sg_ents = 1 + mapped_src_nents +
- + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- + sg_table = &edesc->sgt[0];
- + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
- +
- + ivsize = crypto_aead_ivsize(tls);
- + iv = (u8 *)(sg_table + qm_sg_ents);
- + /* Make sure IV is located in a DMAable area */
- + memcpy(iv, req->iv, ivsize);
- + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, iv_dma)) {
- + dev_err(dev, "unable to map IV\n");
- + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
- + 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->src_nents = src_nents;
- + edesc->dst_nents = dst_nents;
- + edesc->dst = dst;
- + edesc->iv_dma = iv_dma;
- +
- + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- + qm_sg_index = 1;
- +
- + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
- + qm_sg_index += mapped_src_nents;
- +
- + if (mapped_dst_nents > 1)
- + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
- + qm_sg_index, 0);
- +
- + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, qm_sg_dma)) {
- + dev_err(dev, "unable to map S/G table\n");
- + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
- + ivsize, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->qm_sg_dma = qm_sg_dma;
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + out_len = req->cryptlen + (encrypt ? authsize : 0);
- + in_len = ivsize + req->assoclen + req->cryptlen;
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, in_len);
- +
- + if (req->dst == req->src) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
- + (sg_nents_for_len(req->src, req->assoclen) +
- + 1) * sizeof(*sg_table));
- + } else if (mapped_dst_nents == 1) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
- + } else {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
- + sizeof(*sg_table));
- + }
- +
- + dpaa2_fl_set_len(out_fle, out_len);
- +
- + return edesc;
- +}
- +
- +static int tls_set_sh_desc(struct crypto_aead *tls)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + unsigned int ivsize = crypto_aead_ivsize(tls);
- + unsigned int blocksize = crypto_aead_blocksize(tls);
- + struct device *dev = ctx->dev;
- + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
- + struct caam_flc *flc;
- + u32 *desc;
- + unsigned int assoclen = 13; /* always 13 for TLS: seq_num(8) + type(1) + version(2) + length(2) */
- + unsigned int data_len[2];
- + u32 inl_mask;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + /*
- + * TLS 1.0 encrypt shared descriptor
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + data_len[0] = ctx->adata.keylen_pad;
- + data_len[1] = ctx->cdata.keylen;
- +
- + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
- + &inl_mask, ARRAY_SIZE(data_len)) < 0)
- + return -EINVAL;
- +
- + if (inl_mask & 1)
- + ctx->adata.key_virt = ctx->key;
- + else
- + ctx->adata.key_dma = ctx->key_dma;
- +
- + if (inl_mask & 2)
- + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- + else
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + ctx->adata.key_inline = !!(inl_mask & 1);
- + ctx->cdata.key_inline = !!(inl_mask & 2);
- +
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
- + assoclen, ivsize, ctx->authsize, blocksize,
- + priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc));
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /*
- + * TLS 1.0 decrypt shared descriptor
- + * Keys do not fit inline, regardless of algorithms used
- + */
- + ctx->adata.key_inline = false;
- + ctx->adata.key_dma = ctx->key_dma;
- + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- +
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
- + ctx->authsize, blocksize, priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
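The fixed assoclen of 13 in tls_set_sh_desc() is the size of the TLS 1.0 MAC pseudo-header. A hypothetical struct, purely to show where the 13 bytes come from (it is not part of the driver):

    struct tls10_mac_hdr {          /* illustration only */
            __be64 seq_num;         /* implicit record sequence number */
            u8     type;            /* ContentType */
            __be16 version;         /* ProtocolVersion, 0x0301 for TLS 1.0 */
            __be16 length;          /* length of the plaintext fragment */
    } __packed;                     /* 8 + 1 + 2 + 2 = 13 bytes */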
- +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + struct device *dev = ctx->dev;
- + struct crypto_authenc_keys keys;
- +
- + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- + goto badkey;
- +
- +#ifdef DEBUG
- + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
- + keys.authkeylen + keys.enckeylen, keys.enckeylen,
- + keys.authkeylen);
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + ctx->adata.keylen = keys.authkeylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- +
- + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- + goto badkey;
- +
- + memcpy(ctx->key, keys.authkey, keys.authkeylen);
- + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
- + keys.enckeylen, ctx->dir);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- + ctx->adata.keylen_pad + keys.enckeylen, 1);
- +#endif
- +
- + ctx->cdata.keylen = keys.enckeylen;
- +
- + return tls_set_sh_desc(tls);
- +badkey:
- + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + return -EINVAL;
- +}
- +
- +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- +
- + ctx->authsize = authsize;
- + tls_set_sh_desc(tls);
- +
- + return 0;
- +}
- +
- +static int gcm_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + struct caam_flc *flc;
- + u32 *desc;
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + /*
- + * AES GCM encrypt shared descriptor
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + ctx->cdata.key_virt = ctx->key;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /*
- + * Job Descriptor and Shared Descriptors
- + * must all fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + ctx->cdata.key_virt = ctx->key;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
- +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + gcm_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int gcm_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
- + ctx->cdata.keylen = keylen;
- +
- + return gcm_set_sh_desc(aead);
- +}
- +
- +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + struct caam_flc *flc;
- + u32 *desc;
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + ctx->cdata.key_virt = ctx->key;
- +
- + /*
- + * RFC4106 encrypt shared descriptor
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /*
- + * Job Descriptor and Shared Descriptors
- + * must all fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
- +static int rfc4106_setauthsize(struct crypto_aead *authenc,
- + unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + rfc4106_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int rfc4106_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- +
- + if (keylen < 4)
- + return -EINVAL;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + /*
- + * The last four bytes of the key material are used as the salt value
- + * in the nonce. Update the AES key length.
- + */
- + ctx->cdata.keylen = keylen - 4;
- + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
- + ctx->dir);
- +
- + return rfc4106_set_sh_desc(aead);
- +}
- +
- +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- + unsigned int ivsize = crypto_aead_ivsize(aead);
- + struct caam_flc *flc;
- + u32 *desc;
- + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
- + ctx->cdata.keylen;
- +
- + if (!ctx->cdata.keylen || !ctx->authsize)
- + return 0;
- +
- + ctx->cdata.key_virt = ctx->key;
- +
- + /*
- + * RFC4543 encrypt shared descriptor
- + * Job Descriptor and Shared Descriptor
- + * must fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /*
- + * Job Descriptor and Shared Descriptors
- + * must all fit into the 64-word Descriptor h/w Buffer
- + */
- + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
- + ctx->cdata.key_inline = true;
- + } else {
- + ctx->cdata.key_inline = false;
- + ctx->cdata.key_dma = ctx->key_dma;
- + }
- +
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
- + true);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
- +static int rfc4543_setauthsize(struct crypto_aead *authenc,
- + unsigned int authsize)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
- +
- + ctx->authsize = authsize;
- + rfc4543_set_sh_desc(authenc);
- +
- + return 0;
- +}
- +
- +static int rfc4543_setkey(struct crypto_aead *aead,
- + const u8 *key, unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct device *dev = ctx->dev;
- +
- + if (keylen < 4)
- + return -EINVAL;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- +
- + memcpy(ctx->key, key, keylen);
- + /*
- + * The last four bytes of the key material are used as the salt value
- + * in the nonce. Update the AES key length.
- + */
- + ctx->cdata.keylen = keylen - 4;
- + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
- + ctx->dir);
- +
- + return rfc4543_set_sh_desc(aead);
- +}
- +
- +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct caam_skcipher_alg *alg =
- + container_of(crypto_skcipher_alg(skcipher),
- + struct caam_skcipher_alg, skcipher);
- + struct device *dev = ctx->dev;
- + struct caam_flc *flc;
- + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
- + u32 *desc;
- + u32 ctx1_iv_off = 0;
- + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- + OP_ALG_AAI_CTR_MOD128);
- + const bool is_rfc3686 = alg->caam.rfc3686;
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- +#endif
- + /*
- + * AES-CTR needs to load IV in CONTEXT1 reg
- + * at an offset of 128 bits (16 bytes)
- + * CONTEXT1[255:128] = IV
- + */
- + if (ctr_mode)
- + ctx1_iv_off = 16;
- +
- + /*
- + * RFC3686 specific:
- + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- + * | *key = {KEY, NONCE}
- + */
- + if (is_rfc3686) {
- + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- + keylen -= CTR_RFC3686_NONCE_SIZE;
- + }
- +
- + ctx->cdata.keylen = keylen;
- + ctx->cdata.key_virt = key;
- + ctx->cdata.key_inline = true;
- +
- + /* skcipher_encrypt shared descriptor */
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
- + is_rfc3686, ctx1_iv_off);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /* skcipher_decrypt shared descriptor */
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
- + is_rfc3686, ctx1_iv_off);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
- +
- +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct device *dev = ctx->dev;
- + struct caam_flc *flc;
- + u32 *desc;
- +
- + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- + dev_err(dev, "key size mismatch\n");
- + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + return -EINVAL;
- + }
- +
- + ctx->cdata.keylen = keylen;
- + ctx->cdata.key_virt = key;
- + ctx->cdata.key_inline = true;
- +
- + /* xts_skcipher_encrypt shared descriptor */
- + flc = &ctx->flc[ENCRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + /* xts_skcipher_decrypt shared descriptor */
- + flc = &ctx->flc[DECRYPT];
- + desc = flc->sh_desc;
- + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
- + sizeof(flc->flc) + desc_bytes(desc),
- + ctx->dir);
- +
- + return 0;
- +}
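- +
- +/*
- + * Per the XTS (IEEE 1619) convention, the supplied key is twice the
- + * AES key size: the first half is the data-encryption key and the
- + * second half the tweak key, e.g. a 64-byte key = 32-byte AES-256 key
- + * + 32-byte tweak key. Only 2 x 128-bit and 2 x 256-bit keys are
- + * accepted above.
- + */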
- +
- +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
- +{
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + struct caam_request *req_ctx = skcipher_request_ctx(req);
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct device *dev = ctx->dev;
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- + struct skcipher_edesc *edesc;
- + dma_addr_t iv_dma;
- + u8 *iv;
- + int ivsize = crypto_skcipher_ivsize(skcipher);
- + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- + struct dpaa2_sg_entry *sg_table;
- +
- + src_nents = sg_nents_for_len(req->src, req->cryptlen);
- + if (unlikely(src_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- + req->cryptlen);
- + return ERR_PTR(src_nents);
- + }
- +
- + if (unlikely(req->dst != req->src)) {
- + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
- + if (unlikely(dst_nents < 0)) {
- + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- + req->cryptlen);
- + return ERR_PTR(dst_nents);
- + }
- +
- + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
- + DMA_FROM_DEVICE);
- + if (unlikely(!mapped_dst_nents)) {
- + dev_err(dev, "unable to map destination\n");
- + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- + return ERR_PTR(-ENOMEM);
- + }
- + } else {
- + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(!mapped_src_nents)) {
- + dev_err(dev, "unable to map source\n");
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- +
- + qm_sg_ents = 1 + mapped_src_nents;
- + dst_sg_idx = qm_sg_ents;
- +
- + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
- + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
- + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
- + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
- + qm_sg_ents, ivsize);
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- + 0, 0, 0);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + /* allocate space for base edesc, link tables and IV */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (unlikely(!edesc)) {
- + dev_err(dev, "could not allocate extended descriptor\n");
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- + 0, 0, 0);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + /* Make sure IV is located in a DMAable area */
- + sg_table = &edesc->sgt[0];
- + iv = (u8 *)(sg_table + qm_sg_ents);
- + memcpy(iv, req->iv, ivsize);
- +
- + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, iv_dma)) {
- + dev_err(dev, "unable to map IV\n");
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- + 0, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + edesc->src_nents = src_nents;
- + edesc->dst_nents = dst_nents;
- + edesc->iv_dma = iv_dma;
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
- +
- + if (mapped_dst_nents > 1)
- + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- + dst_sg_idx, 0);
- +
- + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
- + dev_err(dev, "unable to map S/G table\n");
- + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- + iv_dma, ivsize, 0, 0);
- + qi_cache_free(edesc);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
- + dpaa2_fl_set_len(out_fle, req->cryptlen);
- +
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- +
- + if (req->src == req->dst) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
- + sizeof(*sg_table));
- + } else if (mapped_dst_nents > 1) {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
- + sizeof(*sg_table));
- + } else {
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- + }
- +
- + return edesc;
- +}
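- +
- +/*
- + * Layout of the qDMA S/G table built by skcipher_edesc_alloc()
- + * (sketch):
- + *
- + *   sg_table[0]                      IV
- + *   sg_table[1 .. mapped_src_nents]  source buffer
- + *   sg_table[dst_sg_idx ..]          destination buffer (only when
- + *                                    dst != src and it needs more
- + *                                    than one entry)
- + *
- + * The input frame list entry covers IV + cryptlen; the output entry
- + * covers cryptlen only.
- + */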
- +
- +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
- + struct aead_request *req)
- +{
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + int ivsize = crypto_aead_ivsize(aead);
- +
- + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
- + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- +}
- +
- +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
- + struct aead_request *req)
- +{
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + int ivsize = crypto_aead_ivsize(tls);
- +
- + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
- + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
- + edesc->qm_sg_bytes);
- +}
- +
- +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
- + struct skcipher_request *req)
- +{
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + int ivsize = crypto_skcipher_ivsize(skcipher);
- +
- + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
- +}
- +
- +static void aead_encrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct aead_request *req = container_of(areq, struct aead_request,
- + base);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct aead_edesc *edesc = req_ctx->edesc;
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + aead_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + aead_request_complete(req, ecode);
- +}
- +
- +static void aead_decrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct aead_request *req = container_of(areq, struct aead_request,
- + base);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct aead_edesc *edesc = req_ctx->edesc;
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + /*
- + * Verify that the hw auth (ICV) check passed; otherwise return -EBADMSG.
- + */
- + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
- + JRSTA_CCBERR_ERRID_ICVCHK)
- + ecode = -EBADMSG;
- + else
- + ecode = -EIO;
- + }
- +
- + aead_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + aead_request_complete(req, ecode);
- +}
- +
- +static int aead_encrypt(struct aead_request *req)
- +{
- + struct aead_edesc *edesc;
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct caam_request *caam_req = aead_request_ctx(req);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = aead_edesc_alloc(req, true);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + caam_req->flc = &ctx->flc[ENCRYPT];
- + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
- + caam_req->cbk = aead_encrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + aead_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
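- +
- +/*
- + * dpaa2_caam_enqueue() returns -EINPROGRESS on successful submission.
- + * -EBUSY together with CRYPTO_TFM_REQ_MAY_BACKLOG means the request
- + * was backlogged and will still complete, so resources stay mapped;
- + * any other return is a hard failure, and the edesc is unmapped and
- + * freed immediately.
- + */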
- +
- +static int aead_decrypt(struct aead_request *req)
- +{
- + struct aead_edesc *edesc;
- + struct crypto_aead *aead = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(aead);
- + struct caam_request *caam_req = aead_request_ctx(req);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = aead_edesc_alloc(req, false);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + caam_req->flc = &ctx->flc[DECRYPT];
- + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
- + caam_req->cbk = aead_decrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + aead_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
- +
- +static void tls_encrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct aead_request *req = container_of(areq, struct aead_request,
- + base);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct tls_edesc *edesc = req_ctx->edesc;
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + tls_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + aead_request_complete(req, ecode);
- +}
- +
- +static void tls_decrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct aead_request *req = container_of(areq, struct aead_request,
- + base);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct tls_edesc *edesc = req_ctx->edesc;
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + /*
- + * Verify that the hw auth (ICV) check passed; otherwise return -EBADMSG.
- + */
- + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
- + JRSTA_CCBERR_ERRID_ICVCHK)
- + ecode = -EBADMSG;
- + else
- + ecode = -EIO;
- + }
- +
- + tls_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + aead_request_complete(req, ecode);
- +}
- +
- +static int tls_encrypt(struct aead_request *req)
- +{
- + struct tls_edesc *edesc;
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + struct caam_request *caam_req = aead_request_ctx(req);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = tls_edesc_alloc(req, true);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + caam_req->flc = &ctx->flc[ENCRYPT];
- + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
- + caam_req->cbk = tls_encrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + tls_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
- +
- +static int tls_decrypt(struct aead_request *req)
- +{
- + struct tls_edesc *edesc;
- + struct crypto_aead *tls = crypto_aead_reqtfm(req);
- + struct caam_ctx *ctx = crypto_aead_ctx(tls);
- + struct caam_request *caam_req = aead_request_ctx(req);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = tls_edesc_alloc(req, false);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + caam_req->flc = &ctx->flc[DECRYPT];
- + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
- + caam_req->cbk = tls_decrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + tls_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
- +
- +static int ipsec_gcm_encrypt(struct aead_request *req)
- +{
- + if (req->assoclen < 8)
- + return -EINVAL;
- +
- + return aead_encrypt(req);
- +}
- +
- +static int ipsec_gcm_decrypt(struct aead_request *req)
- +{
- + if (req->assoclen < 8)
- + return -EINVAL;
- +
- + return aead_decrypt(req);
- +}
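- +
- +/*
- + * The assoclen < 8 checks above reflect ESP usage: the AAD for
- + * rfc4106/rfc4543 carries at least the 8-byte SPI + sequence number,
- + * so shorter values cannot be valid.
- + */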
- +
- +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct skcipher_request *req = skcipher_request_cast(areq);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct skcipher_edesc *edesc = req_ctx->edesc;
- + int ecode = 0;
- + int ivsize = crypto_skcipher_ivsize(skcipher);
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- + edesc->src_nents > 1 ? 100 : ivsize, 1);
- + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- +#endif
- +
- + skcipher_unmap(ctx->dev, edesc, req);
- +
- + /*
- + * The crypto API expects us to set the IV (req->iv) to the last
- + * ciphertext block. This is used e.g. by the CTS mode.
- + */
- + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- + ivsize, 0);
- +
- + qi_cache_free(edesc);
- + skcipher_request_complete(req, ecode);
- +}
- +
- +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct skcipher_request *req = skcipher_request_cast(areq);
- + struct caam_request *req_ctx = to_caam_req(areq);
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct skcipher_edesc *edesc = req_ctx->edesc;
- + int ecode = 0;
- +#ifdef DEBUG
- + int ivsize = crypto_skcipher_ivsize(skcipher);
- +
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- + edesc->src_nents > 1 ? 100 : ivsize, 1);
- + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- +#endif
- +
- + skcipher_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + skcipher_request_complete(req, ecode);
- +}
- +
- +static int skcipher_encrypt(struct skcipher_request *req)
- +{
- + struct skcipher_edesc *edesc;
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct caam_request *caam_req = skcipher_request_ctx(req);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = skcipher_edesc_alloc(req);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + caam_req->flc = &ctx->flc[ENCRYPT];
- + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
- + caam_req->cbk = skcipher_encrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + skcipher_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
- +
- +static int skcipher_decrypt(struct skcipher_request *req)
- +{
- + struct skcipher_edesc *edesc;
- + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- + struct caam_request *caam_req = skcipher_request_ctx(req);
- + int ivsize = crypto_skcipher_ivsize(skcipher);
- + int ret;
- +
- + /* allocate extended descriptor */
- + edesc = skcipher_edesc_alloc(req);
- + if (IS_ERR(edesc))
- + return PTR_ERR(edesc);
- +
- + /*
- + * The crypto API expects us to set the IV (req->iv) to the last
- + * ciphertext block.
- + */
- + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- + ivsize, 0);
- +
- + caam_req->flc = &ctx->flc[DECRYPT];
- + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
- + caam_req->cbk = skcipher_decrypt_done;
- + caam_req->ctx = &req->base;
- + caam_req->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- + skcipher_unmap(ctx->dev, edesc, req);
- + qi_cache_free(edesc);
- + }
- +
- + return ret;
- +}
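- +
- +/*
- + * Note that skcipher_decrypt() saves the last ciphertext block into
- + * req->iv before enqueueing: for in-place (src == dst) requests the
- + * ciphertext is overwritten by the time the response arrives, so the
- + * IV chaining value must be captured up front.
- + */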
- +
- +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
- + bool uses_dkp)
- +{
- + dma_addr_t dma_addr;
- + int i;
- +
- + /* copy descriptor header template value */
- + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- +
- + ctx->dev = caam->dev;
- + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
- +
- + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
- + offsetof(struct caam_ctx, flc_dma),
- + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- + if (dma_mapping_error(ctx->dev, dma_addr)) {
- + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
- + return -ENOMEM;
- + }
- +
- + for (i = 0; i < NUM_OP; i++)
- + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
- + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
- +
- + return 0;
- +}
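- +
- +/*
- + * caam_cra_init() covers flc[] and key[] with a single DMA mapping
- + * whose layout mirrors struct caam_ctx (sketch):
- + *
- + *   dma_addr + 0                        flc[ENCRYPT]
- + *   dma_addr + sizeof(flc[0])           flc[DECRYPT]
- + *   ...
- + *   dma_addr + NUM_OP * sizeof(flc[0])  key[]
- + *
- + * which is why offsetof(struct caam_ctx, flc_dma) bounds the mapping.
- + */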
- +
- +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
- +{
- + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- + struct caam_skcipher_alg *caam_alg =
- + container_of(alg, typeof(*caam_alg), skcipher);
- +
- + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
- + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
- +}
- +
- +static int caam_cra_init_aead(struct crypto_aead *tfm)
- +{
- + struct aead_alg *alg = crypto_aead_alg(tfm);
- + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- + aead);
- +
- + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
- + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
- + (alg->setkey == aead_setkey) ||
- + (alg->setkey == tls_setkey));
- +}
- +
- +static void caam_exit_common(struct caam_ctx *ctx)
- +{
- + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
- + offsetof(struct caam_ctx, flc_dma), ctx->dir,
- + DMA_ATTR_SKIP_CPU_SYNC);
- +}
- +
- +static void caam_cra_exit(struct crypto_skcipher *tfm)
- +{
- + caam_exit_common(crypto_skcipher_ctx(tfm));
- +}
- +
- +static void caam_cra_exit_aead(struct crypto_aead *tfm)
- +{
- + caam_exit_common(crypto_aead_ctx(tfm));
- +}
- +
- +static struct caam_skcipher_alg driver_algs[] = {
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "cbc(aes)",
- + .cra_driver_name = "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = AES_MIN_KEY_SIZE,
- + .max_keysize = AES_MAX_KEY_SIZE,
- + .ivsize = AES_BLOCK_SIZE,
- + },
- + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + },
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "cbc(des3_ede)",
- + .cra_driver_name = "cbc-3des-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = DES3_EDE_KEY_SIZE,
- + .max_keysize = DES3_EDE_KEY_SIZE,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + },
- + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + },
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "cbc(des)",
- + .cra_driver_name = "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = DES_KEY_SIZE,
- + .max_keysize = DES_KEY_SIZE,
- + .ivsize = DES_BLOCK_SIZE,
- + },
- + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + },
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "ctr(aes)",
- + .cra_driver_name = "ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = AES_MIN_KEY_SIZE,
- + .max_keysize = AES_MAX_KEY_SIZE,
- + .ivsize = AES_BLOCK_SIZE,
- + .chunksize = AES_BLOCK_SIZE,
- + },
- + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + },
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "rfc3686(ctr(aes))",
- + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = AES_MIN_KEY_SIZE +
- + CTR_RFC3686_NONCE_SIZE,
- + .max_keysize = AES_MAX_KEY_SIZE +
- + CTR_RFC3686_NONCE_SIZE,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .chunksize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .skcipher = {
- + .base = {
- + .cra_name = "xts(aes)",
- + .cra_driver_name = "xts-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = xts_skcipher_setkey,
- + .encrypt = skcipher_encrypt,
- + .decrypt = skcipher_decrypt,
- + .min_keysize = 2 * AES_MIN_KEY_SIZE,
- + .max_keysize = 2 * AES_MAX_KEY_SIZE,
- + .ivsize = AES_BLOCK_SIZE,
- + },
- + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
- + }
- +};
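- +
- +/*
- + * Common fields (cra_module, cra_priority, cra_ctxsize, cra_flags) and
- + * the init/exit hooks are not set in the templates above; they are
- + * filled in at registration time by caam_skcipher_alg_init() below.
- + */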
- +
- +static struct caam_aead_alg driver_aeads[] = {
- + {
- + .aead = {
- + .base = {
- + .cra_name = "rfc4106(gcm(aes))",
- + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = rfc4106_setkey,
- + .setauthsize = rfc4106_setauthsize,
- + .encrypt = ipsec_gcm_encrypt,
- + .decrypt = ipsec_gcm_decrypt,
- + .ivsize = 8,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "rfc4543(gcm(aes))",
- + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = rfc4543_setkey,
- + .setauthsize = rfc4543_setauthsize,
- + .encrypt = ipsec_gcm_encrypt,
- + .decrypt = ipsec_gcm_decrypt,
- + .ivsize = 8,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + },
- + },
- + /* Galois Counter Mode */
- + {
- + .aead = {
- + .base = {
- + .cra_name = "gcm(aes)",
- + .cra_driver_name = "gcm-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = gcm_setkey,
- + .setauthsize = gcm_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = 12,
- + .maxauthsize = AES_BLOCK_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- + }
- + },
- + /* single-pass ipsec_esp descriptor */
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(md5),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-md5-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(md5),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-hmac-md5-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha1),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-sha1-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha1),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha1-cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha224),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-sha224-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha224),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha224-cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha256),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-sha256-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha256),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha256-cbc-aes-"
- + "caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha384),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-sha384-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha384),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha384-cbc-aes-"
- + "caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha512),cbc(aes))",
- + .cra_driver_name = "authenc-hmac-sha512-"
- + "cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha512),"
- + "cbc(aes)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha512-cbc-aes-"
- + "caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-md5-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(md5),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-hmac-md5-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha1),"
- + "cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-sha1-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha1),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha1-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha224),"
- + "cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-sha224-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha224),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha224-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha256),"
- + "cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-sha256-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha256),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha256-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha384),"
- + "cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-sha384-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha384),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha384-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha512),"
- + "cbc(des3_ede))",
- + .cra_driver_name = "authenc-hmac-sha512-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha512),"
- + "cbc(des3_ede)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha512-"
- + "cbc-des3_ede-caam-qi2",
- + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES3_EDE_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(md5),cbc(des))",
- + .cra_driver_name = "authenc-hmac-md5-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(md5),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-hmac-md5-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha1),cbc(des))",
- + .cra_driver_name = "authenc-hmac-sha1-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha1),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha1-cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha224),cbc(des))",
- + .cra_driver_name = "authenc-hmac-sha224-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha224),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha224-cbc-des-"
- + "caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha256),cbc(des))",
- + .cra_driver_name = "authenc-hmac-sha256-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha256),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha256-cbc-desi-"
- + "caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha384),cbc(des))",
- + .cra_driver_name = "authenc-hmac-sha384-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha384),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha384-cbc-des-"
- + "caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha512),cbc(des))",
- + .cra_driver_name = "authenc-hmac-sha512-"
- + "cbc-des-caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "echainiv(authenc(hmac(sha512),"
- + "cbc(des)))",
- + .cra_driver_name = "echainiv-authenc-"
- + "hmac-sha512-cbc-des-"
- + "caam-qi2",
- + .cra_blocksize = DES_BLOCK_SIZE,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = DES_BLOCK_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .geniv = true,
- + }
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(md5),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-md5-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc("
- + "hmac(md5),rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-md5-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = MD5_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha1),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-sha1-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc("
- + "hmac(sha1),rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha224),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-sha224-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc("
- + "hmac(sha224),rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA224_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha256),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-sha256-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc(hmac(sha256),"
- + "rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA256_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha384),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-sha384-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc(hmac(sha384),"
- + "rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA384_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "authenc(hmac(sha512),"
- + "rfc3686(ctr(aes)))",
- + .cra_driver_name = "authenc-hmac-sha512-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "seqiv(authenc(hmac(sha512),"
- + "rfc3686(ctr(aes))))",
- + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
- + "rfc3686-ctr-aes-caam-qi2",
- + .cra_blocksize = 1,
- + },
- + .setkey = aead_setkey,
- + .setauthsize = aead_setauthsize,
- + .encrypt = aead_encrypt,
- + .decrypt = aead_decrypt,
- + .ivsize = CTR_RFC3686_IV_SIZE,
- + .maxauthsize = SHA512_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES |
- + OP_ALG_AAI_CTR_MOD128,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + .rfc3686 = true,
- + .geniv = true,
- + },
- + },
- + {
- + .aead = {
- + .base = {
- + .cra_name = "tls10(hmac(sha1),cbc(aes))",
- + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
- + .cra_blocksize = AES_BLOCK_SIZE,
- + },
- + .setkey = tls_setkey,
- + .setauthsize = tls_setauthsize,
- + .encrypt = tls_encrypt,
- + .decrypt = tls_decrypt,
- + .ivsize = AES_BLOCK_SIZE,
- + .maxauthsize = SHA1_DIGEST_SIZE,
- + },
- + .caam = {
- + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
- + OP_ALG_AAI_HMAC_PRECOMP,
- + },
- + },
- +};
- +
- +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
- +{
- + struct skcipher_alg *alg = &t_alg->skcipher;
- +
- + alg->base.cra_module = THIS_MODULE;
- + alg->base.cra_priority = CAAM_CRA_PRIORITY;
- + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- +
- + alg->init = caam_cra_init_skcipher;
- + alg->exit = caam_cra_exit;
- +}
- +
- +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
- +{
- + struct aead_alg *alg = &t_alg->aead;
- +
- + alg->base.cra_module = THIS_MODULE;
- + alg->base.cra_priority = CAAM_CRA_PRIORITY;
- + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- +
- + alg->init = caam_cra_init_aead;
- + alg->exit = caam_cra_exit_aead;
- +}
- +
- +/* max hash key is max split key size */
- +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
- +
- +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
- +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
- +
- +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
- + CAAM_MAX_HASH_KEY_SIZE)
- +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
- +
- +/* caam context sizes for hashes: running digest + 8 */
- +#define HASH_MSG_LEN 8
- +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
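- +/*
- + * e.g. for SHA-512: 64 bytes of running digest + 8 bytes of message
- + * length, so MAX_CTX_LEN = 8 + 64 = 72 bytes.
- + */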
- +
- +enum hash_optype {
- + UPDATE = 0,
- + UPDATE_FIRST,
- + FINALIZE,
- + DIGEST,
- + HASH_NUM_OP
- +};
- +
- +/**
- + * caam_hash_ctx - ahash per-session context
- + * @flc: Flow Contexts array
- + * @flc_dma: I/O virtual addresses of the Flow Contexts
- + * @key: virtual address of the authentication key
- + * @dev: dpseci device
- + * @ctx_len: size of Context Register
- + * @adata: hashing algorithm details
- + */
- +struct caam_hash_ctx {
- + struct caam_flc flc[HASH_NUM_OP];
- + dma_addr_t flc_dma[HASH_NUM_OP];
- + u8 key[CAAM_MAX_HASH_KEY_SIZE];
- + struct device *dev;
- + int ctx_len;
- + struct alginfo adata;
- +};
- +
- +/* ahash state */
- +struct caam_hash_state {
- + struct caam_request caam_req;
- + dma_addr_t buf_dma;
- + dma_addr_t ctx_dma;
- + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- + int buflen_0;
- + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- + int buflen_1;
- + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
- + int (*update)(struct ahash_request *req);
- + int (*final)(struct ahash_request *req);
- + int (*finup)(struct ahash_request *req);
- + int current_buf;
- +};
- +
- +struct caam_export_state {
- + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
- + u8 caam_ctx[MAX_CTX_LEN];
- + int buflen;
- + int (*update)(struct ahash_request *req);
- + int (*final)(struct ahash_request *req);
- + int (*finup)(struct ahash_request *req);
- +};
- +
- +static inline void switch_buf(struct caam_hash_state *state)
- +{
- + state->current_buf ^= 1;
- +}
- +
- +static inline u8 *current_buf(struct caam_hash_state *state)
- +{
- + return state->current_buf ? state->buf_1 : state->buf_0;
- +}
- +
- +static inline u8 *alt_buf(struct caam_hash_state *state)
- +{
- + return state->current_buf ? state->buf_0 : state->buf_1;
- +}
- +
- +static inline int *current_buflen(struct caam_hash_state *state)
- +{
- + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
- +}
- +
- +static inline int *alt_buflen(struct caam_hash_state *state)
- +{
- + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
- +}
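- +
- +/*
- + * Partial blocks are staged in two ping-pong buffers: current_buf()
- + * holds bytes still waiting to be hashed, alt_buf() collects the
- + * remainder of the next request, and switch_buf() flips the roles once
- + * a job consuming the current buffer completes.
- + */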
- +
- +/* Map current buffer in state (if length > 0) and put it in link table */
- +static inline int buf_map_to_qm_sg(struct device *dev,
- + struct dpaa2_sg_entry *qm_sg,
- + struct caam_hash_state *state)
- +{
- + int buflen = *current_buflen(state);
- +
- + if (!buflen)
- + return 0;
- +
- + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(dev, state->buf_dma)) {
- + dev_err(dev, "unable to map buf\n");
- + state->buf_dma = 0;
- + return -ENOMEM;
- + }
- +
- + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
- +
- + return 0;
- +}
- +
- +/* Map state->caam_ctx, and add it to link table */
- +static inline int ctx_map_to_qm_sg(struct device *dev,
- + struct caam_hash_state *state, int ctx_len,
- + struct dpaa2_sg_entry *qm_sg, u32 flag)
- +{
- + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
- + if (dma_mapping_error(dev, state->ctx_dma)) {
- + dev_err(dev, "unable to map ctx\n");
- + state->ctx_dma = 0;
- + return -ENOMEM;
- + }
- +
- + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
- +
- + return 0;
- +}
- +
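- +/*
- + * (Re)build the four shared descriptors backing one ahash transform:
- + * UPDATE and UPDATE_FIRST write back the running context, FINALIZE pads
- + * and emits the digest, and DIGEST performs init + finalize in a single
- + * pass. The (split) key is small enough to always be inlined.
- + */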
- +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
- +{
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + int digestsize = crypto_ahash_digestsize(ahash);
- + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
- + struct caam_flc *flc;
- + u32 *desc;
- +
- + ctx->adata.key_virt = ctx->key;
- + ctx->adata.key_inline = true;
- +
- + /* ahash_update shared descriptor */
- + flc = &ctx->flc[UPDATE];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
- + ctx->ctx_len, true, priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
- + desc_bytes(desc), DMA_BIDIRECTIONAL);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR,
- + "ahash update shdesc@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
- +#endif
- +
- + /* ahash_update_first shared descriptor */
- + flc = &ctx->flc[UPDATE_FIRST];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
- + ctx->ctx_len, false, priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
- + desc_bytes(desc), DMA_BIDIRECTIONAL);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR,
- + "ahash update first shdesc@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
- +#endif
- +
- + /* ahash_final shared descriptor */
- + flc = &ctx->flc[FINALIZE];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
- + ctx->ctx_len, true, priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
- + desc_bytes(desc), DMA_BIDIRECTIONAL);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR,
- + "ahash final shdesc@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
- +#endif
- +
- + /* ahash_digest shared descriptor */
- + flc = &ctx->flc[DIGEST];
- + desc = flc->sh_desc;
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
- + ctx->ctx_len, false, priv->sec_attr.era);
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
- + desc_bytes(desc), DMA_BIDIRECTIONAL);
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR,
- + "ahash digest shdesc@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
- +#endif
- +
- + return 0;
- +}
- +
- +/* Digest the key down to digestsize when it is longer than the block size */
- +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- + u32 *keylen, u8 *key_out, u32 digestsize)
- +{
- + struct caam_request *req_ctx;
- + u32 *desc;
- + struct split_key_sh_result result;
- + dma_addr_t src_dma, dst_dma;
- + struct caam_flc *flc;
- + dma_addr_t flc_dma;
- + int ret = -ENOMEM;
- + struct dpaa2_fl_entry *in_fle, *out_fle;
- +
- + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
- + if (!req_ctx)
- + return -ENOMEM;
- +
- + in_fle = &req_ctx->fd_flt[1];
- + out_fle = &req_ctx->fd_flt[0];
- +
- + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
- + if (!flc)
- + goto err_flc;
- +
- + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, src_dma)) {
- + dev_err(ctx->dev, "unable to map key input memory\n");
- + goto err_src_dma;
- + }
- + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, dst_dma)) {
- + dev_err(ctx->dev, "unable to map key output memory\n");
- + goto err_dst_dma;
- + }
- +
- + desc = flc->sh_desc;
- +
- + init_sh_desc(desc, 0);
- +
- + /* descriptor to perform unkeyed hash on key_in */
- + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
- + OP_ALG_AS_INITFINAL);
- + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
- + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
- + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- + LDST_SRCDST_BYTE_CONTEXT);
- +
- + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
- + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
- + desc_bytes(desc), DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, flc_dma)) {
- + dev_err(ctx->dev, "unable to map shared descriptor\n");
- + goto err_flc_dma;
- + }
- +
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(in_fle, src_dma);
- + dpaa2_fl_set_len(in_fle, *keylen);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
- + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
- +#endif
- +
- + result.err = 0;
- + init_completion(&result.completion);
- + result.dev = ctx->dev;
- +
- + req_ctx->flc = flc;
- + req_ctx->flc_dma = flc_dma;
- + req_ctx->cbk = split_key_sh_done;
- + req_ctx->ctx = &result;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret == -EINPROGRESS) {
- + /* in progress */
- + wait_for_completion(&result.completion);
- + ret = result.err;
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR,
- + "digested key@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
- + 1);
- +#endif
- + }
- +
- + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
- + DMA_TO_DEVICE);
- +err_flc_dma:
- + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
- +err_dst_dma:
- + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
- +err_src_dma:
- + kfree(flc);
- +err_flc:
- + kfree(req_ctx);
- +
- + *keylen = digestsize;
- +
- + return ret;
- +}
- +
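- +/*
- + * Per RFC 2104, an HMAC key longer than the block size is first hashed
- + * down to digestsize; hash_digest_key() offloads that reduction to the
- + * SEC engine before the key is stored in the session context.
- + */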
- +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
- + unsigned int keylen)
- +{
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
- + unsigned int digestsize = crypto_ahash_digestsize(ahash);
- + int ret;
- + u8 *hashed_key = NULL;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
- +#endif
- +
- + if (keylen > blocksize) {
- + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
- + GFP_KERNEL | GFP_DMA);
- + if (!hashed_key)
- + return -ENOMEM;
- + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- + digestsize);
- + if (ret)
- + goto bad_free_key;
- + key = hashed_key;
- + }
- +
- + ctx->adata.keylen = keylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
- + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
- + goto bad_free_key;
- +
- + memcpy(ctx->key, key, keylen);
- +
- + kfree(hashed_key);
- + return ahash_set_sh_desc(ahash);
- +bad_free_key:
- + kfree(hashed_key);
- + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- + return -EINVAL;
- +}
- +
- +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
- + struct ahash_request *req, int dst_len)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + if (edesc->src_nents)
- + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- + if (edesc->dst_dma)
- + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
- +
- + if (edesc->qm_sg_bytes)
- + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
- + DMA_TO_DEVICE);
- +
- + if (state->buf_dma) {
- + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
- + DMA_TO_DEVICE);
- + state->buf_dma = 0;
- + }
- +}
- +
- +static inline void ahash_unmap_ctx(struct device *dev,
- + struct ahash_edesc *edesc,
- + struct ahash_request *req, int dst_len,
- + u32 flag)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + if (state->ctx_dma) {
- + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
- + state->ctx_dma = 0;
- + }
- + ahash_unmap(dev, edesc, req, dst_len);
- +}
- +
- +static void ahash_done(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct ahash_request *req = ahash_request_cast(areq);
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct ahash_edesc *edesc = state->caam_req.edesc;
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + int digestsize = crypto_ahash_digestsize(ahash);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + ahash_unmap(ctx->dev, edesc, req, digestsize);
- + qi_cache_free(edesc);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- + ctx->ctx_len, 1);
- + if (req->result)
- + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- + digestsize, 1);
- +#endif
- +
- + req->base.complete(&req->base, ecode);
- +}
- +
- +static void ahash_done_bi(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct ahash_request *req = ahash_request_cast(areq);
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct ahash_edesc *edesc = state->caam_req.edesc;
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + int ecode = 0;
- +#ifdef DEBUG
- + int digestsize = crypto_ahash_digestsize(ahash);
- +
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- + switch_buf(state);
- + qi_cache_free(edesc);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- + ctx->ctx_len, 1);
- + if (req->result)
- + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- + digestsize, 1);
- +#endif
- +
- + req->base.complete(&req->base, ecode);
- +}
- +
- +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct ahash_request *req = ahash_request_cast(areq);
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct ahash_edesc *edesc = state->caam_req.edesc;
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + int digestsize = crypto_ahash_digestsize(ahash);
- + int ecode = 0;
- +
- +#ifdef DEBUG
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- + ctx->ctx_len, 1);
- + if (req->result)
- + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- + digestsize, 1);
- +#endif
- +
- + req->base.complete(&req->base, ecode);
- +}
- +
- +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
- +{
- + struct crypto_async_request *areq = cbk_ctx;
- + struct ahash_request *req = ahash_request_cast(areq);
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct ahash_edesc *edesc = state->caam_req.edesc;
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + int ecode = 0;
- +#ifdef DEBUG
- + int digestsize = crypto_ahash_digestsize(ahash);
- +
- + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- +#endif
- +
- + if (unlikely(status)) {
- + caam_qi2_strstatus(ctx->dev, status);
- + ecode = -EIO;
- + }
- +
- + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- + switch_buf(state);
- + qi_cache_free(edesc);
- +
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- + ctx->ctx_len, 1);
- + if (req->result)
- + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- + digestsize, 1);
- +#endif
- +
- + req->base.complete(&req->base, ecode);
- +}
- +
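- +/*
- + * Hash as many full blocks as possible through the UPDATE descriptor,
- + * chaining running context + buffered remainder + new data in a single
- + * S/G table; whatever does not fill a block (*next_buflen) is copied
- + * aside for the next call. The copy offset "to_hash - *buflen" accounts
- + * for to_hash including the previously buffered bytes.
- + */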
- +static int ahash_update_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + u8 *buf = current_buf(state);
- + int *buflen = current_buflen(state);
- + u8 *next_buf = alt_buf(state);
- + int *next_buflen = alt_buflen(state), last_buflen;
- + int in_len = *buflen + req->nbytes, to_hash;
- + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
- + struct ahash_edesc *edesc;
- + int ret = 0;
- +
- + last_buflen = *next_buflen;
- + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
- + to_hash = in_len - *next_buflen;
- +
- + if (to_hash) {
- + struct dpaa2_sg_entry *sg_table;
- +
- + src_nents = sg_nents_for_len(req->src,
- + req->nbytes - (*next_buflen));
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return -ENOMEM;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + return -ENOMEM;
- + }
- +
- + edesc->src_nents = src_nents;
- + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
- + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
- + sizeof(*sg_table);
- + sg_table = &edesc->sgt[0];
- +
- + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- + DMA_BIDIRECTIONAL);
- + if (ret)
- + goto unmap_ctx;
- +
- + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
- + if (ret)
- + goto unmap_ctx;
- +
- + if (mapped_nents) {
- + sg_to_qm_sg_last(req->src, mapped_nents,
- + sg_table + qm_sg_src_index, 0);
- + if (*next_buflen)
- + scatterwalk_map_and_copy(next_buf, req->src,
- + to_hash - *buflen,
- + *next_buflen, 0);
- + } else {
- + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
- + true);
- + }
- +
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
- + qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
- + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
- +
- + req_ctx->flc = &ctx->flc[UPDATE];
- + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
- + req_ctx->cbk = ahash_done_bi;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY &&
- + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + goto unmap_ctx;
- + } else if (*next_buflen) {
- + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
- + req->nbytes, 0);
- + *buflen = *next_buflen;
- + *next_buflen = last_buflen;
- + }
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- + *next_buflen, 1);
- +#endif
- +
- + return ret;
- +unmap_ctx:
- + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_final_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int buflen = *current_buflen(state);
- + int qm_sg_bytes, qm_sg_src_index;
- + int digestsize = crypto_ahash_digestsize(ahash);
- + struct ahash_edesc *edesc;
- + struct dpaa2_sg_entry *sg_table;
- + int ret;
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc)
- + return -ENOMEM;
- +
- + qm_sg_src_index = 1 + (buflen ? 1 : 0);
- + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
- + sg_table = &edesc->sgt[0];
- +
- + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- + DMA_TO_DEVICE);
- + if (ret)
- + goto unmap_ctx;
- +
- + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
- + if (ret)
- + goto unmap_ctx;
- +
- + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
- +
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- + dev_err(ctx->dev, "unable to map dst\n");
- + edesc->dst_dma = 0;
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- + req_ctx->flc = &ctx->flc[FINALIZE];
- + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
- + req_ctx->cbk = ahash_done_ctx_src;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret == -EINPROGRESS ||
- + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + return ret;
- +
- +unmap_ctx:
- + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_finup_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int buflen = *current_buflen(state);
- + int qm_sg_bytes, qm_sg_src_index;
- + int src_nents, mapped_nents;
- + int digestsize = crypto_ahash_digestsize(ahash);
- + struct ahash_edesc *edesc;
- + struct dpaa2_sg_entry *sg_table;
- + int ret;
- +
- + src_nents = sg_nents_for_len(req->src, req->nbytes);
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return -ENOMEM;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- + return -ENOMEM;
- + }
- +
- + edesc->src_nents = src_nents;
- + qm_sg_src_index = 1 + (buflen ? 1 : 0);
- + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
- + sg_table = &edesc->sgt[0];
- +
- + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- + DMA_TO_DEVICE);
- + if (ret)
- + goto unmap_ctx;
- +
- + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
- + if (ret)
- + goto unmap_ctx;
- +
- + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
- +
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- + dev_err(ctx->dev, "unable to map dst\n");
- + edesc->dst_dma = 0;
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- + req_ctx->flc = &ctx->flc[FINALIZE];
- + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
- + req_ctx->cbk = ahash_done_ctx_src;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret == -EINPROGRESS ||
- + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + return ret;
- +
- +unmap_ctx:
- + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_digest(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int digestsize = crypto_ahash_digestsize(ahash);
- + int src_nents, mapped_nents;
- + struct ahash_edesc *edesc;
- + int ret = -ENOMEM;
- +
- + state->buf_dma = 0;
- +
- + src_nents = sg_nents_for_len(req->src, req->nbytes);
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return ret;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- + return ret;
- + }
- +
- + edesc->src_nents = src_nents;
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- +
- + if (mapped_nents > 1) {
- + int qm_sg_bytes;
- + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
- +
- + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
- + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
- + qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + goto unmap;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + } else {
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
- + }
- +
- + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- + dev_err(ctx->dev, "unable to map dst\n");
- + edesc->dst_dma = 0;
- + goto unmap;
- + }
- +
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_len(in_fle, req->nbytes);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- + req_ctx->flc = &ctx->flc[DIGEST];
- + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
- + req_ctx->cbk = ahash_done;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret == -EINPROGRESS ||
- + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + return ret;
- +
- +unmap:
- + ahash_unmap(ctx->dev, edesc, req, digestsize);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_final_no_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + u8 *buf = current_buf(state);
- + int buflen = *current_buflen(state);
- + int digestsize = crypto_ahash_digestsize(ahash);
- + struct ahash_edesc *edesc;
- + int ret = -ENOMEM;
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc)
- + return ret;
- +
- + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
- + dev_err(ctx->dev, "unable to map src\n");
- + goto unmap;
- + }
- +
- + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- + dev_err(ctx->dev, "unable to map dst\n");
- + edesc->dst_dma = 0;
- + goto unmap;
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(in_fle, state->buf_dma);
- + dpaa2_fl_set_len(in_fle, buflen);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- + req_ctx->flc = &ctx->flc[DIGEST];
- + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
- + req_ctx->cbk = ahash_done;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret == -EINPROGRESS ||
- + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + return ret;
- +
- +unmap:
- + ahash_unmap(ctx->dev, edesc, req, digestsize);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_update_no_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + u8 *buf = current_buf(state);
- + int *buflen = current_buflen(state);
- + u8 *next_buf = alt_buf(state);
- + int *next_buflen = alt_buflen(state);
- + int in_len = *buflen + req->nbytes, to_hash;
- + int qm_sg_bytes, src_nents, mapped_nents;
- + struct ahash_edesc *edesc;
- + int ret = 0;
- +
- + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
- + to_hash = in_len - *next_buflen;
- +
- + if (to_hash) {
- + struct dpaa2_sg_entry *sg_table;
- +
- + src_nents = sg_nents_for_len(req->src,
- + req->nbytes - *next_buflen);
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return -ENOMEM;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + return -ENOMEM;
- + }
- +
- + edesc->src_nents = src_nents;
- + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
- + sg_table = &edesc->sgt[0];
- +
- + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
- + if (ret)
- + goto unmap_ctx;
- +
- + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
- +
- + if (*next_buflen)
- + scatterwalk_map_and_copy(next_buf, req->src,
- + to_hash - *buflen,
- + *next_buflen, 0);
- +
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
- + qm_sg_bytes, DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
- + ctx->ctx_len, DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
- + dev_err(ctx->dev, "unable to map ctx\n");
- + state->ctx_dma = 0;
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, to_hash);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
- + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
- +
- + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
- + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
- + req_ctx->cbk = ahash_done_ctx_dst;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY &&
- + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + goto unmap_ctx;
- +
- + state->update = ahash_update_ctx;
- + state->finup = ahash_finup_ctx;
- + state->final = ahash_final_ctx;
- + } else if (*next_buflen) {
- + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
- + req->nbytes, 0);
- + *buflen = *next_buflen;
- + *next_buflen = 0;
- + }
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- + *next_buflen, 1);
- +#endif
- +
- + return ret;
- +unmap_ctx:
- + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_finup_no_ctx(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + int buflen = *current_buflen(state);
- + int qm_sg_bytes, src_nents, mapped_nents;
- + int digestsize = crypto_ahash_digestsize(ahash);
- + struct ahash_edesc *edesc;
- + struct dpaa2_sg_entry *sg_table;
- + int ret;
- +
- + src_nents = sg_nents_for_len(req->src, req->nbytes);
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return -ENOMEM;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- + return -ENOMEM;
- + }
- +
- + edesc->src_nents = src_nents;
- + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
- + sg_table = &edesc->sgt[0];
- +
- + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
- + if (ret)
- + goto unmap;
- +
- + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
- +
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- +
- + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- + DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- + dev_err(ctx->dev, "unable to map dst\n");
- + edesc->dst_dma = 0;
- + ret = -ENOMEM;
- + goto unmap;
- + }
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
- + dpaa2_fl_set_len(out_fle, digestsize);
- +
- + req_ctx->flc = &ctx->flc[DIGEST];
- + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
- + req_ctx->cbk = ahash_done;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- + goto unmap;
- +
- + return ret;
- +unmap:
- + ahash_unmap(ctx->dev, edesc, req, digestsize);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_update_first(struct ahash_request *req)
- +{
- + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_request *req_ctx = &state->caam_req;
- + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
- + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- + GFP_KERNEL : GFP_ATOMIC;
- + u8 *next_buf = alt_buf(state);
- + int *next_buflen = alt_buflen(state);
- + int to_hash;
- + int src_nents, mapped_nents;
- + struct ahash_edesc *edesc;
- + int ret = 0;
- +
- + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
- + 1);
- + to_hash = req->nbytes - *next_buflen;
- +
- + if (to_hash) {
- + struct dpaa2_sg_entry *sg_table;
- +
- + src_nents = sg_nents_for_len(req->src,
- + req->nbytes - (*next_buflen));
- + if (src_nents < 0) {
- + dev_err(ctx->dev, "Invalid number of src SG.\n");
- + return src_nents;
- + }
- +
- + if (src_nents) {
- + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + if (!mapped_nents) {
- + dev_err(ctx->dev, "unable to DMA map source\n");
- + return -ENOMEM;
- + }
- + } else {
- + mapped_nents = 0;
- + }
- +
- + /* allocate space for base edesc and link tables */
- + edesc = qi_cache_zalloc(GFP_DMA | flags);
- + if (!edesc) {
- + dma_unmap_sg(ctx->dev, req->src, src_nents,
- + DMA_TO_DEVICE);
- + return -ENOMEM;
- + }
- +
- + edesc->src_nents = src_nents;
- + sg_table = &edesc->sgt[0];
- +
- + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
- + dpaa2_fl_set_final(in_fle, true);
- + dpaa2_fl_set_len(in_fle, to_hash);
- +
- + if (mapped_nents > 1) {
- + int qm_sg_bytes;
- +
- + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
- + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
- + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
- + qm_sg_bytes,
- + DMA_TO_DEVICE);
- + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
- + dev_err(ctx->dev, "unable to map S/G table\n");
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- + edesc->qm_sg_bytes = qm_sg_bytes;
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
- + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- + } else {
- + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
- + }
- +
- + if (*next_buflen)
- + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- + *next_buflen, 0);
- +
- + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
- + ctx->ctx_len, DMA_FROM_DEVICE);
- + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
- + dev_err(ctx->dev, "unable to map ctx\n");
- + state->ctx_dma = 0;
- + ret = -ENOMEM;
- + goto unmap_ctx;
- + }
- +
- + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
- + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
- +
- + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
- + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
- + req_ctx->cbk = ahash_done_ctx_dst;
- + req_ctx->ctx = &req->base;
- + req_ctx->edesc = edesc;
- +
- + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
- + if (ret != -EINPROGRESS &&
- + !(ret == -EBUSY && req->base.flags &
- + CRYPTO_TFM_REQ_MAY_BACKLOG))
- + goto unmap_ctx;
- +
- + state->update = ahash_update_ctx;
- + state->finup = ahash_finup_ctx;
- + state->final = ahash_final_ctx;
- + } else if (*next_buflen) {
- + state->update = ahash_update_no_ctx;
- + state->finup = ahash_finup_no_ctx;
- + state->final = ahash_final_no_ctx;
- + scatterwalk_map_and_copy(next_buf, req->src, 0,
- + req->nbytes, 0);
- + switch_buf(state);
- + }
- +#ifdef DEBUG
- + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
- + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
- +#endif
- +
- + return ret;
- +unmap_ctx:
- + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- + qi_cache_free(edesc);
- + return ret;
- +}
- +
- +static int ahash_finup_first(struct ahash_request *req)
- +{
- + return ahash_digest(req);
- +}
- +
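- +/*
- + * The update/finup/final pointers in caam_hash_state form a small state
- + * machine: a fresh request starts on the *_first/*_no_ctx paths and
- + * switches to the *_ctx variants once a running context lives in
- + * state->caam_ctx.
- + */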
- +static int ahash_init(struct ahash_request *req)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + state->update = ahash_update_first;
- + state->finup = ahash_finup_first;
- + state->final = ahash_final_no_ctx;
- +
- + state->ctx_dma = 0;
- + state->current_buf = 0;
- + state->buf_dma = 0;
- + state->buflen_0 = 0;
- + state->buflen_1 = 0;
- +
- + return 0;
- +}
- +
- +static int ahash_update(struct ahash_request *req)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + return state->update(req);
- +}
- +
- +static int ahash_finup(struct ahash_request *req)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + return state->finup(req);
- +}
- +
- +static int ahash_final(struct ahash_request *req)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- +
- + return state->final(req);
- +}
- +
- +static int ahash_export(struct ahash_request *req, void *out)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + struct caam_export_state *export = out;
- + int len;
- + u8 *buf;
- +
- + if (state->current_buf) {
- + buf = state->buf_1;
- + len = state->buflen_1;
- + } else {
- + buf = state->buf_0;
- + len = state->buflen_0;
- + }
- +
- + memcpy(export->buf, buf, len);
- + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
- + export->buflen = len;
- + export->update = state->update;
- + export->final = state->final;
- + export->finup = state->finup;
- +
- + return 0;
- +}
- +
- +static int ahash_import(struct ahash_request *req, const void *in)
- +{
- + struct caam_hash_state *state = ahash_request_ctx(req);
- + const struct caam_export_state *export = in;
- +
- + memset(state, 0, sizeof(*state));
- + memcpy(state->buf_0, export->buf, export->buflen);
- + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- + state->buflen_0 = export->buflen;
- + state->update = export->update;
- + state->final = export->final;
- + state->finup = export->finup;
- +
- + return 0;
- +}
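- +
- +/*
- + * export/import serialize the partial-hash state (pending buffer plus
- + * running context) so a transform can be suspended and resumed, e.g.
- + * when algif_hash forks state across an accept().
- + */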
- +
- +struct caam_hash_template {
- + char name[CRYPTO_MAX_ALG_NAME];
- + char driver_name[CRYPTO_MAX_ALG_NAME];
- + char hmac_name[CRYPTO_MAX_ALG_NAME];
- + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
- + unsigned int blocksize;
- + struct ahash_alg template_ahash;
- + u32 alg_type;
- +};
- +
- +/* ahash descriptors */
- +static struct caam_hash_template driver_hash[] = {
- + {
- + .name = "sha1",
- + .driver_name = "sha1-caam-qi2",
- + .hmac_name = "hmac(sha1)",
- + .hmac_driver_name = "hmac-sha1-caam-qi2",
- + .blocksize = SHA1_BLOCK_SIZE,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = SHA1_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_SHA1,
- + }, {
- + .name = "sha224",
- + .driver_name = "sha224-caam-qi2",
- + .hmac_name = "hmac(sha224)",
- + .hmac_driver_name = "hmac-sha224-caam-qi2",
- + .blocksize = SHA224_BLOCK_SIZE,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = SHA224_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_SHA224,
- + }, {
- + .name = "sha256",
- + .driver_name = "sha256-caam-qi2",
- + .hmac_name = "hmac(sha256)",
- + .hmac_driver_name = "hmac-sha256-caam-qi2",
- + .blocksize = SHA256_BLOCK_SIZE,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = SHA256_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_SHA256,
- + }, {
- + .name = "sha384",
- + .driver_name = "sha384-caam-qi2",
- + .hmac_name = "hmac(sha384)",
- + .hmac_driver_name = "hmac-sha384-caam-qi2",
- + .blocksize = SHA384_BLOCK_SIZE,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = SHA384_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_SHA384,
- + }, {
- + .name = "sha512",
- + .driver_name = "sha512-caam-qi2",
- + .hmac_name = "hmac(sha512)",
- + .hmac_driver_name = "hmac-sha512-caam-qi2",
- + .blocksize = SHA512_BLOCK_SIZE,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = SHA512_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_SHA512,
- + }, {
- + .name = "md5",
- + .driver_name = "md5-caam-qi2",
- + .hmac_name = "hmac(md5)",
- + .hmac_driver_name = "hmac-md5-caam-qi2",
- + .blocksize = MD5_BLOCK_WORDS * 4,
- + .template_ahash = {
- + .init = ahash_init,
- + .update = ahash_update,
- + .final = ahash_final,
- + .finup = ahash_finup,
- + .digest = ahash_digest,
- + .export = ahash_export,
- + .import = ahash_import,
- + .setkey = ahash_setkey,
- + .halg = {
- + .digestsize = MD5_DIGEST_SIZE,
- + .statesize = sizeof(struct caam_export_state),
- + },
- + },
- + .alg_type = OP_ALG_ALGSEL_MD5,
- + }
- +};
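- +
- +/*
- + * Usage sketch (not part of this driver): once registered, the above
- + * transforms are reachable through the generic crypto API, e.g.
- + *
- + * struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
- + *
- + * if (!IS_ERR(tfm)) {
- + * crypto_ahash_setkey(tfm, key, keylen);
- + * ...
- + * crypto_free_ahash(tfm);
- + * }
- + *
- + * with the "-caam-qi2" implementations selected whenever their priority
- + * wins.
- + */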
- +
- +struct caam_hash_alg {
- + struct list_head entry;
- + struct device *dev;
- + int alg_type;
- + struct ahash_alg ahash_alg;
- +};
- +
- +static int caam_hash_cra_init(struct crypto_tfm *tfm)
- +{
- + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- + struct crypto_alg *base = tfm->__crt_alg;
- + struct hash_alg_common *halg =
- + container_of(base, struct hash_alg_common, base);
- + struct ahash_alg *alg =
- + container_of(halg, struct ahash_alg, halg);
- + struct caam_hash_alg *caam_hash =
- + container_of(alg, struct caam_hash_alg, ahash_alg);
- + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- + /*
- + * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
- + * SHA-224 and SHA-384 are truncated variants, so their running
- + * (internal) digests are the full SHA-256 (32-byte) and SHA-512
- + * (64-byte) states, respectively.
- + */
- + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
- + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
- + HASH_MSG_LEN + 32,
- + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
- + HASH_MSG_LEN + 64,
- + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- + dma_addr_t dma_addr;
- + int i;
- +
- + ctx->dev = caam_hash->dev;
- +
- + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
- + DMA_BIDIRECTIONAL,
- + DMA_ATTR_SKIP_CPU_SYNC);
- + if (dma_mapping_error(ctx->dev, dma_addr)) {
- + dev_err(ctx->dev, "unable to map shared descriptors\n");
- + return -ENOMEM;
- + }
- +
- + for (i = 0; i < HASH_NUM_OP; i++)
- + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
- +
- + /* copy descriptor header template value */
- + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- +
- + ctx->ctx_len = runninglen[(ctx->adata.algtype &
- + OP_ALG_ALGSEL_SUBMASK) >>
- + OP_ALG_ALGSEL_SHIFT];
- +
- + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- + sizeof(struct caam_hash_state));
- +
- + return ahash_set_sh_desc(ahash);
- +}
- +
- +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
- +{
- + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- +
- + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
- + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
- +}
- +
- +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
- + struct caam_hash_template *template, bool keyed)
- +{
- + struct caam_hash_alg *t_alg;
- + struct ahash_alg *halg;
- + struct crypto_alg *alg;
- +
- + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- + if (!t_alg)
- + return ERR_PTR(-ENOMEM);
- +
- + t_alg->ahash_alg = template->template_ahash;
- + halg = &t_alg->ahash_alg;
- + alg = &halg->halg.base;
- +
- + if (keyed) {
- + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- + template->hmac_name);
- + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- + template->hmac_driver_name);
- + } else {
- + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- + template->name);
- + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- + template->driver_name);
- + t_alg->ahash_alg.setkey = NULL;
- + }
- + alg->cra_module = THIS_MODULE;
- + alg->cra_init = caam_hash_cra_init;
- + alg->cra_exit = caam_hash_cra_exit;
- + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
- + alg->cra_priority = CAAM_CRA_PRIORITY;
- + alg->cra_blocksize = template->blocksize;
- + alg->cra_alignmask = 0;
- + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
- + alg->cra_type = &crypto_ahash_type;
- +
- + t_alg->alg_type = template->alg_type;
- + t_alg->dev = dev;
- +
- + return t_alg;
- +}
- +
- +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
- +{
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- +
- + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
- + napi_schedule_irqoff(&ppriv->napi);
- +}
- +
- +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
- +{
- + struct device *dev = priv->dev;
- + struct dpaa2_io_notification_ctx *nctx;
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + int err, i = 0, cpu;
- +
- + for_each_online_cpu(cpu) {
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- + ppriv->priv = priv;
- + nctx = &ppriv->nctx;
- + nctx->is_cdan = 0;
- + nctx->id = ppriv->rsp_fqid;
- + nctx->desired_cpu = cpu;
- + nctx->cb = dpaa2_caam_fqdan_cb;
- +
- + /* Register notification callbacks */
- + err = dpaa2_io_service_register(NULL, nctx);
- + if (unlikely(err)) {
- + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
- + nctx->cb = NULL;
- + /*
- + * If no affine DPIO for this core, there's probably
- + * none available for next cores either. Signal we want
- + * to retry later, in case the DPIO devices weren't
- + * probed yet.
- + */
- + err = -EPROBE_DEFER;
- + goto err;
- + }
- +
- + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
- + dev);
- + if (unlikely(!ppriv->store)) {
- + dev_err(dev, "dpaa2_io_store_create() failed\n");
- + goto err;
- + }
- +
- + if (++i == priv->num_pairs)
- + break;
- + }
- +
- + return 0;
- +
- +err:
- + for_each_online_cpu(cpu) {
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- + if (!ppriv->nctx.cb)
- + break;
- + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
- + }
- +
- + for_each_online_cpu(cpu) {
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- + if (!ppriv->store)
- + break;
- + dpaa2_io_store_destroy(ppriv->store);
- + }
- +
- + return err;
- +}
- +
- +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
- +{
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + int i = 0, cpu;
- +
- + for_each_online_cpu(cpu) {
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
- + dpaa2_io_store_destroy(ppriv->store);
- +
- + if (++i == priv->num_pairs)
- + return;
- + }
- +}
- +
- +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
- +{
- + struct dpseci_rx_queue_cfg rx_queue_cfg;
- + struct device *dev = priv->dev;
- + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + int err = 0, i = 0, cpu;
- +
- + /* Configure Rx queues */
- + for_each_online_cpu(cpu) {
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- +
- + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
- + DPSECI_QUEUE_OPT_USER_CTX;
- + rx_queue_cfg.order_preservation_en = 0;
- + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
- + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
- + /*
- + * Rx priority (WQ) doesn't really matter, since we use
- + * pull mode, i.e. volatile dequeues from specific FQs
- + */
- + rx_queue_cfg.dest_cfg.priority = 0;
- + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
- +
- + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
- + &rx_queue_cfg);
- + if (err) {
- + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
- + err);
- + return err;
- + }
- +
- + if (++i == priv->num_pairs)
- + break;
- + }
- +
- + return err;
- +}
- +
- +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
- +{
- + struct device *dev = priv->dev;
- +
- + if (!priv->cscn_mem)
- + return;
- +
- + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
- + kfree(priv->cscn_mem);
- +}
- +
- +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
- +{
- + struct device *dev = priv->dev;
- + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- +
- + dpaa2_dpseci_congestion_free(priv);
- + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
- +}
- +
- +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
- + const struct dpaa2_fd *fd)
- +{
- + struct caam_request *req;
- + u32 fd_err;
- +
- + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
- + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
- + return;
- + }
- +
- + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
- + if (unlikely(fd_err))
- + dev_err(priv->dev, "FD error: %08x\n", fd_err);
- +
- + /*
- + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
- + * in FD[ERR] or FD[FRC].
- + */
- + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
- + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
- + DMA_BIDIRECTIONAL);
- + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
- +}
- +
- +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
- +{
- + int err;
- +
- + /* Retry while portal is busy */
- + do {
- + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
- + ppriv->store);
- + } while (err == -EBUSY);
- +
- + if (unlikely(err))
- + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
- +
- + return err;
- +}
- +
- +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
- +{
- + struct dpaa2_dq *dq;
- + int cleaned = 0, is_last;
- +
- + do {
- + dq = dpaa2_io_store_next(ppriv->store, &is_last);
- + if (unlikely(!dq)) {
- + if (unlikely(!is_last)) {
- + dev_dbg(ppriv->priv->dev,
- + "FQ %d returned no valid frames\n",
- + ppriv->rsp_fqid);
- + /*
- + * MUST retry until we get some sort of
- + * valid response token (be it "empty dequeue"
- + * or a valid frame).
- + */
- + continue;
- + }
- + break;
- + }
- +
- + /* Process FD */
- + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
- + cleaned++;
- + } while (!is_last);
- +
- + return cleaned;
- +}
- +
- +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
- +{
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + struct dpaa2_caam_priv *priv;
- + int err, cleaned = 0, store_cleaned;
- +
- + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
- + priv = ppriv->priv;
- +
- + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
- + return 0;
- +
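- + /*
- + * Consume stored frames until the FQ drains or the budget is nearly
- + * exhausted; stopping DPAA2_CAAM_STORE_SIZE short of budget ensures
- + * the next pull (at most a store's worth of frames) cannot overshoot
- + * it.
- + */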
- + do {
- + store_cleaned = dpaa2_caam_store_consume(ppriv);
- + cleaned += store_cleaned;
- +
- + if (store_cleaned == 0 ||
- + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
- + break;
- +
- + /* Try to dequeue some more */
- + err = dpaa2_caam_pull_fq(ppriv);
- + if (unlikely(err))
- + break;
- + } while (1);
- +
- + if (cleaned < budget) {
- + napi_complete_done(napi, cleaned);
- + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
- + if (unlikely(err))
- + dev_err(priv->dev, "Notification rearm failed: %d\n",
- + err);
- + }
- +
- + return cleaned;
- +}
- +
- +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
- + u16 token)
- +{
- + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
- + struct device *dev = priv->dev;
- + int err;
- +
- + /*
- + * Congestion group feature supported starting with DPSECI API v5.1
- + * and only when object has been created with this capability.
- + */
- + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
- + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
- + return 0;
- +
- + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
- + GFP_KERNEL | GFP_DMA);
- + if (!priv->cscn_mem)
- + return -ENOMEM;
- +
- + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
- + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
- + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
- + if (dma_mapping_error(dev, priv->cscn_dma)) {
- + dev_err(dev, "Error mapping CSCN memory area\n");
- + err = -ENOMEM;
- + goto err_dma_map;
- + }
- +
- + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
- + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
- + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
- + cong_notif_cfg.message_ctx = (u64)priv;
- + cong_notif_cfg.message_iova = priv->cscn_dma;
- + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
- + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
- + DPSECI_CGN_MODE_COHERENT_WRITE;
- +
- + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
- + &cong_notif_cfg);
- + if (err) {
- + dev_err(dev, "dpseci_set_congestion_notification failed\n");
- + goto err_set_cong;
- + }
- +
- + return 0;
- +
- +err_set_cong:
- + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
- +err_dma_map:
- + kfree(priv->cscn_mem);
- +
- + return err;
- +}
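
Note the allocation pattern used for the CSCN area above: the buffer is over-sized by DPAA2_CSCN_ALIGN so that an aligned window of DPAA2_CSCN_SIZE bytes is guaranteed to exist inside it, and PTR_ALIGN then rounds the raw pointer up to the next boundary. A userspace rendering of the same idiom (sizes here are illustrative stand-ins, not the driver's actual values):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Equivalent of the kernel's PTR_ALIGN(): round p up to a multiple
     * of a, where a is a power of two. */
    #define PTR_ALIGN_UP(p, a) \
            ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

    int main(void)
    {
            enum { SIZE = 64, ALIGN = 16 }; /* stand-ins for DPAA2_CSCN_* */
            void *raw = malloc(SIZE + ALIGN);
            void *aligned;

            if (!raw)
                    return 1;
            aligned = PTR_ALIGN_UP(raw, ALIGN);

            /* [aligned, aligned + SIZE) lies inside the allocation and
             * aligned is a multiple of ALIGN. */
            printf("raw=%p aligned=%p\n", raw, aligned);
            free(raw);
            return 0;
    }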
- +
- +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
- +{
- + struct device *dev = &ls_dev->dev;
- + struct dpaa2_caam_priv *priv;
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + int err, cpu;
- + u8 i;
- +
- + priv = dev_get_drvdata(dev);
- +
- + priv->dev = dev;
- + priv->dpsec_id = ls_dev->obj_desc.id;
- +
- + /* Get a handle for the DPSECI this interface is associated with */
- + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
- + if (err) {
- + dev_err(dev, "dpsec_open() failed: %d\n", err);
- + goto err_open;
- + }
- +
- + dev_info(dev, "Opened dpseci object successfully\n");
- +
- + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
- + &priv->minor_ver);
- + if (err) {
- + dev_err(dev, "dpseci_get_api_version() failed\n");
- + goto err_get_vers;
- + }
- +
- + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
- + &priv->dpseci_attr);
- + if (err) {
- + dev_err(dev, "dpseci_get_attributes() failed\n");
- + goto err_get_vers;
- + }
- +
- + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
- + &priv->sec_attr);
- + if (err) {
- + dev_err(dev, "dpseci_get_sec_attr() failed\n");
- + goto err_get_vers;
- + }
- +
- + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
- + if (err) {
- + dev_err(dev, "setup_congestion() failed\n");
- + goto err_get_vers;
- + }
- +
- + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
- + priv->dpseci_attr.num_tx_queues);
- + if (priv->num_pairs > num_online_cpus()) {
- + dev_warn(dev, "%d queues won't be used\n",
- + priv->num_pairs - num_online_cpus());
- + priv->num_pairs = num_online_cpus();
- + }
- +
- + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
- + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
- + &priv->rx_queue_attr[i]);
- + if (err) {
- + dev_err(dev, "dpseci_get_rx_queue() failed\n");
- + goto err_get_rx_queue;
- + }
- + }
- +
- + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
- + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
- + &priv->tx_queue_attr[i]);
- + if (err) {
- + dev_err(dev, "dpseci_get_tx_queue() failed\n");
- + goto err_get_rx_queue;
- + }
- + }
- +
- + i = 0;
- + for_each_online_cpu(cpu) {
- + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
- + priv->rx_queue_attr[i].fqid,
- + priv->tx_queue_attr[i].fqid);
- +
- + ppriv = per_cpu_ptr(priv->ppriv, cpu);
- + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
- + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
- + ppriv->prio = i;
- +
- + ppriv->net_dev.dev = *dev;
- + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
- + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
- + DPAA2_CAAM_NAPI_WEIGHT);
- + if (++i == priv->num_pairs)
- + break;
- + }
- +
- + return 0;
- +
- +err_get_rx_queue:
- + dpaa2_dpseci_congestion_free(priv);
- +err_get_vers:
- + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
- +err_open:
- + return err;
- +}
- +
- +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
- +{
- + struct device *dev = priv->dev;
- + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + int err, i;
- +
- + for (i = 0; i < priv->num_pairs; i++) {
- + ppriv = per_cpu_ptr(priv->ppriv, i);
- + napi_enable(&ppriv->napi);
- + }
- +
- + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
- + if (err) {
- + dev_err(dev, "dpseci_enable() failed\n");
- + return err;
- + }
- +
- + dev_info(dev, "DPSECI version %d.%d\n",
- + priv->major_ver,
- + priv->minor_ver);
- +
- + return 0;
- +}
- +
- +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
- +{
- + struct device *dev = priv->dev;
- + struct dpaa2_caam_priv_per_cpu *ppriv;
- + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- + int i, err = 0, enabled;
- +
- + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
- + if (err) {
- + dev_err(dev, "dpseci_disable() failed\n");
- + return err;
- + }
- +
- + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
- + if (err) {
- + dev_err(dev, "dpseci_is_enabled() failed\n");
- + return err;
- + }
- +
- + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
- +
- + for (i = 0; i < priv->num_pairs; i++) {
- + ppriv = per_cpu_ptr(priv->ppriv, i);
- + napi_disable(&ppriv->napi);
- + netif_napi_del(&ppriv->napi);
- + }
- +
- + return 0;
- +}
- +
- +static struct list_head hash_list;
- +
- +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
- +{
- + struct device *dev;
- + struct dpaa2_caam_priv *priv;
- + int i, err = 0;
- + bool registered = false;
- +
- + /*
- + * There is no way to get CAAM endianness - there is no direct register
- + * space access and MC f/w does not provide this attribute.
- + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
- + * property.
- + */
- + caam_little_end = true;
- +
- + caam_imx = false;
- +
- + dev = &dpseci_dev->dev;
- +
- + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- + if (!priv)
- + return -ENOMEM;
- +
- + dev_set_drvdata(dev, priv);
- +
- + priv->domain = iommu_get_domain_for_dev(dev);
- +
- + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
- + 0, SLAB_CACHE_DMA, NULL);
- + if (!qi_cache) {
- + dev_err(dev, "Can't allocate SEC cache\n");
- + err = -ENOMEM;
- + goto err_qicache;
- + }
- +
- + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
- + if (err) {
- + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
- + goto err_dma_mask;
- + }
- +
- + /* Obtain a MC portal */
- + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
- + if (err) {
- + if (err == -ENXIO)
- + err = -EPROBE_DEFER;
- + else
- + dev_err(dev, "MC portal allocation failed\n");
- +
- + goto err_dma_mask;
- + }
- +
- + priv->ppriv = alloc_percpu(*priv->ppriv);
- + if (!priv->ppriv) {
- + dev_err(dev, "alloc_percpu() failed\n");
- + goto err_alloc_ppriv;
- + }
- +
- + /* DPSECI initialization */
- + err = dpaa2_dpseci_setup(dpseci_dev);
- + if (err < 0) {
- + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
- + goto err_dpseci_setup;
- + }
- +
- + /* DPIO */
- + err = dpaa2_dpseci_dpio_setup(priv);
- + if (err) {
- + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
- + goto err_dpio_setup;
- + }
- +
- + /* DPSECI binding to DPIO */
- + err = dpaa2_dpseci_bind(priv);
- + if (err) {
- + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
- + goto err_bind;
- + }
- +
- + /* DPSECI enable */
- + err = dpaa2_dpseci_enable(priv);
- + if (err) {
- + dev_err(dev, "dpaa2_dpseci_enable() failed");
- + goto err_bind;
- + }
- +
- + /* register crypto algorithms the device supports */
- + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- + struct caam_skcipher_alg *t_alg = driver_algs + i;
- + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
- +
- + /* Skip DES algorithms if not supported by device */
- + if (!priv->sec_attr.des_acc_num &&
- + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
- + (alg_sel == OP_ALG_ALGSEL_DES)))
- + continue;
- +
- + /* Skip AES algorithms if not supported by device */
- + if (!priv->sec_attr.aes_acc_num &&
- + (alg_sel == OP_ALG_ALGSEL_AES))
- + continue;
- +
- + t_alg->caam.dev = dev;
- + caam_skcipher_alg_init(t_alg);
- +
- + err = crypto_register_skcipher(&t_alg->skcipher);
- + if (err) {
- + dev_warn(dev, "%s alg registration failed: %d\n",
- + t_alg->skcipher.base.cra_driver_name, err);
- + continue;
- + }
- +
- + t_alg->registered = true;
- + registered = true;
- + }
- +
- + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
- + struct caam_aead_alg *t_alg = driver_aeads + i;
- + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
- + OP_ALG_ALGSEL_MASK;
- + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
- + OP_ALG_ALGSEL_MASK;
- +
- + /* Skip DES algorithms if not supported by device */
- + if (!priv->sec_attr.des_acc_num &&
- + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
- + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
- + continue;
- +
- + /* Skip AES algorithms if not supported by device */
- + if (!priv->sec_attr.aes_acc_num &&
- + (c1_alg_sel == OP_ALG_ALGSEL_AES))
- + continue;
- +
- + /*
- + * Skip algorithms requiring message digests
- + * if MD not supported by device.
- + */
- + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
- + continue;
- +
- + t_alg->caam.dev = dev;
- + caam_aead_alg_init(t_alg);
- +
- + err = crypto_register_aead(&t_alg->aead);
- + if (err) {
- + dev_warn(dev, "%s alg registration failed: %d\n",
- + t_alg->aead.base.cra_driver_name, err);
- + continue;
- + }
- +
- + t_alg->registered = true;
- + registered = true;
- + }
- + if (registered)
- + dev_info(dev, "algorithms registered in /proc/crypto\n");
- +
- + /* register hash algorithms the device supports */
- + INIT_LIST_HEAD(&hash_list);
- +
- + /*
- + * Skip registration of any hashing algorithms if MD block
- + * is not present.
- + */
- + if (!priv->sec_attr.md_acc_num)
- + return 0;
- +
- + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
- + struct caam_hash_alg *t_alg;
- + struct caam_hash_template *alg = driver_hash + i;
- +
- + /* register hmac version */
- + t_alg = caam_hash_alloc(dev, alg, true);
- + if (IS_ERR(t_alg)) {
- + err = PTR_ERR(t_alg);
- + dev_warn(dev, "%s hash alg allocation failed: %d\n",
- + alg->driver_name, err);
- + continue;
- + }
- +
- + err = crypto_register_ahash(&t_alg->ahash_alg);
- + if (err) {
- + dev_warn(dev, "%s alg registration failed: %d\n",
- + t_alg->ahash_alg.halg.base.cra_driver_name,
- + err);
- + kfree(t_alg);
- + } else {
- + list_add_tail(&t_alg->entry, &hash_list);
- + }
- +
- + /* register unkeyed version */
- + t_alg = caam_hash_alloc(dev, alg, false);
- + if (IS_ERR(t_alg)) {
- + err = PTR_ERR(t_alg);
- + dev_warn(dev, "%s alg allocation failed: %d\n",
- + alg->driver_name, err);
- + continue;
- + }
- +
- + err = crypto_register_ahash(&t_alg->ahash_alg);
- + if (err) {
- + dev_warn(dev, "%s alg registration failed: %d\n",
- + t_alg->ahash_alg.halg.base.cra_driver_name,
- + err);
- + kfree(t_alg);
- + } else {
- + list_add_tail(&t_alg->entry, &hash_list);
- + }
- + }
- + if (!list_empty(&hash_list))
- + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
- +
- + return err;
- +
- +err_bind:
- + dpaa2_dpseci_dpio_free(priv);
- +err_dpio_setup:
- + dpaa2_dpseci_free(priv);
- +err_dpseci_setup:
- + free_percpu(priv->ppriv);
- +err_alloc_ppriv:
- + fsl_mc_portal_free(priv->mc_io);
- +err_dma_mask:
- + kmem_cache_destroy(qi_cache);
- +err_qicache:
- + dev_set_drvdata(dev, NULL);
- +
- + return err;
- +}
- +
- +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
- +{
- + struct device *dev;
- + struct dpaa2_caam_priv *priv;
- + int i;
- +
- + dev = &ls_dev->dev;
- + priv = dev_get_drvdata(dev);
- +
- + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
- + struct caam_aead_alg *t_alg = driver_aeads + i;
- +
- + if (t_alg->registered)
- + crypto_unregister_aead(&t_alg->aead);
- + }
- +
- + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- + struct caam_skcipher_alg *t_alg = driver_algs + i;
- +
- + if (t_alg->registered)
- + crypto_unregister_skcipher(&t_alg->skcipher);
- + }
- +
- + if (hash_list.next) {
- + struct caam_hash_alg *t_hash_alg, *p;
- +
- + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
- + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
- + list_del(&t_hash_alg->entry);
- + kfree(t_hash_alg);
- + }
- + }
- +
- + dpaa2_dpseci_disable(priv);
- + dpaa2_dpseci_dpio_free(priv);
- + dpaa2_dpseci_free(priv);
- + free_percpu(priv->ppriv);
- + fsl_mc_portal_free(priv->mc_io);
- + dev_set_drvdata(dev, NULL);
- + kmem_cache_destroy(qi_cache);
- +
- + return 0;
- +}
- +
- +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
- +{
- + struct dpaa2_fd fd;
- + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
- + int err = 0, i, id;
- +
- + if (IS_ERR(req))
- + return PTR_ERR(req);
- +
- + if (priv->cscn_mem) {
- + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
- + DPAA2_CSCN_SIZE,
- + DMA_FROM_DEVICE);
- + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
- + dev_dbg_ratelimited(dev, "Dropping request\n");
- + return -EBUSY;
- + }
- + }
- +
- + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
- +
- + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
- + DMA_BIDIRECTIONAL);
- + if (dma_mapping_error(dev, req->fd_flt_dma)) {
- + dev_err(dev, "DMA mapping error for QI enqueue request\n");
- + return -EIO;
- + }
- +
- + memset(&fd, 0, sizeof(fd));
- + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
- + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
- + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
- + dpaa2_fd_set_flc(&fd, req->flc_dma);
- +
- + /*
- + * There is no guarantee that preemption is disabled here,
- + * thus take action.
- + */
- + preempt_disable();
- + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
- + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
- + err = dpaa2_io_service_enqueue_fq(NULL,
- + priv->tx_queue_attr[id].fqid,
- + &fd);
- + if (err != -EBUSY)
- + break;
- + }
- + preempt_enable();
- +
- + if (unlikely(err < 0)) {
- + dev_err(dev, "Error enqueuing frame: %d\n", err);
- + goto err_out;
- + }
- +
- + return -EINPROGRESS;
- +
- +err_out:
- + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
- + DMA_BIDIRECTIONAL);
- + return -EIO;
- +}
- +EXPORT_SYMBOL(dpaa2_caam_enqueue);
- +
- +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
- + {
- + .vendor = FSL_MC_VENDOR_FREESCALE,
- + .obj_type = "dpseci",
- + },
- + { .vendor = 0x0 }
- +};
- +
- +static struct fsl_mc_driver dpaa2_caam_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME,
- + .owner = THIS_MODULE,
- + },
- + .probe = dpaa2_caam_probe,
- + .remove = dpaa2_caam_remove,
- + .match_id_table = dpaa2_caam_match_id_table
- +};
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +MODULE_AUTHOR("Freescale Semiconductor, Inc");
- +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
- +
- +module_fsl_mc_driver(dpaa2_caam_driver);
- --- /dev/null
- +++ b/drivers/crypto/caam/caamalg_qi2.h
- @@ -0,0 +1,274 @@
- +/*
- + * Copyright 2015-2016 Freescale Semiconductor Inc.
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef _CAAMALG_QI2_H_
- +#define _CAAMALG_QI2_H_
- +
- +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
- +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
- +#include <linux/threads.h>
- +#include "dpseci.h"
- +#include "desc_constr.h"
- +
- +#define DPAA2_CAAM_STORE_SIZE 16
- +/* NAPI weight *must* be a multiple of the store size. */
- +#define DPAA2_CAAM_NAPI_WEIGHT 64
- +
- +/* The congestion entrance threshold was chosen so that on LS2088
- + * we support the maximum throughput for the available memory
- + */
- +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
- +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
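
Written out, the hysteresis window these two values define is: entry = 128 x 2^20 = 134,217,728 bytes, exit = entry x 9 / 10 = 120,795,955 bytes, so the congestion state is only left once roughly 13 MiB of backlog has drained.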
- +
- +/**
- + * dpaa2_caam_priv - driver private data
- + * @dpsec_id: DPSECI object unique ID
- + * @major_ver: DPSECI major version
- + * @minor_ver: DPSECI minor version
- + * @dpseci_attr: DPSECI attributes
- + * @sec_attr: SEC engine attributes
- + * @rx_queue_attr: array of Rx queue attributes
- + * @tx_queue_attr: array of Tx queue attributes
- + * @cscn_mem: pointer to memory region containing the
- + * dpaa2_cscn struct; its size is larger than
- + * sizeof(struct dpaa2_cscn) to accommodate alignment
- + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
- + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
- + * @cscn_dma: dma address used by the QMAN to write CSCN messages
- + * @dev: device associated with the DPSECI object
- + * @mc_io: pointer to MC portal's I/O object
- + * @domain: IOMMU domain
- + * @ppriv: per CPU pointers to private data
- + */
- +struct dpaa2_caam_priv {
- + int dpsec_id;
- +
- + u16 major_ver;
- + u16 minor_ver;
- +
- + struct dpseci_attr dpseci_attr;
- + struct dpseci_sec_attr sec_attr;
- + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
- + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
- + int num_pairs;
- +
- + /* congestion */
- + void *cscn_mem;
- + void *cscn_mem_aligned;
- + dma_addr_t cscn_dma;
- +
- + struct device *dev;
- + struct fsl_mc_io *mc_io;
- + struct iommu_domain *domain;
- +
- + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
- +};
- +
- +/**
- + * dpaa2_caam_priv_per_cpu - per CPU private data
- + * @napi: napi structure
- + * @net_dev: netdev used by napi
- + * @req_fqid: (virtual) request (Tx / enqueue) FQID
- + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
- + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
- + * @nctx: notification context of response FQ
- + * @store: where dequeued frames are stored
- + * @priv: backpointer to dpaa2_caam_priv
- + */
- +struct dpaa2_caam_priv_per_cpu {
- + struct napi_struct napi;
- + struct net_device net_dev;
- + int req_fqid;
- + int rsp_fqid;
- + int prio;
- + struct dpaa2_io_notification_ctx nctx;
- + struct dpaa2_io_store *store;
- + struct dpaa2_caam_priv *priv;
- +};
- +
- +/*
- + * The CAAM QI hardware constructs a job descriptor which points
- + * to shared descriptor (as pointed by context_a of FQ to CAAM).
- + * When the job descriptor is executed by deco, the whole job
- + * descriptor together with shared descriptor gets loaded in
- + * deco buffer which is 64 words long (each 32-bit).
- + *
- + * The job descriptor constructed by QI hardware has layout:
- + *
- + * HEADER (1 word)
- + * Shdesc ptr (1 or 2 words)
- + * SEQ_OUT_PTR (1 word)
- + * Out ptr (1 or 2 words)
- + * Out length (1 word)
- + * SEQ_IN_PTR (1 word)
- + * In ptr (1 or 2 words)
- + * In length (1 word)
- + *
- + * The shdesc ptr is used to fetch shared descriptor contents
- + * into deco buffer.
- + *
- + * Apart from shdesc contents, the total number of words that
- + * get loaded in deco buffer are '8' or '11'. The remaining words
- + * in deco buffer can be used for storing shared descriptor.
- + */
- +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
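
As a worked instance of this bound, assuming the usual desc_constr.h definitions (CAAM_CMD_SZ = 4 bytes per word, a 64-word deco buffer so CAAM_DESC_BYTES_MAX = 256, and 64-bit pointers giving the 11-word case above, i.e. DESC_JOB_IO_LEN = 44 bytes): MAX_SDLEN = (256 - 44) / 4 = 53 words remain for the shared descriptor.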
- +
- +/* Length of a single buffer in the QI driver memory cache */
- +#define CAAM_QI_MEMCACHE_SIZE 512
- +
- +/*
- + * aead_edesc - s/w-extended aead descriptor
- + * @src_nents: number of segments in input scatterlist
- + * @dst_nents: number of segments in output scatterlist
- + * @iv_dma: dma address of iv for checking continuity and link table
- + * @qm_sg_bytes: length of dma mapped h/w link table
- + * @qm_sg_dma: bus physical mapped address of h/w link table
- + * @assoclen: associated data length, in CAAM endianness
- + * @assoclen_dma: bus physical mapped address of req->assoclen
- + * @sgt: the h/w link table, followed by IV
- + */
- +struct aead_edesc {
- + int src_nents;
- + int dst_nents;
- + dma_addr_t iv_dma;
- + int qm_sg_bytes;
- + dma_addr_t qm_sg_dma;
- + unsigned int assoclen;
- + dma_addr_t assoclen_dma;
- + struct dpaa2_sg_entry sgt[0];
- +};
- +
- +/*
- + * tls_edesc - s/w-extended tls descriptor
- + * @src_nents: number of segments in input scatterlist
- + * @dst_nents: number of segments in output scatterlist
- + * @iv_dma: dma address of iv for checking continuity and link table
- + * @qm_sg_bytes: length of dma mapped h/w link table
- + * @qm_sg_dma: bus physical mapped address of h/w link table
- + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
- + * @dst: pointer to output scatterlist, useful for unmapping
- + * @sgt: the h/w link table, followed by IV
- + */
- +struct tls_edesc {
- + int src_nents;
- + int dst_nents;
- + dma_addr_t iv_dma;
- + int qm_sg_bytes;
- + dma_addr_t qm_sg_dma;
- + struct scatterlist tmp[2];
- + struct scatterlist *dst;
- + struct dpaa2_sg_entry sgt[0];
- +};
- +
- +/*
- + * skcipher_edesc - s/w-extended skcipher descriptor
- + * @src_nents: number of segments in input scatterlist
- + * @dst_nents: number of segments in output scatterlist
- + * @iv_dma: dma address of iv for checking continuity and link table
- + * @qm_sg_bytes: length of dma mapped qm_sg space
- + * @qm_sg_dma: I/O virtual address of h/w link table
- + * @sgt: the h/w link table, followed by IV
- + */
- +struct skcipher_edesc {
- + int src_nents;
- + int dst_nents;
- + dma_addr_t iv_dma;
- + int qm_sg_bytes;
- + dma_addr_t qm_sg_dma;
- + struct dpaa2_sg_entry sgt[0];
- +};
- +
- +/*
- + * ahash_edesc - s/w-extended ahash descriptor
- + * @dst_dma: I/O virtual address of req->result
- + * @qm_sg_dma: I/O virtual address of h/w link table
- + * @src_nents: number of segments in input scatterlist
- + * @qm_sg_bytes: length of dma mapped qm_sg space
- + * @sgt: pointer to h/w link table
- + */
- +struct ahash_edesc {
- + dma_addr_t dst_dma;
- + dma_addr_t qm_sg_dma;
- + int src_nents;
- + int qm_sg_bytes;
- + struct dpaa2_sg_entry sgt[0];
- +};
- +
- +/**
- + * caam_flc - Flow Context (FLC)
- + * @flc: Flow Context options
- + * @sh_desc: Shared Descriptor
- + */
- +struct caam_flc {
- + u32 flc[16];
- + u32 sh_desc[MAX_SDLEN];
- +} ____cacheline_aligned;
- +
- +enum optype {
- + ENCRYPT = 0,
- + DECRYPT,
- + NUM_OP
- +};
- +
- +/**
- + * caam_request - the request structure the driver application should fill in
- + * when submitting a job to the driver.
- + * @fd_flt: Frame list table defining input and output
- + * fd_flt[0] - FLE pointing to output buffer
- + * fd_flt[1] - FLE pointing to input buffer
- + * @fd_flt_dma: DMA address for the frame list table
- + * @flc: Flow Context
- + * @flc_dma: I/O virtual address of Flow Context
- + * @cbk: Callback function to invoke when job is completed
- + * @ctx: arbitrary context attached to the request by the application
- + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
- + */
- +struct caam_request {
- + struct dpaa2_fl_entry fd_flt[2];
- + dma_addr_t fd_flt_dma;
- + struct caam_flc *flc;
- + dma_addr_t flc_dma;
- + void (*cbk)(void *ctx, u32 err);
- + void *ctx;
- + void *edesc;
- +};
- +
- +/**
- + * dpaa2_caam_enqueue() - enqueue a crypto request
- + * @dev: device associated with the DPSECI object
- + * @req: pointer to caam_request
- + */
- +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
- +
- +#endif /* _CAAMALG_QI2_H_ */
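
For orientation, a minimal sketch of how a caller drives this interface. Only struct caam_request, struct caam_flc, the dpaa2_fl_* helpers and dpaa2_caam_enqueue() come from the patch; struct my_ctx, my_done() and my_submit() are hypothetical:

    struct my_ctx {                 /* hypothetical per-request state */
            struct caam_request req;
            struct completion done;
            u32 status;
            struct caam_flc *flc;   /* pre-built flow context */
            dma_addr_t flc_dma, src_dma, dst_dma;
            u32 src_len, dst_len;
    };

    /* Completion callback: err is FD[FRC], as delivered by
     * dpaa2_caam_process_fd() on the response path. */
    static void my_done(void *ctx, u32 err)
    {
            struct my_ctx *mctx = ctx;

            mctx->status = err;
            complete(&mctx->done);
    }

    static int my_submit(struct device *dev, struct my_ctx *mctx)
    {
            struct caam_request *req = &mctx->req;

            /* fd_flt[0] describes the output buffer, fd_flt[1] the input */
            dpaa2_fl_set_addr(&req->fd_flt[0], mctx->dst_dma);
            dpaa2_fl_set_len(&req->fd_flt[0], mctx->dst_len);
            dpaa2_fl_set_addr(&req->fd_flt[1], mctx->src_dma);
            dpaa2_fl_set_len(&req->fd_flt[1], mctx->src_len);

            req->flc = mctx->flc;
            req->flc_dma = mctx->flc_dma;
            req->cbk = my_done;
            req->ctx = mctx;

            return dpaa2_caam_enqueue(dev, req); /* -EINPROGRESS on success */
    }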
- --- a/drivers/crypto/caam/caamhash.c
- +++ b/drivers/crypto/caam/caamhash.c
- @@ -62,6 +62,7 @@
- #include "error.h"
- #include "sg_sw_sec4.h"
- #include "key_gen.h"
- +#include "caamhash_desc.h"
-
- #define CAAM_CRA_PRIORITY 3000
-
- @@ -71,14 +72,6 @@
- #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
- #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-
- -/* length of descriptors text */
- -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
- -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
- -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
- -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
- -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- -
- #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
- CAAM_MAX_HASH_KEY_SIZE)
- #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
- @@ -107,6 +100,7 @@ struct caam_hash_ctx {
- dma_addr_t sh_desc_update_first_dma;
- dma_addr_t sh_desc_fin_dma;
- dma_addr_t sh_desc_digest_dma;
- + enum dma_data_direction dir;
- struct device *jrdev;
- u8 key[CAAM_MAX_HASH_KEY_SIZE];
- int ctx_len;
- @@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str
- }
-
- /* Map state->caam_ctx, and add it to link table */
- -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
- +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
- struct caam_hash_state *state, int ctx_len,
- struct sec4_sg_entry *sec4_sg, u32 flag)
- {
- @@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
- return 0;
- }
-
- -/*
- - * For ahash update, final and finup (import_ctx = true)
- - * import context, read and write to seqout
- - * For ahash firsts and digest (import_ctx = false)
- - * read and write to seqout
- - */
- -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- - struct caam_hash_ctx *ctx, bool import_ctx)
- -{
- - u32 op = ctx->adata.algtype;
- - u32 *skip_key_load;
- -
- - init_sh_desc(desc, HDR_SHARE_SERIAL);
- -
- - /* Append key if it has been set; ahash update excluded */
- - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- - /* Skip key loading if already shared */
- - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- - JUMP_COND_SHRD);
- -
- - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- - ctx->adata.keylen, CLASS_2 |
- - KEY_DEST_MDHA_SPLIT | KEY_ENC);
- -
- - set_jump_tgt_here(desc, skip_key_load);
- -
- - op |= OP_ALG_AAI_HMAC_PRECOMP;
- - }
- -
- - /* If needed, import context from software */
- - if (import_ctx)
- - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- - LDST_SRCDST_BYTE_CONTEXT);
- -
- - /* Class 2 operation */
- - append_operation(desc, op | state | OP_ALG_ENCRYPT);
- -
- - /*
- - * Load from buf and/or src and write to req->result or state->context
- - * Calculate remaining bytes to read
- - */
- - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- - /* Read remaining bytes */
- - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- - FIFOLD_TYPE_MSG | KEY_VLF);
- - /* Store class2 context bytes */
- - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- - LDST_SRCDST_BYTE_CONTEXT);
- -}
- -
- static int ahash_set_sh_desc(struct crypto_ahash *ahash)
- {
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int digestsize = crypto_ahash_digestsize(ahash);
- struct device *jrdev = ctx->jrdev;
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 *desc;
-
- + ctx->adata.key_virt = ctx->key;
- +
- /* ahash_update shared descriptor */
- desc = ctx->sh_desc_update;
- - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
- + ctx->ctx_len, true, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update shdesc@"__stringify(__LINE__)": ",
- @@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_update_first shared descriptor */
- desc = ctx->sh_desc_update_first;
- - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
- + ctx->ctx_len, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update first shdesc@"__stringify(__LINE__)": ",
- @@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_final shared descriptor */
- desc = ctx->sh_desc_fin;
- - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
- + ctx->ctx_len, true, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- @@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_digest shared descriptor */
- desc = ctx->sh_desc_digest;
- - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
- + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
- + ctx->ctx_len, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- - desc_bytes(desc), DMA_TO_DEVICE);
- + desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash digest shdesc@"__stringify(__LINE__)": ",
- @@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
- int digestsize = crypto_ahash_digestsize(ahash);
- + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
- int ret;
- u8 *hashed_key = NULL;
-
- @@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah
- key = hashed_key;
- }
-
- - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- - CAAM_MAX_HASH_KEY_SIZE);
- - if (ret)
- - goto bad_free_key;
- + /*
- + * If DKP is supported, use it in the shared descriptor to generate
- + * the split key.
- + */
- + if (ctrlpriv->era >= 6) {
- + ctx->adata.key_inline = true;
- + ctx->adata.keylen = keylen;
- + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
- + OP_ALG_ALGSEL_MASK);
-
- -#ifdef DEBUG
- - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- - ctx->adata.keylen_pad, 1);
- -#endif
- + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
- + goto bad_free_key;
- +
- + memcpy(ctx->key, key, keylen);
- + } else {
- + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
- + keylen, CAAM_MAX_HASH_KEY_SIZE);
- + if (ret)
- + goto bad_free_key;
- + }
-
- kfree(hashed_key);
- return ahash_set_sh_desc(ahash);
- @@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash
- edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
-
- - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
- + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_BIDIRECTIONAL);
- if (ret)
- goto unmap_ctx;
- @@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_
- desc = edesc->hw_desc;
-
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- - edesc->src_nents = 0;
-
- - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
- + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
- if (ret)
- goto unmap_ctx;
- @@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_
-
- edesc->src_nents = src_nents;
-
- - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
- + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
- if (ret)
- goto unmap_ctx;
- @@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha
- dev_err(jrdev, "unable to map dst\n");
- goto unmap;
- }
- - edesc->src_nents = 0;
-
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- @@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah
-
- edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- - edesc->dst_dma = 0;
-
- ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
- if (ret)
- @@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha
- }
-
- edesc->src_nents = src_nents;
- - edesc->dst_dma = 0;
-
- ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
- to_hash);
- @@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry
- HASH_MSG_LEN + 64,
- HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- dma_addr_t dma_addr;
- + struct caam_drv_private *priv;
-
- /*
- * Get a Job ring from Job Ring driver to ensure in-order
- @@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry
- return PTR_ERR(ctx->jrdev);
- }
-
- + priv = dev_get_drvdata(ctx->jrdev->parent);
- + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
- +
- dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
- - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(ctx->jrdev, dma_addr)) {
- dev_err(ctx->jrdev, "unable to map shared descriptors\n");
- caam_jr_free(ctx->jrdev);
- @@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr
- dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
- - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- caam_jr_free(ctx->jrdev);
- }
-
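The ctx->dir plumbing added above follows from DKP: on Era >= 6 parts the Derived Key Protocol writes the generated split key back into the shared descriptor in place, so the descriptor memory must be mapped DMA_BIDIRECTIONAL; on older parts the descriptors stay device-read-only and DMA_TO_DEVICE remains sufficient.
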
- --- /dev/null
- +++ b/drivers/crypto/caam/caamhash_desc.c
- @@ -0,0 +1,108 @@
- +/*
- + * Shared descriptors for ahash algorithms
- + *
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include "compat.h"
- +#include "desc_constr.h"
- +#include "caamhash_desc.h"
- +
- +/**
- + * cnstr_shdsc_ahash - ahash shared descriptor
- + * @desc: pointer to buffer used for descriptor construction
- + * @adata: pointer to authentication transform definitions.
- + * A split key is required for SEC Era < 6; the size of the split key
- + * is specified in this case.
- + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
- + * SHA256, SHA384, SHA512}.
- + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
- + * @digestsize: algorithm's digest size
- + * @ctx_len: size of Context Register
- + * @import_ctx: true if previous Context Register needs to be restored
- + * must be true for ahash update and final
- + * must be false for ahash first and digest
- + * @era: SEC Era
- + */
- +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
- + int digestsize, int ctx_len, bool import_ctx, int era)
- +{
- + u32 op = adata->algtype;
- +
- + init_sh_desc(desc, HDR_SHARE_SERIAL);
- +
- + /* Append key if it has been set; ahash update excluded */
- + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
- + u32 *skip_key_load;
- +
- + /* Skip key loading if already shared */
- + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- + JUMP_COND_SHRD);
- +
- + if (era < 6)
- + append_key_as_imm(desc, adata->key_virt,
- + adata->keylen_pad,
- + adata->keylen, CLASS_2 |
- + KEY_DEST_MDHA_SPLIT | KEY_ENC);
- + else
- + append_proto_dkp(desc, adata);
- +
- + set_jump_tgt_here(desc, skip_key_load);
- +
- + op |= OP_ALG_AAI_HMAC_PRECOMP;
- + }
- +
- + /* If needed, import context from software */
- + if (import_ctx)
- + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
- + LDST_SRCDST_BYTE_CONTEXT);
- +
- + /* Class 2 operation */
- + append_operation(desc, op | state | OP_ALG_ENCRYPT);
- +
- + /*
- + * Load from buf and/or src and write to req->result or state->context
- + * Calculate remaining bytes to read
- + */
- + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- + /* Read remaining bytes */
- + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- + FIFOLD_TYPE_MSG | KEY_VLF);
- + /* Store class2 context bytes */
- + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- + LDST_SRCDST_BYTE_CONTEXT);
- +}
- +EXPORT_SYMBOL(cnstr_shdsc_ahash);
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
- +MODULE_AUTHOR("NXP Semiconductors");
- --- /dev/null
- +++ b/drivers/crypto/caam/caamhash_desc.h
- @@ -0,0 +1,49 @@
- +/*
- + * Shared descriptors for ahash algorithms
- + *
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef _CAAMHASH_DESC_H_
- +#define _CAAMHASH_DESC_H_
- +
- +/* length of descriptors text */
- +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
- +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
- +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
- +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- +
- +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
- + int digestsize, int ctx_len, bool import_ctx, int era);
- +
- +#endif /* _CAAMHASH_DESC_H_ */
- --- a/drivers/crypto/caam/compat.h
- +++ b/drivers/crypto/caam/compat.h
- @@ -17,6 +17,7 @@
- #include <linux/of_platform.h>
- #include <linux/dma-mapping.h>
- #include <linux/io.h>
- +#include <linux/iommu.h>
- #include <linux/spinlock.h>
- #include <linux/rtnetlink.h>
- #include <linux/in.h>
- @@ -38,6 +39,7 @@
- #include <crypto/authenc.h>
- #include <crypto/akcipher.h>
- #include <crypto/scatterwalk.h>
- +#include <crypto/skcipher.h>
- #include <crypto/internal/skcipher.h>
- #include <crypto/internal/hash.h>
- #include <crypto/internal/rsa.h>
- --- a/drivers/crypto/caam/ctrl.c
- +++ b/drivers/crypto/caam/ctrl.c
- @@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx);
- #include "qi.h"
- #endif
-
- +static struct platform_device *caam_dma_dev;
- +
- /*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
- @@ -332,6 +334,9 @@ static int caam_remove(struct platform_d
- debugfs_remove_recursive(ctrlpriv->dfs_root);
- #endif
-
- + if (caam_dma_dev)
- + platform_device_unregister(caam_dma_dev);
- +
- /* Unmap controller region */
- iounmap(ctrl);
-
- @@ -433,6 +438,10 @@ static int caam_probe(struct platform_de
- {.family = "Freescale i.MX"},
- {},
- };
- + static struct platform_device_info caam_dma_pdev_info = {
- + .name = "caam-dma",
- + .id = PLATFORM_DEVID_NONE
- + };
- struct device *dev;
- struct device_node *nprop, *np;
- struct caam_ctrl __iomem *ctrl;
- @@ -615,6 +624,8 @@ static int caam_probe(struct platform_de
- goto iounmap_ctrl;
- }
-
- + ctrlpriv->era = caam_get_era();
- +
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- @@ -671,6 +682,16 @@ static int caam_probe(struct platform_de
- goto caam_remove;
- }
-
- + caam_dma_pdev_info.parent = dev;
- + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
- + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
- + if (IS_ERR(caam_dma_dev)) {
- + dev_err(dev, "Unable to create and register caam-dma dev\n");
- + caam_dma_dev = NULL;
- + } else {
- + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
- + }
- +
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
-
- /*
- @@ -746,7 +767,7 @@ static int caam_probe(struct platform_de
-
- /* Report "alive" for developer to see */
- dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- - caam_get_era());
- + ctrlpriv->era);
- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
- caam_dpaa2 ? "yes" : "no");
- --- a/drivers/crypto/caam/desc.h
- +++ b/drivers/crypto/caam/desc.h
- @@ -42,6 +42,7 @@
- #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
- #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
- #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
- +#define CMD_MOVEB (0x07 << CMD_SHIFT)
- #define CMD_STORE (0x0a << CMD_SHIFT)
- #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
- #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
- @@ -355,6 +356,7 @@
- #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
- #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
- #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
- +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
-
- /* Other types. Need to OR in last/flush bits as desired */
- #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
- @@ -408,6 +410,7 @@
- #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
- +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
-
- /*
- @@ -444,6 +447,18 @@
- #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
- #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
- #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
- +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
-
- /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
- #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
- @@ -1093,6 +1108,22 @@
- /* MacSec protinfos */
- #define OP_PCL_MACSEC 0x0001
-
- +/* Derived Key Protocol (DKP) Protinfo */
- +#define OP_PCL_DKP_SRC_SHIFT 14
- +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
- +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
- +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
- +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
- +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
- +#define OP_PCL_DKP_DST_SHIFT 12
- +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
- +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
- +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
- +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
- +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
- +#define OP_PCL_DKP_KEY_SHIFT 0
- +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
- +
- /* PKI unidirectional protocol protinfo bits */
- #define OP_PCL_PKPROT_TEST 0x0008
- #define OP_PCL_PKPROT_DECRYPT 0x0004
- @@ -1440,10 +1471,11 @@
- #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
- -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
- +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
- #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
- #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
- +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
-
- /* Destination selectors */
- #define MATH_DEST_SHIFT 8
- @@ -1452,6 +1484,7 @@
- #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
- #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
- #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
- +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
- #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
- #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
- #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
- @@ -1624,4 +1657,31 @@
- /* Frame Descriptor Command for Replacement Job Descriptor */
- #define FD_CMD_REPLACE_JOB_DESC 0x20000000
-
- +/* CHA Control Register bits */
- +#define CCTRL_RESET_CHA_ALL 0x1
- +#define CCTRL_RESET_CHA_AESA 0x2
- +#define CCTRL_RESET_CHA_DESA 0x4
- +#define CCTRL_RESET_CHA_AFHA 0x8
- +#define CCTRL_RESET_CHA_KFHA 0x10
- +#define CCTRL_RESET_CHA_SF8A 0x20
- +#define CCTRL_RESET_CHA_PKHA 0x40
- +#define CCTRL_RESET_CHA_MDHA 0x80
- +#define CCTRL_RESET_CHA_CRCA 0x100
- +#define CCTRL_RESET_CHA_RNG 0x200
- +#define CCTRL_RESET_CHA_SF9A 0x400
- +#define CCTRL_RESET_CHA_ZUCE 0x800
- +#define CCTRL_RESET_CHA_ZUCA 0x1000
- +#define CCTRL_UNLOAD_PK_A0 0x10000
- +#define CCTRL_UNLOAD_PK_A1 0x20000
- +#define CCTRL_UNLOAD_PK_A2 0x40000
- +#define CCTRL_UNLOAD_PK_A3 0x80000
- +#define CCTRL_UNLOAD_PK_B0 0x100000
- +#define CCTRL_UNLOAD_PK_B1 0x200000
- +#define CCTRL_UNLOAD_PK_B2 0x400000
- +#define CCTRL_UNLOAD_PK_B3 0x800000
- +#define CCTRL_UNLOAD_PK_N 0x1000000
- +#define CCTRL_UNLOAD_PK_A 0x4000000
- +#define CCTRL_UNLOAD_PK_B 0x8000000
- +#define CCTRL_UNLOAD_SBOX 0x10000000
- +
- #endif /* DESC_H */
- --- a/drivers/crypto/caam/desc_constr.h
- +++ b/drivers/crypto/caam/desc_constr.h
- @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
- append_ptr(desc, ptr);
- }
-
- -static inline void append_data(u32 * const desc, void *data, int len)
- +static inline void append_data(u32 * const desc, const void *data, int len)
- {
- u32 *offset = desc_end(desc);
-
- @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
- append_cmd(desc, len);
- }
-
- -static inline void append_cmd_data(u32 * const desc, void *data, int len,
- +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
- u32 command)
- {
- append_cmd(desc, command | IMMEDIATE | len);
- @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co
- }
- APPEND_CMD_RET(jump, JUMP)
- APPEND_CMD_RET(move, MOVE)
- +APPEND_CMD_RET(moveb, MOVEB)
-
- static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
- {
- @@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
- APPEND_SEQ_PTR_INTLEN(out, OUT)
-
- #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
- -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
- +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
- unsigned int len, u32 options) \
- { \
- PRINT_POS; \
- @@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
- * from length of immediate data provided, e.g., split keys
- */
- #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
- -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
- +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
- unsigned int data_len, \
- unsigned int len, u32 options) \
- { \
- @@ -452,7 +453,7 @@ struct alginfo {
- unsigned int keylen_pad;
- union {
- dma_addr_t key_dma;
- - void *key_virt;
- + const void *key_virt;
- };
- bool key_inline;
- };
- @@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi
- return (rem_bytes >= 0) ? 0 : -1;
- }
-
- +/**
- + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
- + * @desc: pointer to buffer used for descriptor construction
- + * @adata: pointer to authentication transform definitions.
- + * keylen holds the length of the initial key, while keylen_pad
- + * holds the length of the derived (split) key.
- + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
- + * SHA256, SHA384, SHA512}.
- + */
- +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
- +{
- + u32 protid;
- +
- + /*
- + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
- + * to OP_PCLID_DKP_{MD5, SHA*}
- + */
- + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
- + (0x20 << OP_ALG_ALGSEL_SHIFT);
- +
- + if (adata->key_inline) {
- + int words;
- +
- + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
- + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
- + adata->keylen);
- + append_data(desc, adata->key_virt, adata->keylen);
- +
- + /* Reserve space in descriptor buffer for the derived key */
- + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
- + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
- + if (words)
- + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
- + } else {
- + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
- + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
- + adata->keylen);
- + append_ptr(desc, adata->key_dma);
- + }
- +}
- +
- #endif /* DESC_CONSTR_H */
- --- /dev/null
- +++ b/drivers/crypto/caam/dpseci.c
- @@ -0,0 +1,865 @@
- +/*
- + * Copyright 2013-2016 Freescale Semiconductor Inc.
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/fsl/mc.h>
- +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
- +#include "dpseci.h"
- +#include "dpseci_cmd.h"
- +
- +/**
- + * dpseci_open() - Open a control session for the specified object
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @dpseci_id: DPSECI unique ID
- + * @token: Returned token; use in subsequent API calls
- + *
- + * This function can be used to open a control session for an already created
- + * object; an object may have been declared in the DPL or by calling the
- + * dpseci_create() function.
- + * This function returns a unique authentication token, associated with the
- + * specific object ID and the specific MC portal; this token must be used in all
- + * subsequent commands for this specific object.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
- + u16 *token)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_open *cmd_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
- + cmd_flags,
- + 0);
- + cmd_params = (struct dpseci_cmd_open *)cmd.params;
- + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + *token = mc_cmd_hdr_read_token(&cmd);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_close() - Close the control session of the object
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + *
- + * After this function is called, no further operations are allowed on the
- + * object without opening a new control session.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
- +{
- + struct fsl_mc_command cmd = { 0 };
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
- + cmd_flags,
- + token);
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_create() - Create the DPSECI object
- + * @mc_io: Pointer to MC portal's I/O object
- + * @dprc_token: Parent container token; '0' for default container
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @cfg: Configuration structure
- + * @obj_id: returned object id
- + *
- + * Create the DPSECI object, allocate required resources and perform required
- + * initialization.
- + *
- + * The object can be created either by declaring it in the DPL file, or by
- + * calling this function.
- + *
- + * The function accepts an authentication token of a parent container that this
- + * object should be assigned to. The token can be '0', in which case the
- + * object is assigned to the default container.
- + * The newly created object can be opened with the returned object id and using
- + * the container's associated tokens and MC portals.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
- + const struct dpseci_cfg *cfg, u32 *obj_id)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_create *cmd_params;
- + int i, err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
- + cmd_flags,
- + dprc_token);
- + cmd_params = (struct dpseci_cmd_create *)cmd.params;
- + for (i = 0; i < 8; i++)
- + cmd_params->priorities[i] = cfg->priorities[i];
- + for (i = 0; i < 8; i++)
- + cmd_params->priorities2[i] = cfg->priorities[8 + i];
- + cmd_params->num_tx_queues = cfg->num_tx_queues;
- + cmd_params->num_rx_queues = cfg->num_rx_queues;
- + cmd_params->options = cpu_to_le32(cfg->options);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + *obj_id = mc_cmd_read_object_id(&cmd);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
- + * @mc_io: Pointer to MC portal's I/O object
- + * @dprc_token: Parent container token; '0' for default container
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @object_id: The object id; it must be a valid id within the container that
- + * created this object
- + *
- + * The function accepts the authentication token of the parent container that
- + * created the object (not the one that currently owns the object). The object
- + * is searched within parent using the provided 'object_id'.
- + * All tokens to the object must be closed before calling destroy.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
- + u32 object_id)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_destroy *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
- + cmd_flags,
- + dprc_token);
- + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
- + cmd_params->object_id = cpu_to_le32(object_id);
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
- +{
- + struct fsl_mc_command cmd = { 0 };
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
- + cmd_flags,
- + token);
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
- +{
- + struct fsl_mc_command cmd = { 0 };
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
- + cmd_flags,
- + token);
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_is_enabled() - Check if the DPSECI is enabled.
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @en: Returns '1' if object is enabled; '0' otherwise
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + int *en)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_rsp_is_enabled *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
- + cmd_flags,
- + token);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
- + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
- +{
- + struct fsl_mc_command cmd = { 0 };
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
- + cmd_flags,
- + token);
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_irq_enable() - Get overall interrupt state
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @en: Returned Interrupt state - enable = 1, disable = 0
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u8 *en)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_enable *cmd_params;
- + struct dpseci_rsp_get_irq_enable *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
- + cmd_params->irq_index = irq_index;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
- + *en = rsp_params->enable_state;
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_set_irq_enable() - Set overall interrupt state.
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @en: Interrupt state - enable = 1, disable = 0
- + *
- + * Allows GPP software to control when interrupts are generated.
- + * Each interrupt can have up to 32 causes. The enable/disable control affects
- + * the overall interrupt state. If the interrupt is disabled, none of the
- + * causes will trigger an interrupt.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u8 en)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_enable *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
- + cmd_params->irq_index = irq_index;
- + cmd_params->enable_state = en;
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_irq_mask() - Get interrupt mask.
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @mask: Returned event mask to trigger interrupt
- + *
- + * Every interrupt can have up to 32 causes and the interrupt model supports
- + * masking/unmasking each cause independently.
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 *mask)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_mask *cmd_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
- + cmd_params->irq_index = irq_index;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + *mask = le32_to_cpu(cmd_params->mask);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_set_irq_mask() - Set interrupt mask.
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @mask: event mask to trigger interrupt;
- + * each bit:
- + * 0 = ignore event
- + * 1 = consider event for asserting IRQ
- + *
- + * Every interrupt can have up to 32 causes and the interrupt model supports
- + * masking/unmasking each cause independently
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 mask)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_mask *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
- + cmd_params->mask = cpu_to_le32(mask);
- + cmd_params->irq_index = irq_index;
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_irq_status() - Get the current status of any pending interrupts
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @status: Returned interrupts status - one bit per cause:
- + * 0 = no interrupt pending
- + * 1 = interrupt pending
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 *status)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_status *cmd_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
- + cmd_params->status = cpu_to_le32(*status);
- + cmd_params->irq_index = irq_index;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + *status = le32_to_cpu(cmd_params->status);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_clear_irq_status() - Clear a pending interrupt's status
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @irq_index: The interrupt index to configure
- + * @status: bits to clear (W1C) - one bit per cause:
- + * 0 = don't change
- + * 1 = clear status bit
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 status)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_irq_status *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
- + cmd_params->status = cpu_to_le32(status);
- + cmd_params->irq_index = irq_index;
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_attributes() - Retrieve DPSECI attributes
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @attr: Returned object's attributes
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_attr *attr)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_rsp_get_attributes *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
- + cmd_flags,
- + token);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
- + attr->id = le32_to_cpu(rsp_params->id);
- + attr->num_tx_queues = rsp_params->num_tx_queues;
- + attr->num_rx_queues = rsp_params->num_rx_queues;
- + attr->options = le32_to_cpu(rsp_params->options);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_set_rx_queue() - Set Rx queue configuration
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @queue: Select the queue relative to number of priorities configured at
- + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
- + * Rx queues identically.
- + * @cfg: Rx queue configuration
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_queue *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
- + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
- + cmd_params->priority = cfg->dest_cfg.priority;
- + cmd_params->queue = queue;
- + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
- + cfg->dest_cfg.dest_type);
- + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
- + cmd_params->options = cpu_to_le32(cfg->options);
- + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
- + cfg->order_preservation_en);
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @queue: Select the queue relative to number of priorities configured at
- + * DPSECI creation
- + * @attr: Returned Rx queue attributes
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, struct dpseci_rx_queue_attr *attr)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_queue *cmd_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
- + cmd_params->queue = queue;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
- + attr->dest_cfg.priority = cmd_params->priority;
- + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
- + DEST_TYPE);
- + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
- + attr->fqid = le32_to_cpu(cmd_params->fqid);
- + attr->order_preservation_en =
- + dpseci_get_field(cmd_params->order_preservation_en,
- + ORDER_PRESERVATION);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @queue: Select the queue relative to number of priorities configured at
- + * DPSECI creation
- + * @attr: Returned Tx queue attributes
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, struct dpseci_tx_queue_attr *attr)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_queue *cmd_params;
- + struct dpseci_rsp_get_tx_queue *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
- + cmd_params->queue = queue;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
- + attr->fqid = le32_to_cpu(rsp_params->fqid);
- + attr->priority = rsp_params->priority;
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @attr: Returned SEC attributes
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_sec_attr *attr)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_rsp_get_sec_attr *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
- + cmd_flags,
- + token);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
- + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
- + attr->major_rev = rsp_params->major_rev;
- + attr->minor_rev = rsp_params->minor_rev;
- + attr->era = rsp_params->era;
- + attr->deco_num = rsp_params->deco_num;
- + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
- + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
- + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
- + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
- + attr->crc_acc_num = rsp_params->crc_acc_num;
- + attr->pk_acc_num = rsp_params->pk_acc_num;
- + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
- + attr->rng_acc_num = rsp_params->rng_acc_num;
- + attr->md_acc_num = rsp_params->md_acc_num;
- + attr->arc4_acc_num = rsp_params->arc4_acc_num;
- + attr->des_acc_num = rsp_params->des_acc_num;
- + attr->aes_acc_num = rsp_params->aes_acc_num;
- + attr->ccha_acc_num = rsp_params->ccha_acc_num;
- + attr->ptha_acc_num = rsp_params->ptha_acc_num;
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @counters: Returned SEC counters
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_sec_counters *counters)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_rsp_get_sec_counters *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
- + cmd_flags,
- + token);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
- + counters->dequeued_requests =
- + le64_to_cpu(rsp_params->dequeued_requests);
- + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
- + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
- + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
- + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
- + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
- + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_get_api_version() - Get Data Path SEC Interface API version
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @major_ver: Major version of data path sec API
- + * @minor_ver: Minor version of data path sec API
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 *major_ver, u16 *minor_ver)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_rsp_get_api_version *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
- + cmd_flags, 0);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
- + *major_ver = le16_to_cpu(rsp_params->major);
- + *minor_ver = le16_to_cpu(rsp_params->minor);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_set_opr() - Set Order Restoration configuration
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @index: The queue index
- + * @options: Configuration mode options; can be OPR_OPT_CREATE or
- + * OPR_OPT_RETIRE
- + * @cfg: Configuration options for the OPR
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
- + u8 options, struct opr_cfg *cfg)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_opr *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(
- + DPSECI_CMDID_SET_OPR,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
- + cmd_params->index = index;
- + cmd_params->options = options;
- + cmd_params->oloe = cfg->oloe;
- + cmd_params->oeane = cfg->oeane;
- + cmd_params->olws = cfg->olws;
- + cmd_params->oa = cfg->oa;
- + cmd_params->oprrws = cfg->oprrws;
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_opr() - Retrieve Order Restoration config and query
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @index: The queue index
- + * @cfg: Returned OPR configuration
- + * @qry: Returned OPR query
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
- + struct opr_cfg *cfg, struct opr_qry *qry)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_opr *cmd_params;
- + struct dpseci_rsp_get_opr *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
- + cmd_params->index = index;
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
- + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
- + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
- + cfg->oloe = rsp_params->oloe;
- + cfg->oeane = rsp_params->oeane;
- + cfg->olws = rsp_params->olws;
- + cfg->oa = rsp_params->oa;
- + cfg->oprrws = rsp_params->oprrws;
- + qry->nesn = le16_to_cpu(rsp_params->nesn);
- + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
- + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
- + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
- + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
- + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
- + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
- + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
- + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
- + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
- +
- + return 0;
- +}
- +
- +/**
- + * dpseci_set_congestion_notification() - Set congestion group
- + * notification configuration
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @cfg: congestion notification configuration
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_congestion_notification *cmd_params;
- +
- + cmd.header = mc_encode_cmd_header(
- + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
- + cmd_flags,
- + token);
- + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
- + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
- + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
- + cmd_params->priority = cfg->dest_cfg.priority;
- + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
- + cfg->dest_cfg.dest_type);
- + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
- + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
- + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
- + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
- + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
- +
- + return mc_send_command(mc_io, &cmd);
- +}
- +
- +/**
- + * dpseci_get_congestion_notification() - Get congestion group notification
- + * configuration
- + * @mc_io: Pointer to MC portal's I/O object
- + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- + * @token: Token of DPSECI object
- + * @cfg: congestion notification configuration
- + *
- + * Return: '0' on success, error code otherwise
- + */
- +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 token, struct dpseci_congestion_notification_cfg *cfg)
- +{
- + struct fsl_mc_command cmd = { 0 };
- + struct dpseci_cmd_congestion_notification *rsp_params;
- + int err;
- +
- + cmd.header = mc_encode_cmd_header(
- + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
- + cmd_flags,
- + token);
- + err = mc_send_command(mc_io, &cmd);
- + if (err)
- + return err;
- +
- + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
- + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
- + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
- + cfg->dest_cfg.priority = rsp_params->priority;
- + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
- + CGN_DEST_TYPE);
- + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
- + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
- + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
- + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
- + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
- +
- + return 0;
- +}
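- 
- A minimal usage sketch of the wrappers above, assuming a valid,
- already-mapped MC portal ('mc_io') and an existing DPSECI object ID;
- the function name and error handling are illustrative only:
- 
- 	static int dpseci_bringup_example(struct fsl_mc_io *mc_io, int dpseci_id)
- 	{
- 		struct dpseci_attr attr;
- 		u16 token;
- 		int err;
- 
- 		/* Open a control session for the object */
- 		err = dpseci_open(mc_io, 0, dpseci_id, &token);
- 		if (err)
- 			return err;
- 
- 		/* Query number of queues, options, etc. */
- 		err = dpseci_get_attributes(mc_io, 0, token, &attr);
- 		if (err)
- 			goto out;
- 
- 		/* Allow the object to send and receive frames */
- 		err = dpseci_enable(mc_io, 0, token);
- 
- out:
- 		/* Closing the control session does not disable the object */
- 		dpseci_close(mc_io, 0, token);
- 		return err;
- 	}
- 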
- --- /dev/null
- +++ b/drivers/crypto/caam/dpseci.h
- @@ -0,0 +1,433 @@
- +/*
- + * Copyright 2013-2016 Freescale Semiconductor Inc.
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +#ifndef _DPSECI_H_
- +#define _DPSECI_H_
- +
- +/*
- + * Data Path SEC Interface API
- + * Contains initialization APIs and runtime control APIs for DPSECI
- + */
- +
- +struct fsl_mc_io;
- +struct opr_cfg;
- +struct opr_qry;
- +
- +/**
- + * General DPSECI macros
- + */
- +
- +/**
- + * Maximum number of Tx/Rx queues per DPSECI object
- + */
- +#define DPSECI_MAX_QUEUE_NUM 16
- +
- +/**
- + * All queues considered; see dpseci_set_rx_queue()
- + */
- +#define DPSECI_ALL_QUEUES (u8)(-1)
- +
- +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
- + u16 *token);
- +
- +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
- +
- +/**
- + * Enable the Congestion Group support
- + */
- +#define DPSECI_OPT_HAS_CG 0x000020
- +
- +/**
- + * Enable the Order Restoration support
- + */
- +#define DPSECI_OPT_HAS_OPR 0x000040
- +
- +/**
- + * Order Point Records are shared for the entire DPSECI
- + */
- +#define DPSECI_OPT_OPR_SHARED 0x000080
- +
- +/**
- + * struct dpseci_cfg - Structure representing DPSECI configuration
- + * @options: Any combination of the following options:
- + * DPSECI_OPT_HAS_CG
- + * DPSECI_OPT_HAS_OPR
- + * DPSECI_OPT_OPR_SHARED
- + * @num_tx_queues: num of queues towards the SEC
- + * @num_rx_queues: num of queues back from the SEC
- + * @priorities: Priorities for the SEC hardware processing;
- + * each entry in the array is the priority of the corresponding
- + * Tx queue towards the SEC;
- + * valid priority values are 1-8
- + */
- +struct dpseci_cfg {
- + u32 options;
- + u8 num_tx_queues;
- + u8 num_rx_queues;
- + u8 priorities[DPSECI_MAX_QUEUE_NUM];
- +};
- +
- +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
- + const struct dpseci_cfg *cfg, u32 *obj_id);
- +
- +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
- + u32 object_id);
- +
- +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
- +
- +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
- +
- +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + int *en);
- +
- +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
- +
- +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u8 *en);
- +
- +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u8 en);
- +
- +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 *mask);
- +
- +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 mask);
- +
- +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 *status);
- +
- +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 irq_index, u32 status);
- +
- +/**
- + * struct dpseci_attr - Structure representing DPSECI attributes
- + * @id: DPSECI object ID
- + * @num_tx_queues: number of queues towards the SEC
- + * @num_rx_queues: number of queues back from the SEC
- + * @options: any combination of the following options:
- + * DPSECI_OPT_HAS_CG
- + * DPSECI_OPT_HAS_OPR
- + * DPSECI_OPT_OPR_SHARED
- + */
- +struct dpseci_attr {
- + int id;
- + u8 num_tx_queues;
- + u8 num_rx_queues;
- + u32 options;
- +};
- +
- +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_attr *attr);
- +
- +/**
- + * enum dpseci_dest - DPSECI destination types
- + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
- + * and does not generate FQDAN notifications; user is expected to dequeue
- + * from the queue based on polling or other user-defined method
- + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
- + * notifications to the specified DPIO; user is expected to dequeue from
- + * the queue only after notification is received
- + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
- + * FQDAN notifications, but is connected to the specified DPCON object;
- + * user is expected to dequeue from the DPCON channel
- + */
- +enum dpseci_dest {
- + DPSECI_DEST_NONE = 0,
- + DPSECI_DEST_DPIO,
- + DPSECI_DEST_DPCON
- +};
- +
- +/**
- + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
- + * @dest_type: Destination type
- + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
- + * @priority: Priority selection within the DPIO or DPCON channel; valid values
- + * are 0-1 or 0-7, depending on the number of priorities in that channel;
- + * not relevant for 'DPSECI_DEST_NONE' option
- + */
- +struct dpseci_dest_cfg {
- + enum dpseci_dest dest_type;
- + int dest_id;
- + u8 priority;
- +};
- +
- +/**
- + * DPSECI queue modification options
- + */
- +
- +/**
- + * Select to modify the user's context associated with the queue
- + */
- +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
- +
- +/**
- + * Select to modify the queue's destination
- + */
- +#define DPSECI_QUEUE_OPT_DEST 0x00000002
- +
- +/**
- + * Select to modify the queue's order preservation
- + */
- +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
- +
- +/**
- + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
- + * @options: Flags representing the suggested modifications to the queue;
- + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
- + * @order_preservation_en: order preservation configuration for the rx queue;
- + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
- + * @user_ctx: User context value provided in the frame descriptor of each
- + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
- + * in 'options'
- + * @dest_cfg: Queue destination parameters; valid only if
- + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
- + */
- +struct dpseci_rx_queue_cfg {
- + u32 options;
- + int order_preservation_en;
- + u64 user_ctx;
- + struct dpseci_dest_cfg dest_cfg;
- +};
- +
- +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
- +
- +/**
- + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
- + * @user_ctx: User context value provided in the frame descriptor of each
- + * dequeued frame
- + * @order_preservation_en: Status of the order preservation configuration on the
- + * queue
- + * @dest_cfg: Queue destination configuration
- + * @fqid: Virtual FQID value to be used for dequeue operations
- + */
- +struct dpseci_rx_queue_attr {
- + u64 user_ctx;
- + int order_preservation_en;
- + struct dpseci_dest_cfg dest_cfg;
- + u32 fqid;
- +};
- +
- +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, struct dpseci_rx_queue_attr *attr);
- +
- +/**
- + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
- + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
- + * @priority: SEC hardware processing priority for the queue
- + */
- +struct dpseci_tx_queue_attr {
- + u32 fqid;
- + u8 priority;
- +};
- +
- +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + u8 queue, struct dpseci_tx_queue_attr *attr);
- +
- +/**
- + * struct dpseci_sec_attr - Structure representing attributes of the SEC
- + * hardware accelerator
- + * @ip_id: ID for SEC
- + * @major_rev: Major revision number for SEC
- + * @minor_rev: Minor revision number for SEC
- + * @era: SEC Era
- + * @deco_num: The number of copies of the DECO that are implemented in this
- + * version of SEC
- + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
- + * version of SEC
- + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
- + * version of SEC
- + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
- + * implemented in this version of SEC
- + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
- + * implemented in this version of SEC
- + * @crc_acc_num: The number of copies of the CRC module that are implemented in
- + * this version of SEC
- + * @pk_acc_num: The number of copies of the Public Key module that are
- + * implemented in this version of SEC
- + * @kasumi_acc_num: The number of copies of the Kasumi module that are
- + * implemented in this version of SEC
- + * @rng_acc_num: The number of copies of the Random Number Generator that are
- + * implemented in this version of SEC
- + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
- + * implemented in this version of SEC
- + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
- + * in this version of SEC
- + * @des_acc_num: The number of copies of the DES module that are implemented in
- + * this version of SEC
- + * @aes_acc_num: The number of copies of the AES module that are implemented in
- + * this version of SEC
- + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
- + * implemented in this version of SEC
- + * @ptha_acc_num: The number of copies of the Poly1305 module that are
- + * implemented in this version of SEC
- + */
- +struct dpseci_sec_attr {
- + u16 ip_id;
- + u8 major_rev;
- + u8 minor_rev;
- + u8 era;
- + u8 deco_num;
- + u8 zuc_auth_acc_num;
- + u8 zuc_enc_acc_num;
- + u8 snow_f8_acc_num;
- + u8 snow_f9_acc_num;
- + u8 crc_acc_num;
- + u8 pk_acc_num;
- + u8 kasumi_acc_num;
- + u8 rng_acc_num;
- + u8 md_acc_num;
- + u8 arc4_acc_num;
- + u8 des_acc_num;
- + u8 aes_acc_num;
- + u8 ccha_acc_num;
- + u8 ptha_acc_num;
- +};
- +
- +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_sec_attr *attr);
- +
- +/**
- + * struct dpseci_sec_counters - Structure representing global SEC counters
- + * (not per-DPSECI counters)
- + * @dequeued_requests: Number of Requests Dequeued
- + * @ob_enc_requests: Number of Outbound Encrypt Requests
- + * @ib_dec_requests: Number of Inbound Decrypt Requests
- + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
- + * @ob_prot_bytes: Number of Outbound Bytes Protected
- + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
- + * @ib_valid_bytes: Number of Inbound Bytes Validated
- + */
- +struct dpseci_sec_counters {
- + u64 dequeued_requests;
- + u64 ob_enc_requests;
- + u64 ib_dec_requests;
- + u64 ob_enc_bytes;
- + u64 ob_prot_bytes;
- + u64 ib_dec_bytes;
- + u64 ib_valid_bytes;
- +};
- +
- +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- + struct dpseci_sec_counters *counters);
- +
- +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 *major_ver, u16 *minor_ver);
- +
- +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
- + u8 options, struct opr_cfg *cfg);
- +
- +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
- + struct opr_cfg *cfg, struct opr_qry *qry);
- +
- +/**
- + * enum dpseci_congestion_unit - DPSECI congestion units
- + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
- + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
- + */
- +enum dpseci_congestion_unit {
- + DPSECI_CONGESTION_UNIT_BYTES = 0,
- + DPSECI_CONGESTION_UNIT_FRAMES
- +};
- +
- +/**
- + * CSCN message is written to message_iova once entering a
- + * congestion state (see 'threshold_entry')
- + */
- +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
- +
- +/**
- + * CSCN message is written to message_iova once exiting a
- + * congestion state (see 'threshold_exit')
- + */
- +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
- +
- +/**
- + * CSCN write will attempt to allocate into a cache (coherent write);
- + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
- + */
- +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
- +
- +/**
- + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
- + * DPIO/DPCON's WQ channel once entering a congestion state
- + * (see 'threshold_entry')
- + */
- +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
- +
- +/**
- + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
- + * DPIO/DPCON's WQ channel once exiting a congestion state
- + * (see 'threshold_exit')
- + */
- +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
- +
- +/**
- + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
- + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
- + * (if enabled)
- + */
- +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
- +
- +/**
- + * struct dpseci_congestion_notification_cfg - congestion notification
- + * configuration
- + * @units: units type
- + * @threshold_entry: above this threshold we enter a congestion state;
- + * set it to '0' to disable it
- + * @threshold_exit: below this threshold we exit the congestion state.
- + * @message_ctx: The context that will be part of the CSCN message
- + * @message_iova: I/O virtual address (must be in DMA-able memory),
- + * must be 16B aligned;
- + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
- + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
- + * values
- + */
- +struct dpseci_congestion_notification_cfg {
- + enum dpseci_congestion_unit units;
- + u32 threshold_entry;
- + u32 threshold_exit;
- + u64 message_ctx;
- + u64 message_iova;
- + struct dpseci_dest_cfg dest_cfg;
- + u16 notification_mode;
- +};
- +
- +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
- +
- +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
- + u16 token, struct dpseci_congestion_notification_cfg *cfg);
- +
- +#endif /* _DPSECI_H_ */
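- 
- A minimal sketch of congestion notification setup with the flags above,
- assuming the DPSECI was created with DPSECI_OPT_HAS_CG and that 'mc_io',
- 'token' and a 16-byte-aligned DMA address 'iova' are supplied by the
- caller:
- 
- 	struct dpseci_congestion_notification_cfg cong_cfg = {
- 		.units = DPSECI_CONGESTION_UNIT_FRAMES,
- 		.threshold_entry = 1024,	/* enter congestion at 1024 frames */
- 		.threshold_exit = 512,		/* exit below 512 frames */
- 		.message_iova = iova,		/* CSCN write target, DMA-able */
- 		.message_ctx = 0,
- 		.dest_cfg = { .dest_type = DPSECI_DEST_NONE },
- 		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
- 				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
- 	};
- 	int err;
- 
- 	err = dpseci_set_congestion_notification(mc_io, 0, token, &cong_cfg);
- 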
- --- /dev/null
- +++ b/drivers/crypto/caam/dpseci_cmd.h
- @@ -0,0 +1,287 @@
- +/*
- + * Copyright 2013-2016 Freescale Semiconductor Inc.
- + * Copyright 2017 NXP
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the names of the above-listed copyright holders nor the
- + * names of any contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- + * POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef _DPSECI_CMD_H_
- +#define _DPSECI_CMD_H_
- +
- +/* DPSECI Version */
- +#define DPSECI_VER_MAJOR 5
- +#define DPSECI_VER_MINOR 3
- +
- +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
- +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
- +
- +/* Command versioning */
- +#define DPSECI_CMD_BASE_VERSION 1
- +#define DPSECI_CMD_BASE_VERSION_V2 2
- +#define DPSECI_CMD_BASE_VERSION_V3 3
- +#define DPSECI_CMD_ID_OFFSET 4
- +
- +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
- + DPSECI_CMD_BASE_VERSION)
- +
- +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
- + DPSECI_CMD_BASE_VERSION_V2)
- +
- +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
- + DPSECI_CMD_BASE_VERSION_V3)
- +
- +/* Command IDs */
- +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
- +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
- +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
- +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
- +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
- +
- +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
- +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
- +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
- +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
- +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
- +
- +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
- +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
- +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
- +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
- +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
- +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
- +
- +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
- +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
- +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
- +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
- +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
- +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
- +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
- +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
- +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
- +
- +/* Macros for accessing command fields smaller than 1 byte */
- +#define DPSECI_MASK(field) \
- + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
- + DPSECI_##field##_SHIFT)
- +
- +#define dpseci_set_field(var, field, val) \
- + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
- +
- +#define dpseci_get_field(var, field) \
- + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
- +
- +struct dpseci_cmd_open {
- + __le32 dpseci_id;
- +};
- +
- +struct dpseci_cmd_create {
- + u8 priorities[8];
- + u8 num_tx_queues;
- + u8 num_rx_queues;
- + u8 pad0[6];
- + __le32 options;
- + __le32 pad1;
- + u8 priorities2[8];
- +};
- +
- +struct dpseci_cmd_destroy {
- + __le32 object_id;
- +};
- +
- +#define DPSECI_ENABLE_SHIFT 0
- +#define DPSECI_ENABLE_SIZE 1
- +
- +struct dpseci_rsp_is_enabled {
- + u8 is_enabled;
- +};
- +
- +struct dpseci_cmd_irq_enable {
- + u8 enable_state;
- + u8 pad[3];
- + u8 irq_index;
- +};
- +
- +struct dpseci_rsp_get_irq_enable {
- + u8 enable_state;
- +};
- +
- +struct dpseci_cmd_irq_mask {
- + __le32 mask;
- + u8 irq_index;
- +};
- +
- +struct dpseci_cmd_irq_status {
- + __le32 status;
- + u8 irq_index;
- +};
- +
- +struct dpseci_rsp_get_attributes {
- + __le32 id;
- + __le32 pad0;
- + u8 num_tx_queues;
- + u8 num_rx_queues;
- + u8 pad1[6];
- + __le32 options;
- +};
- +
- +#define DPSECI_DEST_TYPE_SHIFT 0
- +#define DPSECI_DEST_TYPE_SIZE 4
- +
- +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
- +#define DPSECI_ORDER_PRESERVATION_SIZE 1
- +
- +struct dpseci_cmd_queue {
- + __le32 dest_id;
- + u8 priority;
- + u8 queue;
- + u8 dest_type;
- + u8 pad;
- + __le64 user_ctx;
- + union {
- + __le32 options;
- + __le32 fqid;
- + };
- + u8 order_preservation_en;
- +};
- +
- +struct dpseci_rsp_get_tx_queue {
- + __le32 pad;
- + __le32 fqid;
- + u8 priority;
- +};
- +
- +struct dpseci_rsp_get_sec_attr {
- + __le16 ip_id;
- + u8 major_rev;
- + u8 minor_rev;
- + u8 era;
- + u8 pad0[3];
- + u8 deco_num;
- + u8 zuc_auth_acc_num;
- + u8 zuc_enc_acc_num;
- + u8 pad1;
- + u8 snow_f8_acc_num;
- + u8 snow_f9_acc_num;
- + u8 crc_acc_num;
- + u8 pad2;
- + u8 pk_acc_num;
- + u8 kasumi_acc_num;
- + u8 rng_acc_num;
- + u8 pad3;
- + u8 md_acc_num;
- + u8 arc4_acc_num;
- + u8 des_acc_num;
- + u8 aes_acc_num;
- + u8 ccha_acc_num;
- + u8 ptha_acc_num;
- +};
- +
- +struct dpseci_rsp_get_sec_counters {
- + __le64 dequeued_requests;
- + __le64 ob_enc_requests;
- + __le64 ib_dec_requests;
- + __le64 ob_enc_bytes;
- + __le64 ob_prot_bytes;
- + __le64 ib_dec_bytes;
- + __le64 ib_valid_bytes;
- +};
- +
- +struct dpseci_rsp_get_api_version {
- + __le16 major;
- + __le16 minor;
- +};
- +
- +struct dpseci_cmd_opr {
- + __le16 pad;
- + u8 index;
- + u8 options;
- + u8 pad1[7];
- + u8 oloe;
- + u8 oeane;
- + u8 olws;
- + u8 oa;
- + u8 oprrws;
- +};
- +
- +#define DPSECI_OPR_RIP_SHIFT 0
- +#define DPSECI_OPR_RIP_SIZE 1
- +#define DPSECI_OPR_ENABLE_SHIFT 1
- +#define DPSECI_OPR_ENABLE_SIZE 1
- +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
- +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
- +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
- +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
- +
- +struct dpseci_rsp_get_opr {
- + __le64 pad;
- + u8 flags;
- + u8 pad0[2];
- + u8 oloe;
- + u8 oeane;
- + u8 olws;
- + u8 oa;
- + u8 oprrws;
- + __le16 nesn;
- + __le16 pad1;
- + __le16 ndsn;
- + __le16 pad2;
- + __le16 ea_tseq;
- + u8 tseq_nlis;
- + u8 pad3;
- + __le16 ea_hseq;
- + u8 hseq_nlis;
- + u8 pad4;
- + __le16 ea_hptr;
- + __le16 pad5;
- + __le16 ea_tptr;
- + __le16 pad6;
- + __le16 opr_vid;
- + __le16 pad7;
- + __le16 opr_id;
- +};
- +
- +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
- +#define DPSECI_CGN_DEST_TYPE_SIZE 4
- +#define DPSECI_CGN_UNITS_SHIFT 4
- +#define DPSECI_CGN_UNITS_SIZE 2
- +
- +struct dpseci_cmd_congestion_notification {
- + __le32 dest_id;
- + __le16 notification_mode;
- + u8 priority;
- + u8 options;
- + __le64 message_iova;
- + __le64 message_ctx;
- + __le32 threshold_entry;
- + __le32 threshold_exit;
- +};
- +
- +#endif /* _DPSECI_CMD_H_ */
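- 
- To see how the sub-byte field macros expand, take DPSECI_CGN_UNITS
- (shift 4, size 2); the snippet below is purely illustrative:
- 
- 	u8 opts = 0;
- 
- 	/*
- 	 * dpseci_set_field(opts, CGN_UNITS, v) expands to
- 	 *   opts |= ((v << 4) & GENMASK(5, 4));
- 	 * i.e. it writes v into bits 5:4 of opts, while
- 	 * dpseci_get_field(opts, CGN_UNITS) expands to
- 	 *   (opts & GENMASK(5, 4)) >> 4
- 	 * which reads it back.
- 	 */
- 	dpseci_set_field(opts, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES);
- 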
- --- a/drivers/crypto/caam/error.c
- +++ b/drivers/crypto/caam/error.c
- @@ -108,6 +108,54 @@ static const struct {
- { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
- };
-
- +static const struct {
- + u8 value;
- + const char *error_text;
- +} qi_error_list[] = {
- + { 0x1F, "Job terminated by FQ or ICID flush" },
- + { 0x20, "FD format error"},
- + { 0x21, "FD command format error"},
- + { 0x23, "FL format error"},
- + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
- + { 0x30, "Max. buffer size too small"},
- + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
- + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
- + { 0x33, "Size over/underflow (allocate mode)"},
- + { 0x34, "Size over/underflow (reuse mode)"},
- + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
- + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
- + { 0x41, "SBC frame format not supported (allocate mode)"},
- + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
- + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
- + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
- + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
- + { 0x46, "Annotation length exceeds offset (reuse mode)"},
- + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
- + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
- + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
- + { 0x51, "Unsupported IF reuse mode"},
- + { 0x52, "Unsupported FL use mode"},
- + { 0x53, "Unsupported RJD use mode"},
- + { 0x54, "Unsupported inline descriptor use mode"},
- + { 0xC0, "Table buffer pool 0 depletion"},
- + { 0xC1, "Table buffer pool 1 depletion"},
- + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
- + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
- + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
- + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
- + { 0xD0, "FLC read error"},
- + { 0xD1, "FL read error"},
- + { 0xD2, "FL write error"},
- + { 0xD3, "OF SGT write error"},
- + { 0xD4, "PTA read error"},
- + { 0xD5, "PTA write error"},
- + { 0xD6, "OF SGT F-bit write error"},
- + { 0xD7, "ASA write error"},
- + { 0xE1, "FLC[ICR]=0 ICID error"},
- + { 0xE2, "FLC[ICR]=1 ICID error"},
- + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
- +};
- +
- static const char * const cha_id_list[] = {
- "",
- "AES",
- @@ -236,6 +284,27 @@ static void report_deco_status(struct de
- status, error, idx_str, idx, err_str, err_err_code);
- }
-
- +static void report_qi_status(struct device *qidev, const u32 status,
- + const char *error)
- +{
- + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
- + const char *err_str = "unidentified error value 0x";
- + char err_err_code[3] = { 0 };
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
- + if (qi_error_list[i].value == err_id)
- + break;
- +
- + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
- + err_str = qi_error_list[i].error_text;
- + else
- + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
- +
- + dev_err(qidev, "%08x: %s: %s%s\n",
- + status, error, err_str, err_err_code);
- +}
- +
- static void report_jr_status(struct device *jrdev, const u32 status,
- const char *error)
- {
- @@ -250,7 +319,7 @@ static void report_cond_code_status(stru
- status, error, __func__);
- }
-
- -void caam_jr_strstatus(struct device *jrdev, u32 status)
- +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
- {
- static const struct stat_src {
- void (*report_ssed)(struct device *jrdev, const u32 status,
- @@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr
- { report_ccb_status, "CCB" },
- { report_jump_status, "Jump" },
- { report_deco_status, "DECO" },
- - { NULL, "Queue Manager Interface" },
- + { report_qi_status, "Queue Manager Interface" },
- { report_jr_status, "Job Ring" },
- { report_cond_code_status, "Condition Code" },
- { NULL, NULL },
- @@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr
- else
- dev_err(jrdev, "%d: unknown error source\n", ssrc);
- }
- -EXPORT_SYMBOL(caam_jr_strstatus);
- +EXPORT_SYMBOL(caam_strstatus);
- --- a/drivers/crypto/caam/error.h
- +++ b/drivers/crypto/caam/error.h
- @@ -8,7 +8,11 @@
- #ifndef CAAM_ERROR_H
- #define CAAM_ERROR_H
- #define CAAM_ERROR_STR_MAX 302
- -void caam_jr_strstatus(struct device *jrdev, u32 status);
- +
- +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
- +
- +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
- +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
-
- void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
- int rowsize, int groupsize, struct scatterlist *sg,
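- 
- With the 'qi_v2' flag threaded through caam_strstatus(), a DPAA2
- completion callback can decode non-zero SEC status words via the new
- alias; a minimal sketch, with 'qidev' and 'status' assumed to come from
- the surrounding driver:
- 
- 	if (unlikely(status))
- 		caam_qi2_strstatus(qidev, status);
- 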
- --- a/drivers/crypto/caam/intern.h
- +++ b/drivers/crypto/caam/intern.h
- @@ -84,6 +84,7 @@ struct caam_drv_private {
- u8 qi_present; /* Nonzero if QI present in device */
- int secvio_irq; /* Security violation interrupt number */
- int virt_en; /* Virtualization enabled in CAAM */
- + int era; /* CAAM Era (internal HW revision) */
-
- #define RNG4_MAX_HANDLES 2
- /* RNG4 block */
- --- a/drivers/crypto/caam/jr.c
- +++ b/drivers/crypto/caam/jr.c
- @@ -23,6 +23,14 @@ struct jr_driver_data {
-
- static struct jr_driver_data driver_data;
-
- +static int jr_driver_probed;
- +
- +int caam_jr_driver_probed(void)
- +{
- + return jr_driver_probed;
- +}
- +EXPORT_SYMBOL(caam_jr_driver_probed);
- +
- static int caam_reset_hw_jr(struct device *dev)
- {
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- @@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor
- dev_err(jrdev, "Failed to shut down job ring\n");
- irq_dispose_mapping(jrpriv->irq);
-
- + jr_driver_probed--;
- +
- return ret;
- }
-
- @@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void)
- EXPORT_SYMBOL(caam_jr_alloc);
-
- /**
- + * caam_jridx_alloc() - Allocate a specific Job Ring based on its index.
- + * @idx: index of the Job Ring to allocate.
- + *
- + * Return: pointer to the Job Ring device on success, ERR_PTR(-ENODEV) otherwise.
- + **/
- +struct device *caam_jridx_alloc(int idx)
- +{
- + struct caam_drv_private_jr *jrpriv;
- + struct device *dev = ERR_PTR(-ENODEV);
- +
- + spin_lock(&driver_data.jr_alloc_lock);
- +
- + if (list_empty(&driver_data.jr_list))
- + goto end;
- +
- + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
- + if (jrpriv->ridx == idx) {
- + atomic_inc(&jrpriv->tfm_count);
- + dev = jrpriv->dev;
- + break;
- + }
- + }
- +
- +end:
- + spin_unlock(&driver_data.jr_alloc_lock);
- + return dev;
- +}
- +EXPORT_SYMBOL(caam_jridx_alloc);
- +
- +/**
- * caam_jr_free() - Free the Job Ring
- * @rdev - points to the dev that identifies the Job ring to
- * be released.
- @@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform
-
- atomic_set(&jrpriv->tfm_count, 0);
-
- + jr_driver_probed++;
- +
- return 0;
- }
-
- --- a/drivers/crypto/caam/jr.h
- +++ b/drivers/crypto/caam/jr.h
- @@ -9,7 +9,9 @@
- #define JR_H
-
- /* Prototypes for backend-level services exposed to APIs */
- +int caam_jr_driver_probed(void);
- struct device *caam_jr_alloc(void);
- +struct device *caam_jridx_alloc(int idx);
- void caam_jr_free(struct device *rdev);
- int caam_jr_enqueue(struct device *dev, u32 *desc,
- void (*cbk)(struct device *dev, u32 *desc, u32 status,
- --- a/drivers/crypto/caam/key_gen.c
- +++ b/drivers/crypto/caam/key_gen.c
- @@ -11,36 +11,6 @@
- #include "desc_constr.h"
- #include "key_gen.h"
-
- -/**
- - * split_key_len - Compute MDHA split key length for a given algorithm
- - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- - * SHA224, SHA384, SHA512.
- - *
- - * Return: MDHA split key length
- - */
- -static inline u32 split_key_len(u32 hash)
- -{
- - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- - u32 idx;
- -
- - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
- -
- - return (u32)(mdpadlen[idx] * 2);
- -}
- -
- -/**
- - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- - * SHA224, SHA384, SHA512.
- - *
- - * Return: MDHA split key pad length
- - */
- -static inline u32 split_key_pad_len(u32 hash)
- -{
- - return ALIGN(split_key_len(hash), 16);
- -}
- -
- void split_key_done(struct device *dev, u32 *desc, u32 err,
- void *context)
- {
- --- a/drivers/crypto/caam/key_gen.h
- +++ b/drivers/crypto/caam/key_gen.h
- @@ -6,6 +6,36 @@
- *
- */
-
- +/**
- + * split_key_len - Compute MDHA split key length for a given algorithm
- + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- + * SHA224, SHA384, SHA512.
- + *
- + * Return: MDHA split key length
- + */
- +static inline u32 split_key_len(u32 hash)
- +{
- + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- + u32 idx;
- +
- + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
- +
- + return (u32)(mdpadlen[idx] * 2);
- +}
- +
- +/**
- + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- + * SHA224, SHA384, SHA512.
- + *
- + * Return: MDHA split key pad length
- + */
- +static inline u32 split_key_pad_len(u32 hash)
- +{
- + return ALIGN(split_key_len(hash), 16);
- +}
- +
- struct split_key_result {
- struct completion completion;
- int err;
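
With the helpers now exported in the header, a quick worked example: for SHA-1 the MDHA pad is 20 bytes, so the split key is 2 * 20 = 40 bytes, padded to 48; for SHA-256 the pad is 32 bytes, giving 64, which is already 16-byte aligned:

/* Worked example; values follow from mdpadlen[] above. */
u32 k1 = split_key_len(OP_ALG_ALGSEL_SHA1);	/* 2 * 20 = 40          */
u32 p1 = split_key_pad_len(OP_ALG_ALGSEL_SHA1);	/* ALIGN(40, 16) = 48   */
u32 k2 = split_key_len(OP_ALG_ALGSEL_SHA256);	/* 2 * 32 = 64, aligned */
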
- --- a/drivers/crypto/caam/qi.c
- +++ b/drivers/crypto/caam/qi.c
- @@ -9,7 +9,7 @@
-
- #include <linux/cpumask.h>
- #include <linux/kthread.h>
- -#include <soc/fsl/qman.h>
- +#include <linux/fsl_qman.h>
-
- #include "regs.h"
- #include "qi.h"
- @@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache;
- int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
- {
- struct qm_fd fd;
- - dma_addr_t addr;
- int ret;
- int num_retries = 0;
-
- - qm_fd_clear_fd(&fd);
- - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
- -
- - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
- + fd.cmd = 0;
- + fd.format = qm_fd_compound;
- + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
- + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
- DMA_BIDIRECTIONAL);
- - if (dma_mapping_error(qidev, addr)) {
- + if (dma_mapping_error(qidev, fd.addr)) {
- dev_err(qidev, "DMA mapping error for QI enqueue request\n");
- return -EIO;
- }
- - qm_fd_addr_set64(&fd, addr);
-
- do {
- - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
- if (likely(!ret))
- return 0;
-
- @@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev
- EXPORT_SYMBOL(caam_qi_enqueue);
-
- static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
- - const union qm_mr_entry *msg)
- + const struct qm_mr_entry *msg)
- {
- const struct qm_fd *fd;
- struct caam_drv_req *drv_req;
- @@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p
-
- fd = &msg->ern.fd;
-
- - if (qm_fd_get_format(fd) != qm_fd_compound) {
- + if (fd->format != qm_fd_compound) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return;
- }
- @@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f
- req_fq->cb.fqs = NULL;
-
- ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
- - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
- + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
- + req_fq);
- if (ret) {
- dev_err(qidev, "Failed to create session req FQ\n");
- goto create_req_fq_fail;
- }
-
- - memset(&opts, 0, sizeof(opts));
- - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
- - QM_INITFQ_WE_CONTEXTB |
- - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
- - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
- - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
- - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
- - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
- + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
- + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
- + QM_INITFQ_WE_CGID;
- + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
- + opts.fqd.dest.channel = qm_channel_caam;
- + opts.fqd.dest.wq = 2;
- + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
- + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
- + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
- opts.fqd.cgid = qipriv.cgr.cgrid;
-
- ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
- @@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f
- return req_fq;
-
- init_req_fq_fail:
- - qman_destroy_fq(req_fq);
- + qman_destroy_fq(req_fq, 0);
- create_req_fq_fail:
- kfree(req_fq);
- return ERR_PTR(ret);
- @@ -275,7 +275,7 @@ empty_fq:
- if (ret)
- dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
-
- - qman_destroy_fq(fq);
- + qman_destroy_fq(fq, 0);
- kfree(fq);
-
- return ret;
- @@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq
- if (ret)
- return ret;
-
- - if (!qm_mcr_np_get(&np, frm_cnt))
- + if (!np.frm_cnt)
- break;
-
- msleep(20);
- @@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp
- struct caam_drv_req *drv_req;
- const struct qm_fd *fd;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
- - u32 status;
-
- if (caam_qi_napi_schedule(p, caam_napi))
- return qman_cb_dqrr_stop;
-
- fd = &dqrr->fd;
- - status = be32_to_cpu(fd->status);
- - if (unlikely(status))
- - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
- + if (unlikely(fd->status)) {
- + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
- + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
- +
- + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
- + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
- + dev_err(qidev, "Error: %#x in CAAM response FD\n",
- + fd->status);
- + }
-
- - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
- + if (unlikely(fd->format != qm_fd_compound)) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return qman_cb_dqrr_consume;
- }
-
- - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
- + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
- if (unlikely(!drv_req)) {
- dev_err(qidev,
- "Can't find original request for caam response\n");
- @@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp
- dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
- sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
-
- - drv_req->cbk(drv_req, status);
- + drv_req->cbk(drv_req, fd->status);
- return qman_cb_dqrr_consume;
- }
-
- @@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic
- return -ENODEV;
- }
-
- - memset(&opts, 0, sizeof(opts));
- - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
- - QM_INITFQ_WE_CONTEXTB |
- - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
- - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
- - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
- - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
- + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
- + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
- + QM_INITFQ_WE_CGID;
- + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
- + QM_FQCTRL_CGE;
- + opts.fqd.dest.channel = qman_affine_channel(cpu);
- + opts.fqd.dest.wq = 3;
- opts.fqd.cgid = qipriv.cgr.cgrid;
- opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
- QM_STASHING_EXCL_DATA;
- - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
- + opts.fqd.context_a.stashing.data_cl = 1;
- + opts.fqd.context_a.stashing.context_cl = 1;
-
- ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
- if (ret) {
- @@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev
-
- qipriv.cgr.cb = cgr_cb;
- memset(&opts, 0, sizeof(opts));
- - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
- - QM_CGR_WE_MODE);
- + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
- opts.cgr.cscn_en = QM_CGR_EN;
- opts.cgr.mode = QMAN_CGR_MODE_FRAME;
- qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
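
One behavioural detail worth noting in the dequeue callback above: CCB ICV-check failures are deliberately not logged, since a bad authentication tag is an expected outcome of AEAD decryption. The raw status still reaches the driver's callback, which can translate it; a sketch of such a callback (the -EBADMSG mapping is an assumption of this example, not part of the patch):

/* Hypothetical response callback: translate the FD status word. */
static void example_aead_done(struct caam_drv_req *drv_req, u32 status)
{
	int ecode = 0;

	if ((status & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
	    (status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		ecode = -EBADMSG;	/* authentication-only failure */
	else if (status)
		ecode = -EIO;

	/* ... complete the crypto request with ecode ... */
}
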
- --- a/drivers/crypto/caam/qi.h
- +++ b/drivers/crypto/caam/qi.h
- @@ -9,7 +9,7 @@
- #ifndef __QI_H__
- #define __QI_H__
-
- -#include <soc/fsl/qman.h>
- +#include <linux/fsl_qman.h>
- #include "compat.h"
- #include "desc.h"
- #include "desc_constr.h"
- --- a/drivers/crypto/caam/regs.h
- +++ b/drivers/crypto/caam/regs.h
- @@ -627,6 +627,8 @@ struct caam_job_ring {
- #define JRSTA_DECOERR_INVSIGN 0x86
- #define JRSTA_DECOERR_DSASIGN 0x87
-
- +#define JRSTA_QIERR_ERROR_MASK 0x00ff
- +
- #define JRSTA_CCBERR_JUMP 0x08000000
- #define JRSTA_CCBERR_INDEX_MASK 0xff00
- #define JRSTA_CCBERR_INDEX_SHIFT 8
- --- a/drivers/crypto/caam/sg_sw_qm.h
- +++ b/drivers/crypto/caam/sg_sw_qm.h
- @@ -34,46 +34,61 @@
- #ifndef __SG_SW_QM_H
- #define __SG_SW_QM_H
-
- -#include <soc/fsl/qman.h>
- +#include <linux/fsl_qman.h>
- #include "regs.h"
-
- +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
- +{
- + dma_addr_t addr = qm_sg_ptr->opaque;
- +
- + qm_sg_ptr->opaque = cpu_to_caam64(addr);
- + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
- +}
- +
- static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
- - u16 offset)
- + u32 len, u16 offset)
- {
- - qm_sg_entry_set64(qm_sg_ptr, dma);
- + qm_sg_ptr->addr = dma;
- + qm_sg_ptr->length = len;
- qm_sg_ptr->__reserved2 = 0;
- qm_sg_ptr->bpid = 0;
- - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
- + qm_sg_ptr->__reserved3 = 0;
- + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
- +
- + cpu_to_hw_sg(qm_sg_ptr);
- }
-
- static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
- - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
- - qm_sg_entry_set_len(qm_sg_ptr, len);
- + qm_sg_ptr->extension = 0;
- + qm_sg_ptr->final = 0;
- + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
- - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
- - qm_sg_entry_set_f(qm_sg_ptr, len);
- + qm_sg_ptr->extension = 0;
- + qm_sg_ptr->final = 1;
- + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
- - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
- - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
- + qm_sg_ptr->extension = 1;
- + qm_sg_ptr->final = 0;
- + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len,
- u16 offset)
- {
- - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
- - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
- - (len & QM_SG_LEN_MASK));
- + qm_sg_ptr->extension = 1;
- + qm_sg_ptr->final = 1;
- + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- /*
- @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
- struct qm_sg_entry *qm_sg_ptr, u16 offset)
- {
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
- - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
- +
- + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
- + qm_sg_ptr->final = 1;
- + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
- }
-
- #endif /* __SG_SW_QM_H */
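
As a usage sketch, these helpers build the two-entry compound S/G table that caam_qi_enqueue() wraps in a frame descriptor: entry 0 describes the output buffer and entry 1 the input, with the final bit set on the last entry (the DMA addresses and lengths below are placeholders):

/* Hypothetical compound S/G pair: output first, input last (F=1). */
struct qm_sg_entry sgt[2];

dma_to_qm_sg_one(&sgt[0], out_dma, out_len, 0);		/* output entry */
dma_to_qm_sg_one_last(&sgt[1], in_dma, in_len, 0);	/* input, final */
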
- --- a/drivers/crypto/talitos.c
- +++ b/drivers/crypto/talitos.c
- @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
- ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
- sg_count, areq->assoclen, tbl_off, elen);
-
- + /*
- + * On SEC 2.x+, the cipher-in length must cover only the ciphertext,
- + * while the extent field carries the ICV length.
- + */
- + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
- + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
- + desc->ptr[4].len = cpu_to_be16(cryptlen);
- +
- if (ret > 1) {
- tbl_off += ret;
- sync_needed = true;
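
To make the length split concrete: on a SEC 2.x+ IPsec decrypt with ICV checking enabled, pointer 4 carries only the ciphertext length, while the ICV bytes travel in the extent (the elen already passed to talitos_sg_map_ext() above). Illustrative numbers only:

/* Illustrative: 256 ciphertext bytes with a 12-byte truncated ICV. */
desc->ptr[4].len = cpu_to_be16(256);	/* ciphertext only, ICV excluded */
/* elen = 12 was supplied to talitos_sg_map_ext() as the extent.     */
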
|