706-fsl_ppfe-support-layercape.patch 294 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654
  1. From 50fb2f2e93aeae0baed156eb4794a2f358376b77 Mon Sep 17 00:00:00 2001
  2. From: Yangbo Lu <[email protected]>
  3. Date: Thu, 5 Jul 2018 17:19:20 +0800
  4. Subject: [PATCH 12/32] fsl_ppfe: support layerscape
  5. This is an integrated patch for layerscape pfe support.
  6. Signed-off-by: Calvin Johnson <[email protected]>
  7. Signed-off-by: Yangbo Lu <[email protected]>
  8. ---
  9. drivers/staging/fsl_ppfe/Kconfig | 20 +
  10. drivers/staging/fsl_ppfe/Makefile | 19 +
  11. drivers/staging/fsl_ppfe/TODO | 2 +
  12. drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
  13. .../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
  14. .../fsl_ppfe/include/pfe/cbus/class_csr.h | 289 ++
  15. .../fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
  16. .../staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
  17. .../staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
  18. .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
  19. .../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
  20. .../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
  21. drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
  22. drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
  23. drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
  24. drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
  25. drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
  26. drivers/staging/fsl_ppfe/pfe_eth.c | 2491 +++++++++++++++++
  27. drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
  28. drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
  29. drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
  30. drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++
  31. drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++
  32. drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
  33. drivers/staging/fsl_ppfe/pfe_hif_lib.c | 640 +++++
  34. drivers/staging/fsl_ppfe/pfe_hif_lib.h | 241 ++
  35. drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
  36. drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
  37. .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
  38. drivers/staging/fsl_ppfe/pfe_mod.c | 156 ++
  39. drivers/staging/fsl_ppfe/pfe_mod.h | 114 +
  40. drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
  41. drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++
  42. drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
  43. 34 files changed, 10472 insertions(+)
  44. create mode 100644 drivers/staging/fsl_ppfe/Kconfig
  45. create mode 100644 drivers/staging/fsl_ppfe/Makefile
  46. create mode 100644 drivers/staging/fsl_ppfe/TODO
  47. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
  48. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
  49. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
  50. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
  51. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
  52. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
  53. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
  54. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
  55. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
  56. create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
  57. create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
  58. create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
  59. create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
  60. create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
  61. create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
  62. create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
  63. create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
  64. create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
  65. create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
  66. create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
  67. create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
  68. create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
  69. create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
  70. create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
  71. create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
  72. create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
  73. create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
  74. create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
  75. create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
  76. create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
  77. create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
  78. --- /dev/null
  79. +++ b/drivers/staging/fsl_ppfe/Kconfig
  80. @@ -0,0 +1,20 @@
  81. +#
  82. +# Freescale Programmable Packet Forwarding Engine driver
  83. +#
  84. +config FSL_PPFE
  85. + bool "Freescale PPFE Driver"
  86. + default n
  87. + ---help---
  88. + Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
  89. + It provides two high performance ethernet interfaces.
  90. + This driver initializes, programs and controls the PPFE.
  91. + Use this driver to enable network connectivity on LS1012A platforms.
  92. +
  93. +if FSL_PPFE
  94. +
  95. +config FSL_PPFE_UTIL_DISABLED
  96. + bool "Disable PPFE UTIL Processor Engine"
  97. + ---help---
  98. + UTIL PE has to be enabled only if required.
  99. +
  100. +endif # FSL_PPFE
  101. --- /dev/null
  102. +++ b/drivers/staging/fsl_ppfe/Makefile
  103. @@ -0,0 +1,19 @@
  104. +#
  105. +# Makefile for Freescale PPFE driver
  106. +#
  107. +
  108. +ccflags-y += -I$(src)/include -I$(src)
  109. +
  110. +obj-m += pfe.o
  111. +
  112. +pfe-y += pfe_mod.o \
  113. + pfe_hw.o \
  114. + pfe_firmware.o \
  115. + pfe_ctrl.o \
  116. + pfe_hif.o \
  117. + pfe_hif_lib.o\
  118. + pfe_eth.o \
  119. + pfe_sysfs.o \
  120. + pfe_debugfs.o \
  121. + pfe_ls1012a_platform.o \
  122. + pfe_hal.o
  123. --- /dev/null
  124. +++ b/drivers/staging/fsl_ppfe/TODO
  125. @@ -0,0 +1,2 @@
  126. +TODO:
  127. + - provide pfe pe monitoring support
  128. --- /dev/null
  129. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
  130. @@ -0,0 +1,78 @@
  131. +/*
  132. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  133. + * Copyright 2017 NXP
  134. + *
  135. + * This program is free software; you can redistribute it and/or modify
  136. + * it under the terms of the GNU General Public License as published by
  137. + * the Free Software Foundation; either version 2 of the License, or
  138. + * (at your option) any later version.
  139. + *
  140. + * This program is distributed in the hope that it will be useful,
  141. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  142. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  143. + * GNU General Public License for more details.
  144. + *
  145. + * You should have received a copy of the GNU General Public License
  146. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  147. + */
  148. +
  149. +#ifndef _CBUS_H_
  150. +#define _CBUS_H_
  151. +
  152. +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
  153. +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
  154. +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
  155. +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
  156. +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
  157. +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
  158. +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
  159. +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
  160. +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
  161. +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
  162. +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
  163. +#define LMEM_SIZE 0x10000
  164. +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
  165. +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
  166. +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
  167. +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
  168. +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
  169. +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
  170. +
  171. +/*
  172. + * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
  173. + * XXX_MEM_ACCESS_ADDR register bit definitions.
  174. + */
  175. +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
  176. +#define PE_MEM_ACCESS_IMEM BIT(15)
  177. +#define PE_MEM_ACCESS_DMEM BIT(16)
  178. +
  179. +/* Byte Enables of the Internal memory access. These are interpreted in BE */
  180. +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
  181. + ({ typeof(size) size_ = (size); \
  182. + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
  183. +
  184. +#include "cbus/emac_mtip.h"
  185. +#include "cbus/gpi.h"
  186. +#include "cbus/bmu.h"
  187. +#include "cbus/hif.h"
  188. +#include "cbus/tmu_csr.h"
  189. +#include "cbus/class_csr.h"
  190. +#include "cbus/hif_nocpy.h"
  191. +#include "cbus/util_csr.h"
  192. +
  193. +/* PFE cores states */
  194. +#define CORE_DISABLE 0x00000000
  195. +#define CORE_ENABLE 0x00000001
  196. +#define CORE_SW_RESET 0x00000002
  197. +
  198. +/* LMEM defines */
  199. +#define LMEM_HDR_SIZE 0x0010
  200. +#define LMEM_BUF_SIZE_LN2 0x7
  201. +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
  202. +
  203. +/* DDR defines */
  204. +#define DDR_HDR_SIZE 0x0100
  205. +#define DDR_BUF_SIZE_LN2 0xb
  206. +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
  207. +
  208. +#endif /* _CBUS_H_ */
  209. --- /dev/null
  210. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
  211. @@ -0,0 +1,55 @@
  212. +/*
  213. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  214. + * Copyright 2017 NXP
  215. + *
  216. + * This program is free software; you can redistribute it and/or modify
  217. + * it under the terms of the GNU General Public License as published by
  218. + * the Free Software Foundation; either version 2 of the License, or
  219. + * (at your option) any later version.
  220. + *
  221. + * This program is distributed in the hope that it will be useful,
  222. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  223. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  224. + * GNU General Public License for more details.
  225. + *
  226. + * You should have received a copy of the GNU General Public License
  227. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  228. + */
  229. +
  230. +#ifndef _BMU_H_
  231. +#define _BMU_H_
  232. +
  233. +#define BMU_VERSION 0x000
  234. +#define BMU_CTRL 0x004
  235. +#define BMU_UCAST_CONFIG 0x008
  236. +#define BMU_UCAST_BASE_ADDR 0x00c
  237. +#define BMU_BUF_SIZE 0x010
  238. +#define BMU_BUF_CNT 0x014
  239. +#define BMU_THRES 0x018
  240. +#define BMU_INT_SRC 0x020
  241. +#define BMU_INT_ENABLE 0x024
  242. +#define BMU_ALLOC_CTRL 0x030
  243. +#define BMU_FREE_CTRL 0x034
  244. +#define BMU_FREE_ERR_ADDR 0x038
  245. +#define BMU_CURR_BUF_CNT 0x03c
  246. +#define BMU_MCAST_CNT 0x040
  247. +#define BMU_MCAST_ALLOC_CTRL 0x044
  248. +#define BMU_REM_BUF_CNT 0x048
  249. +#define BMU_LOW_WATERMARK 0x050
  250. +#define BMU_HIGH_WATERMARK 0x054
  251. +#define BMU_INT_MEM_ACCESS 0x100
  252. +
  253. +struct BMU_CFG {
  254. + unsigned long baseaddr;
  255. + u32 count;
  256. + u32 size;
  257. + u32 low_watermark;
  258. + u32 high_watermark;
  259. +};
  260. +
  261. +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
  262. +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
  263. +
  264. +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
  265. +
  266. +#endif /* _BMU_H_ */
  267. --- /dev/null
  268. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
  269. @@ -0,0 +1,289 @@
  270. +/*
  271. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  272. + * Copyright 2017 NXP
  273. + *
  274. + * This program is free software; you can redistribute it and/or modify
  275. + * it under the terms of the GNU General Public License as published by
  276. + * the Free Software Foundation; either version 2 of the License, or
  277. + * (at your option) any later version.
  278. + *
  279. + * This program is distributed in the hope that it will be useful,
  280. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  281. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  282. + * GNU General Public License for more details.
  283. + *
  284. + * You should have received a copy of the GNU General Public License
  285. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  286. + */
  287. +
  288. +#ifndef _CLASS_CSR_H_
  289. +#define _CLASS_CSR_H_
  290. +
  291. +/* @file class_csr.h.
  292. + * class_csr - block containing all the classifier control and status register.
  293. + * Mapped on CBUS and accessible from all PE's and ARM.
  294. + */
  295. +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
  296. +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
  297. +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
  298. +
  299. +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
  300. +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
  301. +
  302. +/* LMEM header size for the Classifier block.\ Data in the LMEM
  303. + * is written from this offset.
  304. + */
  305. +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
  306. +
  307. +/* DDR header size for the Classifier block.\ Data in the DDR
  308. + * is written from this offset.
  309. + */
  310. +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
  311. +
  312. +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
  313. +
  314. +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
  315. +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
  316. +
  317. +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
  318. +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
  319. +
  320. +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
  321. +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
  322. +
  323. +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
  324. +
  325. +/* @name Class PE memory access. Allows external PE's and HOST to
  326. + * read/write PMEM/DMEM memory ranges for each classifier PE.
  327. + */
  328. +/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
  329. + * See \ref XXX_MEM_ACCESS_ADDR for details.
  330. + */
  331. +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
  332. +
  333. +/* Internal Memory Access Write Data [31:0] */
  334. +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
  335. +
  336. +/* Internal Memory Access Read Data [31:0] */
  337. +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
  338. +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
  339. +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
  340. +
  341. +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
  342. +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
  343. +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
  344. +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
  345. +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
  346. +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
  347. +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
  348. +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
  349. +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
  350. +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
  351. +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
  352. +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
  353. +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
  354. +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
  355. +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
  356. +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
  357. +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
  358. +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
  359. +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
  360. +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
  361. +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
  362. +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
  363. +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
  364. +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
  365. +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
  366. +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
  367. +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
  368. +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
  369. +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
  370. +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
  371. +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
  372. +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
  373. +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
  374. +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
  375. +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
  376. +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
  377. +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
  378. +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
  379. +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
  380. +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
  381. +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
  382. +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
  383. +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
  384. +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
  385. +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
  386. +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
  387. +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
  388. +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
  389. +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
  390. +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
  391. +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
  392. +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
  393. +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
  394. +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
  395. +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
  396. +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
  397. +
  398. +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
  399. +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
  400. +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
  401. +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
  402. +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
  403. +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
  404. +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
  405. +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
  406. +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
  407. +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
  408. +
  409. +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
  410. +
  411. +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
  412. +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
  413. +
  414. +/* (route_entry_size[9:0], route_hash_size[23:16]
  415. + * (this is actually ln2(size)))
  416. + */
  417. +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
  418. +
  419. +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
  420. +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
  421. +
  422. +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
  423. +
  424. +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
  425. +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
  426. +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
  427. +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
  428. +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
  429. +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
  430. +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
  431. +
  432. +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
  433. +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
  434. +/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
  435. +
  436. +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
  437. +
  438. +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
  439. +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
  440. +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
  441. +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
  442. +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
  443. +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
  444. +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
  445. +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
  446. +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
  447. +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
  448. +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
  449. +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
  450. +
  451. +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
  452. +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
  453. +
  454. +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
  455. +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
  456. +
  457. +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
  458. +
  459. +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
  460. +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
  461. +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
  462. +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
  463. +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
  464. +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
  465. +
  466. +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
  467. +
  468. +/* CLASS defines */
  469. +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
  470. +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
  471. +
  472. +/* Can be configured */
  473. +#define CLASS_PBUF0_BASE_ADDR 0x000
  474. +/* Can be configured */
  475. +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
  476. +/* Can be configured */
  477. +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
  478. +/* Can be configured */
  479. +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
  480. +
  481. +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
  482. + CLASS_PBUF_HEADER_OFFSET)
  483. +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
  484. + CLASS_PBUF_HEADER_OFFSET)
  485. +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
  486. + CLASS_PBUF_HEADER_OFFSET)
  487. +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
  488. + CLASS_PBUF_HEADER_OFFSET)
  489. +
  490. +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
  491. + CLASS_PBUF0_BASE_ADDR)
  492. +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
  493. + CLASS_PBUF2_BASE_ADDR)
  494. +
  495. +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
  496. + CLASS_PBUF0_HEADER_BASE_ADDR)
  497. +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
  498. + CLASS_PBUF2_HEADER_BASE_ADDR)
  499. +
  500. +#define CLASS_ROUTE_SIZE 128
  501. +#define CLASS_MAX_ROUTE_SIZE 256
  502. +#define CLASS_ROUTE_HASH_BITS 20
  503. +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
  504. +
  505. +/* Can be configured */
  506. +#define CLASS_ROUTE0_BASE_ADDR 0x400
  507. +/* Can be configured */
  508. +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
  509. +/* Can be configured */
  510. +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
  511. +/* Can be configured */
  512. +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
  513. +
  514. +#define CLASS_SA_SIZE 128
  515. +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
  516. +/* not used */
  517. +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
  518. +/* not used */
  519. +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
  520. +/* not used */
  521. +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
  522. +
  523. +/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
  524. +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
  525. + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
  526. +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
  527. + CLASS_SA_SIZE))
  528. +
  529. +#define TWO_LEVEL_ROUTE BIT(0)
  530. +#define PHYNO_IN_HASH BIT(1)
  531. +#define HW_ROUTE_FETCH BIT(3)
  532. +#define HW_BRIDGE_FETCH BIT(5)
  533. +#define IP_ALIGNED BIT(6)
  534. +#define ARC_HIT_CHECK_EN BIT(7)
  535. +#define CLASS_TOE BIT(11)
  536. +#define HASH_NORMAL (0 << 12)
  537. +#define HASH_CRC_PORT BIT(12)
  538. +#define HASH_CRC_IP (2 << 12)
  539. +#define HASH_CRC_PORT_IP (3 << 12)
  540. +#define QB2BUS_LE BIT(15)
  541. +
  542. +#define TCP_CHKSUM_DROP BIT(0)
  543. +#define UDP_CHKSUM_DROP BIT(1)
  544. +#define IPV4_CHKSUM_DROP BIT(9)
  545. +
  546. +/*CLASS_HIF_PARSE bits*/
  547. +#define HIF_PKT_CLASS_EN BIT(0)
  548. +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
  549. +
  550. +struct class_cfg {
  551. + u32 toe_mode;
  552. + unsigned long route_table_baseaddr;
  553. + u32 route_table_hash_bits;
  554. + u32 pe_sys_clk_ratio;
  555. + u32 resume;
  556. +};
  557. +
  558. +#endif /* _CLASS_CSR_H_ */
  559. --- /dev/null
  560. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
  561. @@ -0,0 +1,242 @@
  562. +/*
  563. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  564. + * Copyright 2017 NXP
  565. + *
  566. + * This program is free software; you can redistribute it and/or modify
  567. + * it under the terms of the GNU General Public License as published by
  568. + * the Free Software Foundation; either version 2 of the License, or
  569. + * (at your option) any later version.
  570. + *
  571. + * This program is distributed in the hope that it will be useful,
  572. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  573. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  574. + * GNU General Public License for more details.
  575. + *
  576. + * You should have received a copy of the GNU General Public License
  577. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  578. + */
  579. +
  580. +#ifndef _EMAC_H_
  581. +#define _EMAC_H_
  582. +
  583. +#include <linux/ethtool.h>
  584. +
  585. +#define EMAC_IEVENT_REG 0x004
  586. +#define EMAC_IMASK_REG 0x008
  587. +#define EMAC_R_DES_ACTIVE_REG 0x010
  588. +#define EMAC_X_DES_ACTIVE_REG 0x014
  589. +#define EMAC_ECNTRL_REG 0x024
  590. +#define EMAC_MII_DATA_REG 0x040
  591. +#define EMAC_MII_CTRL_REG 0x044
  592. +#define EMAC_MIB_CTRL_STS_REG 0x064
  593. +#define EMAC_RCNTRL_REG 0x084
  594. +#define EMAC_TCNTRL_REG 0x0C4
  595. +#define EMAC_PHY_ADDR_LOW 0x0E4
  596. +#define EMAC_PHY_ADDR_HIGH 0x0E8
  597. +#define EMAC_GAUR 0x120
  598. +#define EMAC_GALR 0x124
  599. +#define EMAC_TFWR_STR_FWD 0x144
  600. +#define EMAC_RX_SECTION_FULL 0x190
  601. +#define EMAC_RX_SECTION_EMPTY 0x194
  602. +#define EMAC_TX_SECTION_EMPTY 0x1A0
  603. +#define EMAC_TRUNC_FL 0x1B0
  604. +
  605. +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
  606. +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
  607. +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
  608. +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
  609. +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
  610. +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
  611. +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
  612. +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
  613. +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
  614. +#define RMON_T_COL 0x224 /* RMON TX collision count */
  615. +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
  616. +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
  617. +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
  618. +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
  619. +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
  620. +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
  621. +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
  622. +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
  623. +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
  624. +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
  625. +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
  626. +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
  627. +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
  628. +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
  629. +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
  630. +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
  631. +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
  632. +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
  633. +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
  634. +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
  635. +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
  636. +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
  637. +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
  638. +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
  639. +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
  640. +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
  641. +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
  642. +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
  643. +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
  644. +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
  645. +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
  646. +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
  647. +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
  648. +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
  649. +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
  650. +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
  651. +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
  652. +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
  653. +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
  654. +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
  655. +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
  656. +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
  657. +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
  658. +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
  659. +
  660. +#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
  661. +#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
  662. +
  663. +/* GEMAC definitions and settings */
  664. +
  665. +#define EMAC_PORT_0 0
  666. +#define EMAC_PORT_1 1
  667. +
  668. +/* GEMAC Bit definitions */
  669. +#define EMAC_IEVENT_HBERR 0x80000000
  670. +#define EMAC_IEVENT_BABR 0x40000000
  671. +#define EMAC_IEVENT_BABT 0x20000000
  672. +#define EMAC_IEVENT_GRA 0x10000000
  673. +#define EMAC_IEVENT_TXF 0x08000000
  674. +#define EMAC_IEVENT_TXB 0x04000000
  675. +#define EMAC_IEVENT_RXF 0x02000000
  676. +#define EMAC_IEVENT_RXB 0x01000000
  677. +#define EMAC_IEVENT_MII 0x00800000
  678. +#define EMAC_IEVENT_EBERR 0x00400000
  679. +#define EMAC_IEVENT_LC 0x00200000
  680. +#define EMAC_IEVENT_RL 0x00100000
  681. +#define EMAC_IEVENT_UN 0x00080000
  682. +
  683. +#define EMAC_IMASK_HBERR 0x80000000
  684. +#define EMAC_IMASK_BABR 0x40000000
  685. +#define EMAC_IMASKT_BABT 0x20000000
  686. +#define EMAC_IMASK_GRA 0x10000000
  687. +#define EMAC_IMASKT_TXF 0x08000000
  688. +#define EMAC_IMASK_TXB 0x04000000
  689. +#define EMAC_IMASKT_RXF 0x02000000
  690. +#define EMAC_IMASK_RXB 0x01000000
  691. +#define EMAC_IMASK_MII 0x00800000
  692. +#define EMAC_IMASK_EBERR 0x00400000
  693. +#define EMAC_IMASK_LC 0x00200000
  694. +#define EMAC_IMASKT_RL 0x00100000
  695. +#define EMAC_IMASK_UN 0x00080000
  696. +
  697. +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
  698. +#define EMAC_RCNTRL_LOOP 0x00000001
  699. +#define EMAC_RCNTRL_DRT 0x00000002
  700. +#define EMAC_RCNTRL_MII_MODE 0x00000004
  701. +#define EMAC_RCNTRL_PROM 0x00000008
  702. +#define EMAC_RCNTRL_BC_REJ 0x00000010
  703. +#define EMAC_RCNTRL_FCE 0x00000020
  704. +#define EMAC_RCNTRL_RGMII 0x00000040
  705. +#define EMAC_RCNTRL_SGMII 0x00000080
  706. +#define EMAC_RCNTRL_RMII 0x00000100
  707. +#define EMAC_RCNTRL_RMII_10T 0x00000200
  708. +#define EMAC_RCNTRL_CRC_FWD 0x00004000
  709. +
  710. +#define EMAC_TCNTRL_GTS 0x00000001
  711. +#define EMAC_TCNTRL_HBC 0x00000002
  712. +#define EMAC_TCNTRL_FDEN 0x00000004
  713. +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
  714. +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
  715. +
  716. +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
  717. +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
  718. +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
  719. +#define EMAC_ECNTRL_SLEEP 0x00000008
  720. +#define EMAC_ECNTRL_SPEED 0x00000020
  721. +#define EMAC_ECNTRL_DBSWAP 0x00000100
  722. +
  723. +#define EMAC_X_WMRK_STRFWD 0x00000100
  724. +
  725. +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
  726. +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
  727. +
  728. +#define EMAC_RX_SECTION_EMPTY_V 0x00010006
  729. +/*
  730. + * The possible operating speeds of the MAC, currently supporting 10, 100 and
  731. + * 1000Mb modes.
  732. + */
  733. +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
  734. +
  735. +/* MII-related definitions */
  736. +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
  737. +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
  738. +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
  739. +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
  740. +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
  741. +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
  742. +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
  743. +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
  744. +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
  745. +
  746. +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
  747. +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
  748. +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
  749. +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
  750. +
  751. +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
  752. + EMAC_MII_DATA_RA_SHIFT)
  753. +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
  754. + EMAC_MII_DATA_PA_SHIFT)
  755. +#define EMAC_MII_DATA(v) ((v) & 0xffff)
  756. +
  757. +#define EMAC_MII_SPEED_SHIFT 1
  758. +#define EMAC_HOLDTIME_SHIFT 8
  759. +#define EMAC_HOLDTIME_MASK 0x7
  760. +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
  761. + EMAC_HOLDTIME_SHIFT)
  762. +
  763. +/*
  764. + * The Address organisation for the MAC device. All addresses are split into
  765. + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
  766. + * the address and the other field are the high order bits - this may be 16-bits
  767. + * in the case of MAC addresses, or 32-bits for the hash address.
  768. + * In terms of memory storage, the first item (bottom) is assumed to be at a
  769. + * lower address location than 'top'. i.e. top should be at address location of
  770. + * 'bottom' + 4 bytes.
  771. + */
  772. +struct pfe_mac_addr {
  773. + u32 bottom; /* Lower 32-bits of address. */
  774. + u32 top; /* Upper 32-bits of address. */
  775. +};
  776. +
  777. +/*
  778. + * The following is the organisation of the address filters section of the MAC
  779. + * registers. The Cadence MAC contains four possible specific address match
  780. + * addresses, if an incoming frame corresponds to any one of these four
  781. + * addresses then the frame will be copied to memory.
  782. + * It is not necessary for all four of the address match registers to be
  783. + * programmed, this is application dependent.
  784. + */
  785. +struct spec_addr {
  786. + struct pfe_mac_addr one; /* Specific address register 1. */
  787. + struct pfe_mac_addr two; /* Specific address register 2. */
  788. + struct pfe_mac_addr three; /* Specific address register 3. */
  789. + struct pfe_mac_addr four; /* Specific address register 4. */
  790. +};
  791. +
  792. +struct gemac_cfg {
  793. + u32 mode;
  794. + u32 speed;
  795. + u32 duplex;
  796. +};
  797. +
  798. +/* EMAC Hash size */
  799. +#define EMAC_HASH_REG_BITS 64
  800. +
  801. +#define EMAC_SPEC_ADDR_MAX 4
  802. +
  803. +#endif /* _EMAC_H_ */
  804. --- /dev/null
  805. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
  806. @@ -0,0 +1,86 @@
  807. +/*
  808. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  809. + * Copyright 2017 NXP
  810. + *
  811. + * This program is free software; you can redistribute it and/or modify
  812. + * it under the terms of the GNU General Public License as published by
  813. + * the Free Software Foundation; either version 2 of the License, or
  814. + * (at your option) any later version.
  815. + *
  816. + * This program is distributed in the hope that it will be useful,
  817. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  818. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  819. + * GNU General Public License for more details.
  820. + *
  821. + * You should have received a copy of the GNU General Public License
  822. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  823. + */
  824. +
  825. +#ifndef _GPI_H_
  826. +#define _GPI_H_
  827. +
  828. +#define GPI_VERSION 0x00
  829. +#define GPI_CTRL 0x04
  830. +#define GPI_RX_CONFIG 0x08
  831. +#define GPI_HDR_SIZE 0x0c
  832. +#define GPI_BUF_SIZE 0x10
  833. +#define GPI_LMEM_ALLOC_ADDR 0x14
  834. +#define GPI_LMEM_FREE_ADDR 0x18
  835. +#define GPI_DDR_ALLOC_ADDR 0x1c
  836. +#define GPI_DDR_FREE_ADDR 0x20
  837. +#define GPI_CLASS_ADDR 0x24
  838. +#define GPI_DRX_FIFO 0x28
  839. +#define GPI_TRX_FIFO 0x2c
  840. +#define GPI_INQ_PKTPTR 0x30
  841. +#define GPI_DDR_DATA_OFFSET 0x34
  842. +#define GPI_LMEM_DATA_OFFSET 0x38
  843. +#define GPI_TMLF_TX 0x4c
  844. +#define GPI_DTX_ASEQ 0x50
  845. +#define GPI_FIFO_STATUS 0x54
  846. +#define GPI_FIFO_DEBUG 0x58
  847. +#define GPI_TX_PAUSE_TIME 0x5c
  848. +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
  849. +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
  850. +#define GPI_TOE_CHKSUM_EN 0x68
  851. +#define GPI_OVERRUN_DROPCNT 0x6c
  852. +#define GPI_CSR_MTIP_PAUSE_REG 0x74
  853. +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
  854. +#define GPI_CSR_RX_CNT 0x7c
  855. +#define GPI_CSR_TX_CNT 0x80
  856. +#define GPI_CSR_DEBUG1 0x84
  857. +#define GPI_CSR_DEBUG2 0x88
  858. +
  859. +struct gpi_cfg {
  860. + u32 lmem_rtry_cnt;
  861. + u32 tmlf_txthres;
  862. + u32 aseq_len;
  863. + u32 mtip_pause_reg;
  864. +};
  865. +
  866. +/* GPI commons defines */
  867. +#define GPI_LMEM_BUF_EN 0x1
  868. +#define GPI_DDR_BUF_EN 0x1
  869. +
  870. +/* EGPI 1 defines */
  871. +#define EGPI1_LMEM_RTRY_CNT 0x40
  872. +#define EGPI1_TMLF_TXTHRES 0xBC
  873. +#define EGPI1_ASEQ_LEN 0x50
  874. +
  875. +/* EGPI 2 defines */
  876. +#define EGPI2_LMEM_RTRY_CNT 0x40
  877. +#define EGPI2_TMLF_TXTHRES 0xBC
  878. +#define EGPI2_ASEQ_LEN 0x40
  879. +
  880. +/* EGPI 3 defines */
  881. +#define EGPI3_LMEM_RTRY_CNT 0x40
  882. +#define EGPI3_TMLF_TXTHRES 0xBC
  883. +#define EGPI3_ASEQ_LEN 0x40
  884. +
  885. +/* HGPI defines */
  886. +#define HGPI_LMEM_RTRY_CNT 0x40
  887. +#define HGPI_TMLF_TXTHRES 0xBC
  888. +#define HGPI_ASEQ_LEN 0x40
  889. +
  890. +#define EGPI_PAUSE_TIME 0x000007D0
  891. +#define EGPI_PAUSE_ENABLE 0x40000000
  892. +#endif /* _GPI_H_ */
  893. --- /dev/null
  894. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
  895. @@ -0,0 +1,100 @@
  896. +/*
  897. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  898. + * Copyright 2017 NXP
  899. + *
  900. + * This program is free software; you can redistribute it and/or modify
  901. + * it under the terms of the GNU General Public License as published by
  902. + * the Free Software Foundation; either version 2 of the License, or
  903. + * (at your option) any later version.
  904. + *
  905. + * This program is distributed in the hope that it will be useful,
  906. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  907. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  908. + * GNU General Public License for more details.
  909. + *
  910. + * You should have received a copy of the GNU General Public License
  911. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  912. + */
  913. +
  914. +#ifndef _HIF_H_
  915. +#define _HIF_H_
  916. +
  917. +/* @file hif.h.
  918. + * hif - PFE hif block control and status register.
  919. + * Mapped on CBUS and accessible from all PE's and ARM.
  920. + */
  921. +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
  922. +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
  923. +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
  924. +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
  925. +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
  926. +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
  927. +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
  928. +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
  929. +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
  930. +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
  931. +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
  932. +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
  933. +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
  934. +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
  935. +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
  936. +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
  937. +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
  938. +
  939. +/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
  940. +#define HIF_INT BIT(0)
  941. +#define HIF_RXBD_INT BIT(1)
  942. +#define HIF_RXPKT_INT BIT(2)
  943. +#define HIF_TXBD_INT BIT(3)
  944. +#define HIF_TXPKT_INT BIT(4)
  945. +
  946. +/* HIF_TX_CTRL bits */
  947. +#define HIF_CTRL_DMA_EN BIT(0)
  948. +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
  949. +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
  950. +
  951. +/* HIF_RX_STATUS bits */
  952. +#define BDP_CSR_RX_DMA_ACTV BIT(16)
  953. +
  954. +/* HIF_INT_ENABLE bits */
  955. +#define HIF_INT_EN BIT(0)
  956. +#define HIF_RXBD_INT_EN BIT(1)
  957. +#define HIF_RXPKT_INT_EN BIT(2)
  958. +#define HIF_TXBD_INT_EN BIT(3)
  959. +#define HIF_TXPKT_INT_EN BIT(4)
  960. +
  961. +/* HIF_POLL_CTRL bits*/
  962. +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
  963. +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
  964. +
  965. +/* HIF_INT_COAL bits*/
  966. +#define HIF_INT_COAL_ENABLE BIT(31)
  967. +
  968. +/* Buffer descriptor control bits */
  969. +#define BD_CTRL_BUFLEN_MASK 0x3fff
  970. +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
  971. +#define BD_CTRL_CBD_INT_EN BIT(16)
  972. +#define BD_CTRL_PKT_INT_EN BIT(17)
  973. +#define BD_CTRL_LIFM BIT(18)
  974. +#define BD_CTRL_LAST_BD BIT(19)
  975. +#define BD_CTRL_DIR BIT(20)
  976. +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
  977. +#define BD_CTRL_PKT_XFER BIT(24)
  978. +#define BD_CTRL_DESC_EN BIT(31)
  979. +#define BD_CTRL_PARSE_DISABLE BIT(25)
  980. +#define BD_CTRL_BRFETCH_DISABLE BIT(26)
  981. +#define BD_CTRL_RTFETCH_DISABLE BIT(27)
  982. +
  983. +/* Buffer descriptor status bits*/
  984. +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
  985. +#define BD_STATUS_DIR_PROC_ID BIT(16)
  986. +#define BD_STATUS_CONN_ID_EN BIT(17)
  987. +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
  988. +#define BD_STATUS_LE_DATA BIT(21)
  989. +#define BD_STATUS_CHKSUM_EN BIT(22)
  990. +
  991. +/* HIF Buffer descriptor status bits */
  992. +#define DIR_PROC_ID BIT(16)
  993. +#define PROC_ID(id) ((id) << 18)
  994. +
  995. +#endif /* _HIF_H_ */
  996. --- /dev/null
  997. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
  998. @@ -0,0 +1,50 @@
  999. +/*
  1000. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1001. + * Copyright 2017 NXP
  1002. + *
  1003. + * This program is free software; you can redistribute it and/or modify
  1004. + * it under the terms of the GNU General Public License as published by
  1005. + * the Free Software Foundation; either version 2 of the License, or
  1006. + * (at your option) any later version.
  1007. + *
  1008. + * This program is distributed in the hope that it will be useful,
  1009. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1010. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1011. + * GNU General Public License for more details.
  1012. + *
  1013. + * You should have received a copy of the GNU General Public License
  1014. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1015. + */
  1016. +
  1017. +#ifndef _HIF_NOCPY_H_
  1018. +#define _HIF_NOCPY_H_
  1019. +
  1020. +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
  1021. +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
  1022. +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
  1023. +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
  1024. +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
  1025. +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
  1026. +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
  1027. +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
  1028. +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
  1029. +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
  1030. +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
  1031. +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
  1032. +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
  1033. +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
  1034. +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
  1035. +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
  1036. +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
  1037. +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
  1038. +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
  1039. +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
  1040. +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
  1041. +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
  1042. +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
  1043. +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
  1044. +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
  1045. +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
  1046. +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
  1047. +
  1048. +#endif /* _HIF_NOCPY_H_ */
  1049. --- /dev/null
  1050. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
  1051. @@ -0,0 +1,168 @@
  1052. +/*
  1053. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1054. + * Copyright 2017 NXP
  1055. + *
  1056. + * This program is free software; you can redistribute it and/or modify
  1057. + * it under the terms of the GNU General Public License as published by
  1058. + * the Free Software Foundation; either version 2 of the License, or
  1059. + * (at your option) any later version.
  1060. + *
  1061. + * This program is distributed in the hope that it will be useful,
  1062. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1063. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1064. + * GNU General Public License for more details.
  1065. + *
  1066. + * You should have received a copy of the GNU General Public License
  1067. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1068. + */
  1069. +
  1070. +#ifndef _TMU_CSR_H_
  1071. +#define _TMU_CSR_H_
  1072. +
  1073. +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
  1074. +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
  1075. +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
  1076. +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
  1077. +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
  1078. +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
  1079. +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
  1080. +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
  1081. +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
  1082. +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
  1083. +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
  1084. +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
  1085. +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
  1086. +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
  1087. +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
  1088. +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
  1089. +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
  1090. +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
  1091. +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
  1092. +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
  1093. +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
  1094. +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
  1095. +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
  1096. +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
  1097. +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
  1098. +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
  1099. +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
  1100. +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
  1101. +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
  1102. +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
  1103. +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
  1104. +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
  1105. +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
  1106. +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
  1107. +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
  1108. +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
  1109. +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
  1110. +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
  1111. +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
  1112. +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
  1113. +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
  1114. +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
  1115. +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
  1116. +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
  1117. +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
  1118. +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
  1119. +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
  1120. + * This is a global Enable for all schedulers in PHY0
  1121. + */
  1122. +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
  1123. +
  1124. +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
  1125. +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
  1126. +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
  1127. +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
  1128. +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
  1129. +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
  1130. +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
  1131. +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
  1132. +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
  1133. +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
  1134. +
  1135. +/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
  1136. + * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
  1137. + * the internal memory. This address is used to access both the PM and DM of
  1138. + * all the PE's
  1139. + */
  1140. +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
  1141. +
  1142. +/* Internal Memory Access Write Data */
  1143. +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
  1144. +/* Internal Memory Access Read Data. The commands are blocked
  1145. + * at the mem_access only
  1146. + */
  1147. +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
  1148. +
  1149. +/* [31:0] PHY0 in queue address (must be initialized with one of the
  1150. + * xxx_INQ_PKTPTR cbus addresses)
  1151. + */
  1152. +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
  1153. +/* [31:0] PHY1 in queue address (must be initialized with one of the
  1154. + * xxx_INQ_PKTPTR cbus addresses)
  1155. + */
  1156. +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
  1157. +/* [31:0] PHY2 in queue address (must be initialized with one of the
  1158. + * xxx_INQ_PKTPTR cbus addresses)
  1159. + */
  1160. +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
  1161. +/* [31:0] PHY3 in queue address (must be initialized with one of the
  1162. + * xxx_INQ_PKTPTR cbus addresses)
  1163. + */
  1164. +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
  1165. +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
  1166. +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
  1167. +
  1168. +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
  1169. +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
  1170. +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
  1171. +
  1172. +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
  1173. +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
  1174. +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
  1175. +/* [31:0] PHY4 in queue address (must be initialized with one of the
  1176. + * xxx_INQ_PKTPTR cbus addresses)
  1177. + */
  1178. +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
  1179. +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
  1180. + * This is a global Enable for all schedulers in PHY1
  1181. + */
  1182. +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
  1183. +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
  1184. + * This is a global Enable for all schedulers in PHY2
  1185. + */
  1186. +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
  1187. +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
  1188. + * This is a global Enable for all schedulers in PHY3
  1189. + */
  1190. +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
  1191. +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
  1192. +/* [31:0] PHY5 in queue address (must be initialized with one of the
  1193. + * xxx_INQ_PKTPTR cbus addresses)
  1194. + */
  1195. +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
  1196. +
  1197. +#define SW_RESET BIT(0) /* Global software reset */
  1198. +#define INQ_RESET BIT(2)
  1199. +#define TEQ_RESET BIT(3)
  1200. +#define TDQ_RESET BIT(4)
  1201. +#define PE_RESET BIT(5)
  1202. +#define MEM_INIT BIT(6)
  1203. +#define MEM_INIT_DONE BIT(7)
  1204. +#define LLM_INIT BIT(8)
  1205. +#define LLM_INIT_DONE BIT(9)
  1206. +#define ECC_MEM_INIT_DONE BIT(10)
  1207. +
  1208. +struct tmu_cfg {
  1209. + u32 pe_sys_clk_ratio;
  1210. + unsigned long llm_base_addr;
  1211. + u32 llm_queue_len;
  1212. +};
  1213. +
  1214. +/* Not HW related for pfe_ctrl / pfe common defines */
  1215. +#define DEFAULT_MAX_QDEPTH 80
  1216. +#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host tx qos */
  1217. +#define DEFAULT_TMU3_QDEPTH 127
  1218. +
  1219. +#endif /* _TMU_CSR_H_ */
  1220. --- /dev/null
  1221. +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
  1222. @@ -0,0 +1,61 @@
  1223. +/*
  1224. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1225. + * Copyright 2017 NXP
  1226. + *
  1227. + * This program is free software; you can redistribute it and/or modify
  1228. + * it under the terms of the GNU General Public License as published by
  1229. + * the Free Software Foundation; either version 2 of the License, or
  1230. + * (at your option) any later version.
  1231. + *
  1232. + * This program is distributed in the hope that it will be useful,
  1233. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1234. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1235. + * GNU General Public License for more details.
  1236. + *
  1237. + * You should have received a copy of the GNU General Public License
  1238. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1239. + */
  1240. +
  1241. +#ifndef _UTIL_CSR_H_
  1242. +#define _UTIL_CSR_H_
  1243. +
  1244. +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
  1245. +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
  1246. +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
  1247. +
  1248. +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
  1249. +
  1250. +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
  1251. +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
  1252. +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
  1253. +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
  1254. +
  1255. +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
  1256. +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
  1257. +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
  1258. +
  1259. +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
  1260. +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
  1261. +
  1262. +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
  1263. +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
  1264. +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
  1265. +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
  1266. +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
  1267. +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
  1268. +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
  1269. +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
  1270. +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
  1271. +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
  1272. +
  1273. +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
  1274. +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
  1275. +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
  1276. +
  1277. +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
  1278. +
  1279. +struct util_cfg {
  1280. + u32 pe_sys_clk_ratio;
  1281. +};
  1282. +
  1283. +#endif /* _UTIL_CSR_H_ */
  1284. --- /dev/null
  1285. +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
  1286. @@ -0,0 +1,372 @@
  1287. +/*
  1288. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1289. + * Copyright 2017 NXP
  1290. + *
  1291. + * This program is free software; you can redistribute it and/or modify
  1292. + * it under the terms of the GNU General Public License as published by
  1293. + * the Free Software Foundation; either version 2 of the License, or
  1294. + * (at your option) any later version.
  1295. + *
  1296. + * This program is distributed in the hope that it will be useful,
  1297. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1298. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1299. + * GNU General Public License for more details.
  1300. + *
  1301. + * You should have received a copy of the GNU General Public License
  1302. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1303. + */
  1304. +
  1305. +#ifndef _PFE_H_
  1306. +#define _PFE_H_
  1307. +
  1308. +#include "cbus.h"
  1309. +
  1310. +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
  1311. +/*
  1312. + * Only valid for mem access register interface
  1313. + */
  1314. +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
  1315. +#define CLASS_DMEM_SIZE 0x00002000
  1316. +#define CLASS_IMEM_SIZE 0x00008000
  1317. +
  1318. +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
  1319. +/*
  1320. + * Only valid for mem access register interface
  1321. + */
  1322. +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
  1323. +#define TMU_DMEM_SIZE 0x00000800
  1324. +#define TMU_IMEM_SIZE 0x00002000
  1325. +
  1326. +#define UTIL_DMEM_BASE_ADDR 0x00000000
  1327. +#define UTIL_DMEM_SIZE 0x00002000
  1328. +
  1329. +#define PE_LMEM_BASE_ADDR 0xc3010000
  1330. +#define PE_LMEM_SIZE 0x8000
  1331. +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
  1332. +
  1333. +#define DMEM_BASE_ADDR 0x00000000
  1334. +#define DMEM_SIZE 0x2000 /* TMU has less... */
  1335. +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
  1336. +
  1337. +#define PMEM_BASE_ADDR 0x00010000
  1338. +#define PMEM_SIZE 0x8000 /* TMU has less... */
  1339. +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
  1340. +
  1341. +/* These check memory ranges from PE point of view/memory map */
  1342. +#define IS_DMEM(addr, len) \
  1343. + ({ typeof(addr) addr_ = (addr); \
  1344. + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
  1345. + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
  1346. +
  1347. +#define IS_PMEM(addr, len) \
  1348. + ({ typeof(addr) addr_ = (addr); \
  1349. + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
  1350. + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
  1351. +
  1352. +#define IS_PE_LMEM(addr, len) \
  1353. + ({ typeof(addr) addr_ = (addr); \
  1354. + ((unsigned long)(addr_) >= \
  1355. + PE_LMEM_BASE_ADDR) && \
  1356. + (((unsigned long)(addr_) + \
  1357. + (len)) <= PE_LMEM_END); })
  1358. +
  1359. +#define IS_PFE_LMEM(addr, len) \
  1360. + ({ typeof(addr) addr_ = (addr); \
  1361. + ((unsigned long)(addr_) >= \
  1362. + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
  1363. + (((unsigned long)(addr_) + (len)) <= \
  1364. + CBUS_VIRT_TO_PFE(LMEM_END)); })
  1365. +
  1366. +#define __IS_PHYS_DDR(addr, len) \
  1367. + ({ typeof(addr) addr_ = (addr); \
  1368. + ((unsigned long)(addr_) >= \
  1369. + DDR_PHYS_BASE_ADDR) && \
  1370. + (((unsigned long)(addr_) + (len)) <= \
  1371. + DDR_PHYS_END); })
  1372. +
  1373. +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
  1374. +
  1375. +/*
  1376. + * If using a run-time virtual address for the cbus base address use this code
  1377. + */
  1378. +extern void *cbus_base_addr;
  1379. +extern void *ddr_base_addr;
  1380. +extern unsigned long ddr_phys_base_addr;
  1381. +extern unsigned int ddr_size;
  1382. +
  1383. +#define CBUS_BASE_ADDR cbus_base_addr
  1384. +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
  1385. +#define DDR_BASE_ADDR ddr_base_addr
  1386. +#define DDR_SIZE ddr_size
  1387. +
  1388. +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
  1389. +
  1390. +#define LS1012A_PFE_RESET_WA /*
  1391. + * PFE doesn't have global reset and re-init
  1392. + * should take care of a few things to make PFE
  1393. + * functional after reset
  1394. + */
  1395. +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
  1396. + * as seen by PE's.
  1397. + */
  1398. +/* CBUS physical base address as seen by PE's. */
  1399. +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
  1400. +
  1401. +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
  1402. +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
  1403. +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
  1404. + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
  1405. +/* Translates to PFE address map */
  1406. +
  1407. +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
  1408. +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
  1409. +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
  1410. +
  1411. +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
  1412. + PFE_CBUS_PHYS_BASE_ADDR)
  1413. +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
  1414. + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
  1415. +
  1416. +/* The below part of the code is used in QOS control driver from host */
  1417. +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
  1418. + * pe's
  1419. + */
  1420. +
  1421. +enum {
  1422. + CLASS0_ID = 0,
  1423. + CLASS1_ID,
  1424. + CLASS2_ID,
  1425. + CLASS3_ID,
  1426. + CLASS4_ID,
  1427. + CLASS5_ID,
  1428. + TMU0_ID,
  1429. + TMU1_ID,
  1430. + TMU2_ID,
  1431. + TMU3_ID,
  1432. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1433. + UTIL_ID,
  1434. +#endif
  1435. + MAX_PE
  1436. +};
  1437. +
  1438. +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
  1439. + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
  1440. + BIT(CLASS4_ID) | BIT(CLASS5_ID))
  1441. +#define CLASS_MAX_ID CLASS5_ID
  1442. +
  1443. +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
  1444. + BIT(TMU3_ID))
  1445. +
  1446. +#define TMU_MAX_ID TMU3_ID
  1447. +
  1448. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1449. +#define UTIL_MASK BIT(UTIL_ID)
  1450. +#endif
  1451. +
  1452. +struct pe_status {
  1453. + u32 cpu_state;
  1454. + u32 activity_counter;
  1455. + u32 rx;
  1456. + union {
  1457. + u32 tx;
  1458. + u32 tmu_qstatus;
  1459. + };
  1460. + u32 drop;
  1461. +#if defined(CFG_PE_DEBUG)
  1462. + u32 debug_indicator;
  1463. + u32 debug[16];
  1464. +#endif
  1465. +} __aligned(16);
  1466. +
  1467. +struct pe_sync_mailbox {
  1468. + u32 stop;
  1469. + u32 stopped;
  1470. +};
  1471. +
  1472. +/* Drop counter definitions */
  1473. +
  1474. +#define CLASS_NUM_DROP_COUNTERS 13
  1475. +#define UTIL_NUM_DROP_COUNTERS 8
  1476. +
  1477. +/* PE information.
  1478. + * Structure containing PE's specific information. It is used to create
  1479. + * generic C functions common to all PE's.
  1480. + * Before using the library functions this structure needs to be initialized
  1481. + * with the different registers virtual addresses
  1482. + * (according to the ARM MMU mapping). The default initialization supports a
  1483. + * virtual == physical mapping.
  1484. + */
  1485. +struct pe_info {
  1486. + u32 dmem_base_addr; /* PE's dmem base address */
  1487. + u32 pmem_base_addr; /* PE's pmem base address */
  1488. + u32 pmem_size; /* PE's pmem size */
  1489. +
  1490. + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
  1491. + * address
  1492. + */
  1493. + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
  1494. + * address
  1495. + */
  1496. + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
  1497. + * address
  1498. + */
  1499. +};
  1500. +
  1501. +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
  1502. +void pe_lmem_write(u32 *src, u32 len, u32 offset);
  1503. +
  1504. +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
  1505. +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
  1506. +
  1507. +u32 pe_pmem_read(int id, u32 addr, u8 size);
  1508. +
  1509. +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
  1510. +u32 pe_dmem_read(int id, u32 addr, u8 size);
  1511. +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
  1512. +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
  1513. +void class_bus_write(u32 val, u32 addr, u8 size);
  1514. +u32 class_bus_read(u32 addr, u8 size);
  1515. +
  1516. +#define class_bus_readl(addr) class_bus_read(addr, 4)
  1517. +#define class_bus_readw(addr) class_bus_read(addr, 2)
  1518. +#define class_bus_readb(addr) class_bus_read(addr, 1)
  1519. +
  1520. +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
  1521. +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
  1522. +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
  1523. +
  1524. +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
  1525. +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
  1526. +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
  1527. +
  1528. +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
  1529. +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
  1530. +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
  1531. +
  1532. +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
  1533. +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
  1534. + struct device *dev);
  1535. +
  1536. +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
  1537. + unsigned int ddr_size);
  1538. +void bmu_init(void *base, struct BMU_CFG *cfg);
  1539. +void bmu_reset(void *base);
  1540. +void bmu_enable(void *base);
  1541. +void bmu_disable(void *base);
  1542. +void bmu_set_config(void *base, struct BMU_CFG *cfg);
  1543. +
  1544. +/*
  1545. + * An enumerated type for loopback values. This can be one of three values, no
  1546. + * loopback -normal operation, local loopback with internal loopback module of
  1547. + * MAC or PHY loopback which is through the external PHY.
  1548. + */
  1549. +#ifndef __MAC_LOOP_ENUM__
  1550. +#define __MAC_LOOP_ENUM__
  1551. +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
  1552. +#endif
  1553. +
  1554. +void gemac_init(void *base, void *config);
  1555. +void gemac_disable_rx_checksum_offload(void *base);
  1556. +void gemac_enable_rx_checksum_offload(void *base);
  1557. +void gemac_set_mdc_div(void *base, int mdc_div);
  1558. +void gemac_set_speed(void *base, enum mac_speed gem_speed);
  1559. +void gemac_set_duplex(void *base, int duplex);
  1560. +void gemac_set_mode(void *base, int mode);
  1561. +void gemac_enable(void *base);
  1562. +void gemac_tx_disable(void *base);
  1563. +void gemac_tx_enable(void *base);
  1564. +void gemac_disable(void *base);
  1565. +void gemac_reset(void *base);
  1566. +void gemac_set_address(void *base, struct spec_addr *addr);
  1567. +struct spec_addr gemac_get_address(void *base);
  1568. +void gemac_set_loop(void *base, enum mac_loop gem_loop);
  1569. +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
  1570. +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
  1571. +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
  1572. +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
  1573. +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
  1574. + unsigned int entry_index);
  1575. +void gemac_clear_laddr1(void *base);
  1576. +void gemac_clear_laddr2(void *base);
  1577. +void gemac_clear_laddr3(void *base);
  1578. +void gemac_clear_laddr4(void *base);
  1579. +void gemac_clear_laddrN(void *base, unsigned int entry_index);
  1580. +struct pfe_mac_addr gemac_get_hash(void *base);
  1581. +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
  1582. +struct pfe_mac_addr gem_get_laddr1(void *base);
  1583. +struct pfe_mac_addr gem_get_laddr2(void *base);
  1584. +struct pfe_mac_addr gem_get_laddr3(void *base);
  1585. +struct pfe_mac_addr gem_get_laddr4(void *base);
  1586. +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
  1587. +void gemac_set_config(void *base, struct gemac_cfg *cfg);
  1588. +void gemac_allow_broadcast(void *base);
  1589. +void gemac_no_broadcast(void *base);
  1590. +void gemac_enable_1536_rx(void *base);
  1591. +void gemac_disable_1536_rx(void *base);
  1592. +void gemac_enable_rx_jmb(void *base);
  1593. +void gemac_disable_rx_jmb(void *base);
  1594. +void gemac_enable_stacked_vlan(void *base);
  1595. +void gemac_disable_stacked_vlan(void *base);
  1596. +void gemac_enable_pause_rx(void *base);
  1597. +void gemac_disable_pause_rx(void *base);
  1598. +void gemac_enable_copy_all(void *base);
  1599. +void gemac_disable_copy_all(void *base);
  1600. +void gemac_set_bus_width(void *base, int width);
  1601. +void gemac_set_wol(void *base, u32 wol_conf);
  1602. +
  1603. +void gpi_init(void *base, struct gpi_cfg *cfg);
  1604. +void gpi_reset(void *base);
  1605. +void gpi_enable(void *base);
  1606. +void gpi_disable(void *base);
  1607. +void gpi_set_config(void *base, struct gpi_cfg *cfg);
  1608. +
  1609. +void class_init(struct class_cfg *cfg);
  1610. +void class_reset(void);
  1611. +void class_enable(void);
  1612. +void class_disable(void);
  1613. +void class_set_config(struct class_cfg *cfg);
  1614. +
  1615. +void tmu_reset(void);
  1616. +void tmu_init(struct tmu_cfg *cfg);
  1617. +void tmu_enable(u32 pe_mask);
  1618. +void tmu_disable(u32 pe_mask);
  1619. +u32 tmu_qstatus(u32 if_id);
  1620. +u32 tmu_pkts_processed(u32 if_id);
  1621. +
  1622. +void util_init(struct util_cfg *cfg);
  1623. +void util_reset(void);
  1624. +void util_enable(void);
  1625. +void util_disable(void);
  1626. +
  1627. +void hif_init(void);
  1628. +void hif_tx_enable(void);
  1629. +void hif_tx_disable(void);
  1630. +void hif_rx_enable(void);
  1631. +void hif_rx_disable(void);
  1632. +
  1633. +/* Get Chip Revision level
  1634. + *
  1635. + */
  1636. +static inline unsigned int CHIP_REVISION(void)
  1637. +{
  1638. + /* For LS1012A, always return 1 */
  1639. + return 1;
  1640. +}
  1641. +
  1642. +/* Start HIF rx DMA
  1643. + *
  1644. + */
  1645. +static inline void hif_rx_dma_start(void)
  1646. +{
  1647. + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
  1648. +}
  1649. +
  1650. +/* Start HIF tx DMA
  1651. + *
  1652. + */
  1653. +static inline void hif_tx_dma_start(void)
  1654. +{
  1655. + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
  1656. +}
  1657. +
  1658. +#endif /* _PFE_H_ */
  1659. --- /dev/null
  1660. +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
  1661. @@ -0,0 +1,238 @@
  1662. +/*
  1663. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1664. + * Copyright 2017 NXP
  1665. + *
  1666. + * This program is free software; you can redistribute it and/or modify
  1667. + * it under the terms of the GNU General Public License as published by
  1668. + * the Free Software Foundation; either version 2 of the License, or
  1669. + * (at your option) any later version.
  1670. + *
  1671. + * This program is distributed in the hope that it will be useful,
  1672. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1673. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1674. + * GNU General Public License for more details.
  1675. + *
  1676. + * You should have received a copy of the GNU General Public License
  1677. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1678. + */
  1679. +
  1680. +#include <linux/kernel.h>
  1681. +#include <linux/sched.h>
  1682. +#include <linux/module.h>
  1683. +#include <linux/list.h>
  1684. +#include <linux/kthread.h>
  1685. +
  1686. +#include "pfe_mod.h"
  1687. +#include "pfe_ctrl.h"
  1688. +
  1689. +#define TIMEOUT_MS 1000
  1690. +
  1691. +int relax(unsigned long end)
  1692. +{
  1693. + if (time_after(jiffies, end)) {
  1694. + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
  1695. + return -1;
  1696. +
  1697. + if (need_resched())
  1698. + schedule();
  1699. + }
  1700. +
  1701. + return 0;
  1702. +}
  1703. +
  1704. +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
  1705. +{
  1706. + int id;
  1707. +
  1708. + mutex_lock(&ctrl->mutex);
  1709. +
  1710. + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
  1711. + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
  1712. +
  1713. + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
  1714. + if (id == TMU2_ID)
  1715. + continue;
  1716. + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
  1717. + }
  1718. +
  1719. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1720. + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
  1721. +#endif
  1722. + mutex_unlock(&ctrl->mutex);
  1723. +}
  1724. +
  1725. +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
  1726. +{
  1727. + int pe_mask = CLASS_MASK | TMU_MASK;
  1728. +
  1729. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1730. + pe_mask |= UTIL_MASK;
  1731. +#endif
  1732. + mutex_lock(&ctrl->mutex);
  1733. + pe_start(&pfe->ctrl, pe_mask);
  1734. + mutex_unlock(&ctrl->mutex);
  1735. +}
  1736. +
  1737. +/* PE sync stop.
  1738. + * Stops packet processing for a list of PE's (specified using a bitmask).
  1739. + * The caller must hold ctrl->mutex.
  1740. + *
  1741. + * @param ctrl Control context
  1742. + * @param pe_mask Mask of PE id's to stop
  1743. + *
  1744. + */
  1745. +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
  1746. +{
  1747. + struct pe_sync_mailbox *mbox;
  1748. + int pe_stopped = 0;
  1749. + unsigned long end = jiffies + 2;
  1750. + int i;
  1751. +
  1752. + pe_mask &= 0x2FF; /* Exclude Util + TMU2 */
  1753. +
  1754. + for (i = 0; i < MAX_PE; i++)
  1755. + if (pe_mask & (1 << i)) {
  1756. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1757. +
  1758. + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
  1759. + long)&mbox->stop, 4);
  1760. + }
  1761. +
  1762. + while (pe_stopped != pe_mask) {
  1763. + for (i = 0; i < MAX_PE; i++)
  1764. + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
  1765. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1766. +
  1767. + if (pe_dmem_read(i, (unsigned
  1768. + long)&mbox->stopped, 4) &
  1769. + cpu_to_be32(0x1))
  1770. + pe_stopped |= (1 << i);
  1771. + }
  1772. +
  1773. + if (relax(end) < 0)
  1774. + goto err;
  1775. + }
  1776. +
  1777. + return 0;
  1778. +
  1779. +err:
  1780. + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
  1781. +
  1782. + for (i = 0; i < MAX_PE; i++)
  1783. + if (pe_mask & (1 << i)) {
  1784. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1785. +
  1786. + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
  1787. + long)&mbox->stop, 4);
  1788. + }
  1789. +
  1790. + return -EIO;
  1791. +}
  1792. +
  1793. +/* PE start.
  1794. + * Starts packet processing for a list of PE's (specified using a bitmask).
  1795. + * The caller must hold ctrl->mutex.
  1796. + *
  1797. + * @param ctrl Control context
  1798. + * @param pe_mask Mask of PE id's to start
  1799. + *
  1800. + */
  1801. +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
  1802. +{
  1803. + struct pe_sync_mailbox *mbox;
  1804. + int i;
  1805. +
  1806. + for (i = 0; i < MAX_PE; i++)
  1807. + if (pe_mask & (1 << i)) {
  1808. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1809. +
  1810. + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
  1811. + long)&mbox->stop, 4);
  1812. + }
  1813. +}
  1814. +
  1815. +/* This function will ensure all PEs are put in to idle state */
  1816. +int pe_reset_all(struct pfe_ctrl *ctrl)
  1817. +{
  1818. + struct pe_sync_mailbox *mbox;
  1819. + int pe_stopped = 0;
  1820. + unsigned long end = jiffies + 2;
  1821. + int i;
  1822. + int pe_mask = CLASS_MASK | TMU_MASK;
  1823. +
  1824. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1825. + pe_mask |= UTIL_MASK;
  1826. +#endif
  1827. +
  1828. + for (i = 0; i < MAX_PE; i++)
  1829. + if (pe_mask & (1 << i)) {
  1830. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1831. +
  1832. + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
  1833. + long)&mbox->stop, 4);
  1834. + }
  1835. +
  1836. + while (pe_stopped != pe_mask) {
  1837. + for (i = 0; i < MAX_PE; i++)
  1838. + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
  1839. + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
  1840. +
  1841. + if (pe_dmem_read(i, (unsigned long)
  1842. + &mbox->stopped, 4) &
  1843. + cpu_to_be32(0x1))
  1844. + pe_stopped |= (1 << i);
  1845. + }
  1846. +
  1847. + if (relax(end) < 0)
  1848. + goto err;
  1849. + }
  1850. +
  1851. + return 0;
  1852. +
  1853. +err:
  1854. + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
  1855. + return -EIO;
  1856. +}
  1857. +
  1858. +int pfe_ctrl_init(struct pfe *pfe)
  1859. +{
  1860. + struct pfe_ctrl *ctrl = &pfe->ctrl;
  1861. + int id;
  1862. +
  1863. + pr_info("%s\n", __func__);
  1864. +
  1865. + mutex_init(&ctrl->mutex);
  1866. + spin_lock_init(&ctrl->lock);
  1867. +
  1868. + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
  1869. + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
  1870. + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
  1871. + }
  1872. +
  1873. + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
  1874. + if (id == TMU2_ID)
  1875. + continue;
  1876. + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
  1877. + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
  1878. + }
  1879. +
  1880. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  1881. + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
  1882. + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
  1883. +#endif
  1884. +
  1885. + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
  1886. + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
  1887. + ROUTE_TABLE_BASEADDR;
  1888. +
  1889. + ctrl->dev = pfe->dev;
  1890. +
  1891. + pr_info("%s finished\n", __func__);
  1892. +
  1893. + return 0;
  1894. +}
  1895. +
  1896. +void pfe_ctrl_exit(struct pfe *pfe)
  1897. +{
  1898. + pr_info("%s\n", __func__);
  1899. +}
  1900. --- /dev/null
  1901. +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
  1902. @@ -0,0 +1,112 @@
  1903. +/*
  1904. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  1905. + * Copyright 2017 NXP
  1906. + *
  1907. + * This program is free software; you can redistribute it and/or modify
  1908. + * it under the terms of the GNU General Public License as published by
  1909. + * the Free Software Foundation; either version 2 of the License, or
  1910. + * (at your option) any later version.
  1911. + *
  1912. + * This program is distributed in the hope that it will be useful,
  1913. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1914. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1915. + * GNU General Public License for more details.
  1916. + *
  1917. + * You should have received a copy of the GNU General Public License
  1918. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  1919. + */
  1920. +
  1921. +#ifndef _PFE_CTRL_H_
  1922. +#define _PFE_CTRL_H_
  1923. +
  1924. +#include <linux/dmapool.h>
  1925. +
  1926. +#include "pfe_mod.h"
  1927. +#include "pfe/pfe.h"
  1928. +
  1929. +#define DMA_BUF_SIZE_128 0x80 /* enough for 1 conntrack */
  1930. +#define DMA_BUF_SIZE_256 0x100
  1931. +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
  1932. +#define DMA_BUF_SIZE_512 0x200
  1933. +/* 512bytes dma allocated buffers used by rtp relay feature */
  1934. +#define DMA_BUF_MIN_ALIGNMENT 8
  1935. +#define DMA_BUF_BOUNDARY (4 * 1024)
  1936. +/* bursts can not cross 4k boundary */
  1937. +
  1938. +#define CMD_TX_ENABLE 0x0501
  1939. +#define CMD_TX_DISABLE 0x0502
  1940. +
  1941. +#define CMD_RX_LRO 0x0011
  1942. +#define CMD_PKTCAP_ENABLE 0x0d01
  1943. +#define CMD_QM_EXPT_RATE 0x020c
  1944. +
  1945. +#define CLASS_DM_SH_STATIC (0x800)
  1946. +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
  1947. +#define CLASS_DM_SYNC_MBOX (0x808)
  1948. +#define CLASS_DM_MSG_MBOX (0x810)
  1949. +#define CLASS_DM_DROP_CNTR (0x820)
  1950. +#define CLASS_DM_RESUME (0x854)
  1951. +#define CLASS_DM_PESTATUS (0x860)
  1952. +
  1953. +#define TMU_DM_SH_STATIC (0x80)
  1954. +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
  1955. +#define TMU_DM_SYNC_MBOX (0x88)
  1956. +#define TMU_DM_MSG_MBOX (0x90)
  1957. +#define TMU_DM_RESUME (0xA0)
  1958. +#define TMU_DM_PESTATUS (0xB0)
  1959. +#define TMU_DM_CONTEXT (0x300)
  1960. +#define TMU_DM_TX_TRANS (0x480)
  1961. +
  1962. +#define UTIL_DM_SH_STATIC (0x0)
  1963. +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
  1964. +#define UTIL_DM_SYNC_MBOX (0x8)
  1965. +#define UTIL_DM_MSG_MBOX (0x10)
  1966. +#define UTIL_DM_DROP_CNTR (0x20)
  1967. +#define UTIL_DM_RESUME (0x40)
  1968. +#define UTIL_DM_PESTATUS (0x50)
  1969. +
  1970. +struct pfe_ctrl {
  1971. + struct mutex mutex; /* to serialize pfe control access */
  1972. + spinlock_t lock;
  1973. +
  1974. + void *dma_pool;
  1975. + void *dma_pool_512;
  1976. + void *dma_pool_128;
  1977. +
  1978. + struct device *dev;
  1979. +
  1980. + void *hash_array_baseaddr; /*
  1981. + * Virtual base address of
  1982. + * the conntrack hash array
  1983. + */
  1984. + unsigned long hash_array_phys_baseaddr; /*
  1985. + * Physical base address of
  1986. + * the conntrack hash array
  1987. + */
  1988. +
  1989. + int (*event_cb)(u16, u16, u16*);
  1990. +
  1991. + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
  1992. + * Sync mailbox PFE
  1993. + * internal address,
  1994. + * initialized
  1995. + * when parsing elf images
  1996. + */
  1997. + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
  1998. + * Msg mailbox PFE internal
  1999. + * address, initialized
  2000. + * when parsing elf images
  2001. + */
  2002. + unsigned int sys_clk; /* AXI clock value, in KHz */
  2003. +};
  2004. +
  2005. +int pfe_ctrl_init(struct pfe *pfe);
  2006. +void pfe_ctrl_exit(struct pfe *pfe);
  2007. +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
  2008. +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
  2009. +int pe_reset_all(struct pfe_ctrl *ctrl);
  2010. +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
  2011. +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
  2012. +int relax(unsigned long end);
  2013. +
  2014. +#endif /* _PFE_CTRL_H_ */
  2015. --- /dev/null
  2016. +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
  2017. @@ -0,0 +1,111 @@
  2018. +/*
  2019. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  2020. + * Copyright 2017 NXP
  2021. + *
  2022. + * This program is free software; you can redistribute it and/or modify
  2023. + * it under the terms of the GNU General Public License as published by
  2024. + * the Free Software Foundation; either version 2 of the License, or
  2025. + * (at your option) any later version.
  2026. + *
  2027. + * This program is distributed in the hope that it will be useful,
  2028. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2029. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2030. + * GNU General Public License for more details.
  2031. + *
  2032. + * You should have received a copy of the GNU General Public License
  2033. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  2034. + */
  2035. +
  2036. +#include <linux/module.h>
  2037. +#include <linux/debugfs.h>
  2038. +#include <linux/platform_device.h>
  2039. +
  2040. +#include "pfe_mod.h"
  2041. +
  2042. +static int dmem_show(struct seq_file *s, void *unused)
  2043. +{
  2044. + u32 dmem_addr, val;
  2045. + int id = (long int)s->private;
  2046. + int i;
  2047. +
  2048. + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
  2049. + seq_printf(s, "%04x:", dmem_addr);
  2050. +
  2051. + for (i = 0; i < 8; i++) {
  2052. + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
  2053. + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
  2054. + (val >> 8) & 0xff, (val >> 16) & 0xff,
  2055. + (val >> 24) & 0xff);
  2056. + }
  2057. +
  2058. + seq_puts(s, "\n");
  2059. + }
  2060. +
  2061. + return 0;
  2062. +}
  2063. +
  2064. +static int dmem_open(struct inode *inode, struct file *file)
  2065. +{
  2066. + return single_open(file, dmem_show, inode->i_private);
  2067. +}
  2068. +
  2069. +static const struct file_operations dmem_fops = {
  2070. + .open = dmem_open,
  2071. + .read = seq_read,
  2072. + .llseek = seq_lseek,
  2073. + .release = single_release,
  2074. +};
  2075. +
  2076. +int pfe_debugfs_init(struct pfe *pfe)
  2077. +{
  2078. + struct dentry *d;
  2079. +
  2080. + pr_info("%s\n", __func__);
  2081. +
  2082. + pfe->dentry = debugfs_create_dir("pfe", NULL);
  2083. + if (IS_ERR_OR_NULL(pfe->dentry))
  2084. + goto err_dir;
  2085. +
  2086. + d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
  2087. + &dmem_fops);
  2088. + if (IS_ERR_OR_NULL(d))
  2089. + goto err_pe;
  2090. +
  2091. + d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
  2092. + &dmem_fops);
  2093. + if (IS_ERR_OR_NULL(d))
  2094. + goto err_pe;
  2095. +
  2096. + d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
  2097. + &dmem_fops);
  2098. + if (IS_ERR_OR_NULL(d))
  2099. + goto err_pe;
  2100. +
  2101. + d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
  2102. + &dmem_fops);
  2103. + if (IS_ERR_OR_NULL(d))
  2104. + goto err_pe;
  2105. +
  2106. + d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
  2107. + &dmem_fops);
  2108. + if (IS_ERR_OR_NULL(d))
  2109. + goto err_pe;
  2110. +
  2111. + d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
  2112. + &dmem_fops);
  2113. + if (IS_ERR_OR_NULL(d))
  2114. + goto err_pe;
  2115. +
  2116. + return 0;
  2117. +
  2118. +err_pe:
  2119. + debugfs_remove_recursive(pfe->dentry);
  2120. +
  2121. +err_dir:
  2122. + return -1;
  2123. +}
  2124. +
  2125. +void pfe_debugfs_exit(struct pfe *pfe)
  2126. +{
  2127. + debugfs_remove_recursive(pfe->dentry);
  2128. +}
  2129. --- /dev/null
  2130. +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
  2131. @@ -0,0 +1,25 @@
  2132. +/*
  2133. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  2134. + * Copyright 2017 NXP
  2135. + *
  2136. + * This program is free software; you can redistribute it and/or modify
  2137. + * it under the terms of the GNU General Public License as published by
  2138. + * the Free Software Foundation; either version 2 of the License, or
  2139. + * (at your option) any later version.
  2140. + *
  2141. + * This program is distributed in the hope that it will be useful,
  2142. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2143. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2144. + * GNU General Public License for more details.
  2145. + *
  2146. + * You should have received a copy of the GNU General Public License
  2147. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  2148. + */
  2149. +
  2150. +#ifndef _PFE_DEBUGFS_H_
  2151. +#define _PFE_DEBUGFS_H_
  2152. +
  2153. +int pfe_debugfs_init(struct pfe *pfe);
  2154. +void pfe_debugfs_exit(struct pfe *pfe);
  2155. +
  2156. +#endif /* _PFE_DEBUGFS_H_ */
  2157. --- /dev/null
  2158. +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
  2159. @@ -0,0 +1,2491 @@
  2160. +/*
  2161. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  2162. + * Copyright 2017 NXP
  2163. + *
  2164. + * This program is free software; you can redistribute it and/or modify
  2165. + * it under the terms of the GNU General Public License as published by
  2166. + * the Free Software Foundation; either version 2 of the License, or
  2167. + * (at your option) any later version.
  2168. + *
  2169. + * This program is distributed in the hope that it will be useful,
  2170. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2171. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2172. + * GNU General Public License for more details.
  2173. + *
  2174. + * You should have received a copy of the GNU General Public License
  2175. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  2176. + */
  2177. +
  2178. +/* @pfe_eth.c.
  2179. + * Ethernet driver to handle the exception path for PFE.
  2180. + * - uses HIF functions to send/receive packets.
  2181. + * - uses ctrl function to start/stop interfaces.
  2182. + * - uses direct register accesses to control phy operation.
  2183. + */
  2184. +#include <linux/version.h>
  2185. +#include <linux/kernel.h>
  2186. +#include <linux/interrupt.h>
  2187. +#include <linux/dma-mapping.h>
  2188. +#include <linux/dmapool.h>
  2189. +#include <linux/netdevice.h>
  2190. +#include <linux/etherdevice.h>
  2191. +#include <linux/ethtool.h>
  2192. +#include <linux/mii.h>
  2193. +#include <linux/phy.h>
  2194. +#include <linux/timer.h>
  2195. +#include <linux/hrtimer.h>
  2196. +#include <linux/platform_device.h>
  2197. +
  2198. +#include <net/ip.h>
  2199. +#include <net/sock.h>
  2200. +
  2201. +#include <linux/io.h>
  2202. +#include <asm/irq.h>
  2203. +#include <linux/delay.h>
  2204. +#include <linux/regmap.h>
  2205. +#include <linux/i2c.h>
  2206. +
  2207. +#if defined(CONFIG_NF_CONNTRACK_MARK)
  2208. +#include <net/netfilter/nf_conntrack.h>
  2209. +#endif
  2210. +
  2211. +#include "pfe_mod.h"
  2212. +#include "pfe_eth.h"
  2213. +
  2214. +static void *cbus_emac_base[3];
  2215. +static void *cbus_gpi_base[3];
  2216. +
  2217. +/* Forward Declaration */
  2218. +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
  2219. +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
  2220. +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
  2221. + from_tx, int n_desc);
  2222. +
  2223. +unsigned int gemac_regs[] = {
  2224. + 0x0004, /* Interrupt event */
  2225. + 0x0008, /* Interrupt mask */
  2226. + 0x0024, /* Ethernet control */
  2227. + 0x0064, /* MIB Control/Status */
  2228. + 0x0084, /* Receive control/status */
  2229. + 0x00C4, /* Transmit control */
  2230. + 0x00E4, /* Physical address low */
  2231. + 0x00E8, /* Physical address high */
  2232. + 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
  2233. + 0x0190, /* Receive FIFO Section Full Threshold */
  2234. + 0x01A0, /* Transmit FIFO Section Empty Threshold */
  2235. + 0x01B0, /* Frame Truncation Length */
  2236. +};
  2237. +
  2238. +/********************************************************************/
  2239. +/* SYSFS INTERFACE */
  2240. +/********************************************************************/
  2241. +
  2242. +#ifdef PFE_ETH_NAPI_STATS
  2243. +/*
  2244. + * pfe_eth_show_napi_stats
  2245. + */
  2246. +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
  2247. + struct device_attribute *attr,
  2248. + char *buf)
  2249. +{
  2250. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2251. + ssize_t len = 0;
  2252. +
  2253. + len += sprintf(buf + len, "sched: %u\n",
  2254. + priv->napi_counters[NAPI_SCHED_COUNT]);
  2255. + len += sprintf(buf + len, "poll: %u\n",
  2256. + priv->napi_counters[NAPI_POLL_COUNT]);
  2257. + len += sprintf(buf + len, "packet: %u\n",
  2258. + priv->napi_counters[NAPI_PACKET_COUNT]);
  2259. + len += sprintf(buf + len, "budget: %u\n",
  2260. + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
  2261. + len += sprintf(buf + len, "desc: %u\n",
  2262. + priv->napi_counters[NAPI_DESC_COUNT]);
  2263. +
  2264. + return len;
  2265. +}
  2266. +
  2267. +/*
  2268. + * pfe_eth_set_napi_stats
  2269. + */
  2270. +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
  2271. + struct device_attribute *attr,
  2272. + const char *buf, size_t count)
  2273. +{
  2274. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2275. +
  2276. + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
  2277. +
  2278. + return count;
  2279. +}
  2280. +#endif
  2281. +#ifdef PFE_ETH_TX_STATS
  2282. +/* pfe_eth_show_tx_stats
  2283. + *
  2284. + */
  2285. +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
  2286. + struct device_attribute *attr,
  2287. + char *buf)
  2288. +{
  2289. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2290. + ssize_t len = 0;
  2291. + int i;
  2292. +
  2293. + len += sprintf(buf + len, "TX queues stats:\n");
  2294. +
  2295. + for (i = 0; i < emac_txq_cnt; i++) {
  2296. + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
  2297. + i);
  2298. +
  2299. + len += sprintf(buf + len, "\n");
  2300. + __netif_tx_lock_bh(tx_queue);
  2301. +
  2302. + hif_tx_lock(&pfe->hif);
  2303. + len += sprintf(buf + len,
  2304. + "Queue %2d : credits = %10d\n"
  2305. + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
  2306. + len += sprintf(buf + len,
  2307. + " tx packets = %10d\n"
  2308. + , pfe->tmu_credit.tx_packets[priv->id][i]);
  2309. + hif_tx_unlock(&pfe->hif);
  2310. +
  2311. + /* Don't output additional stats if the queue was never used */
  2312. + if (!pfe->tmu_credit.tx_packets[priv->id][i])
  2313. + goto skip;
  2314. +
  2315. + len += sprintf(buf + len,
  2316. + " clean_fail = %10d\n"
  2317. + , priv->clean_fail[i]);
  2318. + len += sprintf(buf + len,
  2319. + " stop_queue = %10d\n"
  2320. + , priv->stop_queue_total[i]);
  2321. + len += sprintf(buf + len,
  2322. + " stop_queue_hif = %10d\n"
  2323. + , priv->stop_queue_hif[i]);
  2324. + len += sprintf(buf + len,
  2325. + " stop_queue_hif_client = %10d\n"
  2326. + , priv->stop_queue_hif_client[i]);
  2327. + len += sprintf(buf + len,
  2328. + " stop_queue_credit = %10d\n"
  2329. + , priv->stop_queue_credit[i]);
  2330. +skip:
  2331. + __netif_tx_unlock_bh(tx_queue);
  2332. + }
  2333. + return len;
  2334. +}
  2335. +
  2336. +/* pfe_eth_set_tx_stats
  2337. + *
  2338. + */
  2339. +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
  2340. + struct device_attribute *attr,
  2341. + const char *buf, size_t count)
  2342. +{
  2343. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2344. + int i;
  2345. +
  2346. + for (i = 0; i < emac_txq_cnt; i++) {
  2347. + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
  2348. + i);
  2349. +
  2350. + __netif_tx_lock_bh(tx_queue);
  2351. + priv->clean_fail[i] = 0;
  2352. + priv->stop_queue_total[i] = 0;
  2353. + priv->stop_queue_hif[i] = 0;
  2354. + priv->stop_queue_hif_client[i] = 0;
  2355. + priv->stop_queue_credit[i] = 0;
  2356. + __netif_tx_unlock_bh(tx_queue);
  2357. + }
  2358. +
  2359. + return count;
  2360. +}
  2361. +#endif
  2362. +/* pfe_eth_show_txavail
  2363. + *
  2364. + */
  2365. +static ssize_t pfe_eth_show_txavail(struct device *dev,
  2366. + struct device_attribute *attr,
  2367. + char *buf)
  2368. +{
  2369. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2370. + ssize_t len = 0;
  2371. + int i;
  2372. +
  2373. + for (i = 0; i < emac_txq_cnt; i++) {
  2374. + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
  2375. + i);
  2376. +
  2377. + __netif_tx_lock_bh(tx_queue);
  2378. +
  2379. + len += sprintf(buf + len, "%d",
  2380. + hif_lib_tx_avail(&priv->client, i));
  2381. +
  2382. + __netif_tx_unlock_bh(tx_queue);
  2383. +
  2384. + if (i == (emac_txq_cnt - 1))
  2385. + len += sprintf(buf + len, "\n");
  2386. + else
  2387. + len += sprintf(buf + len, " ");
  2388. + }
  2389. +
  2390. + return len;
  2391. +}
  2392. +
  2393. +/* pfe_eth_show_default_priority
  2394. + *
  2395. + */
  2396. +static ssize_t pfe_eth_show_default_priority(struct device *dev,
  2397. + struct device_attribute *attr,
  2398. + char *buf)
  2399. +{
  2400. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2401. + unsigned long flags;
  2402. + int rc;
  2403. +
  2404. + spin_lock_irqsave(&priv->lock, flags);
  2405. + rc = sprintf(buf, "%d\n", priv->default_priority);
  2406. + spin_unlock_irqrestore(&priv->lock, flags);
  2407. +
  2408. + return rc;
  2409. +}
  2410. +
  2411. +/* pfe_eth_set_default_priority
  2412. + *
  2413. + */
  2414. +
  2415. +static ssize_t pfe_eth_set_default_priority(struct device *dev,
  2416. + struct device_attribute *attr,
  2417. + const char *buf, size_t count)
  2418. +{
  2419. + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
  2420. + unsigned long flags;
  2421. +
  2422. + spin_lock_irqsave(&priv->lock, flags);
  2423. + priv->default_priority = kstrtoul(buf, 0, 0);
  2424. + spin_unlock_irqrestore(&priv->lock, flags);
  2425. +
  2426. + return count;
  2427. +}
  2428. +
  2429. +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
  2430. +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
  2431. + pfe_eth_set_default_priority);
  2432. +
  2433. +#ifdef PFE_ETH_NAPI_STATS
  2434. +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
  2435. + pfe_eth_set_napi_stats);
  2436. +#endif
  2437. +
  2438. +#ifdef PFE_ETH_TX_STATS
  2439. +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
  2440. + pfe_eth_set_tx_stats);
  2441. +#endif
  2442. +
  2443. +/*
  2444. + * pfe_eth_sysfs_init
  2445. + *
  2446. + */
  2447. +static int pfe_eth_sysfs_init(struct net_device *ndev)
  2448. +{
  2449. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2450. + int err;
  2451. +
  2452. + /* Initialize the default values */
  2453. +
  2454. + /*
  2455. + * By default, packets without conntrack will use this default low
  2456. + * priority queue
  2457. + */
  2458. + priv->default_priority = 0;
  2459. +
  2460. + /* Create our sysfs files */
  2461. + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
  2462. + if (err) {
  2463. + netdev_err(ndev,
  2464. + "failed to create default_priority sysfs files\n");
  2465. + goto err_priority;
  2466. + }
  2467. +
  2468. + err = device_create_file(&ndev->dev, &dev_attr_txavail);
  2469. + if (err) {
  2470. + netdev_err(ndev,
  2471. + "failed to create txavail sysfs files\n");
  2472. + goto err_txavail;
  2473. + }
  2474. +
  2475. +#ifdef PFE_ETH_NAPI_STATS
  2476. + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
  2477. + if (err) {
  2478. + netdev_err(ndev, "failed to create napi stats sysfs files\n");
  2479. + goto err_napi;
  2480. + }
  2481. +#endif
  2482. +
  2483. +#ifdef PFE_ETH_TX_STATS
  2484. + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
  2485. + if (err) {
  2486. + netdev_err(ndev, "failed to create tx stats sysfs files\n");
  2487. + goto err_tx;
  2488. + }
  2489. +#endif
  2490. +
  2491. + return 0;
  2492. +
  2493. +#ifdef PFE_ETH_TX_STATS
  2494. +err_tx:
  2495. +#endif
  2496. +#ifdef PFE_ETH_NAPI_STATS
  2497. + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
  2498. +
  2499. +err_napi:
  2500. +#endif
  2501. + device_remove_file(&ndev->dev, &dev_attr_txavail);
  2502. +
  2503. +err_txavail:
  2504. + device_remove_file(&ndev->dev, &dev_attr_default_priority);
  2505. +
  2506. +err_priority:
  2507. + return -1;
  2508. +}
  2509. +
  2510. +/* pfe_eth_sysfs_exit
  2511. + *
  2512. + */
  2513. +void pfe_eth_sysfs_exit(struct net_device *ndev)
  2514. +{
  2515. +#ifdef PFE_ETH_TX_STATS
  2516. + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
  2517. +#endif
  2518. +
  2519. +#ifdef PFE_ETH_NAPI_STATS
  2520. + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
  2521. +#endif
  2522. + device_remove_file(&ndev->dev, &dev_attr_txavail);
  2523. + device_remove_file(&ndev->dev, &dev_attr_default_priority);
  2524. +}
  2525. +
  2526. +/*************************************************************************/
  2527. +/* ETHTOOL INTERFACE */
  2528. +/*************************************************************************/
  2529. +
  2530. +/*MTIP GEMAC */
  2531. +static const struct fec_stat {
  2532. + char name[ETH_GSTRING_LEN];
  2533. + u16 offset;
  2534. +} fec_stats[] = {
  2535. + /* RMON TX */
  2536. + { "tx_dropped", RMON_T_DROP },
  2537. + { "tx_packets", RMON_T_PACKETS },
  2538. + { "tx_broadcast", RMON_T_BC_PKT },
  2539. + { "tx_multicast", RMON_T_MC_PKT },
  2540. + { "tx_crc_errors", RMON_T_CRC_ALIGN },
  2541. + { "tx_undersize", RMON_T_UNDERSIZE },
  2542. + { "tx_oversize", RMON_T_OVERSIZE },
  2543. + { "tx_fragment", RMON_T_FRAG },
  2544. + { "tx_jabber", RMON_T_JAB },
  2545. + { "tx_collision", RMON_T_COL },
  2546. + { "tx_64byte", RMON_T_P64 },
  2547. + { "tx_65to127byte", RMON_T_P65TO127 },
  2548. + { "tx_128to255byte", RMON_T_P128TO255 },
  2549. + { "tx_256to511byte", RMON_T_P256TO511 },
  2550. + { "tx_512to1023byte", RMON_T_P512TO1023 },
  2551. + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
  2552. + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
  2553. + { "tx_octets", RMON_T_OCTETS },
  2554. +
  2555. + /* IEEE TX */
  2556. + { "IEEE_tx_drop", IEEE_T_DROP },
  2557. + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
  2558. + { "IEEE_tx_1col", IEEE_T_1COL },
  2559. + { "IEEE_tx_mcol", IEEE_T_MCOL },
  2560. + { "IEEE_tx_def", IEEE_T_DEF },
  2561. + { "IEEE_tx_lcol", IEEE_T_LCOL },
  2562. + { "IEEE_tx_excol", IEEE_T_EXCOL },
  2563. + { "IEEE_tx_macerr", IEEE_T_MACERR },
  2564. + { "IEEE_tx_cserr", IEEE_T_CSERR },
  2565. + { "IEEE_tx_sqe", IEEE_T_SQE },
  2566. + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
  2567. + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
  2568. +
  2569. + /* RMON RX */
  2570. + { "rx_packets", RMON_R_PACKETS },
  2571. + { "rx_broadcast", RMON_R_BC_PKT },
  2572. + { "rx_multicast", RMON_R_MC_PKT },
  2573. + { "rx_crc_errors", RMON_R_CRC_ALIGN },
  2574. + { "rx_undersize", RMON_R_UNDERSIZE },
  2575. + { "rx_oversize", RMON_R_OVERSIZE },
  2576. + { "rx_fragment", RMON_R_FRAG },
  2577. + { "rx_jabber", RMON_R_JAB },
  2578. + { "rx_64byte", RMON_R_P64 },
  2579. + { "rx_65to127byte", RMON_R_P65TO127 },
  2580. + { "rx_128to255byte", RMON_R_P128TO255 },
  2581. + { "rx_256to511byte", RMON_R_P256TO511 },
  2582. + { "rx_512to1023byte", RMON_R_P512TO1023 },
  2583. + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
  2584. + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
  2585. + { "rx_octets", RMON_R_OCTETS },
  2586. +
  2587. + /* IEEE RX */
  2588. + { "IEEE_rx_drop", IEEE_R_DROP },
  2589. + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
  2590. + { "IEEE_rx_crc", IEEE_R_CRC },
  2591. + { "IEEE_rx_align", IEEE_R_ALIGN },
  2592. + { "IEEE_rx_macerr", IEEE_R_MACERR },
  2593. + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
  2594. + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
  2595. +};
  2596. +
  2597. +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
  2598. + *stats, u64 *data)
  2599. +{
  2600. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2601. + int i;
  2602. +
  2603. + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
  2604. + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
  2605. +}
  2606. +
  2607. +static void pfe_eth_gstrings(struct net_device *netdev,
  2608. + u32 stringset, u8 *data)
  2609. +{
  2610. + int i;
  2611. +
  2612. + switch (stringset) {
  2613. + case ETH_SS_STATS:
  2614. + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
  2615. + memcpy(data + i * ETH_GSTRING_LEN,
  2616. + fec_stats[i].name, ETH_GSTRING_LEN);
  2617. + break;
  2618. + }
  2619. +}
  2620. +
  2621. +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
  2622. +{
  2623. + switch (sset) {
  2624. + case ETH_SS_STATS:
  2625. + return ARRAY_SIZE(fec_stats);
  2626. + default:
  2627. + return -EOPNOTSUPP;
  2628. + }
  2629. +}
  2630. +
  2631. +/*
  2632. + * pfe_eth_gemac_reglen - Return the length of the register structure.
  2633. + *
  2634. + */
  2635. +static int pfe_eth_gemac_reglen(struct net_device *ndev)
  2636. +{
  2637. + pr_info("%s()\n", __func__);
  2638. + return (sizeof(gemac_regs) / sizeof(u32));
  2639. +}
  2640. +
  2641. +/*
  2642. + * pfe_eth_gemac_get_regs - Return the gemac register structure.
  2643. + *
  2644. + */
  2645. +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
  2646. + *regs, void *regbuf)
  2647. +{
  2648. + int i;
  2649. +
  2650. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2651. + u32 *buf = (u32 *)regbuf;
  2652. +
  2653. + pr_info("%s()\n", __func__);
  2654. + for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
  2655. + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
  2656. +}
  2657. +
  2658. +/*
  2659. + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
  2660. + *
  2661. + */
  2662. +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
  2663. +{
  2664. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2665. +
  2666. + if (wol->wolopts & ~WAKE_MAGIC)
  2667. + return -EOPNOTSUPP;
  2668. +
  2669. + /* for MTIP we store wol->wolopts */
  2670. + priv->wol = wol->wolopts;
  2671. +
  2672. + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
  2673. +
  2674. + return 0;
  2675. +}
  2676. +
  2677. +/*
  2678. + *
  2679. + * pfe_eth_get_wol - Get the WoL options.
  2680. + *
  2681. + */
  2682. +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
  2683. + *wol)
  2684. +{
  2685. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2686. +
  2687. + wol->supported = WAKE_MAGIC;
  2688. + wol->wolopts = 0;
  2689. +
  2690. + if (priv->wol & WAKE_MAGIC)
  2691. + wol->wolopts = WAKE_MAGIC;
  2692. +
  2693. + memset(&wol->sopass, 0, sizeof(wol->sopass));
  2694. +}
  2695. +
  2696. +/*
  2697. + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
  2698. + *
  2699. + */
  2700. +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
  2701. + *drvinfo)
  2702. +{
  2703. + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
  2704. + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
  2705. + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
  2706. + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
  2707. +}
  2708. +
  2709. +/*
  2710. + * pfe_eth_set_settings - Used to send commands to PHY.
  2711. + *
  2712. + */
  2713. +static int pfe_eth_set_settings(struct net_device *ndev,
  2714. + const struct ethtool_link_ksettings *cmd)
  2715. +{
  2716. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2717. + struct phy_device *phydev = priv->phydev;
  2718. +
  2719. + if (!phydev)
  2720. + return -ENODEV;
  2721. +
  2722. + return phy_ethtool_ksettings_set(phydev, cmd);
  2723. +}
  2724. +
  2725. +/*
  2726. + * pfe_eth_getsettings - Return the current settings in the ethtool_cmd
  2727. + * structure.
  2728. + *
  2729. + */
  2730. +static int pfe_eth_get_settings(struct net_device *ndev,
  2731. + struct ethtool_link_ksettings *cmd)
  2732. +{
  2733. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2734. + struct phy_device *phydev = priv->phydev;
  2735. +
  2736. + if (!phydev)
  2737. + return -ENODEV;
  2738. +
  2739. + phy_ethtool_ksettings_get(phydev, cmd);
  2740. +
  2741. + return 0;
  2742. +}
  2743. +
  2744. +/*
  2745. + * pfe_eth_get_msglevel - Gets the debug message mask.
  2746. + *
  2747. + */
  2748. +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
  2749. +{
  2750. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2751. +
  2752. + return priv->msg_enable;
  2753. +}
  2754. +
  2755. +/*
  2756. + * pfe_eth_set_msglevel - Sets the debug message mask.
  2757. + *
  2758. + */
  2759. +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
  2760. +{
  2761. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2762. +
  2763. + priv->msg_enable = data;
  2764. +}
  2765. +
  2766. +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
  2767. +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
  2768. +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
  2769. + HIF_RX_COAL_CLKS_PER_USEC)
  2770. +
  2771. +/*
  2772. + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
  2773. + *
  2774. + */
  2775. +static int pfe_eth_set_coalesce(struct net_device *ndev,
  2776. + struct ethtool_coalesce *ec)
  2777. +{
  2778. + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
  2779. + return -EINVAL;
  2780. +
  2781. + if (!ec->rx_coalesce_usecs) {
  2782. + writel(0, HIF_INT_COAL);
  2783. + return 0;
  2784. + }
  2785. +
  2786. + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
  2787. + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
  2788. +
  2789. + return 0;
  2790. +}
  2791. +
  2792. +/*
  2793. + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
  2794. + *
  2795. + */
  2796. +static int pfe_eth_get_coalesce(struct net_device *ndev,
  2797. + struct ethtool_coalesce *ec)
  2798. +{
  2799. + int reg_val = readl(HIF_INT_COAL);
  2800. +
  2801. + if (reg_val & HIF_INT_COAL_ENABLE)
  2802. + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
  2803. + HIF_RX_COAL_CLKS_PER_USEC;
  2804. + else
  2805. + ec->rx_coalesce_usecs = 0;
  2806. +
  2807. + return 0;
  2808. +}
  2809. +
  2810. +/*
  2811. + * pfe_eth_set_pauseparam - Sets pause parameters
  2812. + *
  2813. + */
  2814. +static int pfe_eth_set_pauseparam(struct net_device *ndev,
  2815. + struct ethtool_pauseparam *epause)
  2816. +{
  2817. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2818. +
  2819. + if (epause->tx_pause != epause->rx_pause) {
  2820. + netdev_info(ndev,
  2821. + "hardware only support enable/disable both tx and rx\n");
  2822. + return -EINVAL;
  2823. + }
  2824. +
  2825. + priv->pause_flag = 0;
  2826. + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
  2827. + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
  2828. +
  2829. + if (epause->rx_pause || epause->autoneg) {
  2830. + gemac_enable_pause_rx(priv->EMAC_baseaddr);
  2831. + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
  2832. + EGPI_PAUSE_ENABLE),
  2833. + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
  2834. + if (priv->phydev) {
  2835. + priv->phydev->supported |= ADVERTISED_Pause |
  2836. + ADVERTISED_Asym_Pause;
  2837. + priv->phydev->advertising |= ADVERTISED_Pause |
  2838. + ADVERTISED_Asym_Pause;
  2839. + }
  2840. + } else {
  2841. + gemac_disable_pause_rx(priv->EMAC_baseaddr);
  2842. + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
  2843. + ~EGPI_PAUSE_ENABLE),
  2844. + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
  2845. + if (priv->phydev) {
  2846. + priv->phydev->supported &= ~(ADVERTISED_Pause |
  2847. + ADVERTISED_Asym_Pause);
  2848. + priv->phydev->advertising &= ~(ADVERTISED_Pause |
  2849. + ADVERTISED_Asym_Pause);
  2850. + }
  2851. + }
  2852. +
  2853. + return 0;
  2854. +}
  2855. +
  2856. +/*
  2857. + * pfe_eth_get_pauseparam - Gets pause parameters
  2858. + *
  2859. + */
  2860. +static void pfe_eth_get_pauseparam(struct net_device *ndev,
  2861. + struct ethtool_pauseparam *epause)
  2862. +{
  2863. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  2864. +
  2865. + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
  2866. + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
  2867. + epause->rx_pause = epause->tx_pause;
  2868. +}
  2869. +
  2870. +/*
  2871. + * pfe_eth_get_hash
  2872. + */
  2873. +#define PFE_HASH_BITS 6 /* #bits in hash */
  2874. +#define CRC32_POLY 0xEDB88320
  2875. +
  2876. +static int pfe_eth_get_hash(u8 *addr)
  2877. +{
  2878. + unsigned int i, bit, data, crc, hash;
  2879. +
  2880. + /* calculate crc32 value of mac address */
  2881. + crc = 0xffffffff;
  2882. +
  2883. + for (i = 0; i < 6; i++) {
  2884. + data = addr[i];
  2885. + for (bit = 0; bit < 8; bit++, data >>= 1) {
  2886. + crc = (crc >> 1) ^
  2887. + (((crc ^ data) & 1) ? CRC32_POLY : 0);
  2888. + }
  2889. + }
  2890. +
  2891. + /*
  2892. + * only upper 6 bits (PFE_HASH_BITS) are used
  2893. + * which point to specific bit in the hash registers
  2894. + */
  2895. + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
  2896. +
  2897. + return hash;
  2898. +}
  2899. +
  2900. +const struct ethtool_ops pfe_ethtool_ops = {
  2901. + .get_drvinfo = pfe_eth_get_drvinfo,
  2902. + .get_regs_len = pfe_eth_gemac_reglen,
  2903. + .get_regs = pfe_eth_gemac_get_regs,
  2904. + .get_link = ethtool_op_get_link,
  2905. + .get_wol = pfe_eth_get_wol,
  2906. + .set_wol = pfe_eth_set_wol,
  2907. + .set_pauseparam = pfe_eth_set_pauseparam,
  2908. + .get_pauseparam = pfe_eth_get_pauseparam,
  2909. + .get_strings = pfe_eth_gstrings,
  2910. + .get_sset_count = pfe_eth_stats_count,
  2911. + .get_ethtool_stats = pfe_eth_fill_stats,
  2912. + .get_msglevel = pfe_eth_get_msglevel,
  2913. + .set_msglevel = pfe_eth_set_msglevel,
  2914. + .set_coalesce = pfe_eth_set_coalesce,
  2915. + .get_coalesce = pfe_eth_get_coalesce,
  2916. + .get_link_ksettings = pfe_eth_get_settings,
  2917. + .set_link_ksettings = pfe_eth_set_settings,
  2918. +};
  2919. +
  2920. +/* pfe_eth_mdio_reset
  2921. + */
  2922. +int pfe_eth_mdio_reset(struct mii_bus *bus)
  2923. +{
  2924. + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
  2925. + u32 phy_speed;
  2926. +
  2927. + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
  2928. +
  2929. + mutex_lock(&bus->mdio_lock);
  2930. +
  2931. + /*
  2932. + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
  2933. + *
  2934. + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
  2935. + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
  2936. + */
  2937. + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
  2938. + << EMAC_MII_SPEED_SHIFT);
  2939. + phy_speed |= EMAC_HOLDTIME(0x5);
  2940. + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
  2941. +
  2942. + mutex_unlock(&bus->mdio_lock);
  2943. +
  2944. + return 0;
  2945. +}
  2946. +
  2947. +/* pfe_eth_gemac_phy_timeout
  2948. + *
  2949. + */
  2950. +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
  2951. +{
  2952. + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
  2953. + EMAC_IEVENT_MII)) {
  2954. + if (timeout-- <= 0)
  2955. + return -1;
  2956. + usleep_range(10, 20);
  2957. + }
  2958. + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
  2959. + return 0;
  2960. +}
  2961. +
  2962. +static int pfe_eth_mdio_mux(u8 muxval)
  2963. +{
  2964. + struct i2c_adapter *a;
  2965. + struct i2c_msg msg;
  2966. + unsigned char buf[2];
  2967. + int ret;
  2968. +
  2969. + a = i2c_get_adapter(0);
  2970. + if (!a)
  2971. + return -ENODEV;
  2972. +
  2973. + /* set bit 1 (the second bit) of chip at 0x09, register 0x13 */
  2974. + buf[0] = 0x54; /* reg number */
  2975. + buf[1] = (muxval << 6) | 0x3; /* data */
  2976. + msg.addr = 0x66;
  2977. + msg.buf = buf;
  2978. + msg.len = 2;
  2979. + msg.flags = 0;
  2980. + ret = i2c_transfer(a, &msg, 1);
  2981. + i2c_put_adapter(a);
  2982. + if (ret != 1)
  2983. + return -ENODEV;
  2984. + return 0;
  2985. +}
  2986. +
  2987. +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
  2988. + int dev_addr, int regnum)
  2989. +{
  2990. + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
  2991. +
  2992. + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
  2993. + EMAC_MII_DATA_RA(dev_addr) |
  2994. + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
  2995. + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
  2996. +
  2997. + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
  2998. + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
  2999. + __func__);
  3000. + return -1;
  3001. + }
  3002. +
  3003. + return 0;
  3004. +}
  3005. +
  3006. +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  3007. + u16 value)
  3008. +{
  3009. + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
  3010. +
  3011. + /*To access external PHYs on QDS board mux needs to be configured*/
  3012. + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
  3013. + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
  3014. +
  3015. + if (regnum & MII_ADDR_C45) {
  3016. + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
  3017. + regnum & 0xffff);
  3018. + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
  3019. + EMAC_MII_DATA_PA(mii_id) |
  3020. + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
  3021. + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
  3022. + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
  3023. + } else {
  3024. + /* start a write op */
  3025. + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
  3026. + EMAC_MII_DATA_PA(mii_id) |
  3027. + EMAC_MII_DATA_RA(regnum) |
  3028. + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
  3029. + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
  3030. + }
  3031. +
  3032. + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
  3033. + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
  3034. + __func__);
  3035. + return -1;
  3036. + }
  3037. + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
  3038. + mii_id, regnum, value);
  3039. +
  3040. + return 0;
  3041. +}
  3042. +
  3043. +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  3044. +{
  3045. + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
  3046. + u16 value = 0;
  3047. +
  3048. + /*To access external PHYs on QDS board mux needs to be configured*/
  3049. + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
  3050. + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
  3051. +
  3052. + if (regnum & MII_ADDR_C45) {
  3053. + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
  3054. + regnum & 0xffff);
  3055. + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
  3056. + EMAC_MII_DATA_PA(mii_id) |
  3057. + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
  3058. + EMAC_MII_DATA_TA,
  3059. + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
  3060. + } else {
  3061. + /* start a read op */
  3062. + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
  3063. + EMAC_MII_DATA_PA(mii_id) |
  3064. + EMAC_MII_DATA_RA(regnum) |
  3065. + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
  3066. + EMAC_MII_DATA_REG);
  3067. + }
  3068. +
  3069. + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
  3070. + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
  3071. + return -1;
  3072. + }
  3073. +
  3074. + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
  3075. + EMAC_MII_DATA_REG));
  3076. + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
  3077. + mii_id, regnum, value);
  3078. + return value;
  3079. +}
  3080. +
  3081. +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
  3082. + struct ls1012a_mdio_platform_data *minfo)
  3083. +{
  3084. + struct mii_bus *bus;
  3085. + int rc, ii;
  3086. + struct phy_device *phydev;
  3087. +
  3088. + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
  3089. + pr_info("%s\n", __func__);
  3090. +
  3091. + bus = mdiobus_alloc();
  3092. + if (!bus) {
  3093. + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
  3094. + rc = -ENOMEM;
  3095. + goto err0;
  3096. + }
  3097. +
  3098. + bus->name = "ls1012a MDIO Bus";
  3099. + bus->read = &pfe_eth_mdio_read;
  3100. + bus->write = &pfe_eth_mdio_write;
  3101. + bus->reset = &pfe_eth_mdio_reset;
  3102. + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
  3103. + bus->priv = priv;
  3104. +
  3105. + bus->phy_mask = minfo->phy_mask;
  3106. + priv->mdc_div = minfo->mdc_div;
  3107. +
  3108. + if (!priv->mdc_div)
  3109. + priv->mdc_div = 64;
  3110. +
  3111. + bus->irq[0] = minfo->irq[0];
  3112. +
  3113. + bus->parent = priv->pfe->dev;
  3114. +
  3115. + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
  3116. + __func__, priv->mdc_div, bus->phy_mask);
  3117. + rc = mdiobus_register(bus);
  3118. + if (rc) {
  3119. + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
  3120. + bus->name);
  3121. + goto err1;
  3122. + }
  3123. +
  3124. + priv->mii_bus = bus;
  3125. +
  3126. + /* For clause 45 we need to call get_phy_device() with it's
  3127. + * 3rd argument as true and then register the phy device
  3128. + * via phy_device_register()
  3129. + */
  3130. +
  3131. + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) {
  3132. + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
  3133. + phydev = get_phy_device(priv->mii_bus,
  3134. + priv->einfo->phy_id + ii, true);
  3135. + if (!phydev || IS_ERR(phydev)) {
  3136. + rc = -EIO;
  3137. + netdev_err(priv->ndev, "fail to get device\n");
  3138. + goto err1;
  3139. + }
  3140. + rc = phy_device_register(phydev);
  3141. + if (rc) {
  3142. + phy_device_free(phydev);
  3143. + netdev_err(priv->ndev,
  3144. + "phy_device_register() failed\n");
  3145. + goto err1;
  3146. + }
  3147. + }
  3148. + }
  3149. +
  3150. + pfe_eth_mdio_reset(bus);
  3151. +
  3152. + return 0;
  3153. +
  3154. +err1:
  3155. + mdiobus_free(bus);
  3156. +err0:
  3157. + return rc;
  3158. +}
  3159. +
  3160. +/* pfe_eth_mdio_exit
  3161. + */
  3162. +static void pfe_eth_mdio_exit(struct mii_bus *bus)
  3163. +{
  3164. + if (!bus)
  3165. + return;
  3166. +
  3167. + netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct
  3168. + pfe_eth_priv_s *)(bus->priv))->ndev, "%s\n", __func__);
  3169. +
  3170. + mdiobus_unregister(bus);
  3171. + mdiobus_free(bus);
  3172. +}
  3173. +
  3174. +/* pfe_get_phydev_speed
  3175. + */
  3176. +static int pfe_get_phydev_speed(struct phy_device *phydev)
  3177. +{
  3178. + switch (phydev->speed) {
  3179. + case 10:
  3180. + return SPEED_10M;
  3181. + case 100:
  3182. + return SPEED_100M;
  3183. + case 1000:
  3184. + default:
  3185. + return SPEED_1000M;
  3186. + }
  3187. +}
  3188. +
  3189. +/* pfe_set_rgmii_speed
  3190. + */
  3191. +#define RGMIIPCR 0x434
  3192. +/* RGMIIPCR bit definitions*/
  3193. +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
  3194. +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
  3195. +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
  3196. +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
  3197. +#define SCFG_RGMIIPCR_SETFD (0x00000001)
  3198. +
  3199. +static void pfe_set_rgmii_speed(struct phy_device *phydev)
  3200. +{
  3201. + u32 rgmii_pcr;
  3202. +
  3203. + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
  3204. + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
  3205. +
  3206. + switch (phydev->speed) {
  3207. + case 10:
  3208. + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
  3209. + break;
  3210. + case 1000:
  3211. + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
  3212. + break;
  3213. + case 100:
  3214. + default:
  3215. + /* Default is 100M */
  3216. + break;
  3217. + }
  3218. + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
  3219. +}
  3220. +
  3221. +/* pfe_get_phydev_duplex
  3222. + */
  3223. +static int pfe_get_phydev_duplex(struct phy_device *phydev)
  3224. +{
  3225. + /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
  3226. + return DUPLEX_FULL;
  3227. +}
  3228. +
  3229. +/* pfe_eth_adjust_link
  3230. + */
  3231. +static void pfe_eth_adjust_link(struct net_device *ndev)
  3232. +{
  3233. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3234. + unsigned long flags;
  3235. + struct phy_device *phydev = priv->phydev;
  3236. + int new_state = 0;
  3237. +
  3238. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3239. +
  3240. + spin_lock_irqsave(&priv->lock, flags);
  3241. +
  3242. + if (phydev->link) {
  3243. + /*
  3244. + * Now we make sure that we can be in full duplex mode.
  3245. + * If not, we operate in half-duplex mode.
  3246. + */
  3247. + if (phydev->duplex != priv->oldduplex) {
  3248. + new_state = 1;
  3249. + gemac_set_duplex(priv->EMAC_baseaddr,
  3250. + pfe_get_phydev_duplex(phydev));
  3251. + priv->oldduplex = phydev->duplex;
  3252. + }
  3253. +
  3254. + if (phydev->speed != priv->oldspeed) {
  3255. + new_state = 1;
  3256. + gemac_set_speed(priv->EMAC_baseaddr,
  3257. + pfe_get_phydev_speed(phydev));
  3258. + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
  3259. + pfe_set_rgmii_speed(phydev);
  3260. + priv->oldspeed = phydev->speed;
  3261. + }
  3262. +
  3263. + if (!priv->oldlink) {
  3264. + new_state = 1;
  3265. + priv->oldlink = 1;
  3266. + }
  3267. +
  3268. + } else if (priv->oldlink) {
  3269. + new_state = 1;
  3270. + priv->oldlink = 0;
  3271. + priv->oldspeed = 0;
  3272. + priv->oldduplex = -1;
  3273. + }
  3274. +
  3275. + if (new_state && netif_msg_link(priv))
  3276. + phy_print_status(phydev);
  3277. +
  3278. + spin_unlock_irqrestore(&priv->lock, flags);
  3279. +}
  3280. +
  3281. +/* pfe_phy_exit
  3282. + */
  3283. +static void pfe_phy_exit(struct net_device *ndev)
  3284. +{
  3285. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3286. +
  3287. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3288. +
  3289. + phy_disconnect(priv->phydev);
  3290. + priv->phydev = NULL;
  3291. +}
  3292. +
  3293. +/* pfe_eth_stop
  3294. + */
  3295. +static void pfe_eth_stop(struct net_device *ndev, int wake)
  3296. +{
  3297. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3298. +
  3299. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3300. +
  3301. + if (wake) {
  3302. + gemac_tx_disable(priv->EMAC_baseaddr);
  3303. + } else {
  3304. + gemac_disable(priv->EMAC_baseaddr);
  3305. + gpi_disable(priv->GPI_baseaddr);
  3306. +
  3307. + if (priv->phydev)
  3308. + phy_stop(priv->phydev);
  3309. + }
  3310. +}
  3311. +
  3312. +/* pfe_eth_start
  3313. + */
  3314. +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
  3315. +{
  3316. + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
  3317. +
  3318. + if (priv->phydev)
  3319. + phy_start(priv->phydev);
  3320. +
  3321. + gpi_enable(priv->GPI_baseaddr);
  3322. + gemac_enable(priv->EMAC_baseaddr);
  3323. +
  3324. + return 0;
  3325. +}
  3326. +
  3327. +/*
  3328. + * Configure on chip serdes through mdio
  3329. + */
  3330. +static void ls1012a_configure_serdes(struct net_device *ndev)
  3331. +{
  3332. + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
  3333. + int sgmii_2500 = 0;
  3334. + struct mii_bus *bus = priv->mii_bus;
  3335. + u16 value = 0;
  3336. +
  3337. + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
  3338. + sgmii_2500 = 1;
  3339. +
  3340. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3341. + /* PCS configuration done with corresponding GEMAC */
  3342. +
  3343. + pfe_eth_mdio_read(bus, 0, 0);
  3344. + pfe_eth_mdio_read(bus, 0, 1);
  3345. +
  3346. + /*These settings taken from validtion team */
  3347. + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
  3348. + if (sgmii_2500) {
  3349. + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
  3350. + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
  3351. + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
  3352. + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
  3353. + /* Autonegotiation need to be disabled for 2.5G SGMII mode*/
  3354. + value = 0x0140;
  3355. + pfe_eth_mdio_write(bus, 0, 0x0, value);
  3356. + } else {
  3357. + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
  3358. + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
  3359. + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
  3360. + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
  3361. + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
  3362. + }
  3363. +}
  3364. +
  3365. +/*
  3366. + * pfe_phy_init
  3367. + *
  3368. + */
  3369. +static int pfe_phy_init(struct net_device *ndev)
  3370. +{
  3371. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3372. + struct phy_device *phydev;
  3373. + char phy_id[MII_BUS_ID_SIZE + 3];
  3374. + char bus_id[MII_BUS_ID_SIZE];
  3375. + phy_interface_t interface;
  3376. +
  3377. + priv->oldlink = 0;
  3378. + priv->oldspeed = 0;
  3379. + priv->oldduplex = -1;
  3380. +
  3381. + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
  3382. + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
  3383. + priv->einfo->phy_id);
  3384. +
  3385. + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
  3386. + interface = priv->einfo->mii_config;
  3387. + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
  3388. + (interface == PHY_INTERFACE_MODE_2500SGMII)) {
  3389. + /*Configure SGMII PCS */
  3390. + if (pfe->scfg) {
  3391. + /*Config MDIO from serdes */
  3392. + regmap_write(pfe->scfg, 0x484, 0x00000000);
  3393. + }
  3394. + ls1012a_configure_serdes(ndev);
  3395. + }
  3396. +
  3397. + if (pfe->scfg) {
  3398. + /*Config MDIO from PAD */
  3399. + regmap_write(pfe->scfg, 0x484, 0x80000000);
  3400. + }
  3401. +
  3402. + priv->oldlink = 0;
  3403. + priv->oldspeed = 0;
  3404. + priv->oldduplex = -1;
  3405. + pr_info("%s interface %x\n", __func__, interface);
  3406. + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
  3407. +
  3408. + if (IS_ERR(phydev)) {
  3409. + netdev_err(ndev, "phy_connect() failed\n");
  3410. + return PTR_ERR(phydev);
  3411. + }
  3412. +
  3413. + priv->phydev = phydev;
  3414. + phydev->irq = PHY_POLL;
  3415. +
  3416. + return 0;
  3417. +}
  3418. +
  3419. +/* pfe_gemac_init
  3420. + */
  3421. +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
  3422. +{
  3423. + struct gemac_cfg cfg;
  3424. +
  3425. + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
  3426. +
  3427. + cfg.speed = SPEED_1000M;
  3428. + cfg.duplex = DUPLEX_FULL;
  3429. +
  3430. + gemac_set_config(priv->EMAC_baseaddr, &cfg);
  3431. + gemac_allow_broadcast(priv->EMAC_baseaddr);
  3432. + gemac_enable_1536_rx(priv->EMAC_baseaddr);
  3433. + gemac_enable_rx_jmb(priv->EMAC_baseaddr);
  3434. + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
  3435. + gemac_enable_pause_rx(priv->EMAC_baseaddr);
  3436. + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
  3437. +
  3438. + /*GEM will perform checksum verifications*/
  3439. + if (priv->ndev->features & NETIF_F_RXCSUM)
  3440. + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
  3441. + else
  3442. + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
  3443. +
  3444. + return 0;
  3445. +}
  3446. +
  3447. +/* pfe_eth_event_handler
  3448. + */
  3449. +static int pfe_eth_event_handler(void *data, int event, int qno)
  3450. +{
  3451. + struct pfe_eth_priv_s *priv = data;
  3452. +
  3453. + switch (event) {
  3454. + case EVENT_RX_PKT_IND:
  3455. +
  3456. + if (qno == 0) {
  3457. + if (napi_schedule_prep(&priv->high_napi)) {
  3458. + netif_info(priv, intr, priv->ndev,
  3459. + "%s: schedule high prio poll\n"
  3460. + , __func__);
  3461. +
  3462. +#ifdef PFE_ETH_NAPI_STATS
  3463. + priv->napi_counters[NAPI_SCHED_COUNT]++;
  3464. +#endif
  3465. +
  3466. + __napi_schedule(&priv->high_napi);
  3467. + }
  3468. + } else if (qno == 1) {
  3469. + if (napi_schedule_prep(&priv->low_napi)) {
  3470. + netif_info(priv, intr, priv->ndev,
  3471. + "%s: schedule low prio poll\n"
  3472. + , __func__);
  3473. +
  3474. +#ifdef PFE_ETH_NAPI_STATS
  3475. + priv->napi_counters[NAPI_SCHED_COUNT]++;
  3476. +#endif
  3477. + __napi_schedule(&priv->low_napi);
  3478. + }
  3479. + } else if (qno == 2) {
  3480. + if (napi_schedule_prep(&priv->lro_napi)) {
  3481. + netif_info(priv, intr, priv->ndev,
  3482. + "%s: schedule lro prio poll\n"
  3483. + , __func__);
  3484. +
  3485. +#ifdef PFE_ETH_NAPI_STATS
  3486. + priv->napi_counters[NAPI_SCHED_COUNT]++;
  3487. +#endif
  3488. + __napi_schedule(&priv->lro_napi);
  3489. + }
  3490. + }
  3491. +
  3492. + break;
  3493. +
  3494. + case EVENT_TXDONE_IND:
  3495. + pfe_eth_flush_tx(priv);
  3496. + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
  3497. + break;
  3498. + case EVENT_HIGH_RX_WM:
  3499. + default:
  3500. + break;
  3501. + }
  3502. +
  3503. + return 0;
  3504. +}
  3505. +
  3506. +/* pfe_eth_open
  3507. + */
  3508. +static int pfe_eth_open(struct net_device *ndev)
  3509. +{
  3510. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3511. + struct hif_client_s *client;
  3512. + int rc;
  3513. +
  3514. + netif_info(priv, ifup, ndev, "%s\n", __func__);
  3515. +
  3516. + /* Register client driver with HIF */
  3517. + client = &priv->client;
  3518. + memset(client, 0, sizeof(*client));
  3519. + client->id = PFE_CL_GEM0 + priv->id;
  3520. + client->tx_qn = emac_txq_cnt;
  3521. + client->rx_qn = EMAC_RXQ_CNT;
  3522. + client->priv = priv;
  3523. + client->pfe = priv->pfe;
  3524. + client->event_handler = pfe_eth_event_handler;
  3525. +
  3526. + client->tx_qsize = EMAC_TXQ_DEPTH;
  3527. + client->rx_qsize = EMAC_RXQ_DEPTH;
  3528. +
  3529. + rc = hif_lib_client_register(client);
  3530. + if (rc) {
  3531. + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
  3532. + __func__, client->id);
  3533. + goto err0;
  3534. + }
  3535. +
  3536. + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
  3537. + client);
  3538. +
  3539. + pfe_gemac_init(priv);
  3540. +
  3541. + if (!is_valid_ether_addr(ndev->dev_addr)) {
  3542. + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
  3543. + rc = -EADDRNOTAVAIL;
  3544. + goto err1;
  3545. + }
  3546. +
  3547. + gemac_set_laddrN(priv->EMAC_baseaddr,
  3548. + (struct pfe_mac_addr *)ndev->dev_addr, 1);
  3549. +
  3550. + napi_enable(&priv->high_napi);
  3551. + napi_enable(&priv->low_napi);
  3552. + napi_enable(&priv->lro_napi);
  3553. +
  3554. + rc = pfe_eth_start(priv);
  3555. +
  3556. + netif_tx_wake_all_queues(ndev);
  3557. +
  3558. + return rc;
  3559. +
  3560. +err1:
  3561. + hif_lib_client_unregister(&priv->client);
  3562. +
  3563. +err0:
  3564. + return rc;
  3565. +}
  3566. +
  3567. +/*
  3568. + * pfe_eth_shutdown
  3569. + */
  3570. +int pfe_eth_shutdown(struct net_device *ndev, int wake)
  3571. +{
  3572. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3573. + int i, qstatus;
  3574. + unsigned long next_poll = jiffies + 1, end = jiffies +
  3575. + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
  3576. + int tx_pkts, prv_tx_pkts;
  3577. +
  3578. + netif_info(priv, ifdown, ndev, "%s\n", __func__);
  3579. +
  3580. + for (i = 0; i < emac_txq_cnt; i++)
  3581. + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
  3582. +
  3583. + netif_tx_stop_all_queues(ndev);
  3584. +
  3585. + do {
  3586. + tx_pkts = 0;
  3587. + pfe_eth_flush_tx(priv);
  3588. +
  3589. + for (i = 0; i < emac_txq_cnt; i++)
  3590. + tx_pkts += hif_lib_tx_pending(&priv->client, i);
  3591. +
  3592. + if (tx_pkts) {
  3593. + /*Don't wait forever, break if we cross max timeout */
  3594. + if (time_after(jiffies, end)) {
  3595. + pr_err(
  3596. + "(%s)Tx is not complete after %dmsec\n",
  3597. + ndev->name, TX_POLL_TIMEOUT_MS);
  3598. + break;
  3599. + }
  3600. +
  3601. + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
  3602. + , __func__, ndev->name, tx_pkts);
  3603. + if (need_resched())
  3604. + schedule();
  3605. + }
  3606. +
  3607. + } while (tx_pkts);
  3608. +
  3609. + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
  3610. +
  3611. + prv_tx_pkts = tmu_pkts_processed(priv->id);
  3612. + /*
  3613. + * Wait till TMU transmits all pending packets
  3614. + * poll tmu_qstatus and pkts processed by TMU for every 10ms
  3615. + * Consider TMU is busy, If we see TMU qeueu pending or any packets
  3616. + * processed by TMU
  3617. + */
  3618. + while (1) {
  3619. + if (time_after(jiffies, next_poll)) {
  3620. + tx_pkts = tmu_pkts_processed(priv->id);
  3621. + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
  3622. +
  3623. + if (!qstatus && (tx_pkts == prv_tx_pkts))
  3624. + break;
  3625. + /* Don't wait forever, break if we cross max
  3626. + * timeout(TX_POLL_TIMEOUT_MS)
  3627. + */
  3628. + if (time_after(jiffies, end)) {
  3629. + pr_err("TMU%d is busy after %dmsec\n",
  3630. + priv->id, TX_POLL_TIMEOUT_MS);
  3631. + break;
  3632. + }
  3633. + prv_tx_pkts = tx_pkts;
  3634. + next_poll++;
  3635. + }
  3636. + if (need_resched())
  3637. + schedule();
  3638. + }
  3639. + /* Wait for some more time to complete transmitting packet if any */
  3640. + next_poll = jiffies + 1;
  3641. + while (1) {
  3642. + if (time_after(jiffies, next_poll))
  3643. + break;
  3644. + if (need_resched())
  3645. + schedule();
  3646. + }
  3647. +
  3648. + pfe_eth_stop(ndev, wake);
  3649. +
  3650. + napi_disable(&priv->lro_napi);
  3651. + napi_disable(&priv->low_napi);
  3652. + napi_disable(&priv->high_napi);
  3653. +
  3654. + hif_lib_client_unregister(&priv->client);
  3655. +
  3656. + return 0;
  3657. +}
  3658. +
  3659. +/* pfe_eth_close
  3660. + *
  3661. + */
  3662. +static int pfe_eth_close(struct net_device *ndev)
  3663. +{
  3664. + pfe_eth_shutdown(ndev, 0);
  3665. +
  3666. + return 0;
  3667. +}
  3668. +
  3669. +/* pfe_eth_suspend
  3670. + *
  3671. + * return value : 1 if netdevice is configured to wakeup system
  3672. + * 0 otherwise
  3673. + */
  3674. +int pfe_eth_suspend(struct net_device *ndev)
  3675. +{
  3676. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3677. + int retval = 0;
  3678. +
  3679. + if (priv->wol) {
  3680. + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
  3681. + retval = 1;
  3682. + }
  3683. + pfe_eth_shutdown(ndev, priv->wol);
  3684. +
  3685. + return retval;
  3686. +}
  3687. +
  3688. +/* pfe_eth_resume
  3689. + *
  3690. + */
  3691. +int pfe_eth_resume(struct net_device *ndev)
  3692. +{
  3693. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3694. +
  3695. + if (priv->wol)
  3696. + gemac_set_wol(priv->EMAC_baseaddr, 0);
  3697. + gemac_tx_enable(priv->EMAC_baseaddr);
  3698. +
  3699. + return pfe_eth_open(ndev);
  3700. +}
  3701. +
  3702. +/* pfe_eth_get_queuenum
  3703. + */
  3704. +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
  3705. + *skb)
  3706. +{
  3707. + int queuenum = 0;
  3708. + unsigned long flags;
  3709. +
  3710. + /* Get the Fast Path queue number */
  3711. + /*
  3712. + * Use conntrack mark (if conntrack exists), then packet mark (if any),
  3713. + * then fallback to default
  3714. + */
  3715. +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
  3716. + if (skb->nfct) {
  3717. + enum ip_conntrack_info cinfo;
  3718. + struct nf_conn *ct;
  3719. +
  3720. + ct = nf_ct_get(skb, &cinfo);
  3721. +
  3722. + if (ct) {
  3723. + u32 connmark;
  3724. +
  3725. + connmark = ct->mark;
  3726. +
  3727. + if ((connmark & 0x80000000) && priv->id != 0)
  3728. + connmark >>= 16;
  3729. +
  3730. + queuenum = connmark & EMAC_QUEUENUM_MASK;
  3731. + }
  3732. + } else {/* continued after #endif ... */
  3733. +#endif
  3734. + if (skb->mark) {
  3735. + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
  3736. + } else {
  3737. + spin_lock_irqsave(&priv->lock, flags);
  3738. + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
  3739. + spin_unlock_irqrestore(&priv->lock, flags);
  3740. + }
  3741. +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
  3742. + }
  3743. +#endif
  3744. + return queuenum;
  3745. +}
  3746. +
  3747. +/* pfe_eth_might_stop_tx
  3748. + *
  3749. + */
  3750. +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
  3751. + struct netdev_queue *tx_queue,
  3752. + unsigned int n_desc,
  3753. + unsigned int n_segs)
  3754. +{
  3755. + ktime_t kt;
  3756. + int tried = 0;
  3757. +
  3758. +try_again:
  3759. + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
  3760. + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
  3761. + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
  3762. + if (!tried) {
  3763. + __hif_lib_update_credit(&priv->client, queuenum);
  3764. + tried = 1;
  3765. + goto try_again;
  3766. + }
  3767. +#ifdef PFE_ETH_TX_STATS
  3768. + if (__hif_tx_avail(&pfe->hif) < n_desc) {
  3769. + priv->stop_queue_hif[queuenum]++;
  3770. + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
  3771. + priv->stop_queue_hif_client[queuenum]++;
  3772. + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
  3773. + n_segs) {
  3774. + priv->stop_queue_credit[queuenum]++;
  3775. + }
  3776. + priv->stop_queue_total[queuenum]++;
  3777. +#endif
  3778. + netif_tx_stop_queue(tx_queue);
  3779. +
  3780. + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
  3781. + NSEC_PER_MSEC);
  3782. + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
  3783. + HRTIMER_MODE_REL);
  3784. + return -1;
  3785. + } else {
  3786. + return 0;
  3787. + }
  3788. +}
  3789. +
  3790. +#define SA_MAX_OP 2
  3791. +/* pfe_hif_send_packet
  3792. + *
  3793. + * At this level if TX fails we drop the packet
  3794. + */
  3795. +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
  3796. + *priv, int queuenum)
  3797. +{
  3798. + struct skb_shared_info *sh = skb_shinfo(skb);
  3799. + unsigned int nr_frags;
  3800. + u32 ctrl = 0;
  3801. +
  3802. + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
  3803. +
  3804. + if (skb_is_gso(skb)) {
  3805. + priv->stats.tx_dropped++;
  3806. + return;
  3807. + }
  3808. +
  3809. + if (skb->ip_summed == CHECKSUM_PARTIAL)
  3810. + ctrl = HIF_CTRL_TX_CHECKSUM;
  3811. +
  3812. + nr_frags = sh->nr_frags;
  3813. +
  3814. + if (nr_frags) {
  3815. + skb_frag_t *f;
  3816. + int i;
  3817. +
  3818. + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
  3819. + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
  3820. + skb);
  3821. +
  3822. + for (i = 0; i < nr_frags - 1; i++) {
  3823. + f = &sh->frags[i];
  3824. + __hif_lib_xmit_pkt(&priv->client, queuenum,
  3825. + skb_frag_address(f),
  3826. + skb_frag_size(f),
  3827. + 0x0, 0x0, skb);
  3828. + }
  3829. +
  3830. + f = &sh->frags[i];
  3831. +
  3832. + __hif_lib_xmit_pkt(&priv->client, queuenum,
  3833. + skb_frag_address(f), skb_frag_size(f),
  3834. + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
  3835. + skb);
  3836. +
  3837. + netif_info(priv, tx_queued, priv->ndev,
  3838. + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
  3839. + __func__, skb, nr_frags, skb->len);
  3840. + } else {
  3841. + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
  3842. + skb->len, ctrl, HIF_FIRST_BUFFER |
  3843. + HIF_LAST_BUFFER | HIF_DATA_VALID,
  3844. + skb);
  3845. + netif_info(priv, tx_queued, priv->ndev,
  3846. + "%s: pkt sent successfully skb:%p len:%d\n",
  3847. + __func__, skb, skb->len);
  3848. + }
  3849. + hif_tx_dma_start();
  3850. + priv->stats.tx_packets++;
  3851. + priv->stats.tx_bytes += skb->len;
  3852. + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
  3853. +}
  3854. +
  3855. +/* pfe_eth_flush_txQ
  3856. + */
  3857. +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
  3858. + from_tx, int n_desc)
  3859. +{
  3860. + struct sk_buff *skb;
  3861. + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
  3862. + tx_q_num);
  3863. + unsigned int flags;
  3864. +
  3865. + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
  3866. +
  3867. + if (!from_tx)
  3868. + __netif_tx_lock_bh(tx_queue);
  3869. +
  3870. + /* Clean HIF and client queue */
  3871. + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
  3872. + tx_q_num, &flags,
  3873. + HIF_TX_DESC_NT))) {
  3874. + if (flags & HIF_DATA_VALID)
  3875. + dev_kfree_skb_any(skb);
  3876. + }
  3877. + if (!from_tx)
  3878. + __netif_tx_unlock_bh(tx_queue);
  3879. +}
  3880. +
  3881. +/* pfe_eth_flush_tx
  3882. + */
  3883. +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
  3884. +{
  3885. + int ii;
  3886. +
  3887. + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
  3888. +
  3889. + for (ii = 0; ii < emac_txq_cnt; ii++) {
  3890. + pfe_eth_flush_txQ(priv, ii, 0, 0);
  3891. + __hif_lib_update_credit(&priv->client, ii);
  3892. + }
  3893. +}
  3894. +
  3895. +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
  3896. + *n_segs)
  3897. +{
  3898. + struct skb_shared_info *sh = skb_shinfo(skb);
  3899. +
  3900. + /* Scattered data */
  3901. + if (sh->nr_frags) {
  3902. + *n_desc = sh->nr_frags + 1;
  3903. + *n_segs = 1;
  3904. + /* Regular case */
  3905. + } else {
  3906. + *n_desc = 1;
  3907. + *n_segs = 1;
  3908. + }
  3909. +}
  3910. +
/* pfe_eth_send_packet
 * ndo_start_xmit handler: hand one skb to the HIF for transmission.
 *
 * Returns NETDEV_TX_OK when the packet was queued (or dropped irrecoverably),
 * NETDEV_TX_BUSY when descriptor/credit resources are exhausted so the core
 * will requeue the skb.
 */
static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
{
	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
	int tx_q_num = skb_get_queue_mapping(skb);
	int n_desc, n_segs;
	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
							    tx_q_num);

	netif_info(priv, tx_queued, ndev, "%s\n", __func__);

	/* Non-GSO packets need headroom for the PFE packet header; reallocate
	 * the head if the skb doesn't have enough.
	 */
	if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
			sizeof(unsigned long)))) {
		netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
			   __func__);

		if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
					long)), 0, GFP_ATOMIC)) {
			/* No need to re-transmit, no way to recover*/
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	pfe_tx_get_req_desc(skb, &n_desc, &n_segs);

	/* The HIF is shared between all GEMACs, serialize access to it */
	hif_tx_lock(&pfe->hif);
	if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
					   n_segs))) {
#ifdef PFE_ETH_TX_STATS
		/* Queue was woken by the fast timer but still couldn't make
		 * progress: count it as a failed cleanup.
		 */
		if (priv->was_stopped[tx_q_num]) {
			priv->clean_fail[tx_q_num]++;
			priv->was_stopped[tx_q_num] = 0;
		}
#endif
		hif_tx_unlock(&pfe->hif);
		return NETDEV_TX_BUSY;
	}

	pfe_hif_send_packet(skb, priv, tx_q_num);

	hif_tx_unlock(&pfe->hif);

	/* Record activity so the tx watchdog doesn't fire */
	tx_queue->trans_start = jiffies;

#ifdef PFE_ETH_TX_STATS
	priv->was_stopped[tx_q_num] = 0;
#endif

	return NETDEV_TX_OK;
}
  3964. +
  3965. +/* pfe_eth_select_queue
  3966. + *
  3967. + */
  3968. +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
  3969. + void *accel_priv,
  3970. + select_queue_fallback_t fallback)
  3971. +{
  3972. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3973. +
  3974. + return pfe_eth_get_queuenum(priv, skb);
  3975. +}
  3976. +
  3977. +/* pfe_eth_get_stats
  3978. + */
  3979. +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
  3980. +{
  3981. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3982. +
  3983. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3984. +
  3985. + return &priv->stats;
  3986. +}
  3987. +
  3988. +/* pfe_eth_set_mac_address
  3989. + */
  3990. +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
  3991. +{
  3992. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  3993. + struct sockaddr *sa = addr;
  3994. +
  3995. + netif_info(priv, drv, ndev, "%s\n", __func__);
  3996. +
  3997. + if (!is_valid_ether_addr(sa->sa_data))
  3998. + return -EADDRNOTAVAIL;
  3999. +
  4000. + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
  4001. +
  4002. + gemac_set_laddrN(priv->EMAC_baseaddr,
  4003. + (struct pfe_mac_addr *)ndev->dev_addr, 1);
  4004. +
  4005. + return 0;
  4006. +}
  4007. +
  4008. +/* pfe_eth_enet_addr_byte_mac
  4009. + */
  4010. +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
  4011. + struct pfe_mac_addr *enet_addr)
  4012. +{
  4013. + if (!enet_byte_addr || !enet_addr) {
  4014. + return -1;
  4015. +
  4016. + } else {
  4017. + enet_addr->bottom = enet_byte_addr[0] |
  4018. + (enet_byte_addr[1] << 8) |
  4019. + (enet_byte_addr[2] << 16) |
  4020. + (enet_byte_addr[3] << 24);
  4021. + enet_addr->top = enet_byte_addr[4] |
  4022. + (enet_byte_addr[5] << 8);
  4023. + return 0;
  4024. + }
  4025. +}
  4026. +
/* pfe_eth_set_multi
 * ndo_set_rx_mode handler: program GEMAC promiscuous/broadcast settings, the
 * multicast hash filter, and the unicast specific-address registers to match
 * the net_device's current address lists and flags.
 *
 * Specific-address register 1 holds the station address (set elsewhere), so
 * extra unicast entries start at register index 2 throughout this function.
 */
static void pfe_eth_set_multi(struct net_device *ndev)
{
	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
	struct pfe_mac_addr hash_addr; /* hash register structure */
	/* specific mac address register structure */
	struct pfe_mac_addr spec_addr;
	int result; /* index into hash register to set.. */
	int uc_count = 0;
	struct netdev_hw_addr *ha;

	if (ndev->flags & IFF_PROMISC) {
		netif_info(priv, drv, ndev, "entering promiscuous mode\n");

		priv->promisc = 1;
		gemac_enable_copy_all(priv->EMAC_baseaddr);
	} else {
		priv->promisc = 0;
		gemac_disable_copy_all(priv->EMAC_baseaddr);
	}

	/* Enable broadcast frame reception if required. */
	if (ndev->flags & IFF_BROADCAST) {
		gemac_allow_broadcast(priv->EMAC_baseaddr);
	} else {
		netif_info(priv, drv, ndev,
			   "disabling broadcast frame reception\n");

		gemac_no_broadcast(priv->EMAC_baseaddr);
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		hash_addr.bottom = 0xFFFFFFFF;
		hash_addr.top = 0xFFFFFFFF;
		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
		/* Unicast addresses still go into the specific-address
		 * registers (first MAX_UC_SPEC_ADDR_REG of them).
		 */
		netdev_for_each_uc_addr(ha, ndev) {
			if (uc_count >= MAX_UC_SPEC_ADDR_REG)
				break;
			pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
			gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
					 uc_count + 2);
			uc_count++;
		}
	} else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
		u8 *addr;

		hash_addr.bottom = 0;
		hash_addr.top = 0;

		/* Fold every multicast address into the 64-bit hash image */
		netdev_for_each_mc_addr(ha, ndev) {
			addr = ha->addr;

			netif_info(priv, drv, ndev,
				   "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
				   addr[0], addr[1], addr[2],
				   addr[3], addr[4], addr[5]);

			result = pfe_eth_get_hash(addr);

			if (result < EMAC_HASH_REG_BITS) {
				if (result < 32)
					hash_addr.bottom |= (1 << result);
				else
					hash_addr.top |= (1 << (result - 32));
			} else {
				break;
			}
		}

		/* Unicast: exact-match registers first, overflow to hash */
		uc_count = -1;
		netdev_for_each_uc_addr(ha, ndev) {
			addr = ha->addr;

			if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
				netdev_info(ndev,
					    "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
					    addr[0], addr[1], addr[2],
					    addr[3], addr[4], addr[5]);
				pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
				gemac_set_laddrN(priv->EMAC_baseaddr,
						 &spec_addr, uc_count + 2);
			} else {
				netif_info(priv, drv, ndev,
					   "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
					   addr[0], addr[1], addr[2],
					   addr[3], addr[4], addr[5]);

				result = pfe_eth_get_hash(addr);
				if (result >= EMAC_HASH_REG_BITS) {
					break;

				} else {
					if (result < 32)
						hash_addr.bottom |= (1 <<
								result);
					else
						hash_addr.top |= (1 <<
								(result - 32));
				}
			}
		}

		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
	}

	if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
		/*
		 * Check if there are any specific address HW registers that
		 * need to be flushed
		 */
		for (uc_count = netdev_uc_count(ndev); uc_count <
			MAX_UC_SPEC_ADDR_REG; uc_count++)
			gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
	}

	/* NOTE(review): LB_LOCAL loopback is enabled here but never visibly
	 * disabled in this function when IFF_LOOPBACK clears — confirm the
	 * reset path handles that.
	 */
	if (ndev->flags & IFF_LOOPBACK)
		gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
}
  4147. +
  4148. +/* pfe_eth_set_features
  4149. + */
  4150. +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
  4151. + features)
  4152. +{
  4153. + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
  4154. + int rc = 0;
  4155. +
  4156. + if (features & NETIF_F_RXCSUM)
  4157. + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
  4158. + else
  4159. + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
  4160. + return rc;
  4161. +}
  4162. +
/* pfe_eth_fast_tx_timeout
 * hrtimer callback armed when a tx queue is stopped: wake the queue again so
 * the tx path can retry cleanup, instead of waiting for the core watchdog.
 *
 * The timer lives inside a per-queue pfe_eth_fast_timer; its ->base points at
 * the priv's fast_tx_timeout array, which is how we recover priv here via two
 * container_of steps.
 */
static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
{
	struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
							pfe_eth_fast_timer,
							timer);
	struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
						   struct pfe_eth_priv_s,
						   fast_tx_timeout);
	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
						fast_tx_timeout->queuenum);

	if (netif_tx_queue_stopped(tx_queue)) {
#ifdef PFE_ETH_TX_STATS
		/* Remember the wake so the tx path can count clean failures */
		priv->was_stopped[fast_tx_timeout->queuenum] = 1;
#endif
		netif_tx_wake_queue(tx_queue);
	}

	/* one-shot: re-armed by the tx path when the queue stops again */
	return HRTIMER_NORESTART;
}
  4185. +
  4186. +/* pfe_eth_fast_tx_timeout_init
  4187. + */
  4188. +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
  4189. +{
  4190. + int i;
  4191. +
  4192. + for (i = 0; i < emac_txq_cnt; i++) {
  4193. + priv->fast_tx_timeout[i].queuenum = i;
  4194. + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
  4195. + HRTIMER_MODE_REL);
  4196. + priv->fast_tx_timeout[i].timer.function =
  4197. + pfe_eth_fast_tx_timeout;
  4198. + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
  4199. + }
  4200. +}
  4201. +
/* pfe_eth_rx_skb
 * Assemble one complete packet from HIF rx descriptors for queue @qno.
 *
 * A packet may span several descriptors (CL_DESC_FIRST .. CL_DESC_LAST);
 * partially assembled packets are parked in priv->skb_inflight[qno] between
 * calls. Continuation buffers are chained via the head skb's frag_list.
 *
 * Returns the completed skb, or NULL when the packet is still incomplete
 * (state saved) or had to be dropped (rx_errors incremented).
 */
static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
				      struct pfe_eth_priv_s *priv,
				      unsigned int qno)
{
	void *buf_addr;
	unsigned int rx_ctrl;
	unsigned int desc_ctrl = 0;
	struct hif_ipsec_hdr *ipsec_hdr = NULL;
	struct sk_buff *skb;
	struct sk_buff *skb_frag, *skb_frag_last = NULL;
	int length = 0, offset;

	/* Resume a packet left incomplete by the previous call, and find the
	 * current tail of its frag_list chain.
	 */
	skb = priv->skb_inflight[qno];

	if (skb) {
		skb_frag_last = skb_shinfo(skb)->frag_list;
		if (skb_frag_last) {
			while (skb_frag_last->next)
				skb_frag_last = skb_frag_last->next;
		}
	}

	while (!(desc_ctrl & CL_DESC_LAST)) {
		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
					       &offset, &rx_ctrl, &desc_ctrl,
					       (void **)&ipsec_hdr);
		/* No descriptor available yet: keep partial state and bail */
		if (!buf_addr)
			goto incomplete;

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_DESC_COUNT]++;
#endif

		/* First frag */
		if (desc_ctrl & CL_DESC_FIRST) {
			/* Wrap the HIF buffer without copying */
			skb = build_skb(buf_addr, 0);
			if (unlikely(!skb))
				goto pkt_drop;

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->dev = ndev;

			if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
					HIF_CTRL_RX_CHECKSUMMED))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb_checksum_none_assert(skb);

		} else {
			/* Next frags */
			if (unlikely(!skb)) {
				pr_err("%s: NULL skb_inflight\n",
				       __func__);
				goto pkt_drop;
			}

			skb_frag = build_skb(buf_addr, 0);

			if (unlikely(!skb_frag)) {
				kfree(buf_addr);
				goto pkt_drop;
			}

			skb_reserve(skb_frag, offset);
			skb_put(skb_frag, length);

			skb_frag->dev = ndev;

			/* Append to frag_list (or start it) */
			if (skb_shinfo(skb)->frag_list)
				skb_frag_last->next = skb_frag;
			else
				skb_shinfo(skb)->frag_list = skb_frag;

			/* Keep head skb accounting consistent with the chain */
			skb->truesize += skb_frag->truesize;
			skb->data_len += length;
			skb->len += length;
			skb_frag_last = skb_frag;
		}
	}

	priv->skb_inflight[qno] = NULL;
	return skb;

incomplete:
	priv->skb_inflight[qno] = skb;
	return NULL;

pkt_drop:
	priv->skb_inflight[qno] = NULL;

	/* If a head skb exists it owns all chained buffers; otherwise only
	 * the raw HIF buffer needs freeing.
	 */
	if (skb)
		kfree_skb(skb);
	else
		kfree(buf_addr);

	priv->stats.rx_errors++;

	return NULL;
}
  4302. +
/* pfe_eth_poll
 * Common NAPI poll body shared by the high/low/lro pollers: receive up to
 * @budget packets from client queue @qno and push them up the stack.
 *
 * Returns the number of packets processed; when under budget, completes NAPI
 * and re-enables rx event handling for the queue.
 */
static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
			unsigned int qno, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int work_done = 0;
	unsigned int len;

	netif_info(priv, intr, priv->ndev, "%s\n", __func__);

#ifdef PFE_ETH_NAPI_STATS
	priv->napi_counters[NAPI_POLL_COUNT]++;
#endif

	do {
		skb = pfe_eth_rx_skb(ndev, priv, qno);

		if (!skb)
			break;

		/* Cache length: eth_type_trans() pulls the header */
		len = skb->len;

		/* Packet will be processed */
		skb->protocol = eth_type_trans(skb, ndev);

		netif_receive_skb(skb);

		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;

		work_done++;

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_PACKET_COUNT]++;
#endif

	} while (work_done < budget);

	/*
	 * If no Rx receive nor cleanup work was done, exit polling mode.
	 * No more netif_running(dev) check is required here , as this is
	 * checked in net/core/dev.c (2.6.33.5 kernel specific).
	 */
	if (work_done < budget) {
		napi_complete(napi);

		/* Re-arm rx event delivery for this queue */
		hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
					    qno);
	}
#ifdef PFE_ETH_NAPI_STATS
	else
		priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
#endif

	return work_done;
}
  4361. +
  4362. +/*
  4363. + * pfe_eth_lro_poll
  4364. + */
  4365. +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
  4366. +{
  4367. + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
  4368. + lro_napi);
  4369. +
  4370. + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
  4371. +
  4372. + return pfe_eth_poll(priv, napi, 2, budget);
  4373. +}
  4374. +
  4375. +/* pfe_eth_low_poll
  4376. + */
  4377. +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
  4378. +{
  4379. + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
  4380. + low_napi);
  4381. +
  4382. + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
  4383. +
  4384. + return pfe_eth_poll(priv, napi, 1, budget);
  4385. +}
  4386. +
  4387. +/* pfe_eth_high_poll
  4388. + */
  4389. +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
  4390. +{
  4391. + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
  4392. + high_napi);
  4393. +
  4394. + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
  4395. +
  4396. + return pfe_eth_poll(priv, napi, 0, budget);
  4397. +}
  4398. +
/* Network device operations exposed to the core; wired up in
 * pfe_eth_init_one(). No ndo_change_mtu: mtu limits are enforced via
 * ndev->min_mtu/max_mtu.
 */
static const struct net_device_ops pfe_netdev_ops = {
	.ndo_open = pfe_eth_open,
	.ndo_stop = pfe_eth_close,
	.ndo_start_xmit = pfe_eth_send_packet,
	.ndo_select_queue = pfe_eth_select_queue,
	.ndo_get_stats = pfe_eth_get_stats,
	.ndo_set_mac_address = pfe_eth_set_mac_address,
	.ndo_set_rx_mode = pfe_eth_set_multi,
	.ndo_set_features = pfe_eth_set_features,
	.ndo_validate_addr = eth_validate_addr,
};
  4410. +
/* pfe_eth_init_one
 * Create and register one ethernet interface for GEMAC index @id: allocate
 * the net_device, wire up base addresses/TMU queues, init mdio and (unless
 * running in 'us' mode) register the netdev, NAPI contexts and sysfs files,
 * then bring up the PHY.
 *
 * Returns 0 on success or a negative errno; on failure everything allocated
 * so far is unwound through the err4..err0 ladder.
 */
static int pfe_eth_init_one(struct pfe *pfe, int id)
{
	struct net_device *ndev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct ls1012a_eth_platform_data *einfo;
	struct ls1012a_mdio_platform_data *minfo;
	struct ls1012a_pfe_platform_data *pfe_info;
	int err;

	/* Extract pltform data */
	pfe_info = (struct ls1012a_pfe_platform_data *)
					pfe->dev->platform_data;
	if (!pfe_info) {
		pr_err(
			"%s: pfe missing additional platform data\n"
			, __func__);
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct ls1012a_eth_platform_data *)
				pfe_info->ls1012a_eth_pdata;

	/* einfo never be NULL, but no harm in having this check */
	if (!einfo) {
		pr_err(
			"%s: pfe missing additional gemacs platform data\n"
			, __func__);
		err = -ENODEV;
		goto err0;
	}

	minfo = (struct ls1012a_mdio_platform_data *)
				pfe_info->ls1012a_mdio_pdata;

	/* einfo never be NULL, but no harm in having this check */
	if (!minfo) {
		pr_err(
			"%s: pfe missing additional mdios platform data\n",
			 __func__);
		err = -ENODEV;
		goto err0;
	}

	/* 'us' (user-space data path) mode uses the full tx queue count */
	if (us)
		emac_txq_cnt = EMAC_TXQ_CNT;
	/* Create an ethernet device instance */
	ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);

	if (!ndev) {
		pr_err("%s: gemac %d device allocation failed\n",
		       __func__, einfo[id].gem_id);
		err = -ENOMEM;
		goto err0;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

	SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);

	pfe->eth.eth_priv[id] = priv;

	/* Set the info in the priv to the current info */
	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	/* PHY access always goes through EMAC1's register window */
	priv->PHY_baseaddr = cbus_emac_base[0];
	priv->GPI_baseaddr = cbus_gpi_base[id];

/* Each GEMAC owns a pair of TMU queues starting at this base */
#define HIF_GEMAC_TMUQ_BASE	6
	priv->low_tmu_q	=  HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmu_q	=  priv->low_tmu_q + 1;

	spin_lock_init(&priv->lock);

	pfe_eth_fast_tx_timeout_init(priv);

	/* Copy the station address into the dev structure, */
	memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);

	/* Initialize mdio */
	if (minfo[id].enabled) {
		err = pfe_eth_mdio_init(priv, &minfo[id]);
		if (err) {
			netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
				   __func__);
			goto err2;
		}
	}

	/* In 'us' mode skip kernel netdev registration entirely */
	if (us)
		goto phy_init;

	ndev->mtu = 1500;

	/* Set MTU limits */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = JUMBO_FRAME_SIZE;

	/* supported features */
	ndev->hw_features = NETIF_F_SG;

	/*Enable after checksum offload is validated */
	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_SG;

	/* enabled by default */
	ndev->features = ndev->hw_features;

	priv->usr_features = ndev->features;

	ndev->netdev_ops = &pfe_netdev_ops;

	ndev->ethtool_ops = &pfe_ethtool_ops;

	/* Enable basic messages by default */
	priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
				NETIF_MSG_PROBE;

	netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
		       HIF_RX_POLL_WEIGHT - 16);
	netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
		       HIF_RX_POLL_WEIGHT - 16);
	netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
		       HIF_RX_POLL_WEIGHT - 16);

	err = register_netdev(ndev);

	if (err) {
		netdev_err(ndev, "register_netdev() failed\n");
		goto err3;
	}

phy_init:
	device_init_wakeup(&ndev->dev, WAKE_MAGIC);

	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
		err = pfe_phy_init(ndev);
		if (err) {
			netdev_err(ndev, "%s: pfe_phy_init() failed\n",
				   __func__);
			goto err4;
		}
	}

	if (us) {
		if (priv->phydev)
			phy_start(priv->phydev);
		return 0;
	}

	netif_carrier_on(ndev);

	/* Create all the sysfs files */
	if (pfe_eth_sysfs_init(ndev))
		goto err4;

	netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
		   __func__, priv->EMAC_baseaddr);

	return 0;
err4:
	/* In 'us' mode the netdev was never registered */
	if (us)
		goto err3;
	unregister_netdev(ndev);
err3:
	pfe_eth_mdio_exit(priv->mii_bus);
err2:
	free_netdev(priv->ndev);
err0:
	return err;
}
  4587. +
  4588. +/* pfe_eth_init
  4589. + */
  4590. +int pfe_eth_init(struct pfe *pfe)
  4591. +{
  4592. + int ii = 0;
  4593. + int err;
  4594. +
  4595. + pr_info("%s\n", __func__);
  4596. +
  4597. + cbus_emac_base[0] = EMAC1_BASE_ADDR;
  4598. + cbus_emac_base[1] = EMAC2_BASE_ADDR;
  4599. +
  4600. + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
  4601. + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
  4602. +
  4603. + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
  4604. + err = pfe_eth_init_one(pfe, ii);
  4605. + if (err)
  4606. + goto err0;
  4607. + }
  4608. +
  4609. + return 0;
  4610. +
  4611. +err0:
  4612. + while (ii--)
  4613. + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
  4614. +
  4615. + /* Register three network devices in the kernel */
  4616. + return err;
  4617. +}
  4618. +
/* pfe_eth_exit_one
 * Tear down one interface in reverse order of pfe_eth_init_one(): sysfs,
 * PHY, netdev registration, mdio bus, then the net_device itself. Steps
 * skipped in 'us' mode during init are skipped here too.
 */
static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
{
	netif_info(priv, probe, priv->ndev, "%s\n", __func__);

	if (!us)
		pfe_eth_sysfs_exit(priv->ndev);

	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
		pfe_phy_exit(priv->ndev);

	if (!us)
		unregister_netdev(priv->ndev);

	if (priv->mii_bus)
		pfe_eth_mdio_exit(priv->mii_bus);

	free_netdev(priv->ndev);
}
  4639. +
  4640. +/* pfe_eth_exit
  4641. + */
  4642. +void pfe_eth_exit(struct pfe *pfe)
  4643. +{
  4644. + int ii;
  4645. +
  4646. + pr_info("%s\n", __func__);
  4647. +
  4648. + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
  4649. + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
  4650. +}
  4651. --- /dev/null
  4652. +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
  4653. @@ -0,0 +1,184 @@
  4654. +/*
  4655. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  4656. + * Copyright 2017 NXP
  4657. + *
  4658. + * This program is free software; you can redistribute it and/or modify
  4659. + * it under the terms of the GNU General Public License as published by
  4660. + * the Free Software Foundation; either version 2 of the License, or
  4661. + * (at your option) any later version.
  4662. + *
  4663. + * This program is distributed in the hope that it will be useful,
  4664. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  4665. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  4666. + * GNU General Public License for more details.
  4667. + *
  4668. + * You should have received a copy of the GNU General Public License
  4669. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  4670. + */
  4671. +
  4672. +#ifndef _PFE_ETH_H_
  4673. +#define _PFE_ETH_H_
  4674. +#include <linux/kernel.h>
  4675. +#include <linux/netdevice.h>
  4676. +#include <linux/etherdevice.h>
  4677. +#include <linux/ethtool.h>
  4678. +#include <linux/mii.h>
  4679. +#include <linux/phy.h>
  4680. +#include <linux/clk.h>
  4681. +#include <linux/interrupt.h>
  4682. +#include <linux/time.h>
  4683. +
/* Compile-time switches for extra NAPI / tx statistics counters */
#define PFE_ETH_NAPI_STATS
#define PFE_ETH_TX_STATS

/* Worst-case rx fragments for a 64K packet split into minimum-size buffers */
#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
#define LRO_LEN_COUNT_MAX	32
#define LRO_NB_COUNT_MAX	32

/* Bits for pfe_eth_priv_s::pause_flag */
#define PFE_PAUSE_FLAG_ENABLE		1
#define PFE_PAUSE_FLAG_AUTONEG		2

/* GEMAC configured by SW */
/* GEMAC configured by phy lines (not for MII/GMII) */

/* Duplex/speed encodings written by software into the GEMAC config */
#define GEMAC_SW_FULL_DUPLEX    BIT(9)
#define GEMAC_SW_SPEED_10M      (0 << 12)
#define GEMAC_SW_SPEED_100M     BIT(12)
#define GEMAC_SW_SPEED_1G       (2 << 12)

/* phy_flags bit: this GEMAC has no attached PHY */
#define GEMAC_NO_PHY            BIT(0)
  4703. +
/* Per-GEMAC configuration handed in through platform data */
struct ls1012a_eth_platform_data {
	/* device specific information */
	u32 device_flags;
	char name[16];

	/* board specific information */
	u32 mii_config;		/* MII/RGMII mode selection */
	u32 phy_flags;		/* e.g. GEMAC_NO_PHY */
	u32 gem_id;		/* GEMAC hardware index */
	u32 bus_id;
	u32 phy_id;
	u32 mdio_muxval;
	u8 mac_addr[ETH_ALEN];	/* station address for this port */
};
  4718. +
/* Per-MDIO-bus configuration handed in through platform data */
struct ls1012a_mdio_platform_data {
	int enabled;		/* non-zero: register an mdio bus for this port */
	int irq[32];		/* per-PHY irq table */
	u32 phy_mask;		/* PHY addresses to skip during probe */
	int mdc_div;		/* MDC clock divider */
};
  4725. +
/* Aggregate platform data for the whole PFE: one entry per possible port */
struct ls1012a_pfe_platform_data {
	struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
	struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
};
  4730. +
#define NUM_GEMAC_SUPPORT	2
#define DRV_NAME		"pfe-eth"
#define DRV_VERSION		"1.0"

/* Fast tx-wake hrtimer interval and watchdog tx timeout */
#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS	3
#define TX_POLL_TIMEOUT_MS	1000

#define EMAC_TXQ_CNT	16
#define EMAC_TXQ_DEPTH	(HIF_TX_DESC_NT)

#define JUMBO_FRAME_SIZE	10258
/*
 * Client Tx queue threshold, for txQ flush condition.
 * It must be smaller than the queue size (in case we ever change it in the
 * future).
 */
#define HIF_CL_TX_FLUSH_MARK	32

/*
 * Max number of TX resources (HIF descriptors or skbs) that will be released
 * in a single go during batch recycling.
 * Should be lower than the flush mark so the SW can provide the HW with a
 * continuous stream of packets instead of bursts.
 */
#define TX_FREE_MAX_COUNT 16
#define EMAC_RXQ_CNT	3
#define EMAC_RXQ_DEPTH	HIF_RX_DESC_NT
/* make sure clients can receive a full burst of packets */
/* Byte offsets of the tx/rx byte counters in the RMON register block */
#define EMAC_RMON_TXBYTES_POS	0x00
#define EMAC_RMON_RXBYTES_POS	0x14

/* Queue-number mask; assumes emac_txq_cnt is a power of two */
#define EMAC_QUEUENUM_MASK	(emac_txq_cnt - 1)
#define EMAC_MDIO_TIMEOUT	1000
/* Specific-address registers available beyond register 1 (station addr) */
#define MAX_UC_SPEC_ADDR_REG 31
  4765. +
/* Per-tx-queue fast recovery timer; ->base points back at the priv's
 * fast_tx_timeout array so the hrtimer callback can recover the priv via
 * container_of.
 */
struct pfe_eth_fast_timer {
	int queuenum;
	struct hrtimer timer;
	void *base;
};
  4771. +
/* Driver private state, one instance per GEMAC net_device */
struct pfe_eth_priv_s {
	struct pfe *pfe;
	struct hif_client_s client;	/* this port's HIF client context */
	struct napi_struct lro_napi;	/* rx queue 2 */
	struct napi_struct low_napi;	/* rx queue 1 */
	struct napi_struct high_napi;	/* rx queue 0 */
	int low_tmu_q;
	int high_tmu_q;
	struct net_device_stats stats;
	struct net_device *ndev;
	int id;				/* GEMAC hardware index */
	int promisc;
	unsigned int msg_enable;
	unsigned int usr_features;

	spinlock_t lock;		/* protect member variables */
	unsigned int event_status;
	int irq;
	void *EMAC_baseaddr;
	/* This points to the EMAC base from where we access PHY */
	void *PHY_baseaddr;
	void *GPI_baseaddr;
	/* PHY stuff */
	struct phy_device *phydev;
	int oldspeed;
	int oldduplex;
	int oldlink;
	/* mdio info */
	int mdc_div;
	struct mii_bus *mii_bus;
	struct clk *gemtx_clk;
	int wol;
	int pause_flag;			/* PFE_PAUSE_FLAG_* bits */

	int default_priority;
	struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];

	struct ls1012a_eth_platform_data *einfo;
	/* partially reassembled rx packets, one slot per client rx queue */
	struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];

#ifdef PFE_ETH_TX_STATS
	unsigned int stop_queue_total[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
	unsigned int stop_queue_credit[EMAC_TXQ_CNT];
	unsigned int clean_fail[EMAC_TXQ_CNT];
	unsigned int was_stopped[EMAC_TXQ_CNT];
#endif

#ifdef PFE_ETH_NAPI_STATS
	unsigned int napi_counters[NAPI_MAX_COUNT];
#endif
	unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
};
  4826. +
/* Container for all per-port private state (indexed by GEMAC id) */
struct pfe_eth {
	struct pfe_eth_priv_s *eth_priv[3];
};

int pfe_eth_init(struct pfe *pfe);
void pfe_eth_exit(struct pfe *pfe);
int pfe_eth_suspend(struct net_device *dev);
int pfe_eth_resume(struct net_device *dev);
int pfe_eth_mdio_reset(struct mii_bus *bus);

#endif /* _PFE_ETH_H_ */
  4838. --- /dev/null
  4839. +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
  4840. @@ -0,0 +1,314 @@
  4841. +/*
  4842. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  4843. + * Copyright 2017 NXP
  4844. + *
  4845. + * This program is free software; you can redistribute it and/or modify
  4846. + * it under the terms of the GNU General Public License as published by
  4847. + * the Free Software Foundation; either version 2 of the License, or
  4848. + * (at your option) any later version.
  4849. + *
  4850. + * This program is distributed in the hope that it will be useful,
  4851. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  4852. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  4853. + * GNU General Public License for more details.
  4854. + *
  4855. + * You should have received a copy of the GNU General Public License
  4856. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  4857. + */
  4858. +
  4859. +/*
  4860. + * @file
  4861. + * Contains all the functions to handle parsing and loading of PE firmware
  4862. + * files.
  4863. + */
  4864. +#include <linux/firmware.h>
  4865. +
  4866. +#include "pfe_mod.h"
  4867. +#include "pfe_firmware.h"
  4868. +#include "pfe/pfe.h"
  4869. +
  4870. +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
  4871. + const char *section)
  4872. +{
  4873. + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
  4874. + struct elf32_shdr *shdr;
  4875. + struct elf32_shdr *shdr_shstr;
  4876. + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
  4877. + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
  4878. + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
  4879. + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
  4880. + Elf32_Off shstr_offset;
  4881. + Elf32_Word sh_name;
  4882. + const char *name;
  4883. + int i;
  4884. +
  4885. + /* Section header strings */
  4886. + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
  4887. + e_shentsize);
  4888. + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
  4889. +
  4890. + for (i = 0; i < e_shnum; i++) {
  4891. + shdr = (struct elf32_shdr *)(fw->data + e_shoff
  4892. + + i * e_shentsize);
  4893. +
  4894. + sh_name = be32_to_cpu(shdr->sh_name);
  4895. +
  4896. + name = (const char *)(fw->data + shstr_offset + sh_name);
  4897. +
  4898. + if (!strcmp(name, section))
  4899. + return shdr;
  4900. + }
  4901. +
  4902. + pr_err("%s: didn't find section %s\n", __func__, section);
  4903. +
  4904. + return NULL;
  4905. +}
  4906. +
  4907. +#if defined(CFG_DIAGS)
  4908. +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
  4909. + *diags_info)
  4910. +{
  4911. + struct elf32_shdr *shdr;
  4912. + unsigned long offset, size;
  4913. +
  4914. + shdr = get_elf_section_header(fw, ".pfe_diags_str");
  4915. + if (shdr) {
  4916. + offset = be32_to_cpu(shdr->sh_offset);
  4917. + size = be32_to_cpu(shdr->sh_size);
  4918. + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
  4919. + diags_info->diags_str_size = size;
  4920. + diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
  4921. + memcpy(diags_info->diags_str_array, fw->data + offset, size);
  4922. +
  4923. + return 0;
  4924. + } else {
  4925. + return -1;
  4926. + }
  4927. +}
  4928. +#endif
  4929. +
  4930. +static void pfe_check_version_info(const struct firmware *fw)
  4931. +{
  4932. + /*static char *version = NULL;*/
  4933. + static char *version;
  4934. +
  4935. + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
  4936. +
  4937. + if (shdr) {
  4938. + if (!version) {
  4939. + /*
  4940. + * this is the first fw we load, use its version
  4941. + * string as reference (whatever it is)
  4942. + */
  4943. + version = (char *)(fw->data +
  4944. + be32_to_cpu(shdr->sh_offset));
  4945. +
  4946. + pr_info("PFE binary version: %s\n", version);
  4947. + } else {
  4948. + /*
  4949. + * already have loaded at least one firmware, check
  4950. + * sequence can start now
  4951. + */
  4952. + if (strcmp(version, (char *)(fw->data +
  4953. + be32_to_cpu(shdr->sh_offset)))) {
  4954. + pr_info(
  4955. + "WARNING: PFE firmware binaries from incompatible version\n");
  4956. + }
  4957. + }
  4958. + } else {
  4959. + /*
  4960. + * version cannot be verified, a potential issue that should
  4961. + * be reported
  4962. + */
  4963. + pr_info(
  4964. + "WARNING: PFE firmware binaries from incompatible version\n");
  4965. + }
  4966. +}
  4967. +
  4968. +/* PFE elf firmware loader.
  4969. + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
  4970. + *
  4971. + * @param pe_mask Mask of PE id's to load firmware to
  4972. + * @param fw Pointer to the firmware image
  4973. + *
  4974. + * @return 0 on success, a negative value on error
  4975. + *
  4976. + */
  4977. +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
  4978. +{
  4979. + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
  4980. + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
  4981. + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
  4982. + be32_to_cpu(elf_hdr->e_shoff));
  4983. + int id, section;
  4984. + int rc;
  4985. +
  4986. + pr_info("%s\n", __func__);
  4987. +
  4988. + /* Some sanity checks */
  4989. + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
  4990. + pr_err("%s: incorrect elf magic number\n", __func__);
  4991. + return -EINVAL;
  4992. + }
  4993. +
  4994. + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
  4995. + pr_err("%s: incorrect elf class(%x)\n", __func__,
  4996. + elf_hdr->e_ident[EI_CLASS]);
  4997. + return -EINVAL;
  4998. + }
  4999. +
  5000. + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
  5001. + pr_err("%s: incorrect elf data(%x)\n", __func__,
  5002. + elf_hdr->e_ident[EI_DATA]);
  5003. + return -EINVAL;
  5004. + }
  5005. +
  5006. + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
  5007. + pr_err("%s: incorrect elf file type(%x)\n", __func__,
  5008. + be16_to_cpu(elf_hdr->e_type));
  5009. + return -EINVAL;
  5010. + }
  5011. +
  5012. + for (section = 0; section < sections; section++, shdr++) {
  5013. + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
  5014. + SHF_EXECINSTR)))
  5015. + continue;
  5016. +
  5017. + for (id = 0; id < MAX_PE; id++)
  5018. + if (pe_mask & (1 << id)) {
  5019. + rc = pe_load_elf_section(id, fw->data, shdr,
  5020. + pfe->dev);
  5021. + if (rc < 0)
  5022. + goto err;
  5023. + }
  5024. + }
  5025. +
  5026. + pfe_check_version_info(fw);
  5027. +
  5028. + return 0;
  5029. +
  5030. +err:
  5031. + return rc;
  5032. +}
  5033. +
/* PFE firmware initialization.
 * Loads different firmware files from filesystem.
 * Initializes PE IMEM/DMEM and UTIL-PE DDR
 * Initializes control path symbol addresses (by looking them up in the elf
 * firmware files
 * Takes PE's out of reset
 *
 * @return 0 on success, a negative value on error
 *
 */
int pfe_firmware_init(struct pfe *pfe)
{
	const struct firmware *class_fw, *tmu_fw;
	int rc = 0;
#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	const char *util_fw_name;
	const struct firmware *util_fw;
#endif

	pr_info("%s\n", __func__);

	/*
	 * NOTE(review): -ETIMEDOUT is an odd code for a firmware lookup
	 * failure (-ENOENT would be more conventional) — confirm callers
	 * before changing.
	 */
	if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
		pr_err("%s: request firmware %s failed\n", __func__,
		       CLASS_FIRMWARE_FILENAME);
		rc = -ETIMEDOUT;
		goto err0;
	}

	if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
		pr_err("%s: request firmware %s failed\n", __func__,
		       TMU_FIRMWARE_FILENAME);
		rc = -ETIMEDOUT;
		goto err1;
	}

#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	util_fw_name = UTIL_FIRMWARE_FILENAME;

	if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
		pr_err("%s: request firmware %s failed\n", __func__,
		       util_fw_name);
		rc = -ETIMEDOUT;
		goto err2;
	}
#endif
	rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
	if (rc < 0) {
		pr_err("%s: class firmware load failed\n", __func__);
		goto err3;
	}

#if defined(CFG_DIAGS)
	/* diags are optional: failure only downgrades diagnostics */
	rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
	if (rc < 0) {
		pr_warn(
			"PFE diags won't be available for class PEs\n");
		rc = 0;
	}
#endif

	rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
	if (rc < 0) {
		pr_err("%s: tmu firmware load failed\n", __func__);
		goto err3;
	}

#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
	if (rc < 0) {
		pr_err("%s: util firmware load failed\n", __func__);
		goto err3;
	}

#if defined(CFG_DIAGS)
	rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
	if (rc < 0) {
		pr_warn(
			"PFE diags won't be available for util PE\n");
		rc = 0;
	}
#endif

	util_enable();
#endif

	tmu_enable(0xf);
	class_enable();

	/*
	 * On success, control intentionally falls through the err3 labels:
	 * the firmware images have been copied into the PEs, so the blobs
	 * can be released in every case.
	 */
err3:
#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	release_firmware(util_fw);

err2:
#endif
	release_firmware(tmu_fw);

err1:
	release_firmware(class_fw);

err0:
	return rc;
}
  5136. +
/* PFE firmware cleanup
 * Puts PE's in reset
 *
 * Stops all PEs first, then disables the class, TMU and (when built in)
 * util blocks. Call order mirrors pfe_firmware_init() in reverse.
 */
void pfe_firmware_exit(struct pfe *pfe)
{
	pr_info("%s\n", __func__);

	/* best effort: log but continue tearing down on failure */
	if (pe_reset_all(&pfe->ctrl) != 0)
		pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");

	class_disable();
	tmu_disable(0xf);
#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	util_disable();
#endif
}
  5155. --- /dev/null
  5156. +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
  5157. @@ -0,0 +1,32 @@
  5158. +/*
  5159. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  5160. + * Copyright 2017 NXP
  5161. + *
  5162. + * This program is free software; you can redistribute it and/or modify
  5163. + * it under the terms of the GNU General Public License as published by
  5164. + * the Free Software Foundation; either version 2 of the License, or
  5165. + * (at your option) any later version.
  5166. + *
  5167. + * This program is distributed in the hope that it will be useful,
  5168. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  5169. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  5170. + * GNU General Public License for more details.
  5171. + *
  5172. + * You should have received a copy of the GNU General Public License
  5173. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  5174. + */
  5175. +
#ifndef _PFE_FIRMWARE_H_
#define _PFE_FIRMWARE_H_

/* Firmware image names passed to request_firmware() for each PE type */
#define CLASS_FIRMWARE_FILENAME		"ppfe_class_ls1012a.elf"
#define TMU_FIRMWARE_FILENAME		"ppfe_tmu_ls1012a.elf"

/* Firmware validation result codes */
#define PFE_FW_CHECK_PASS		0
#define PFE_FW_CHECK_FAIL		1
/* Number of distinct firmware images (class, tmu, util) */
#define NUM_PFE_FW				3

int pfe_firmware_init(struct pfe *pfe);
void pfe_firmware_exit(struct pfe *pfe);

#endif /* _PFE_FIRMWARE_H_ */
  5190. --- /dev/null
  5191. +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
  5192. @@ -0,0 +1,1516 @@
  5193. +/*
  5194. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  5195. + * Copyright 2017 NXP
  5196. + *
  5197. + * This program is free software; you can redistribute it and/or modify
  5198. + * it under the terms of the GNU General Public License as published by
  5199. + * the Free Software Foundation; either version 2 of the License, or
  5200. + * (at your option) any later version.
  5201. + *
  5202. + * This program is distributed in the hope that it will be useful,
  5203. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  5204. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  5205. + * GNU General Public License for more details.
  5206. + *
  5207. + * You should have received a copy of the GNU General Public License
  5208. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  5209. + */
  5210. +
  5211. +#include "pfe_mod.h"
  5212. +#include "pfe/pfe.h"
  5213. +
/* Library-wide state, set once by pfe_lib_init() */
void *cbus_base_addr;		/* CBUS virtual base address */
void *ddr_base_addr;		/* PFE DDR range virtual base address */
unsigned long ddr_phys_base_addr;	/* PFE DDR range physical base */
unsigned int ddr_size;		/* PFE DDR range size in bytes */

/* Per-PE indirect-access register map, indexed by PE id */
static struct pe_info pe[MAX_PE];
  5220. +
/* Initializes the PFE library.
 * Must be called before using any of the library functions.
 *
 * Records the base addresses and fills the per-PE indirect-access table
 * (DMEM/PMEM bases and the memory-access register addresses) for the six
 * class PEs, the TMU PEs and, when built in, the util PE.
 *
 * @param[in] cbus_base		CBUS virtual base address (as mapped in
 *				the host CPU address space)
 * @param[in] ddr_base		PFE DDR range virtual base address (as
 *				mapped in the host CPU address space)
 * @param[in] ddr_phys_base	PFE DDR range physical base address (as
 *				mapped in platform)
 * @param[in] size		PFE DDR range size (as defined by the host
 *				software)
 */
void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
		  unsigned int size)
{
	cbus_base_addr = cbus_base;
	ddr_base_addr = ddr_base;
	ddr_phys_base_addr = ddr_phys_base;
	ddr_size = size;

	/* Class PEs 0-5: per-instance DMEM/IMEM bases, shared access regs */
	pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
	pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
	pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
	pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
	pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
	pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
	pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
	pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
	pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
	pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
	pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
	pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
	pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
	pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
	pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
	pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;

	/* TMU PEs: note TMU2 is intentionally absent (not present on this
	 * SoC, presumably — confirm against the hardware manual)
	 */
	pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
	pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
	pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
	pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
	pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
	pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;

	pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
	pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
	pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
	pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
	pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
	pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;

	pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
	pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
	pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
	pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
	pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
	pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;

#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	/* Util PE has no host-loadable PMEM (its code lives in DDR) */
	pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
	pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
	pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
	pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
#endif
}
  5311. +
/* Writes a buffer to PE internal memory from the host
 * through indirect access registers.
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] src		Buffer source address
 * @param[in] mem_access_addr	DMEM destination address (must be 32bit
 *				aligned)
 * @param[in] len		Number of bytes to copy
 */
void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
int len)
{
	u32 offset = 0, val, addr;
	unsigned int len32 = len >> 2;
	int i;

	/* Full words: write bit set, all four byte lanes enabled */
	addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
		PE_MEM_ACCESS_BYTE_ENABLE(0, 4);

	for (i = 0; i < len32; i++, offset += 4, src += 4) {
		val = *(u32 *)src;
		/* wdata is programmed before the address register —
		 * presumably the address write triggers the access; keep
		 * this order.
		 */
		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr + offset, pe[id].mem_access_addr);
	}

	/* Trailing 1-3 bytes: pack them into one word and enable only the
	 * corresponding byte lanes.
	 */
	len = (len & 0x3);
	if (len) {
		val = 0;

		addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
			PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;

		for (i = 0; i < len; i++, src++)
			val |= (*(u8 *)src) << (8 * i);

		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr, pe[id].mem_access_addr);
	}
}
  5352. +
  5353. +/* Writes a buffer to PE internal data memory (DMEM) from the host
  5354. + * through indirect access registers.
  5355. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5356. + * ..., UTIL_ID)
  5357. + * @param[in] src Buffer source address
  5358. + * @param[in] dst DMEM destination address (must be 32bit
  5359. + * aligned)
  5360. + * @param[in] len Number of bytes to copy
  5361. + */
  5362. +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
  5363. +{
  5364. + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
  5365. + PE_MEM_ACCESS_DMEM, src, len);
  5366. +}
  5367. +
  5368. +/* Writes a buffer to PE internal program memory (PMEM) from the host
  5369. + * through indirect access registers.
  5370. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5371. + * ..., TMU3_ID)
  5372. + * @param[in] src Buffer source address
  5373. + * @param[in] dst PMEM destination address (must be 32bit
  5374. + * aligned)
  5375. + * @param[in] len Number of bytes to copy
  5376. + */
  5377. +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
  5378. +{
  5379. + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
  5380. + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
  5381. +}
  5382. +
  5383. +/* Reads PE internal program memory (IMEM) from the host
  5384. + * through indirect access registers.
  5385. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5386. + * ..., TMU3_ID)
  5387. + * @param[in] addr PMEM read address (must be aligned on size)
  5388. + * @param[in] size Number of bytes to read (maximum 4, must not
  5389. + * cross 32bit boundaries)
  5390. + * @return the data read (in PE endianness, i.e BE).
  5391. + */
  5392. +u32 pe_pmem_read(int id, u32 addr, u8 size)
  5393. +{
  5394. + u32 offset = addr & 0x3;
  5395. + u32 mask = 0xffffffff >> ((4 - size) << 3);
  5396. + u32 val;
  5397. +
  5398. + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
  5399. + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
  5400. +
  5401. + writel(addr, pe[id].mem_access_addr);
  5402. + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
  5403. +
  5404. + return (val >> (offset << 3)) & mask;
  5405. +}
  5406. +
  5407. +/* Writes PE internal data memory (DMEM) from the host
  5408. + * through indirect access registers.
  5409. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5410. + * ..., UTIL_ID)
  5411. + * @param[in] addr DMEM write address (must be aligned on size)
  5412. + * @param[in] val Value to write (in PE endianness, i.e BE)
  5413. + * @param[in] size Number of bytes to write (maximum 4, must not
  5414. + * cross 32bit boundaries)
  5415. + */
  5416. +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
  5417. +{
  5418. + u32 offset = addr & 0x3;
  5419. +
  5420. + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
  5421. + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
  5422. +
  5423. + /* Indirect access interface is byte swapping data being written */
  5424. + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
  5425. + writel(addr, pe[id].mem_access_addr);
  5426. +}
  5427. +
  5428. +/* Reads PE internal data memory (DMEM) from the host
  5429. + * through indirect access registers.
  5430. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5431. + * ..., UTIL_ID)
  5432. + * @param[in] addr DMEM read address (must be aligned on size)
  5433. + * @param[in] size Number of bytes to read (maximum 4, must not
  5434. + * cross 32bit boundaries)
  5435. + * @return the data read (in PE endianness, i.e BE).
  5436. + */
  5437. +u32 pe_dmem_read(int id, u32 addr, u8 size)
  5438. +{
  5439. + u32 offset = addr & 0x3;
  5440. + u32 mask = 0xffffffff >> ((4 - size) << 3);
  5441. + u32 val;
  5442. +
  5443. + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
  5444. + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
  5445. +
  5446. + writel(addr, pe[id].mem_access_addr);
  5447. +
  5448. + /* Indirect access interface is byte swapping data being read */
  5449. + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
  5450. +
  5451. + return (val >> (offset << 3)) & mask;
  5452. +}
  5453. +
  5454. +/* This function is used to write to CLASS internal bus peripherals (ccu,
  5455. + * pe-lem) from the host
  5456. + * through indirect access registers.
  5457. + * @param[in] val value to write
  5458. + * @param[in] addr Address to write to (must be aligned on size)
  5459. + * @param[in] size Number of bytes to write (1, 2 or 4)
  5460. + *
  5461. + */
  5462. +void class_bus_write(u32 val, u32 addr, u8 size)
  5463. +{
  5464. + u32 offset = addr & 0x3;
  5465. +
  5466. + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
  5467. +
  5468. + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
  5469. + (size << 24);
  5470. +
  5471. + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
  5472. + writel(addr, CLASS_BUS_ACCESS_ADDR);
  5473. +}
  5474. +
  5475. +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
  5476. + * through indirect access registers.
  5477. + * @param[in] addr Address to read from (must be aligned on size)
  5478. + * @param[in] size Number of bytes to read (1, 2 or 4)
  5479. + * @return the read data
  5480. + *
  5481. + */
  5482. +u32 class_bus_read(u32 addr, u8 size)
  5483. +{
  5484. + u32 offset = addr & 0x3;
  5485. + u32 mask = 0xffffffff >> ((4 - size) << 3);
  5486. + u32 val;
  5487. +
  5488. + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
  5489. +
  5490. + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
  5491. +
  5492. + writel(addr, CLASS_BUS_ACCESS_ADDR);
  5493. + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
  5494. +
  5495. + return (val >> (offset << 3)) & mask;
  5496. +}
  5497. +
  5498. +/* Writes data to the cluster memory (PE_LMEM)
  5499. + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
  5500. + * @param[in] src Buffer source address
  5501. + * @param[in] len Number of bytes to copy
  5502. + */
  5503. +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
  5504. +{
  5505. + u32 len32 = len >> 2;
  5506. + int i;
  5507. +
  5508. + for (i = 0; i < len32; i++, src += 4, dst += 4)
  5509. + class_bus_write(*(u32 *)src, dst, 4);
  5510. +
  5511. + if (len & 0x2) {
  5512. + class_bus_write(*(u16 *)src, dst, 2);
  5513. + src += 2;
  5514. + dst += 2;
  5515. + }
  5516. +
  5517. + if (len & 0x1) {
  5518. + class_bus_write(*(u8 *)src, dst, 1);
  5519. + src++;
  5520. + dst++;
  5521. + }
  5522. +}
  5523. +
  5524. +/* Writes value to the cluster memory (PE_LMEM)
  5525. + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
  5526. + * @param[in] val Value to write
  5527. + * @param[in] len Number of bytes to write
  5528. + */
  5529. +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
  5530. +{
  5531. + u32 len32 = len >> 2;
  5532. + int i;
  5533. +
  5534. + val = val | (val << 8) | (val << 16) | (val << 24);
  5535. +
  5536. + for (i = 0; i < len32; i++, dst += 4)
  5537. + class_bus_write(val, dst, 4);
  5538. +
  5539. + if (len & 0x2) {
  5540. + class_bus_write(val, dst, 2);
  5541. + dst += 2;
  5542. + }
  5543. +
  5544. + if (len & 0x1) {
  5545. + class_bus_write(val, dst, 1);
  5546. + dst++;
  5547. + }
  5548. +}
  5549. +
  5550. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  5551. +
/* Writes UTIL program memory (DDR) from the host.
 *
 * @param[in] addr	Address to write (virtual, must be aligned on size)
 * @param[in] val	Value to write (in PE endianness, i.e BE)
 * @param[in] size	Number of bytes to write (2 or 4)
 */
static void util_pmem_write(u32 val, void *addr, u8 size)
{
	/* round down to the enclosing 64-bit slot ... */
	void *addr64 = (void *)((unsigned long)addr & ~0x7);
	/* ... and mirror the byte position within that slot */
	unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;

	/*
	 * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
	 * location
	 *
	 * NOTE(review): val arrives big-endian; be32_to_cpu/be16_to_cpu here
	 * combined with the mirrored offset presumably implements the
	 * 64-bit byte swap the UTIL core expects — confirm against the
	 * hardware manual before touching.
	 */
	if (size == 4)
		writel(be32_to_cpu(val), addr64 + off);
	else
		writew(be16_to_cpu((u16)val), addr64 + off);
}
  5572. +
/* Writes a buffer to UTIL program memory (DDR) from the host.
 *
 * @param[in] dst	Address to write (virtual, must be at least 16bit
 *			aligned)
 * @param[in] src	Buffer to write (in PE endianness, i.e BE, must have
 *			same alignment as dst)
 * @param[in] len	Number of bytes to write (must be at least 16bit
 *			aligned)
 */
static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
{
	unsigned int len32;
	int i;

	/* leading halfword to reach 32-bit alignment; only src is tested
	 * because dst is required to share its alignment (see contract)
	 */
	if ((unsigned long)src & 0x2) {
		util_pmem_write(*(u16 *)src, dst, 2);
		src += 2;
		dst += 2;
		len -= 2;
	}

	len32 = len >> 2;

	for (i = 0; i < len32; i++, dst += 4, src += 4)
		util_pmem_write(*(u32 *)src, dst, 4);

	/* trailing halfword, if any */
	if (len & 0x2)
		util_pmem_write(*(u16 *)src, dst, len & 0x2);
}
  5602. +#endif
  5603. +
/* Loads an elf section into pmem
 * Code needs to be at least 16bit aligned and only PROGBITS sections are
 * supported
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ...,
 *			TMU3_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 *
 * @return 0 on success, -EINVAL on unsupported PE/type or bad alignment
 */
static int pe_load_pmem_section(int id, const void *data,
				struct elf32_shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);

#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
	/* UTIL code lives in DDR, not in host-loadable PMEM */
	if (id == UTIL_ID) {
		pr_err("%s: unsupported pmem section for UTIL\n",
		       __func__);
		return -EINVAL;
	}
#endif

	/* source and destination must agree on sub-word alignment so the
	 * word-based copy lands bytes correctly
	 */
	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		pr_err(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
			, __func__, addr, (unsigned long)data + offset);

		return -EINVAL;
	}

	if (addr & 0x1) {
		pr_err("%s: load address(%x) is not 16bit aligned\n",
		       __func__, addr);
		return -EINVAL;
	}

	if (size & 0x1) {
		pr_err("%s: load size(%x) is not 16bit aligned\n",
		       __func__, size);
		return -EINVAL;
	}

	switch (type) {
	case SHT_PROGBITS:
		pe_pmem_memcpy_to32(id, addr, data + offset, size);

		break;

	default:
		pr_err("%s: unsupported section type(%x)\n", __func__,
		       type);
		return -EINVAL;
	}

	return 0;
}
  5664. +
/* Loads an elf section into dmem
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 *
 * @return 0 on success, -EINVAL on unsupported type or bad alignment
 */
static int pe_load_dmem_section(int id, const void *data,
				struct elf32_shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 size32 = size >> 2;
	int i;

	/* word copy requires matching sub-word alignment of file and
	 * destination
	 */
	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		pr_err(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -EINVAL;
	}

	if (addr & 0x3) {
		pr_err("%s: load address(%x) is not 32bit aligned\n",
		       __func__, addr);
		return -EINVAL;
	}

	switch (type) {
	case SHT_PROGBITS:
		pe_dmem_memcpy_to32(id, addr, data + offset, size);
		break;

	case SHT_NOBITS:
		/* .bss-style section: zero-fill word by word, then the
		 * remaining 1-3 bytes
		 */
		for (i = 0; i < size32; i++, addr += 4)
			pe_dmem_write(id, 0, addr, 4);

		if (size & 0x3)
			pe_dmem_write(id, 0, addr, size & 0x3);

		break;

	default:
		pr_err("%s: unsupported section type(%x)\n", __func__,
		       type);
		return -EINVAL;
	}

	return 0;
}
  5721. +
/* Loads an elf section into DDR
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 * @param[in] dev	device pointer (currently unused here)
 *
 * NOTE(review): the brace structure below is interleaved with
 * #if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED) and is extremely fragile —
 * verify both configurations still compile before any restructuring.
 */
static int pe_load_ddr_section(int id, const void *data,
			       struct elf32_shdr *shdr,
			       struct device *dev) {
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 flags = be32_to_cpu(shdr->sh_flags);

	switch (type) {
	case SHT_PROGBITS:
		if (flags & SHF_EXECINSTR) {
			if (id <= CLASS_MAX_ID) {
				/* DO the loading only once in DDR */
				if (id == CLASS0_ID) {
					/* NOTE(review): informational
					 * message logged at pr_err level
					 */
					pr_err(
						"%s: load address(%x) and elf file address(%lx) rcvd\n",
						__func__, addr,
						(unsigned long)data + offset);
					if (((unsigned long)(data + offset)
						& 0x3) != (addr & 0x3)) {
						pr_err(
							"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
							, __func__, addr,
							(unsigned long)data + offset);

						return -EINVAL;
					}

					if (addr & 0x1) {
						pr_err(
							"%s: load address(%x) is not 16bit aligned\n"
							, __func__, addr);
						return -EINVAL;
					}

					if (size & 0x1) {
						pr_err(
							"%s: load length(%x) is not 16bit aligned\n"
							, __func__, size);
						return -EINVAL;
					}
					memcpy(DDR_PHYS_TO_VIRT(
						DDR_PFE_TO_PHYS(addr)),
						data + offset, size);
				}
#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
			} else if (id == UTIL_ID) {
				if (((unsigned long)(data + offset) & 0x3)
					!= (addr & 0x3)) {
					pr_err(
						"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
						, __func__, addr,
						(unsigned long)data + offset);

					return -EINVAL;
				}

				if (addr & 0x1) {
					pr_err(
						"%s: load address(%x) is not 16bit aligned\n"
						, __func__, addr);
					return -EINVAL;
				}

				if (size & 0x1) {
					pr_err(
						"%s: load length(%x) is not 16bit aligned\n"
						, __func__, size);
					return -EINVAL;
				}

				/* UTIL code goes through the byte-swapping
				 * DDR loader
				 */
				util_pmem_memcpy(DDR_PHYS_TO_VIRT(
					DDR_PFE_TO_PHYS(addr)),
					data + offset, size);
			}
#endif
			} else {
				pr_err(
					"%s: unsupported ddr section type(%x) for PE(%d)\n"
					, __func__, type, id);
				return -EINVAL;
			}

		} else {
			/* non-executable PROGBITS: plain copy into DDR */
			memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
				+ offset, size);
		}

		break;

	case SHT_NOBITS:
		/* .bss-style section: zero-fill */
		memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);

		break;

	default:
		pr_err("%s: unsupported section type(%x)\n", __func__,
		       type);
		return -EINVAL;
	}

	return 0;
}
  5836. +
  5837. +/* Loads an elf section into pe lmem
  5838. + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
  5839. + * initialized to 0
  5840. + *
  5841. + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
  5842. + * @param[in] data pointer to the elf firmware
  5843. + * @param[in] shdr pointer to the elf section header
  5844. + *
  5845. + */
  5846. +static int pe_load_pe_lmem_section(int id, const void *data,
  5847. + struct elf32_shdr *shdr)
  5848. +{
  5849. + u32 offset = be32_to_cpu(shdr->sh_offset);
  5850. + u32 addr = be32_to_cpu(shdr->sh_addr);
  5851. + u32 size = be32_to_cpu(shdr->sh_size);
  5852. + u32 type = be32_to_cpu(shdr->sh_type);
  5853. +
  5854. + if (id > CLASS_MAX_ID) {
  5855. + pr_err(
  5856. + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
  5857. + __func__, type, id);
  5858. + return -EINVAL;
  5859. + }
  5860. +
  5861. + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
  5862. + pr_err(
  5863. + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
  5864. + __func__, addr, (unsigned long)data + offset);
  5865. +
  5866. + return -EINVAL;
  5867. + }
  5868. +
  5869. + if (addr & 0x3) {
  5870. + pr_err("%s: load address(%x) is not 32bit aligned\n",
  5871. + __func__, addr);
  5872. + return -EINVAL;
  5873. + }
  5874. +
  5875. + switch (type) {
  5876. + case SHT_PROGBITS:
  5877. + class_pe_lmem_memcpy_to32(addr, data + offset, size);
  5878. + break;
  5879. +
  5880. + case SHT_NOBITS:
  5881. + class_pe_lmem_memset(addr, 0, size);
  5882. + break;
  5883. +
  5884. + default:
  5885. + pr_err("%s: unsupported section type(%x)\n", __func__,
  5886. + type);
  5887. + return -EINVAL;
  5888. + }
  5889. +
  5890. + return 0;
  5891. +}
  5892. +
  5893. +/* Loads an elf section into a PE
  5894. + * For now only supports loading a section to dmem (all PE's), pmem (class and
  5895. + * tmu PE's),
  5896. + * DDDR (util PE code)
  5897. + *
  5898. + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
  5899. + * ..., UTIL_ID)
  5900. + * @param[in] data pointer to the elf firmware
  5901. + * @param[in] shdr pointer to the elf section header
  5902. + *
  5903. + */
  5904. +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
  5905. + struct device *dev) {
  5906. + u32 addr = be32_to_cpu(shdr->sh_addr);
  5907. + u32 size = be32_to_cpu(shdr->sh_size);
  5908. +
  5909. + if (IS_DMEM(addr, size))
  5910. + return pe_load_dmem_section(id, data, shdr);
  5911. + else if (IS_PMEM(addr, size))
  5912. + return pe_load_pmem_section(id, data, shdr);
  5913. + else if (IS_PFE_LMEM(addr, size))
  5914. + return 0;
  5915. + else if (IS_PHYS_DDR(addr, size))
  5916. + return pe_load_ddr_section(id, data, shdr, dev);
  5917. + else if (IS_PE_LMEM(addr, size))
  5918. + return pe_load_pe_lmem_section(id, data, shdr);
  5919. +
  5920. + pr_err("%s: unsupported memory range(%x)\n", __func__,
  5921. + addr);
  5922. + return 0;
  5923. +}
  5924. +
  5925. +/**************************** BMU ***************************/
  5926. +
  5927. +/* Initializes a BMU block.
  5928. + * @param[in] base BMU block base address
  5929. + * @param[in] cfg BMU configuration
  5930. + */
  5931. +void bmu_init(void *base, struct BMU_CFG *cfg)
  5932. +{
  5933. + bmu_disable(base);
  5934. +
  5935. + bmu_set_config(base, cfg);
  5936. +
  5937. + bmu_reset(base);
  5938. +}
  5939. +
  5940. +/* Resets a BMU block.
  5941. + * @param[in] base BMU block base address
  5942. + */
  5943. +void bmu_reset(void *base)
  5944. +{
  5945. + writel(CORE_SW_RESET, base + BMU_CTRL);
  5946. +
  5947. + /* Wait for self clear */
  5948. + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
  5949. + ;
  5950. +}
  5951. +
  5952. +/* Enabled a BMU block.
  5953. + * @param[in] base BMU block base address
  5954. + */
void bmu_enable(void *base)
{
	/* Set the core-enable value in the BMU control register */
	writel(CORE_ENABLE, base + BMU_CTRL);
}
  5959. +
  5960. +/* Disables a BMU block.
  5961. + * @param[in] base BMU block base address
  5962. + */
void bmu_disable(void *base)
{
	/* Write the core-disable value to the BMU control register */
	writel(CORE_DISABLE, base + BMU_CTRL);
}
  5967. +
  5968. +/* Sets the configuration of a BMU block.
  5969. + * @param[in] base BMU block base address
  5970. + * @param[in] cfg BMU configuration
  5971. + */
void bmu_set_config(void *base, struct BMU_CFG *cfg)
{
	/* Buffer pool base, then count and size (both 16-bit fields) */
	writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
	writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
	writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);

	/* Interrupts are never used */
	writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
	writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
	writel(0x0, base + BMU_INT_ENABLE);
}
  5983. +
  5984. +/**************************** MTIP GEMAC ***************************/
  5985. +
  5986. +/* Enable Rx Checksum Engine. With this enabled, Frame with bad IP,
  5987. + * TCP or UDP checksums are discarded
  5988. + *
  5989. + * @param[in] base GEMAC base address.
  5990. + */
void gemac_enable_rx_checksum_offload(void *base)
{
	/* Intentionally empty: no register configuration was found to
	 * enable RX checksum offload on this MAC.
	 */
}
  5995. +
  5996. +/* Disable Rx Checksum Engine.
  5997. + *
  5998. + * @param[in] base GEMAC base address.
  5999. + */
void gemac_disable_rx_checksum_offload(void *base)
{
	/* Intentionally empty: no register configuration was found to
	 * disable RX checksum offload on this MAC.
	 */
}
  6004. +
  6005. +/* GEMAC set speed.
  6006. + * @param[in] base GEMAC base address
  6007. + * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
  6008. + */
  6009. +void gemac_set_speed(void *base, enum mac_speed gem_speed)
  6010. +{
  6011. + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
  6012. + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
  6013. +
  6014. + switch (gem_speed) {
  6015. + case SPEED_10M:
  6016. + rcr |= EMAC_RCNTRL_RMII_10T;
  6017. + break;
  6018. +
  6019. + case SPEED_1000M:
  6020. + ecr |= EMAC_ECNTRL_SPEED;
  6021. + break;
  6022. +
  6023. + case SPEED_100M:
  6024. + default:
  6025. + /*It is in 100M mode */
  6026. + break;
  6027. + }
  6028. + writel(ecr, (base + EMAC_ECNTRL_REG));
  6029. + writel(rcr, (base + EMAC_RCNTRL_REG));
  6030. +}
  6031. +
  6032. +/* GEMAC set duplex.
  6033. + * @param[in] base GEMAC base address
  6034. + * @param[in] duplex GEMAC duplex mode (Full, Half)
  6035. + */
  6036. +void gemac_set_duplex(void *base, int duplex)
  6037. +{
  6038. + if (duplex == DUPLEX_HALF) {
  6039. + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
  6040. + + EMAC_TCNTRL_REG);
  6041. + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
  6042. + + EMAC_RCNTRL_REG));
  6043. + } else{
  6044. + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
  6045. + + EMAC_TCNTRL_REG);
  6046. + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
  6047. + + EMAC_RCNTRL_REG));
  6048. + }
  6049. +}
  6050. +
  6051. +/* GEMAC set mode.
  6052. + * @param[in] base GEMAC base address
  6053. + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
  6054. + */
void gemac_set_mode(void *base, int mode)
{
	/* NOTE(review): 'mode' is ignored — the MAC is always configured
	 * for MII mode with flow control regardless of the argument;
	 * confirm this is intentional for all supported PHY modes.
	 */
	u32 val = readl(base + EMAC_RCNTRL_REG);

	/*Remove loopback*/
	val &= ~EMAC_RCNTRL_LOOP;

	/*Enable flow control and MII mode*/
	val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);

	writel(val, base + EMAC_RCNTRL_REG);
}
  6067. +
  6068. +/* GEMAC enable function.
  6069. + * @param[in] base GEMAC base address
  6070. + */
  6071. +void gemac_enable(void *base)
  6072. +{
  6073. + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
  6074. + EMAC_ECNTRL_REG);
  6075. +}
  6076. +
  6077. +/* GEMAC disable function.
  6078. + * @param[in] base GEMAC base address
  6079. + */
  6080. +void gemac_disable(void *base)
  6081. +{
  6082. + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
  6083. + EMAC_ECNTRL_REG);
  6084. +}
  6085. +
  6086. +/* GEMAC TX disable function.
  6087. + * @param[in] base GEMAC base address
  6088. + */
  6089. +void gemac_tx_disable(void *base)
  6090. +{
  6091. + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
  6092. + EMAC_TCNTRL_REG);
  6093. +}
  6094. +
  6095. +void gemac_tx_enable(void *base)
  6096. +{
  6097. + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
  6098. + EMAC_TCNTRL_REG);
  6099. +}
  6100. +
  6101. +/* Sets the hash register of the MAC.
  6102. + * This register is used for matching unicast and multicast frames.
  6103. + *
  6104. + * @param[in] base GEMAC base address.
  6105. + * @param[in] hash 64-bit hash to be configured.
  6106. + */
void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
{
	/* Low 32 bits go to the group-address-low register, high 32 bits
	 * to the group-address-high register.
	 */
	writel(hash->bottom, base + EMAC_GALR);
	writel(hash->top, base + EMAC_GAUR);
}
  6112. +
/* Programs MAC address slot 'entry_index' (1-based, 1..EMAC_SPEC_ADDR_MAX).
 * Entry 1 goes to the primary station address registers; entries 2..N go
 * to the supplemental (SMAC) register pairs, which are 8 bytes apart.
 */
void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
		      unsigned int entry_index)
{
	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
		return;

	/* Convert to a 0-based index */
	entry_index = entry_index - 1;
	if (entry_index < 1) {
		/* Primary station address. 0x8808 in the upper register is
		 * presumably the MAC-control (pause) frame TYPE field, as on
		 * FEC-style MACs — confirm against the hardware manual.
		 */
		writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
		writel((htonl(address->top) | 0x8808), base +
		       EMAC_PHY_ADDR_HIGH);
	} else {
		/* Supplemental address pair; index is decremented a second
		 * time so entry 2 maps to EMAC_SMAC_0_x.
		 */
		writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
		       + EMAC_SMAC_0_0);
		writel((htonl(address->top) | 0x8808), base + ((entry_index -
			1) * 8) + EMAC_SMAC_0_1);
	}
}
  6131. +
/* Clears MAC address slot 'entry_index' (1-based, 1..EMAC_SPEC_ADDR_MAX),
 * using the same register mapping as gemac_set_laddrN: entry 1 is the
 * primary station address, entries 2..N are the supplemental SMAC pairs.
 */
void gemac_clear_laddrN(void *base, unsigned int entry_index)
{
	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
		return;

	/* Convert to a 0-based index */
	entry_index = entry_index - 1;
	if (entry_index < 1) {
		writel(0, base + EMAC_PHY_ADDR_LOW);
		writel(0, base + EMAC_PHY_ADDR_HIGH);
	} else {
		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
	}
}
  6146. +
  6147. +/* Set the loopback mode of the MAC. This can be either no loopback for
  6148. + * normal operation, local loopback through MAC internal loopback module or PHY
  6149. + * loopback for external loopback through a PHY. This asserts the external
  6150. + * loop pin.
  6151. + *
  6152. + * @param[in] base GEMAC base address.
  6153. + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
  6154. + * Loopback,
  6155. + * LB_EXT - PHY Loopback.
  6156. + */
void gemac_set_loop(void *base, enum mac_loop gem_loop)
{
	/* NOTE(review): 'gem_loop' is ignored — local MAC loopback is
	 * always enabled here regardless of the requested mode; confirm
	 * whether LB_EXT (PHY loopback) should be handled differently.
	 */
	pr_info("%s()\n", __func__);
	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
		EMAC_RCNTRL_REG));
}
  6163. +
  6164. +/* GEMAC allow frames
  6165. + * @param[in] base GEMAC base address
  6166. + */
  6167. +void gemac_enable_copy_all(void *base)
  6168. +{
  6169. + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
  6170. + EMAC_RCNTRL_REG));
  6171. +}
  6172. +
  6173. +/* GEMAC do not allow frames
  6174. + * @param[in] base GEMAC base address
  6175. + */
  6176. +void gemac_disable_copy_all(void *base)
  6177. +{
  6178. + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
  6179. + EMAC_RCNTRL_REG));
  6180. +}
  6181. +
  6182. +/* GEMAC allow broadcast function.
  6183. + * @param[in] base GEMAC base address
  6184. + */
  6185. +void gemac_allow_broadcast(void *base)
  6186. +{
  6187. + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
  6188. + EMAC_RCNTRL_REG);
  6189. +}
  6190. +
  6191. +/* GEMAC no broadcast function.
  6192. + * @param[in] base GEMAC base address
  6193. + */
  6194. +void gemac_no_broadcast(void *base)
  6195. +{
  6196. + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
  6197. + EMAC_RCNTRL_REG);
  6198. +}
  6199. +
  6200. +/* GEMAC enable 1536 rx function.
  6201. + * @param[in] base GEMAC base address
  6202. + */
  6203. +void gemac_enable_1536_rx(void *base)
  6204. +{
  6205. + /* Set 1536 as Maximum frame length */
  6206. + writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
  6207. + EMAC_RCNTRL_REG);
  6208. +}
  6209. +
  6210. +/* GEMAC enable jumbo function.
  6211. + * @param[in] base GEMAC base address
  6212. + */
  6213. +void gemac_enable_rx_jmb(void *base)
  6214. +{
  6215. + writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
  6216. + + EMAC_RCNTRL_REG);
  6217. +}
  6218. +
  6219. +/* GEMAC enable stacked vlan function.
  6220. + * @param[in] base GEMAC base address
  6221. + */
void gemac_enable_stacked_vlan(void *base)
{
	/* MTIP doesn't support stacked vlan — intentionally a no-op */
}
  6226. +
  6227. +/* GEMAC enable pause rx function.
  6228. + * @param[in] base GEMAC base address
  6229. + */
  6230. +void gemac_enable_pause_rx(void *base)
  6231. +{
  6232. + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
  6233. + base + EMAC_RCNTRL_REG);
  6234. +}
  6235. +
  6236. +/* GEMAC disable pause rx function.
  6237. + * @param[in] base GEMAC base address
  6238. + */
  6239. +void gemac_disable_pause_rx(void *base)
  6240. +{
  6241. + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
  6242. + base + EMAC_RCNTRL_REG);
  6243. +}
  6244. +
  6245. +/* GEMAC enable pause tx function.
  6246. + * @param[in] base GEMAC base address
  6247. + */
void gemac_enable_pause_tx(void *base)
{
	/* Writing the non-zero threshold enables pause-frame TX;
	 * EMAC_RX_SECTION_EMPTY_V is the threshold value used.
	 */
	writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
}
  6252. +
  6253. +/* GEMAC disable pause tx function.
  6254. + * @param[in] base GEMAC base address
  6255. + */
void gemac_disable_pause_tx(void *base)
{
	/* A zero threshold disables pause-frame TX */
	writel(0x0, base + EMAC_RX_SECTION_EMPTY);
}
  6260. +
  6261. +/* GEMAC wol configuration
  6262. + * @param[in] base GEMAC base address
  6263. + * @param[in] wol_conf WoL register configuration
  6264. + */
  6265. +void gemac_set_wol(void *base, u32 wol_conf)
  6266. +{
  6267. + u32 val = readl(base + EMAC_ECNTRL_REG);
  6268. +
  6269. + if (wol_conf)
  6270. + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
  6271. + else
  6272. + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
  6273. + writel(val, base + EMAC_ECNTRL_REG);
  6274. +}
  6275. +
  6276. +/* Sets Gemac bus width to 64bit
  6277. + * @param[in] base GEMAC base address
  6278. + * @param[in] width gemac bus width to be set possible values are 32/64/128
  6279. + */
void gemac_set_bus_width(void *base, int width)
{
	/* Intentionally empty: bus width is not configurable on this MAC */
}
  6283. +
  6284. +/* Sets Gemac configuration.
  6285. + * @param[in] base GEMAC base address
  6286. + * @param[in] cfg GEMAC configuration
  6287. + */
void gemac_set_config(void *base, struct gemac_cfg *cfg)
{
	/*GEMAC config taken from VLSI */
	/* Fixed FIFO thresholds and limits; the magic values come from the
	 * reference (VLSI) configuration and are not derived here.
	 */
	writel(0x00000004, base + EMAC_TFWR_STR_FWD);
	writel(0x00000005, base + EMAC_RX_SECTION_FULL);
	writel(0x00003fff, base + EMAC_TRUNC_FL);
	writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
	writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);

	/* Then apply the caller-supplied mode, speed and duplex */
	gemac_set_mode(base, cfg->mode);

	gemac_set_speed(base, cfg->speed);

	gemac_set_duplex(base, cfg->duplex);
}
  6303. +
  6304. +/**************************** GPI ***************************/
  6305. +
  6306. +/* Initializes a GPI block.
  6307. + * @param[in] base GPI base address
  6308. + * @param[in] cfg GPI configuration
  6309. + */
  6310. +void gpi_init(void *base, struct gpi_cfg *cfg)
  6311. +{
  6312. + gpi_reset(base);
  6313. +
  6314. + gpi_disable(base);
  6315. +
  6316. + gpi_set_config(base, cfg);
  6317. +}
  6318. +
  6319. +/* Resets a GPI block.
  6320. + * @param[in] base GPI base address
  6321. + */
void gpi_reset(void *base)
{
	/* Fire-and-forget: unlike bmu_reset(), this does not poll for the
	 * reset bit to self-clear.
	 */
	writel(CORE_SW_RESET, base + GPI_CTRL);
}
  6326. +
  6327. +/* Enables a GPI block.
  6328. + * @param[in] base GPI base address
  6329. + */
void gpi_enable(void *base)
{
	/* Set the core-enable value in the GPI control register */
	writel(CORE_ENABLE, base + GPI_CTRL);
}
  6334. +
  6335. +/* Disables a GPI block.
  6336. + * @param[in] base GPI base address
  6337. + */
void gpi_disable(void *base)
{
	/* Write the core-disable value to the GPI control register */
	writel(CORE_DISABLE, base + GPI_CTRL);
}
  6342. +
  6343. +/* Sets the configuration of a GPI block.
  6344. + * @param[in] base GPI base address
  6345. + * @param[in] cfg GPI configuration
  6346. + */
void gpi_set_config(void *base, struct gpi_cfg *cfg)
{
	/* Point the GPI at the BMU1 (LMEM) and BMU2 (DDR) alloc/free
	 * interfaces and at the class in-queue.
	 */
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_LMEM_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_LMEM_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_DDR_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_DDR_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
	/* Buffer layout: header sizes, data offsets (packed as hi/lo
	 * half-words where two values share one register).
	 */
	writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
	writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
	writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
	writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
	writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);

	/* RX config: retry count plus DDR/LMEM buffer enables */
	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
	writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
	writel(1, base + GPI_TOE_CHKSUM_EN);

	/* Optional pause support: only programmed when a pause register
	 * address is supplied.
	 */
	if (cfg->mtip_pause_reg) {
		writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
		writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
	}
}
  6376. +
  6377. +/**************************** CLASSIFIER ***************************/
  6378. +
  6379. +/* Initializes CLASSIFIER block.
  6380. + * @param[in] cfg CLASSIFIER configuration
  6381. + */
  6382. +void class_init(struct class_cfg *cfg)
  6383. +{
  6384. + class_reset();
  6385. +
  6386. + class_disable();
  6387. +
  6388. + class_set_config(cfg);
  6389. +}
  6390. +
  6391. +/* Resets CLASSIFIER block.
  6392. + *
  6393. + */
void class_reset(void)
{
	/* Software reset of all class PEs via the TX control register */
	writel(CORE_SW_RESET, CLASS_TX_CTRL);
}
  6398. +
  6399. +/* Enables all CLASS-PE's cores.
  6400. + *
  6401. + */
void class_enable(void)
{
	/* Enable all class-PE cores */
	writel(CORE_ENABLE, CLASS_TX_CTRL);
}
  6406. +
  6407. +/* Disables all CLASS-PE's cores.
  6408. + *
  6409. + */
void class_disable(void)
{
	/* Disable all class-PE cores */
	writel(CORE_DISABLE, CLASS_TX_CTRL);
}
  6414. +
  6415. +/*
  6416. + * Sets the configuration of the CLASSIFIER block.
  6417. + * @param[in] cfg CLASSIFIER configuration
  6418. + */
void class_set_config(struct class_cfg *cfg)
{
	u32 val;

	/* Initialize route table (skipped on resume so existing entries
	 * survive a suspend/resume cycle)
	 */
	if (!cfg->resume)
		memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
			cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);

#if !defined(LS1012A_PFE_RESET_WA)
	writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
#endif

	/* Buffer/header layout and route-table geometry */
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
	writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
	writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
		CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
		CLASS_ROUTE_HASH_ENTRY_SIZE);
	writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
	       CLASS_HIF_PARSE);

	val = HASH_CRC_PORT_IP | QB2BUS_LE;

#if defined(CONFIG_IP_ALIGNED)
	val |= IP_ALIGNED;
#endif

	/*
	 * Class PE packet steering will only work if TOE mode, bridge fetch or
	 * route fetch are enabled (see class/qb_fet.v). Route fetch would
	 * trigger additional memory copies (likely from DDR because of hash
	 * table size, which cannot be reduced because PE software still
	 * relies on hash value computed in HW), so when not in TOE mode we
	 * simply enable HW bridge fetch even though we don't use it.
	 */
	if (cfg->toe_mode)
		val |= CLASS_TOE;
	else
		val |= HW_BRIDGE_FETCH;

	writel(val, CLASS_ROUTE_MULTI);

	writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
	       CLASS_ROUTE_TABLE_BASE);
	writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
	writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
	writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
	writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
	writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);

	/* FIFO thresholds and buffer counts — values taken as-is from the
	 * reference configuration; their derivation is not documented here.
	 */
	writel(23, CLASS_AFULL_THRES);
	writel(23, CLASS_TSQ_FIFO_THRES);

	writel(24, CLASS_MAX_BUF_CNT);
	writel(24, CLASS_TSQ_MAX_CNT);
}
  6475. +
  6476. +/**************************** TMU ***************************/
  6477. +
void tmu_reset(void)
{
	/* Put the TMU into software reset; tmu_init() clears it later */
	writel(SW_RESET, TMU_CTRL);
}
  6482. +
  6483. +/* Initializes TMU block.
  6484. + * @param[in] cfg TMU configuration
  6485. + */
void tmu_init(struct tmu_cfg *cfg)
{
	int q, phyno;

	/* Quiesce all four TMU PEs before reprogramming */
	tmu_disable(0xF);
	mdelay(10);

#if !defined(LS1012A_PFE_RESET_WA)
	/* keep in soft reset */
	writel(SW_RESET, TMU_CTRL);
#endif
	writel(0x3, TMU_SYS_GENERIC_CONTROL);
	writel(750, TMU_INQ_WATERMARK);
	/* In-queue packet-pointer addresses per physical port.
	 * Note there is no PHY2 entry and TDQ2 is never enabled below —
	 * port 2 is unused, matching the 'phyno == 2' skip further down.
	 */
	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
		GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
		GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
		GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
	       TMU_BMU_INQ_ADDR);

	writel(0x3FF, TMU_TDQ0_SCH_CTRL);	/*
						 * enabling all 10
						 * schedulers [9:0] of each TDQ
						 */
	writel(0x3FF, TMU_TDQ1_SCH_CTRL);
	writel(0x3FF, TMU_TDQ3_SCH_CTRL);

#if !defined(LS1012A_PFE_RESET_WA)
	writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
#endif

#if !defined(LS1012A_PFE_RESET_WA)
	writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
	/* Extra packet pointers will be stored from this address onwards */

	writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
	writel(5, TMU_TDQ_IIFG_CFG);
	writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);

	/* Take the TMU out of reset before running the memory init
	 * sequences below.
	 */
	writel(0x0, TMU_CTRL);

	/* MEM init */
	pr_info("%s: mem init\n", __func__);
	writel(MEM_INIT, TMU_CTRL);

	/* NOTE(review): busy-waits with no timeout — hangs if the
	 * hardware never reports completion.
	 */
	while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
		;

	/* LLM init */
	pr_info("%s: lmem init\n", __func__);
	writel(LLM_INIT, TMU_CTRL);

	while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
		;
#endif
	/* set up each queue for tail drop */
	for (phyno = 0; phyno < 4; phyno++) {
		/* Port 2 is unused (no PHY2 in-queue configured above) */
		if (phyno == 2)
			continue;
		for (q = 0; q < 16; q++) {
			u32 qdepth;

			/* Select the (port, queue) pair, then configure it */
			writel((phyno << 8) | q, TMU_TEQ_CTRL);
			writel(1 << 22, TMU_TEQ_QCFG);	/*Enable tail drop */

			if (phyno == 3)
				qdepth = DEFAULT_TMU3_QDEPTH;
			else
				qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
						DEFAULT_MAX_QDEPTH;

			/* LOG: 68855 */
			/*
			 * The following is a workaround for the reordered
			 * packet and BMU2 buffer leakage issue.
			 */
			if (CHIP_REVISION() == 0)
				qdepth = 31;

			/* qdepth is split across the two probability-config
			 * registers: low bits shifted into CFG2, the
			 * remaining high bits into CFG3.
			 */
			writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
			writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
		}
	}

#ifdef CFG_LRO
	/* Set TMU-3 queue 5 (LRO) in no-drop mode */
	writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
	writel(0, TMU_TEQ_QCFG);
#endif

	writel(0x05, TMU_TEQ_DISABLE_DROPCHK);

	/* Release the TMU from reset */
	writel(0x0, TMU_CTRL);
}
  6584. +
  6585. +/* Enables TMU-PE cores.
  6586. + * @param[in] pe_mask TMU PE mask
  6587. + */
  6588. +void tmu_enable(u32 pe_mask)
  6589. +{
  6590. + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
  6591. +}
  6592. +
  6593. +/* Disables TMU cores.
  6594. + * @param[in] pe_mask TMU PE mask
  6595. + */
  6596. +void tmu_disable(u32 pe_mask)
  6597. +{
  6598. + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
  6599. +}
  6600. +
  6601. +/* This will return the tmu queue status
  6602. + * @param[in] if_id gem interface id or TMU index
  6603. + * @return returns the bit mask of busy queues, zero means all
  6604. + * queues are empty
  6605. + */
u32 tmu_qstatus(u32 if_id)
{
	/* Read the 4-byte queue-status word from the TMU PE's DMEM status
	 * block and byte-swap it to host order.
	 * NOTE(review): cpu_to_be32 and be32_to_cpu perform the same swap
	 * on LE hosts, but be32_to_cpu would express the intent better —
	 * confirm the PE stores this field big-endian.
	 */
	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
		offsetof(struct pe_status, tmu_qstatus), 4));
}
  6611. +
/* Returns the rx (packets processed) counter from the TMU PE's DMEM
 * status block, byte-swapped to host order (same convention as
 * tmu_qstatus()).
 */
u32 tmu_pkts_processed(u32 if_id)
{
	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
		offsetof(struct pe_status, rx), 4));
}
  6617. +
  6618. +/**************************** UTIL ***************************/
  6619. +
  6620. +/* Resets UTIL block.
  6621. + */
void util_reset(void)
{
	/* Software reset of the UTIL PE via its TX control register */
	writel(CORE_SW_RESET, UTIL_TX_CTRL);
}
  6626. +
  6627. +/* Initializes UTIL block.
  6628. + * @param[in] cfg UTIL configuration
  6629. + */
void util_init(struct util_cfg *cfg)
{
	/* Only the PE/system clock ratio is programmable here */
	writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
}
  6634. +
  6635. +/* Enables UTIL-PE core.
  6636. + *
  6637. + */
void util_enable(void)
{
	/* Enable the UTIL-PE core */
	writel(CORE_ENABLE, UTIL_TX_CTRL);
}
  6642. +
  6643. +/* Disables UTIL-PE core.
  6644. + *
  6645. + */
void util_disable(void)
{
	/* Disable the UTIL-PE core */
	writel(CORE_DISABLE, UTIL_TX_CTRL);
}
  6650. +
  6651. +/**************************** HIF ***************************/
  6652. +/* Initializes HIF copy block.
  6653. + *
  6654. + */
void hif_init(void)
{
	/*Initialize HIF registers*/
	/* RX poll cycle in the upper half-word, TX poll cycle in the lower */
	writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
	       HIF_POLL_CTRL);
}
  6661. +
  6662. +/* Enable hif tx DMA and interrupt
  6663. + *
  6664. + */
void hif_tx_enable(void)
{
	/* Start the TX DMA, then unmask the global and TX-packet
	 * interrupt enables on top of whatever is already set.
	 */
	writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
	       HIF_INT_ENABLE);
}
  6671. +
  6672. +/* Disable hif tx DMA and interrupt
  6673. + *
  6674. + */
  6675. +void hif_tx_disable(void)
  6676. +{
  6677. + u32 hif_int;
  6678. +
  6679. + writel(0, HIF_TX_CTRL);
  6680. +
  6681. + hif_int = readl(HIF_INT_ENABLE);
  6682. + hif_int &= HIF_TXPKT_INT_EN;
  6683. + writel(hif_int, HIF_INT_ENABLE);
  6684. +}
  6685. +
  6686. +/* Enable hif rx DMA and interrupt
  6687. + *
  6688. + */
void hif_rx_enable(void)
{
	/* Start the RX DMA, then unmask the global and RX-packet
	 * interrupt enables on top of whatever is already set.
	 */
	hif_rx_dma_start();
	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
	       HIF_INT_ENABLE);
}
  6695. +
  6696. +/* Disable hif rx DMA and interrupt
  6697. + *
  6698. + */
  6699. +void hif_rx_disable(void)
  6700. +{
  6701. + u32 hif_int;
  6702. +
  6703. + writel(0, HIF_RX_CTRL);
  6704. +
  6705. + hif_int = readl(HIF_INT_ENABLE);
  6706. + hif_int &= HIF_RXPKT_INT_EN;
  6707. + writel(hif_int, HIF_INT_ENABLE);
  6708. +}
  6709. --- /dev/null
  6710. +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
  6711. @@ -0,0 +1,1072 @@
  6712. +/*
  6713. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  6714. + * Copyright 2017 NXP
  6715. + *
  6716. + * This program is free software; you can redistribute it and/or modify
  6717. + * it under the terms of the GNU General Public License as published by
  6718. + * the Free Software Foundation; either version 2 of the License, or
  6719. + * (at your option) any later version.
  6720. + *
  6721. + * This program is distributed in the hope that it will be useful,
  6722. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  6723. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  6724. + * GNU General Public License for more details.
  6725. + *
  6726. + * You should have received a copy of the GNU General Public License
  6727. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  6728. + */
  6729. +
  6730. +#include <linux/kernel.h>
  6731. +#include <linux/interrupt.h>
  6732. +#include <linux/dma-mapping.h>
  6733. +#include <linux/dmapool.h>
  6734. +#include <linux/sched.h>
  6735. +#include <linux/module.h>
  6736. +#include <linux/list.h>
  6737. +#include <linux/kthread.h>
  6738. +#include <linux/slab.h>
  6739. +
  6740. +#include <linux/io.h>
  6741. +#include <asm/irq.h>
  6742. +
  6743. +#include "pfe_mod.h"
  6744. +
  6745. +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
  6746. +
  6747. +unsigned char napi_first_batch;
  6748. +
  6749. +static void pfe_tx_do_cleanup(unsigned long data);
  6750. +
  6751. +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
  6752. +{
  6753. + void *addr;
  6754. + dma_addr_t dma_addr;
  6755. + int err = 0;
  6756. +
  6757. + pr_info("%s\n", __func__);
  6758. + addr = dma_alloc_coherent(pfe->dev,
  6759. + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
  6760. + HIF_TX_DESC_NT * sizeof(struct hif_desc),
  6761. + &dma_addr, GFP_KERNEL);
  6762. +
  6763. + if (!addr) {
  6764. + pr_err("%s: Could not allocate buffer descriptors!\n"
  6765. + , __func__);
  6766. + err = -ENOMEM;
  6767. + goto err0;
  6768. + }
  6769. +
  6770. + hif->descr_baseaddr_p = dma_addr;
  6771. + hif->descr_baseaddr_v = addr;
  6772. + hif->rx_ring_size = HIF_RX_DESC_NT;
  6773. + hif->tx_ring_size = HIF_TX_DESC_NT;
  6774. +
  6775. + return 0;
  6776. +
  6777. +err0:
  6778. + return err;
  6779. +}
  6780. +
  6781. +#if defined(LS1012A_PFE_RESET_WA)
  6782. +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
  6783. +{
  6784. + int ii;
  6785. + struct hif_desc *desc = hif->rx_base;
  6786. +
  6787. + /*Mark all descriptors as LAST_BD */
  6788. + for (ii = 0; ii < hif->rx_ring_size; ii++) {
  6789. + desc->ctrl |= BD_CTRL_LAST_BD;
  6790. + desc++;
  6791. + }
  6792. +}
  6793. +
  6794. +struct class_rx_hdr_t {
  6795. + u32 next_ptr; /* ptr to the start of the first DDR buffer */
  6796. + u16 length; /* total packet length */
  6797. + u16 phyno; /* input physical port number */
  6798. + u32 status; /* gemac status bits */
  6799. + u32 status2; /* reserved for software usage */
  6800. +};
  6801. +
  6802. +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
  6803. + * except overflow
  6804. + */
  6805. +#define STATUS_BAD_FRAME_ERR BIT(16)
  6806. +#define STATUS_LENGTH_ERR BIT(17)
  6807. +#define STATUS_CRC_ERR BIT(18)
  6808. +#define STATUS_TOO_SHORT_ERR BIT(19)
  6809. +#define STATUS_TOO_LONG_ERR BIT(20)
  6810. +#define STATUS_CODE_ERR BIT(21)
  6811. +#define STATUS_MC_HASH_MATCH BIT(22)
  6812. +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
  6813. +#define STATUS_UNICAST_HASH_MATCH BIT(24)
  6814. +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
  6815. +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
  6816. +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
  6817. +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
  6818. +#define MIN_PKT_SIZE 64
  6819. +
  6820. +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
  6821. +{
  6822. + int i;
  6823. +
  6824. + for (i = 0; i < len; i += sizeof(u32)) {
  6825. + *dst = htonl(*src);
  6826. + dst++; src++;
  6827. + }
  6828. +}
  6829. +
  6830. +static void send_dummy_pkt_to_hif(void)
  6831. +{
  6832. + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
  6833. + u32 physaddr;
  6834. + struct class_rx_hdr_t local_hdr;
  6835. + static u32 dummy_pkt[] = {
  6836. + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
  6837. + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
  6838. + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
  6839. + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
  6840. +
  6841. + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
  6842. + if (!ddr_ptr)
  6843. + return;
  6844. +
  6845. + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
  6846. + if (!lmem_ptr)
  6847. + return;
  6848. +
  6849. + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
  6850. + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
  6851. +
  6852. + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
  6853. +
  6854. + local_hdr.phyno = htons(0); /* RX_PHY_0 */
  6855. + local_hdr.length = htons(MIN_PKT_SIZE);
  6856. +
  6857. + local_hdr.next_ptr = htonl((u32)physaddr);
  6858. + /*Mark checksum is correct */
  6859. + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
  6860. + STATUS_UDP_CHECKSUM_CORRECT |
  6861. + STATUS_TCP_CHECKSUM_CORRECT |
  6862. + STATUS_UNICAST_HASH_MATCH |
  6863. + STATUS_CUMULATIVE_ARC_HIT));
  6864. + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
  6865. + sizeof(local_hdr));
  6866. +
  6867. + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
  6868. + 0x40);
  6869. +
  6870. + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
  6871. +}
  6872. +
  6873. +void pfe_hif_rx_idle(struct pfe_hif *hif)
  6874. +{
  6875. + int hif_stop_loop = 10;
  6876. + u32 rx_status;
  6877. +
  6878. + pfe_hif_disable_rx_desc(hif);
  6879. + pr_info("Bringing hif to idle state...");
  6880. + writel(0, HIF_INT_ENABLE);
  6881. + /*If HIF Rx BDP is busy send a dummy packet */
  6882. + do {
  6883. + rx_status = readl(HIF_RX_STATUS);
  6884. + if (rx_status & BDP_CSR_RX_DMA_ACTV)
  6885. + send_dummy_pkt_to_hif();
  6886. +
  6887. + usleep_range(100, 150);
  6888. + } while (--hif_stop_loop);
  6889. +
  6890. + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
  6891. + pr_info("Failed\n");
  6892. + else
  6893. + pr_info("Done\n");
  6894. +}
  6895. +#endif
  6896. +
  6897. +static void pfe_hif_free_descr(struct pfe_hif *hif)
  6898. +{
  6899. + pr_info("%s\n", __func__);
  6900. +
  6901. + dma_free_coherent(pfe->dev,
  6902. + hif->rx_ring_size * sizeof(struct hif_desc) +
  6903. + hif->tx_ring_size * sizeof(struct hif_desc),
  6904. + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
  6905. +}
  6906. +
  6907. +void pfe_hif_desc_dump(struct pfe_hif *hif)
  6908. +{
  6909. + struct hif_desc *desc;
  6910. + unsigned long desc_p;
  6911. + int ii = 0;
  6912. +
  6913. + pr_info("%s\n", __func__);
  6914. +
  6915. + desc = hif->rx_base;
  6916. + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
  6917. + hif->descr_baseaddr_p);
  6918. +
  6919. + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
  6920. + for (ii = 0; ii < hif->rx_ring_size; ii++) {
  6921. + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
  6922. + readl(&desc->status), readl(&desc->ctrl),
  6923. + readl(&desc->data), readl(&desc->next));
  6924. + desc++;
  6925. + }
  6926. +
  6927. + desc = hif->tx_base;
  6928. + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
  6929. + hif->descr_baseaddr_p);
  6930. +
  6931. + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
  6932. + for (ii = 0; ii < hif->tx_ring_size; ii++) {
  6933. + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
  6934. + readl(&desc->status), readl(&desc->ctrl),
  6935. + readl(&desc->data), readl(&desc->next));
  6936. + desc++;
  6937. + }
  6938. +}
  6939. +
  6940. +/* pfe_hif_release_buffers */
  6941. +static void pfe_hif_release_buffers(struct pfe_hif *hif)
  6942. +{
  6943. + struct hif_desc *desc;
  6944. + int i = 0;
  6945. +
  6946. + hif->rx_base = hif->descr_baseaddr_v;
  6947. +
  6948. + pr_info("%s\n", __func__);
  6949. +
  6950. + /*Free Rx buffers */
  6951. + desc = hif->rx_base;
  6952. + for (i = 0; i < hif->rx_ring_size; i++) {
  6953. + if (readl(&desc->data)) {
  6954. + if ((i < hif->shm->rx_buf_pool_cnt) &&
  6955. + (!hif->shm->rx_buf_pool[i])) {
  6956. + /*
  6957. + * dma_unmap_single(hif->dev, desc->data,
  6958. + * hif->rx_buf_len[i], DMA_FROM_DEVICE);
  6959. + */
  6960. + dma_unmap_single(hif->dev,
  6961. + DDR_PFE_TO_PHYS(
  6962. + readl(&desc->data)),
  6963. + hif->rx_buf_len[i],
  6964. + DMA_FROM_DEVICE);
  6965. + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
  6966. + } else {
  6967. + pr_err("%s: buffer pool already full\n"
  6968. + , __func__);
  6969. + }
  6970. + }
  6971. +
  6972. + writel(0, &desc->data);
  6973. + writel(0, &desc->status);
  6974. + writel(0, &desc->ctrl);
  6975. + desc++;
  6976. + }
  6977. +}
  6978. +
  6979. +/*
  6980. + * pfe_hif_init_buffers
  6981. + * This function initializes the HIF Rx/Tx ring descriptors and
  6982. + * initialize Rx queue with buffers.
  6983. + */
  6984. +static int pfe_hif_init_buffers(struct pfe_hif *hif)
  6985. +{
  6986. + struct hif_desc *desc, *first_desc_p;
  6987. + u32 data;
  6988. + int i = 0;
  6989. +
  6990. + pr_info("%s\n", __func__);
  6991. +
  6992. + /* Check enough Rx buffers available in the shared memory */
  6993. + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
  6994. + return -ENOMEM;
  6995. +
  6996. + hif->rx_base = hif->descr_baseaddr_v;
  6997. + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
  6998. +
  6999. + /*Initialize Rx descriptors */
  7000. + desc = hif->rx_base;
  7001. + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
  7002. +
  7003. + for (i = 0; i < hif->rx_ring_size; i++) {
  7004. + /* Initialize Rx buffers from the shared memory */
  7005. +
  7006. + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
  7007. + pfe_pkt_size, DMA_FROM_DEVICE);
  7008. + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
  7009. + hif->rx_buf_len[i] = pfe_pkt_size;
  7010. + hif->shm->rx_buf_pool[i] = NULL;
  7011. +
  7012. + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
  7013. + writel(DDR_PHYS_TO_PFE(data), &desc->data);
  7014. + } else {
  7015. + pr_err("%s : low on mem\n", __func__);
  7016. +
  7017. + goto err;
  7018. + }
  7019. +
  7020. + writel(0, &desc->status);
  7021. +
  7022. + /*
  7023. + * Ensure everything else is written to DDR before
  7024. + * writing bd->ctrl
  7025. + */
  7026. + wmb();
  7027. +
  7028. + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
  7029. + | BD_CTRL_DIR | BD_CTRL_DESC_EN
  7030. + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
  7031. +
  7032. + /* Chain descriptors */
  7033. + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
  7034. + desc++;
  7035. + }
  7036. +
  7037. + /* Overwrite last descriptor to chain it to first one*/
  7038. + desc--;
  7039. + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
  7040. +
  7041. + hif->rxtoclean_index = 0;
  7042. +
  7043. + /*Initialize Rx buffer descriptor ring base address */
  7044. + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
  7045. +
  7046. + hif->tx_base = hif->rx_base + hif->rx_ring_size;
  7047. + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
  7048. + hif->rx_ring_size;
  7049. + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
  7050. +
  7051. + /*Initialize tx descriptors */
  7052. + desc = hif->tx_base;
  7053. +
  7054. + for (i = 0; i < hif->tx_ring_size; i++) {
  7055. + /* Chain descriptors */
  7056. + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
  7057. + writel(0, &desc->ctrl);
  7058. + desc++;
  7059. + }
  7060. +
  7061. + /* Overwrite last descriptor to chain it to first one */
  7062. + desc--;
  7063. + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
  7064. + hif->txavail = hif->tx_ring_size;
  7065. + hif->txtosend = 0;
  7066. + hif->txtoclean = 0;
  7067. + hif->txtoflush = 0;
  7068. +
  7069. + /*Initialize Tx buffer descriptor ring base address */
  7070. + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
  7071. +
  7072. + return 0;
  7073. +
  7074. +err:
  7075. + pfe_hif_release_buffers(hif);
  7076. + return -ENOMEM;
  7077. +}
  7078. +
  7079. +/*
  7080. + * pfe_hif_client_register
  7081. + *
  7082. + * This function used to register a client driver with the HIF driver.
  7083. + *
  7084. + * Return value:
  7085. + * 0 - on Successful registration
  7086. + */
  7087. +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
  7088. + struct hif_client_shm *client_shm)
  7089. +{
  7090. + struct hif_client *client = &hif->client[client_id];
  7091. + u32 i, cnt;
  7092. + struct rx_queue_desc *rx_qbase;
  7093. + struct tx_queue_desc *tx_qbase;
  7094. + struct hif_rx_queue *rx_queue;
  7095. + struct hif_tx_queue *tx_queue;
  7096. + int err = 0;
  7097. +
  7098. + pr_info("%s\n", __func__);
  7099. +
  7100. + spin_lock_bh(&hif->tx_lock);
  7101. +
  7102. + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
  7103. + pr_err("%s: client %d already registered\n",
  7104. + __func__, client_id);
  7105. + err = -1;
  7106. + goto unlock;
  7107. + }
  7108. +
  7109. + memset(client, 0, sizeof(struct hif_client));
  7110. +
  7111. + /* Initialize client Rx queues baseaddr, size */
  7112. +
  7113. + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
  7114. + /* Check if client is requesting for more queues than supported */
  7115. + if (cnt > HIF_CLIENT_QUEUES_MAX)
  7116. + cnt = HIF_CLIENT_QUEUES_MAX;
  7117. +
  7118. + client->rx_qn = cnt;
  7119. + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
  7120. + for (i = 0; i < cnt; i++) {
  7121. + rx_queue = &client->rx_q[i];
  7122. + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
  7123. + rx_queue->size = client_shm->rx_qsize;
  7124. + rx_queue->write_idx = 0;
  7125. + }
  7126. +
  7127. + /* Initialize client Tx queues baseaddr, size */
  7128. + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
  7129. +
  7130. + /* Check if client is requesting for more queues than supported */
  7131. + if (cnt > HIF_CLIENT_QUEUES_MAX)
  7132. + cnt = HIF_CLIENT_QUEUES_MAX;
  7133. +
  7134. + client->tx_qn = cnt;
  7135. + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
  7136. + for (i = 0; i < cnt; i++) {
  7137. + tx_queue = &client->tx_q[i];
  7138. + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
  7139. + tx_queue->size = client_shm->tx_qsize;
  7140. + tx_queue->ack_idx = 0;
  7141. + }
  7142. +
  7143. + set_bit(client_id, &hif->shm->g_client_status[0]);
  7144. +
  7145. +unlock:
  7146. + spin_unlock_bh(&hif->tx_lock);
  7147. +
  7148. + return err;
  7149. +}
  7150. +
  7151. +/*
  7152. + * pfe_hif_client_unregister
  7153. + *
  7154. + * This function used to unregister a client from the HIF driver.
  7155. + *
  7156. + */
  7157. +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
  7158. +{
  7159. + pr_info("%s\n", __func__);
  7160. +
  7161. + /*
  7162. + * Mark client as no longer available (which prevents further packet
  7163. + * receive for this client)
  7164. + */
  7165. + spin_lock_bh(&hif->tx_lock);
  7166. +
  7167. + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
  7168. + pr_err("%s: client %d not registered\n", __func__,
  7169. + client_id);
  7170. +
  7171. + spin_unlock_bh(&hif->tx_lock);
  7172. + return;
  7173. + }
  7174. +
  7175. + clear_bit(client_id, &hif->shm->g_client_status[0]);
  7176. +
  7177. + spin_unlock_bh(&hif->tx_lock);
  7178. +}
  7179. +
  7180. +/*
  7181. + * client_put_rxpacket-
  7182. + * This functions puts the Rx pkt in the given client Rx queue.
  7183. + * It actually swap the Rx pkt in the client Rx descriptor buffer
  7184. + * and returns the free buffer from it.
  7185. + *
  7186. + * If the function returns NULL means client Rx queue is full and
  7187. + * packet couldn't send to client queue.
  7188. + */
  7189. +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
  7190. + u32 flags, u32 client_ctrl, u32 *rem_len)
  7191. +{
  7192. + void *free_pkt = NULL;
  7193. + struct rx_queue_desc *desc = queue->base + queue->write_idx;
  7194. +
  7195. + if (readl(&desc->ctrl) & CL_DESC_OWN) {
  7196. + if (page_mode) {
  7197. + int rem_page_size = PAGE_SIZE -
  7198. + PRESENT_OFST_IN_PAGE(pkt);
  7199. + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
  7200. + pfe_pkt_headroom);
  7201. + *rem_len = (rem_page_size - cur_pkt_size);
  7202. + if (*rem_len) {
  7203. + free_pkt = pkt + cur_pkt_size;
  7204. + get_page(virt_to_page(free_pkt));
  7205. + } else {
  7206. + free_pkt = (void
  7207. + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
  7208. + *rem_len = pfe_pkt_size;
  7209. + }
  7210. + } else {
  7211. + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
  7212. + GFP_DMA_PFE);
  7213. + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
  7214. + }
  7215. +
  7216. + if (free_pkt) {
  7217. + desc->data = pkt;
  7218. + desc->client_ctrl = client_ctrl;
  7219. + /*
  7220. + * Ensure everything else is written to DDR before
  7221. + * writing bd->ctrl
  7222. + */
  7223. + smp_wmb();
  7224. + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
  7225. + queue->write_idx = (queue->write_idx + 1)
  7226. + & (queue->size - 1);
  7227. +
  7228. + free_pkt += pfe_pkt_headroom;
  7229. + }
  7230. + }
  7231. +
  7232. + return free_pkt;
  7233. +}
  7234. +
  7235. +/*
  7236. + * pfe_hif_rx_process-
  7237. + * This function does pfe hif rx queue processing.
  7238. + * Dequeue packet from Rx queue and send it to corresponding client queue
  7239. + */
  7240. +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
  7241. +{
  7242. + struct hif_desc *desc;
  7243. + struct hif_hdr *pkt_hdr;
  7244. + struct __hif_hdr hif_hdr;
  7245. + void *free_buf;
  7246. + int rtc, len, rx_processed = 0;
  7247. + struct __hif_desc local_desc;
  7248. + int flags;
  7249. + unsigned int desc_p;
  7250. + unsigned int buf_size = 0;
  7251. +
  7252. + spin_lock_bh(&hif->lock);
  7253. +
  7254. + rtc = hif->rxtoclean_index;
  7255. +
  7256. + while (rx_processed < budget) {
  7257. + desc = hif->rx_base + rtc;
  7258. +
  7259. + __memcpy12(&local_desc, desc);
  7260. +
  7261. + /* ACK pending Rx interrupt */
  7262. + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
  7263. + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
  7264. +
  7265. + if (rx_processed == 0) {
  7266. + if (napi_first_batch == 1) {
  7267. + desc_p = hif->descr_baseaddr_p +
  7268. + ((unsigned long int)(desc) -
  7269. + (unsigned long
  7270. + int)hif->descr_baseaddr_v);
  7271. + napi_first_batch = 0;
  7272. + }
  7273. + }
  7274. +
  7275. + __memcpy12(&local_desc, desc);
  7276. +
  7277. + if (local_desc.ctrl & BD_CTRL_DESC_EN)
  7278. + break;
  7279. + }
  7280. +
  7281. + napi_first_batch = 0;
  7282. +
  7283. +#ifdef HIF_NAPI_STATS
  7284. + hif->napi_counters[NAPI_DESC_COUNT]++;
  7285. +#endif
  7286. + len = BD_BUF_LEN(local_desc.ctrl);
  7287. + /*
  7288. + * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
  7289. + * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
  7290. + */
  7291. + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
  7292. + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
  7293. +
  7294. + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
  7295. +
  7296. + /* Track last HIF header received */
  7297. + if (!hif->started) {
  7298. + hif->started = 1;
  7299. +
  7300. + __memcpy8(&hif_hdr, pkt_hdr);
  7301. +
  7302. + hif->qno = hif_hdr.hdr.q_num;
  7303. + hif->client_id = hif_hdr.hdr.client_id;
  7304. + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
  7305. + hif_hdr.hdr.client_ctrl;
  7306. + flags = CL_DESC_FIRST;
  7307. +
  7308. + } else {
  7309. + flags = 0;
  7310. + }
  7311. +
  7312. + if (local_desc.ctrl & BD_CTRL_LIFM)
  7313. + flags |= CL_DESC_LAST;
  7314. +
  7315. + /* Check for valid client id and still registered */
  7316. + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
  7317. + !(test_bit(hif->client_id,
  7318. + &hif->shm->g_client_status[0]))) {
  7319. + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
  7320. + __func__,
  7321. + hif->client_id,
  7322. + hif->qno);
  7323. +
  7324. + free_buf = pkt_hdr;
  7325. +
  7326. + goto pkt_drop;
  7327. + }
  7328. +
  7329. + /* Check to valid queue number */
  7330. + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
  7331. + pr_info("%s: packet with invalid queue: %d\n"
  7332. + , __func__, hif->qno);
  7333. + hif->qno = 0;
  7334. + }
  7335. +
  7336. + free_buf =
  7337. + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
  7338. + (void *)pkt_hdr, len, flags,
  7339. + hif->client_ctrl, &buf_size);
  7340. +
  7341. + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
  7342. + hif->qno);
  7343. +
  7344. + if (unlikely(!free_buf)) {
  7345. +#ifdef HIF_NAPI_STATS
  7346. + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
  7347. +#endif
  7348. + /*
  7349. + * If we want to keep in polling mode to retry later,
  7350. + * we need to tell napi that we consumed
  7351. + * the full budget or we will hit a livelock scenario.
  7352. + * The core code keeps this napi instance
  7353. + * at the head of the list and none of the other
  7354. + * instances get to run
  7355. + */
  7356. + rx_processed = budget;
  7357. +
  7358. + if (flags & CL_DESC_FIRST)
  7359. + hif->started = 0;
  7360. +
  7361. + break;
  7362. + }
  7363. +
  7364. +pkt_drop:
  7365. + /*Fill free buffer in the descriptor */
  7366. + hif->rx_buf_addr[rtc] = free_buf;
  7367. + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
  7368. + writel((DDR_PHYS_TO_PFE
  7369. + ((u32)dma_map_single(hif->dev,
  7370. + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
  7371. + &desc->data);
  7372. + /*
  7373. + * Ensure everything else is written to DDR before
  7374. + * writing bd->ctrl
  7375. + */
  7376. + wmb();
  7377. + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
  7378. + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
  7379. + &desc->ctrl);
  7380. +
  7381. + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
  7382. +
  7383. + if (local_desc.ctrl & BD_CTRL_LIFM) {
  7384. + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
  7385. + rx_processed++;
  7386. +
  7387. +#ifdef HIF_NAPI_STATS
  7388. + hif->napi_counters[NAPI_PACKET_COUNT]++;
  7389. +#endif
  7390. + }
  7391. + hif->started = 0;
  7392. + }
  7393. + }
  7394. +
  7395. + hif->rxtoclean_index = rtc;
  7396. + spin_unlock_bh(&hif->lock);
  7397. +
  7398. + /* we made some progress, re-start rx dma in case it stopped */
  7399. + hif_rx_dma_start();
  7400. +
  7401. + return rx_processed;
  7402. +}
  7403. +
  7404. +/*
  7405. + * client_ack_txpacket-
  7406. + * This function ack the Tx packet in the give client Tx queue by resetting
  7407. + * ownership bit in the descriptor.
  7408. + */
  7409. +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
  7410. + unsigned int q_no)
  7411. +{
  7412. + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
  7413. + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
  7414. +
  7415. + if (readl(&desc->ctrl) & CL_DESC_OWN) {
  7416. + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
  7417. + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
  7418. +
  7419. + return 0;
  7420. +
  7421. + } else {
  7422. + /*This should not happen */
  7423. + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
  7424. + hif->txtosend, hif->txtoclean, hif->txavail,
  7425. + client_id, q_no, queue, queue->ack_idx);
  7426. + WARN(1, "%s: doesn't own this descriptor", __func__);
  7427. + return 1;
  7428. + }
  7429. +}
  7430. +
  7431. +void __hif_tx_done_process(struct pfe_hif *hif, int count)
  7432. +{
  7433. + struct hif_desc *desc;
  7434. + struct hif_desc_sw *desc_sw;
  7435. + int ttc, tx_avl;
  7436. + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
  7437. +
  7438. + ttc = hif->txtoclean;
  7439. + tx_avl = hif->txavail;
  7440. +
  7441. + while ((tx_avl < hif->tx_ring_size) && count--) {
  7442. + desc = hif->tx_base + ttc;
  7443. +
  7444. + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
  7445. + break;
  7446. +
  7447. + desc_sw = &hif->tx_sw_queue[ttc];
  7448. +
  7449. + if (desc_sw->data) {
  7450. + /*
  7451. + * dmap_unmap_single(hif->dev, desc_sw->data,
  7452. + * desc_sw->len, DMA_TO_DEVICE);
  7453. + */
  7454. + dma_unmap_single(hif->dev, desc_sw->data,
  7455. + desc_sw->len, DMA_TO_DEVICE);
  7456. + }
  7457. +
  7458. + if (desc_sw->client_id > HIF_CLIENTS_MAX)
  7459. + pr_err("Invalid cl id %d\n", desc_sw->client_id);
  7460. +
  7461. + pkts_done[desc_sw->client_id]++;
  7462. +
  7463. + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
  7464. +
  7465. + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
  7466. + tx_avl++;
  7467. + }
  7468. +
  7469. + if (pkts_done[0])
  7470. + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
  7471. + if (pkts_done[1])
  7472. + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
  7473. +
  7474. + hif->txtoclean = ttc;
  7475. + hif->txavail = tx_avl;
  7476. +
  7477. + if (!count) {
  7478. + tasklet_schedule(&hif->tx_cleanup_tasklet);
  7479. + } else {
  7480. + /*Enable Tx done interrupt */
  7481. + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
  7482. + HIF_INT_ENABLE);
  7483. + }
  7484. +}
  7485. +
  7486. +static void pfe_tx_do_cleanup(unsigned long data)
  7487. +{
  7488. + struct pfe_hif *hif = (struct pfe_hif *)data;
  7489. +
  7490. + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
  7491. +
  7492. + hif_tx_done_process(hif, 64);
  7493. +}
  7494. +
  7495. +/*
  7496. + * __hif_xmit_pkt -
  7497. + * This function puts one packet in the HIF Tx queue
  7498. + */
  7499. +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
  7500. + q_no, void *data, u32 len, unsigned int flags)
  7501. +{
  7502. + struct hif_desc *desc;
  7503. + struct hif_desc_sw *desc_sw;
  7504. +
  7505. + desc = hif->tx_base + hif->txtosend;
  7506. + desc_sw = &hif->tx_sw_queue[hif->txtosend];
  7507. +
  7508. + desc_sw->len = len;
  7509. + desc_sw->client_id = client_id;
  7510. + desc_sw->q_no = q_no;
  7511. + desc_sw->flags = flags;
  7512. +
  7513. + if (flags & HIF_DONT_DMA_MAP) {
  7514. + desc_sw->data = 0;
  7515. + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
  7516. + } else {
  7517. + desc_sw->data = dma_map_single(hif->dev, data, len,
  7518. + DMA_TO_DEVICE);
  7519. + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
  7520. + }
  7521. +
  7522. + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
  7523. + hif->txavail--;
  7524. +
  7525. + if ((!((flags & HIF_DATA_VALID) && (flags &
  7526. + HIF_LAST_BUFFER))))
  7527. + goto skip_tx;
  7528. +
  7529. + /*
  7530. + * Ensure everything else is written to DDR before
  7531. + * writing bd->ctrl
  7532. + */
  7533. + wmb();
  7534. +
  7535. + do {
  7536. + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
  7537. + desc = hif->tx_base + hif->txtoflush;
  7538. +
  7539. + if (desc_sw->flags & HIF_LAST_BUFFER) {
  7540. + writel((BD_CTRL_LIFM |
  7541. + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
  7542. + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
  7543. + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
  7544. + &desc->ctrl);
  7545. + } else {
  7546. + writel((BD_CTRL_DESC_EN |
  7547. + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
  7548. + }
  7549. + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
  7550. + }
  7551. + while (hif->txtoflush != hif->txtosend)
  7552. + ;
  7553. +
  7554. +skip_tx:
  7555. + return;
  7556. +}
  7557. +
  7558. +static irqreturn_t wol_isr(int irq, void *dev_id)
  7559. +{
  7560. + pr_info("WoL\n");
  7561. + gemac_set_wol(EMAC1_BASE_ADDR, 0);
  7562. + gemac_set_wol(EMAC2_BASE_ADDR, 0);
  7563. + return IRQ_HANDLED;
  7564. +}
  7565. +
  7566. +/*
  7567. + * hif_isr-
  7568. + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
  7569. + */
  7570. +static irqreturn_t hif_isr(int irq, void *dev_id)
  7571. +{
  7572. + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
  7573. + int int_status;
  7574. + int int_enable_mask;
  7575. +
  7576. + /*Read hif interrupt source register */
  7577. + int_status = readl_relaxed(HIF_INT_SRC);
  7578. + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
  7579. +
  7580. + if ((int_status & HIF_INT) == 0)
  7581. + return IRQ_NONE;
  7582. +
  7583. + int_status &= ~(HIF_INT);
  7584. +
  7585. + if (int_status & HIF_RXPKT_INT) {
  7586. + int_status &= ~(HIF_RXPKT_INT);
  7587. + int_enable_mask &= ~(HIF_RXPKT_INT);
  7588. +
  7589. + napi_first_batch = 1;
  7590. +
  7591. + if (napi_schedule_prep(&hif->napi)) {
  7592. +#ifdef HIF_NAPI_STATS
  7593. + hif->napi_counters[NAPI_SCHED_COUNT]++;
  7594. +#endif
  7595. + __napi_schedule(&hif->napi);
  7596. + }
  7597. + }
  7598. +
  7599. + if (int_status & HIF_TXPKT_INT) {
  7600. + int_status &= ~(HIF_TXPKT_INT);
  7601. + int_enable_mask &= ~(HIF_TXPKT_INT);
  7602. + /*Schedule tx cleanup tassklet */
  7603. + tasklet_schedule(&hif->tx_cleanup_tasklet);
  7604. + }
  7605. +
  7606. + /*Disable interrupts, they will be enabled after they are serviced */
  7607. + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
  7608. +
  7609. + if (int_status) {
  7610. + pr_info("%s : Invalid interrupt : %d\n", __func__,
  7611. + int_status);
  7612. + writel(int_status, HIF_INT_SRC);
  7613. + }
  7614. +
  7615. + return IRQ_HANDLED;
  7616. +}
  7617. +
  7618. +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
  7619. +{
  7620. + unsigned int client_id = data1;
  7621. +
  7622. + if (client_id >= HIF_CLIENTS_MAX) {
  7623. + pr_err("%s: client id %d out of bounds\n", __func__,
  7624. + client_id);
  7625. + return;
  7626. + }
  7627. +
  7628. + switch (req) {
  7629. + case REQUEST_CL_REGISTER:
  7630. + /* Request for register a client */
  7631. + pr_info("%s: register client_id %d\n",
  7632. + __func__, client_id);
  7633. + pfe_hif_client_register(hif, client_id, (struct
  7634. + hif_client_shm *)&hif->shm->client[client_id]);
  7635. + break;
  7636. +
  7637. + case REQUEST_CL_UNREGISTER:
  7638. + pr_info("%s: unregister client_id %d\n",
  7639. + __func__, client_id);
  7640. +
  7641. + /* Request for unregister a client */
  7642. + pfe_hif_client_unregister(hif, client_id);
  7643. +
  7644. + break;
  7645. +
  7646. + default:
  7647. + pr_err("%s: unsupported request %d\n",
  7648. + __func__, req);
  7649. + break;
  7650. + }
  7651. +
  7652. + /*
  7653. + * Process client Tx queues
  7654. + * Currently we don't have checking for tx pending
  7655. + */
  7656. +}
  7657. +
  7658. +/*
  7659. + * pfe_hif_rx_poll
  7660. + * This function is NAPI poll function to process HIF Rx queue.
  7661. + */
  7662. +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
  7663. +{
  7664. + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
  7665. + int work_done;
  7666. +
  7667. +#ifdef HIF_NAPI_STATS
  7668. + hif->napi_counters[NAPI_POLL_COUNT]++;
  7669. +#endif
  7670. +
  7671. + work_done = pfe_hif_rx_process(hif, budget);
  7672. +
  7673. + if (work_done < budget) {
  7674. + napi_complete(napi);
  7675. + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
  7676. + HIF_INT_ENABLE);
  7677. + }
  7678. +#ifdef HIF_NAPI_STATS
  7679. + else
  7680. + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
  7681. +#endif
  7682. +
  7683. + return work_done;
  7684. +}
  7685. +
  7686. +/*
  7687. + * pfe_hif_init
  7688. + * This function initializes the baseaddresses and irq, etc.
  7689. + */
  7690. +int pfe_hif_init(struct pfe *pfe)
  7691. +{
  7692. + struct pfe_hif *hif = &pfe->hif;
  7693. + int err;
  7694. +
  7695. + pr_info("%s\n", __func__);
  7696. +
  7697. + hif->dev = pfe->dev;
  7698. + hif->irq = pfe->hif_irq;
  7699. +
  7700. + err = pfe_hif_alloc_descr(hif);
  7701. + if (err)
  7702. + goto err0;
  7703. +
  7704. + if (pfe_hif_init_buffers(hif)) {
  7705. + pr_err("%s: Could not initialize buffer descriptors\n"
  7706. + , __func__);
  7707. + err = -ENOMEM;
  7708. + goto err1;
  7709. + }
  7710. +
  7711. + /* Initialize NAPI for Rx processing */
  7712. + init_dummy_netdev(&hif->dummy_dev);
  7713. + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
  7714. + HIF_RX_POLL_WEIGHT);
  7715. + napi_enable(&hif->napi);
  7716. +
  7717. + spin_lock_init(&hif->tx_lock);
  7718. + spin_lock_init(&hif->lock);
  7719. +
  7720. + hif_init();
  7721. + hif_rx_enable();
  7722. + hif_tx_enable();
  7723. +
  7724. + /* Disable tx done interrupt */
  7725. + writel(HIF_INT_MASK, HIF_INT_ENABLE);
  7726. +
  7727. + gpi_enable(HGPI_BASE_ADDR);
  7728. +
  7729. + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
  7730. + if (err) {
  7731. + pr_err("%s: failed to get the hif IRQ = %d\n",
  7732. + __func__, hif->irq);
  7733. + goto err1;
  7734. + }
  7735. +
  7736. + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
  7737. + if (err) {
  7738. + pr_err("%s: failed to get the wol IRQ = %d\n",
  7739. + __func__, pfe->wol_irq);
  7740. + goto err1;
  7741. + }
  7742. +
  7743. + tasklet_init(&hif->tx_cleanup_tasklet,
  7744. + (void(*)(unsigned long))pfe_tx_do_cleanup,
  7745. + (unsigned long)hif);
  7746. +
  7747. + return 0;
  7748. +err1:
  7749. + pfe_hif_free_descr(hif);
  7750. +err0:
  7751. + return err;
  7752. +}
  7753. +
  7754. +/* pfe_hif_exit- */
  7755. +void pfe_hif_exit(struct pfe *pfe)
  7756. +{
  7757. + struct pfe_hif *hif = &pfe->hif;
  7758. +
  7759. + pr_info("%s\n", __func__);
  7760. +
  7761. + tasklet_kill(&hif->tx_cleanup_tasklet);
  7762. +
  7763. + spin_lock_bh(&hif->lock);
  7764. + hif->shm->g_client_status[0] = 0;
  7765. + /* Make sure all clients are disabled*/
  7766. + hif->shm->g_client_status[1] = 0;
  7767. +
  7768. + spin_unlock_bh(&hif->lock);
  7769. +
  7770. + /*Disable Rx/Tx */
  7771. + gpi_disable(HGPI_BASE_ADDR);
  7772. + hif_rx_disable();
  7773. + hif_tx_disable();
  7774. +
  7775. + napi_disable(&hif->napi);
  7776. + netif_napi_del(&hif->napi);
  7777. +
  7778. + free_irq(pfe->wol_irq, pfe);
  7779. + free_irq(hif->irq, hif);
  7780. +
  7781. + pfe_hif_release_buffers(hif);
  7782. + pfe_hif_free_descr(hif);
  7783. +}
  7784. --- /dev/null
  7785. +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
  7786. @@ -0,0 +1,211 @@
  7787. +/*
  7788. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  7789. + * Copyright 2017 NXP
  7790. + *
  7791. + * This program is free software; you can redistribute it and/or modify
  7792. + * it under the terms of the GNU General Public License as published by
  7793. + * the Free Software Foundation; either version 2 of the License, or
  7794. + * (at your option) any later version.
  7795. + *
  7796. + * This program is distributed in the hope that it will be useful,
  7797. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7798. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  7799. + * GNU General Public License for more details.
  7800. + *
  7801. + * You should have received a copy of the GNU General Public License
  7802. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  7803. + */
  7804. +
  7805. +#ifndef _PFE_HIF_H_
  7806. +#define _PFE_HIF_H_
  7807. +
  7808. +#include <linux/netdevice.h>
  7809. +
  7810. +#define HIF_NAPI_STATS
  7811. +
  7812. +#define HIF_CLIENT_QUEUES_MAX 16
  7813. +#define HIF_RX_POLL_WEIGHT 64
  7814. +
  7815. +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
  7816. +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
  7817. +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
  7818. + & HIF_RX_PKT_MIN_SIZE_MASK)
  7819. +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
  7820. + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
  7821. +
  7822. +enum {
  7823. + NAPI_SCHED_COUNT = 0,
  7824. + NAPI_POLL_COUNT,
  7825. + NAPI_PACKET_COUNT,
  7826. + NAPI_DESC_COUNT,
  7827. + NAPI_FULL_BUDGET_COUNT,
  7828. + NAPI_CLIENT_FULL_COUNT,
  7829. + NAPI_MAX_COUNT
  7830. +};
  7831. +
  7832. +/*
  7833. + * HIF_TX_DESC_NT value should be always greater than 4,
  7834. + * Otherwise HIF_TX_POLL_MARK will become zero.
  7835. + */
  7836. +#define HIF_RX_DESC_NT 256
  7837. +#define HIF_TX_DESC_NT 2048
  7838. +
  7839. +#define HIF_FIRST_BUFFER BIT(0)
  7840. +#define HIF_LAST_BUFFER BIT(1)
  7841. +#define HIF_DONT_DMA_MAP BIT(2)
  7842. +#define HIF_DATA_VALID BIT(3)
  7843. +#define HIF_TSO BIT(4)
  7844. +
  7845. +enum {
  7846. + PFE_CL_GEM0 = 0,
  7847. + PFE_CL_GEM1,
  7848. + HIF_CLIENTS_MAX
  7849. +};
  7850. +
  7851. +/*structure to store client queue info */
  7852. +struct hif_rx_queue {
  7853. + struct rx_queue_desc *base;
  7854. + u32 size;
  7855. + u32 write_idx;
  7856. +};
  7857. +
  7858. +struct hif_tx_queue {
  7859. + struct tx_queue_desc *base;
  7860. + u32 size;
  7861. + u32 ack_idx;
  7862. +};
  7863. +
  7864. +/*Structure to store the client info */
  7865. +struct hif_client {
  7866. + int rx_qn;
  7867. + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
  7868. + int tx_qn;
  7869. + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
  7870. +};
  7871. +
  7872. +/*HIF hardware buffer descriptor */
  7873. +struct hif_desc {
  7874. + u32 ctrl;
  7875. + u32 status;
  7876. + u32 data;
  7877. + u32 next;
  7878. +};
  7879. +
  7880. +struct __hif_desc {
  7881. + u32 ctrl;
  7882. + u32 status;
  7883. + u32 data;
  7884. +};
  7885. +
  7886. +struct hif_desc_sw {
  7887. + dma_addr_t data;
  7888. + u16 len;
  7889. + u8 client_id;
  7890. + u8 q_no;
  7891. + u16 flags;
  7892. +};
  7893. +
  7894. +struct hif_hdr {
  7895. + u8 client_id;
  7896. + u8 q_num;
  7897. + u16 client_ctrl;
  7898. + u16 client_ctrl1;
  7899. +};
  7900. +
  7901. +struct __hif_hdr {
  7902. + union {
  7903. + struct hif_hdr hdr;
  7904. + u32 word[2];
  7905. + };
  7906. +};
  7907. +
  7908. +struct hif_ipsec_hdr {
  7909. + u16 sa_handle[2];
  7910. +} __packed;
  7911. +
  7912. +/* HIF_CTRL_TX... defines */
  7913. +#define HIF_CTRL_TX_CHECKSUM BIT(2)
  7914. +
  7915. +/* HIF_CTRL_RX... defines */
  7916. +#define HIF_CTRL_RX_OFFSET_OFST (24)
  7917. +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
  7918. +#define HIF_CTRL_RX_CONTINUED BIT(1)
  7919. +
  7920. +struct pfe_hif {
  7921. + /* To store registered clients in hif layer */
  7922. + struct hif_client client[HIF_CLIENTS_MAX];
  7923. + struct hif_shm *shm;
  7924. + int irq;
  7925. +
  7926. + void *descr_baseaddr_v;
  7927. + unsigned long descr_baseaddr_p;
  7928. +
  7929. + struct hif_desc *rx_base;
  7930. + u32 rx_ring_size;
  7931. + u32 rxtoclean_index;
  7932. + void *rx_buf_addr[HIF_RX_DESC_NT];
  7933. + int rx_buf_len[HIF_RX_DESC_NT];
  7934. + unsigned int qno;
  7935. + unsigned int client_id;
  7936. + unsigned int client_ctrl;
  7937. + unsigned int started;
  7938. +
  7939. + struct hif_desc *tx_base;
  7940. + u32 tx_ring_size;
  7941. + u32 txtosend;
  7942. + u32 txtoclean;
  7943. + u32 txavail;
  7944. + u32 txtoflush;
  7945. + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
  7946. +
  7947. +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
  7948. + spinlock_t tx_lock;
  7949. +/* lock synchronizes hif rx queue processing */
  7950. + spinlock_t lock;
  7951. + struct net_device dummy_dev;
  7952. + struct napi_struct napi;
  7953. + struct device *dev;
  7954. +
  7955. +#ifdef HIF_NAPI_STATS
  7956. + unsigned int napi_counters[NAPI_MAX_COUNT];
  7957. +#endif
  7958. + struct tasklet_struct tx_cleanup_tasklet;
  7959. +};
  7960. +
  7961. +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
  7962. + q_no, void *data, u32 len, unsigned int flags);
  7963. +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
  7964. + void *data, unsigned int len);
  7965. +void __hif_tx_done_process(struct pfe_hif *hif, int count);
  7966. +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
  7967. + data2);
  7968. +int pfe_hif_init(struct pfe *pfe);
  7969. +void pfe_hif_exit(struct pfe *pfe);
  7970. +void pfe_hif_rx_idle(struct pfe_hif *hif);
  7971. +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
  7972. +{
  7973. + spin_lock_bh(&hif->tx_lock);
  7974. + __hif_tx_done_process(hif, count);
  7975. + spin_unlock_bh(&hif->tx_lock);
  7976. +}
  7977. +
  7978. +static inline void hif_tx_lock(struct pfe_hif *hif)
  7979. +{
  7980. + spin_lock_bh(&hif->tx_lock);
  7981. +}
  7982. +
  7983. +static inline void hif_tx_unlock(struct pfe_hif *hif)
  7984. +{
  7985. + spin_unlock_bh(&hif->tx_lock);
  7986. +}
  7987. +
  7988. +static inline int __hif_tx_avail(struct pfe_hif *hif)
  7989. +{
  7990. + return hif->txavail;
  7991. +}
  7992. +
  7993. +#define __memcpy8(dst, src) memcpy(dst, src, 8)
  7994. +#define __memcpy12(dst, src) memcpy(dst, src, 12)
  7995. +#define __memcpy(dst, src, len) memcpy(dst, src, len)
  7996. +
  7997. +#endif /* _PFE_HIF_H_ */
  7998. --- /dev/null
  7999. +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
  8000. @@ -0,0 +1,640 @@
  8001. +/*
  8002. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  8003. + * Copyright 2017 NXP
  8004. + *
  8005. + * This program is free software; you can redistribute it and/or modify
  8006. + * it under the terms of the GNU General Public License as published by
  8007. + * the Free Software Foundation; either version 2 of the License, or
  8008. + * (at your option) any later version.
  8009. + *
  8010. + * This program is distributed in the hope that it will be useful,
  8011. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8012. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8013. + * GNU General Public License for more details.
  8014. + *
  8015. + * You should have received a copy of the GNU General Public License
  8016. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  8017. + */
  8018. +
  8019. +#include <linux/version.h>
  8020. +#include <linux/kernel.h>
  8021. +#include <linux/slab.h>
  8022. +#include <linux/interrupt.h>
  8023. +#include <linux/workqueue.h>
  8024. +#include <linux/dma-mapping.h>
  8025. +#include <linux/dmapool.h>
  8026. +#include <linux/sched.h>
  8027. +#include <linux/skbuff.h>
  8028. +#include <linux/moduleparam.h>
  8029. +#include <linux/cpu.h>
  8030. +
  8031. +#include "pfe_mod.h"
  8032. +#include "pfe_hif.h"
  8033. +#include "pfe_hif_lib.h"
  8034. +
  8035. +unsigned int lro_mode;
  8036. +unsigned int page_mode;
  8037. +unsigned int tx_qos = 1;
  8038. +module_param(tx_qos, uint, 0444);
  8039. +MODULE_PARM_DESC(tx_qos, "0: disable ,\n"
  8040. + "1: enable (default), guarantee no packet drop at TMU level\n");
  8041. +unsigned int pfe_pkt_size;
  8042. +unsigned int pfe_pkt_headroom;
  8043. +unsigned int emac_txq_cnt;
  8044. +
  8045. +/*
  8046. + * @pfe_hif_lib.c.
  8047. + * Common functions used by HIF client drivers
  8048. + */
  8049. +
  8050. +/*HIF shared memory Global variable */
  8051. +struct hif_shm ghif_shm;
  8052. +
  8053. +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
  8054. + * This function should be called after pfe_hif_exit
  8055. + *
  8056. + * @param[in] hif_shm Shared memory address location in DDR
  8057. + */
  8058. +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
  8059. +{
  8060. + int i;
  8061. + void *pkt;
  8062. +
  8063. + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
  8064. + pkt = hif_shm->rx_buf_pool[i];
  8065. + if (pkt) {
  8066. + hif_shm->rx_buf_pool[i] = NULL;
  8067. + pkt -= pfe_pkt_headroom;
  8068. +
  8069. + if (page_mode)
  8070. + put_page(virt_to_page(pkt));
  8071. + else
  8072. + kfree(pkt);
  8073. + }
  8074. + }
  8075. +}
  8076. +
  8077. +/* Initialize shared memory used between HIF driver and clients,
  8078. + * allocate rx_buffer_pool required for HIF Rx descriptors.
  8079. + * This function should be called before initializing HIF driver.
  8080. + *
  8081. + * @param[in] hif_shm Shared memory address location in DDR
  8082. + * @return 0 - on success, <0 on fail to initialize
  8083. + */
  8084. +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
  8085. +{
  8086. + int i;
  8087. + void *pkt;
  8088. +
  8089. + memset(hif_shm, 0, sizeof(struct hif_shm));
  8090. + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
  8091. +
  8092. + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
  8093. + if (page_mode) {
  8094. + pkt = (void *)__get_free_page(GFP_KERNEL |
  8095. + GFP_DMA_PFE);
  8096. + } else {
  8097. + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
  8098. + }
  8099. +
  8100. + if (pkt)
  8101. + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
  8102. + else
  8103. + goto err0;
  8104. + }
  8105. +
  8106. + return 0;
  8107. +
  8108. +err0:
  8109. + pr_err("%s Low memory\n", __func__);
  8110. + pfe_hif_shm_clean(hif_shm);
  8111. + return -ENOMEM;
  8112. +}
  8113. +
  8114. +/*This function sends indication to HIF driver
  8115. + *
  8116. + * @param[in] hif hif context
  8117. + */
  8118. +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
  8119. + data2)
  8120. +{
  8121. + hif_process_client_req(hif, req, data1, data2);
  8122. +}
  8123. +
  8124. +void hif_lib_indicate_client(int client_id, int event_type, int qno)
  8125. +{
  8126. + struct hif_client_s *client = pfe->hif_client[client_id];
  8127. +
  8128. + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
  8129. + HIF_CLIENT_QUEUES_MAX))
  8130. + return;
  8131. +
  8132. + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
  8133. + client->event_handler(client->priv, event_type, qno);
  8134. +}
  8135. +
  8136. +/*This function releases Rx queue descriptors memory and pre-filled buffers
  8137. + *
  8138. + * @param[in] client hif_client context
  8139. + */
  8140. +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
  8141. +{
  8142. + struct rx_queue_desc *desc;
  8143. + int qno, ii;
  8144. + void *buf;
  8145. +
  8146. + for (qno = 0; qno < client->rx_qn; qno++) {
  8147. + desc = client->rx_q[qno].base;
  8148. +
  8149. + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
  8150. + buf = (void *)desc->data;
  8151. + if (buf) {
  8152. + buf -= pfe_pkt_headroom;
  8153. +
  8154. + if (page_mode)
  8155. + free_page((unsigned long)buf);
  8156. + else
  8157. + kfree(buf);
  8158. +
  8159. + desc->ctrl = 0;
  8160. + }
  8161. +
  8162. + desc++;
  8163. + }
  8164. + }
  8165. +
  8166. + kfree(client->rx_qbase);
  8167. +}
  8168. +
  8169. +/*This function allocates memory for the rxq descriptors and pre-fill rx queues
  8170. + * with buffers.
  8171. + * @param[in] client client context
  8172. + * @param[in] q_size size of the rxQ, all queues are of same size
  8173. + */
  8174. +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
  8175. + q_size)
  8176. +{
  8177. + struct rx_queue_desc *desc;
  8178. + struct hif_client_rx_queue *queue;
  8179. + int ii, qno;
  8180. +
  8181. + /*Allocate memory for the client queues */
  8182. + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
  8183. + rx_queue_desc), GFP_KERNEL);
  8184. + if (!client->rx_qbase)
  8185. + goto err;
  8186. +
  8187. + for (qno = 0; qno < client->rx_qn; qno++) {
  8188. + queue = &client->rx_q[qno];
  8189. +
  8190. + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
  8191. + rx_queue_desc);
  8192. + queue->size = q_size;
  8193. + queue->read_idx = 0;
  8194. + queue->write_idx = 0;
  8195. +
  8196. + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
  8197. + queue->base, queue->size);
  8198. + }
  8199. +
  8200. + for (qno = 0; qno < client->rx_qn; qno++) {
  8201. + queue = &client->rx_q[qno];
  8202. + desc = queue->base;
  8203. +
  8204. + for (ii = 0; ii < queue->size; ii++) {
  8205. + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
  8206. + CL_DESC_OWN;
  8207. + desc++;
  8208. + }
  8209. + }
  8210. +
  8211. + return 0;
  8212. +
  8213. +err:
  8214. + return 1;
  8215. +}
  8216. +
  8217. +
  8218. +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
  8219. +{
  8220. + pr_debug("%s\n", __func__);
  8221. +
  8222. + /*
  8223. + * Check if there are any pending packets. Client must flush the tx
  8224. + * queues before unregistering, by calling
  8225. + * hif_lib_tx_get_next_complete()
  8226. + *
  8227. + * Hif no longer calls since we are no longer registered
  8228. + */
  8229. + if (queue->tx_pending)
  8230. + pr_err("%s: pending transmit packets\n", __func__);
  8231. +}
  8232. +
  8233. +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
  8234. +{
  8235. + int qno;
  8236. +
  8237. + pr_debug("%s\n", __func__);
  8238. +
  8239. + for (qno = 0; qno < client->tx_qn; qno++)
  8240. + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
  8241. +
  8242. + kfree(client->tx_qbase);
  8243. +}
  8244. +
  8245. +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
  8246. + q_size)
  8247. +{
  8248. + struct hif_client_tx_queue *queue;
  8249. + int qno;
  8250. +
  8251. + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
  8252. + tx_queue_desc), GFP_KERNEL);
  8253. + if (!client->tx_qbase)
  8254. + return 1;
  8255. +
  8256. + for (qno = 0; qno < client->tx_qn; qno++) {
  8257. + queue = &client->tx_q[qno];
  8258. +
  8259. + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
  8260. + tx_queue_desc);
  8261. + queue->size = q_size;
  8262. + queue->read_idx = 0;
  8263. + queue->write_idx = 0;
  8264. + queue->tx_pending = 0;
  8265. + queue->nocpy_flag = 0;
  8266. + queue->prev_tmu_tx_pkts = 0;
  8267. + queue->done_tmu_tx_pkts = 0;
  8268. +
  8269. + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
  8270. + queue->base, queue->size);
  8271. + }
  8272. +
  8273. + return 0;
  8274. +}
  8275. +
  8276. +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
  8277. +{
  8278. + return 0;
  8279. +}
  8280. +
  8281. +int hif_lib_client_register(struct hif_client_s *client)
  8282. +{
  8283. + struct hif_shm *hif_shm;
  8284. + struct hif_client_shm *client_shm;
  8285. + int err, i;
  8286. + /* int loop_cnt = 0; */
  8287. +
  8288. + pr_debug("%s\n", __func__);
  8289. +
  8290. + /*Allocate memory before spin_lock*/
  8291. + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
  8292. + err = -ENOMEM;
  8293. + goto err_rx;
  8294. + }
  8295. +
  8296. + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
  8297. + err = -ENOMEM;
  8298. + goto err_tx;
  8299. + }
  8300. +
  8301. + spin_lock_bh(&pfe->hif.lock);
  8302. + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
  8303. + (pfe->hif_client[client->id])) {
  8304. + err = -EINVAL;
  8305. + goto err;
  8306. + }
  8307. +
  8308. + hif_shm = client->pfe->hif.shm;
  8309. +
  8310. + if (!client->event_handler)
  8311. + client->event_handler = hif_lib_event_dummy;
  8312. +
  8313. + /*Initialize client specific shared memory */
  8314. + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
  8315. + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
  8316. + client_shm->rx_qsize = client->rx_qsize;
  8317. + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
  8318. + client_shm->tx_qsize = client->tx_qsize;
  8319. + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
  8320. + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
  8321. + /* spin_lock_init(&client->rx_lock); */
  8322. +
  8323. + for (i = 0; i < HIF_EVENT_MAX; i++) {
  8324. + client->queue_mask[i] = 0; /*
  8325. + * By default all events are
  8326. + * unmasked
  8327. + */
  8328. + }
  8329. +
  8330. + /*Indicate to HIF driver*/
  8331. + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
  8332. +
  8333. + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
  8334. + __func__, client, client->id, client->tx_qsize,
  8335. + client->rx_qsize);
  8336. +
  8337. + client->cpu_id = -1;
  8338. +
  8339. + pfe->hif_client[client->id] = client;
  8340. + spin_unlock_bh(&pfe->hif.lock);
  8341. +
  8342. + return 0;
  8343. +
  8344. +err:
  8345. + spin_unlock_bh(&pfe->hif.lock);
  8346. + hif_lib_client_release_tx_buffers(client);
  8347. +
  8348. +err_tx:
  8349. + hif_lib_client_release_rx_buffers(client);
  8350. +
  8351. +err_rx:
  8352. + return err;
  8353. +}
  8354. +
  8355. +int hif_lib_client_unregister(struct hif_client_s *client)
  8356. +{
  8357. + struct pfe *pfe = client->pfe;
  8358. + u32 client_id = client->id;
  8359. +
  8360. + pr_info(
  8361. + "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
  8362. + , __func__, client, client->id, client->tx_qsize,
  8363. + client->rx_qsize);
  8364. +
  8365. + spin_lock_bh(&pfe->hif.lock);
  8366. + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
  8367. +
  8368. + hif_lib_client_release_tx_buffers(client);
  8369. + hif_lib_client_release_rx_buffers(client);
  8370. + pfe->hif_client[client_id] = NULL;
  8371. + spin_unlock_bh(&pfe->hif.lock);
  8372. +
  8373. + return 0;
  8374. +}
  8375. +
  8376. +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
  8377. + int qno)
  8378. +{
  8379. + struct hif_client_rx_queue *queue = &client->rx_q[qno];
  8380. + struct rx_queue_desc *desc = queue->base + queue->read_idx;
  8381. +
  8382. + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
  8383. + pr_debug("%s: Unsupported event : %d queue number : %d\n",
  8384. + __func__, event, qno);
  8385. + return -1;
  8386. + }
  8387. +
  8388. + test_and_clear_bit(qno, &client->queue_mask[event]);
  8389. +
  8390. + switch (event) {
  8391. + case EVENT_RX_PKT_IND:
  8392. + if (!(desc->ctrl & CL_DESC_OWN))
  8393. + hif_lib_indicate_client(client->id,
  8394. + EVENT_RX_PKT_IND, qno);
  8395. + break;
  8396. +
  8397. + case EVENT_HIGH_RX_WM:
  8398. + case EVENT_TXDONE_IND:
  8399. + default:
  8400. + break;
  8401. + }
  8402. +
  8403. + return 0;
  8404. +}
  8405. +
  8406. +/*
  8407. + * This function gets one packet from the specified client queue
  8408. + * It also refill the rx buffer
  8409. + */
  8410. +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
  8411. + *ofst, unsigned int *rx_ctrl,
  8412. + unsigned int *desc_ctrl, void **priv_data)
  8413. +{
  8414. + struct hif_client_rx_queue *queue = &client->rx_q[qno];
  8415. + struct rx_queue_desc *desc;
  8416. + void *pkt = NULL;
  8417. +
  8418. + /*
  8419. + * Following lock is to protect rx queue access from,
  8420. + * hif_lib_event_handler_start.
  8421. + * In general below lock is not required, because hif_lib_xmit_pkt and
  8422. + * hif_lib_event_handler_start are called from napi poll and which is
  8423. + * not re-entrant. But if some client use in different way this lock is
  8424. + * required.
  8425. + */
  8426. + /*spin_lock_irqsave(&client->rx_lock, flags); */
  8427. + desc = queue->base + queue->read_idx;
  8428. + if (!(desc->ctrl & CL_DESC_OWN)) {
  8429. + pkt = desc->data - pfe_pkt_headroom;
  8430. +
  8431. + *rx_ctrl = desc->client_ctrl;
  8432. + *desc_ctrl = desc->ctrl;
  8433. +
  8434. + if (desc->ctrl & CL_DESC_FIRST) {
  8435. + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
  8436. +
  8437. + if (size) {
  8438. + size += PFE_PARSE_INFO_SIZE;
  8439. + *len = CL_DESC_BUF_LEN(desc->ctrl) -
  8440. + PFE_PKT_HEADER_SZ - size;
  8441. + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
  8442. + + size;
  8443. + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
  8444. + } else {
  8445. + *len = CL_DESC_BUF_LEN(desc->ctrl) -
  8446. + PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
  8447. + *ofst = pfe_pkt_headroom
  8448. + + PFE_PKT_HEADER_SZ
  8449. + + PFE_PARSE_INFO_SIZE;
  8450. + *priv_data = NULL;
  8451. + }
  8452. +
  8453. + } else {
  8454. + *len = CL_DESC_BUF_LEN(desc->ctrl);
  8455. + *ofst = pfe_pkt_headroom;
  8456. + }
  8457. +
  8458. + /*
  8459. + * Needed so we don't free a buffer/page
  8460. + * twice on module_exit
  8461. + */
  8462. + desc->data = NULL;
  8463. +
  8464. + /*
  8465. + * Ensure everything else is written to DDR before
  8466. + * writing bd->ctrl
  8467. + */
  8468. + smp_wmb();
  8469. +
  8470. + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
  8471. + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
  8472. + }
  8473. +
  8474. + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
  8475. + return pkt;
  8476. +}
  8477. +
  8478. +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
  8479. + client_id, unsigned int qno,
  8480. + u32 client_ctrl)
  8481. +{
  8482. + /* Optimize the write since the destination may be non-cacheable */
  8483. + if (!((unsigned long)pkt_hdr & 0x3)) {
  8484. + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
  8485. + client_id;
  8486. + } else {
  8487. + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
  8488. + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
  8489. + }
  8490. +}
  8491. +
  8492. +/*This function puts the given packet in the specific client queue */
  8493. +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
  8494. + *data, unsigned int len, u32 client_ctrl,
  8495. + unsigned int flags, void *client_data)
  8496. +{
  8497. + struct hif_client_tx_queue *queue = &client->tx_q[qno];
  8498. + struct tx_queue_desc *desc = queue->base + queue->write_idx;
  8499. +
  8500. + /* First buffer */
  8501. + if (flags & HIF_FIRST_BUFFER) {
  8502. + data -= sizeof(struct hif_hdr);
  8503. + len += sizeof(struct hif_hdr);
  8504. +
  8505. + hif_hdr_write(data, client->id, qno, client_ctrl);
  8506. + }
  8507. +
  8508. + desc->data = client_data;
  8509. + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
  8510. +
  8511. + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
  8512. +
  8513. + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
  8514. + queue->tx_pending++;
  8515. + queue->jiffies_last_packet = jiffies;
  8516. +}
  8517. +
  8518. +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
  8519. + unsigned int *flags, int count)
  8520. +{
  8521. + struct hif_client_tx_queue *queue = &client->tx_q[qno];
  8522. + struct tx_queue_desc *desc = queue->base + queue->read_idx;
  8523. +
  8524. + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
  8525. + queue->read_idx, queue->tx_pending);
  8526. +
  8527. + if (!queue->tx_pending)
  8528. + return NULL;
  8529. +
  8530. + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
  8531. + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
  8532. + client->id, TMU_DM_TX_TRANS, 4));
  8533. +
  8534. + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
  8535. + queue->done_tmu_tx_pkts = UINT_MAX -
  8536. + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
  8537. + else
  8538. + queue->done_tmu_tx_pkts = tmu_tx_pkts -
  8539. + queue->prev_tmu_tx_pkts;
  8540. +
  8541. + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
  8542. +
  8543. + if (!queue->done_tmu_tx_pkts)
  8544. + return NULL;
  8545. + }
  8546. +
  8547. + if (desc->ctrl & CL_DESC_OWN)
  8548. + return NULL;
  8549. +
  8550. + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
  8551. + queue->tx_pending--;
  8552. +
  8553. + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
  8554. +
  8555. + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
  8556. + queue->done_tmu_tx_pkts--;
  8557. +
  8558. + return desc->data;
  8559. +}
  8560. +
  8561. +static void hif_lib_tmu_credit_init(struct pfe *pfe)
  8562. +{
  8563. + int i, q;
  8564. +
  8565. + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
  8566. + for (q = 0; q < emac_txq_cnt; q++) {
  8567. + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
  8568. + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
  8569. + pfe->tmu_credit.tx_credit[i][q] =
  8570. + pfe->tmu_credit.tx_credit_max[i][q];
  8571. + }
  8572. +}
  8573. +
  8574. +/* __hif_lib_update_credit
  8575. + *
  8576. + * @param[in] client hif client context
  8577. + * @param[in] queue queue number in match with TMU
  8578. + */
  8579. +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
  8580. +{
  8581. + unsigned int tmu_tx_packets, tmp;
  8582. +
  8583. + if (tx_qos) {
  8584. + tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
  8585. + client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
  8586. +
  8587. + /* tx_packets counter overflowed */
  8588. + if (tmu_tx_packets >
  8589. + pfe->tmu_credit.tx_packets[client->id][queue]) {
  8590. + tmp = UINT_MAX - tmu_tx_packets +
  8591. + pfe->tmu_credit.tx_packets[client->id][queue];
  8592. +
  8593. + pfe->tmu_credit.tx_credit[client->id][queue] =
  8594. + pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
  8595. + } else {
  8596. + /* TMU tx <= pfe_eth tx, normal case or both OF since
  8597. + * last time
  8598. + */
  8599. + pfe->tmu_credit.tx_credit[client->id][queue] =
  8600. + pfe->tmu_credit.tx_credit_max[client->id][queue] -
  8601. + (pfe->tmu_credit.tx_packets[client->id][queue] -
  8602. + tmu_tx_packets);
  8603. + }
  8604. + }
  8605. +}
  8606. +
  8607. +int pfe_hif_lib_init(struct pfe *pfe)
  8608. +{
  8609. + int rc;
  8610. +
  8611. + pr_info("%s\n", __func__);
  8612. +
  8613. + if (lro_mode) {
  8614. + page_mode = 1;
  8615. + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
  8616. + pfe_pkt_headroom = 0;
  8617. + } else {
  8618. + page_mode = 0;
  8619. + pfe_pkt_size = PFE_PKT_SIZE;
  8620. + pfe_pkt_headroom = PFE_PKT_HEADROOM;
  8621. + }
  8622. +
  8623. + if (tx_qos)
  8624. + emac_txq_cnt = EMAC_TXQ_CNT / 2;
  8625. + else
  8626. + emac_txq_cnt = EMAC_TXQ_CNT;
  8627. +
  8628. + hif_lib_tmu_credit_init(pfe);
  8629. + pfe->hif.shm = &ghif_shm;
  8630. + rc = pfe_hif_shm_init(pfe->hif.shm);
  8631. +
  8632. + return rc;
  8633. +}
  8634. +
  8635. +void pfe_hif_lib_exit(struct pfe *pfe)
  8636. +{
  8637. + pr_info("%s\n", __func__);
  8638. +
  8639. + pfe_hif_shm_clean(pfe->hif.shm);
  8640. +}
  8641. --- /dev/null
  8642. +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
  8643. @@ -0,0 +1,241 @@
  8644. +/*
  8645. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  8646. + * Copyright 2017 NXP
  8647. + *
  8648. + * This program is free software; you can redistribute it and/or modify
  8649. + * it under the terms of the GNU General Public License as published by
  8650. + * the Free Software Foundation; either version 2 of the License, or
  8651. + * (at your option) any later version.
  8652. + *
  8653. + * This program is distributed in the hope that it will be useful,
  8654. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8655. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8656. + * GNU General Public License for more details.
  8657. + *
  8658. + * You should have received a copy of the GNU General Public License
  8659. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  8660. + */
  8661. +
  8662. +#ifndef _PFE_HIF_LIB_H_
  8663. +#define _PFE_HIF_LIB_H_
  8664. +
  8665. +#include "pfe_hif.h"
  8666. +
  8667. +#define HIF_CL_REQ_TIMEOUT 10
  8668. +#define GFP_DMA_PFE 0
  8669. +#define PFE_PARSE_INFO_SIZE 16
  8670. +
  8671. +enum {
  8672. + REQUEST_CL_REGISTER = 0,
  8673. + REQUEST_CL_UNREGISTER,
  8674. + HIF_REQUEST_MAX
  8675. +};
  8676. +
  8677. +enum {
  8678. + /* Event to indicate that client rx queue is reached water mark level */
  8679. + EVENT_HIGH_RX_WM = 0,
  8680. + /* Event to indicate that, packet received for client */
  8681. + EVENT_RX_PKT_IND,
  8682. + /* Event to indicate that, packet tx done for client */
  8683. + EVENT_TXDONE_IND,
  8684. + HIF_EVENT_MAX
  8685. +};
  8686. +
  8687. +/*structure to store client queue info */
  8688. +
  8689. +/*structure to store client queue info */
  8690. +struct hif_client_rx_queue {
  8691. + struct rx_queue_desc *base;
  8692. + u32 size;
  8693. + u32 read_idx;
  8694. + u32 write_idx;
  8695. +};
  8696. +
  8697. +struct hif_client_tx_queue {
  8698. + struct tx_queue_desc *base;
  8699. + u32 size;
  8700. + u32 read_idx;
  8701. + u32 write_idx;
  8702. + u32 tx_pending;
  8703. + unsigned long jiffies_last_packet;
  8704. + u32 nocpy_flag;
  8705. + u32 prev_tmu_tx_pkts;
  8706. + u32 done_tmu_tx_pkts;
  8707. +};
  8708. +
  8709. +struct hif_client_s {
  8710. + int id;
  8711. + int tx_qn;
  8712. + int rx_qn;
  8713. + void *rx_qbase;
  8714. + void *tx_qbase;
  8715. + int tx_qsize;
  8716. + int rx_qsize;
  8717. + int cpu_id;
  8718. + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
  8719. + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
  8720. + int (*event_handler)(void *priv, int event, int data);
  8721. + unsigned long queue_mask[HIF_EVENT_MAX];
  8722. + struct pfe *pfe;
  8723. + void *priv;
  8724. +};
  8725. +
  8726. +/*
  8727. + * Client specific shared memory
  8728. + * It contains number of Rx/Tx queues, base addresses and queue sizes
  8729. + */
  8730. +struct hif_client_shm {
  8731. + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
  8732. + unsigned long rx_qbase; /*Rx queue base address */
  8733. + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
  8734. + unsigned long tx_qbase; /* Tx queue base address */
  8735. + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
  8736. +};
  8737. +
  8738. +/*Client shared memory ctrl bit description */
  8739. +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
  8740. +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
  8741. +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
  8742. + & 0xFF)
  8743. +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
  8744. + & 0xFF)
  8745. +
  8746. +/*
  8747. + * Shared memory used to communicate between HIF driver and host/client drivers
  8748. + * Before starting the hif driver rx_buf_pool and rx_buf_pool_cnt should be
  8749. + * initialized with host buffers and buffers count in the pool.
  8750. + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
  8751. + *
  8752. + */
  8753. +struct hif_shm {
  8754. + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
  8755. + /*Rx buffers required to initialize HIF rx descriptors */
  8756. + void *rx_buf_pool[HIF_RX_DESC_NT];
  8757. + unsigned long g_client_status[2]; /*Global client status bit mask */
  8758. + /* Client specific shared memory */
  8759. + struct hif_client_shm client[HIF_CLIENTS_MAX];
  8760. +};
  8761. +
  8762. +#define CL_DESC_OWN BIT(31)
  8763. +/* This sets owner ship to HIF driver */
  8764. +#define CL_DESC_LAST BIT(30)
  8765. +/* This indicates last packet for multi buffers handling */
  8766. +#define CL_DESC_FIRST BIT(29)
  8767. +/* This indicates first packet for multi buffers handling */
  8768. +
  8769. +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
  8770. +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
  8771. +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
  8772. +
  8773. +struct rx_queue_desc {
  8774. + void *data;
  8775. + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
  8776. + u32 client_ctrl;
  8777. +};
  8778. +
  8779. +struct tx_queue_desc {
  8780. + void *data;
  8781. + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
  8782. +};
  8783. +
  8784. +/* HIF Rx is not working properly for 2-byte aligned buffers and
  8785. + * ip_header should be 4byte aligned for better performance.
  8786. + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4byte aligned.
  8787. + */
  8788. +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
  8789. +/* must be big enough for headroom, pkt size and skb shared info */
  8790. +#define PFE_BUF_SIZE 2048
  8791. +#define PFE_PKT_HEADROOM 128
  8792. +
  8793. +#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
  8794. +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
  8795. + - SKB_SHARED_INFO_SIZE)
  8796. +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
  8797. +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
  8798. +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
  8799. +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
  8800. + + MAX_L4_HDR_SIZE)
  8801. +/* Used in page mode to clamp packet size to the maximum supported by the hif
  8802. + *hw interface (<16KiB)
  8803. + */
  8804. +#define MAX_PFE_PKT_SIZE 16380UL
  8805. +
  8806. +extern unsigned int pfe_pkt_size;
  8807. +extern unsigned int pfe_pkt_headroom;
  8808. +extern unsigned int page_mode;
  8809. +extern unsigned int lro_mode;
  8810. +extern unsigned int tx_qos;
  8811. +extern unsigned int emac_txq_cnt;
  8812. +
  8813. +int pfe_hif_lib_init(struct pfe *pfe);
  8814. +void pfe_hif_lib_exit(struct pfe *pfe);
  8815. +int hif_lib_client_register(struct hif_client_s *client);
  8816. +int hif_lib_client_unregister(struct hif_client_s *client);
  8817. +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
  8818. + *data, unsigned int len, u32 client_ctrl,
  8819. + unsigned int flags, void *client_data);
  8820. +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
  8821. + unsigned int len, u32 client_ctrl, void *client_data);
  8822. +void hif_lib_indicate_client(int cl_id, int event, int data);
  8823. +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
  8824. + data);
  8825. +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
  8826. +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
  8827. +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
  8828. + unsigned int *flags, int count);
  8829. +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
  8830. + *ofst, unsigned int *rx_ctrl,
  8831. + unsigned int *desc_ctrl, void **priv_data);
  8832. +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
  8833. +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
  8834. +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
  8835. + enable);
  8836. +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
  8837. + qno)
  8838. +{
  8839. + struct hif_client_tx_queue *queue = &client->tx_q[qno];
  8840. +
  8841. + return (queue->size - queue->tx_pending);
  8842. +}
  8843. +
  8844. +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
  8845. + int qno)
  8846. +{
  8847. + struct hif_client_tx_queue *queue = &client->tx_q[qno];
  8848. +
  8849. + return queue->write_idx;
  8850. +}
  8851. +
  8852. +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
  8853. + qno)
  8854. +{
  8855. + struct hif_client_tx_queue *queue = &client->tx_q[qno];
  8856. +
  8857. + return queue->tx_pending;
  8858. +}
  8859. +
  8860. +#define hif_lib_tx_credit_avail(pfe, id, qno) \
  8861. + ((pfe)->tmu_credit.tx_credit[id][qno])
  8862. +
  8863. +#define hif_lib_tx_credit_max(pfe, id, qno) \
  8864. + ((pfe)->tmu_credit.tx_credit_max[id][qno])
  8865. +
  8866. +/*
  8867. + * Consume TMU tx credits: decrement available credit, count tx packets.
  8868. + */
  8869. +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
  8870. + ({ typeof(pfe) pfe_ = pfe; \
  8871. + typeof(id) id_ = id; \
  8872. + typeof(qno) qno_ = qno; \
  8873. + typeof(credit) credit_ = credit; \
  8874. + do { \
  8875. + if (tx_qos) { \
  8876. + (pfe_)->tmu_credit.tx_credit[id_][qno_]\
  8877. + -= credit_; \
  8878. + (pfe_)->tmu_credit.tx_packets[id_][qno_]\
  8879. + += credit_; \
  8880. + } \
  8881. + } while (0); \
  8882. + })
  8883. +
  8884. +#endif /* _PFE_HIF_LIB_H_ */
  8885. --- /dev/null
  8886. +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
  8887. @@ -0,0 +1,176 @@
  8888. +/*
  8889. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  8890. + * Copyright 2017 NXP
  8891. + *
  8892. + * This program is free software; you can redistribute it and/or modify
  8893. + * it under the terms of the GNU General Public License as published by
  8894. + * the Free Software Foundation; either version 2 of the License, or
  8895. + * (at your option) any later version.
  8896. + *
  8897. + * This program is distributed in the hope that it will be useful,
  8898. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8899. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8900. + * GNU General Public License for more details.
  8901. + *
  8902. + * You should have received a copy of the GNU General Public License
  8903. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  8904. + */
  8905. +
  8906. +#include "pfe_mod.h"
  8907. +#include "pfe_hw.h"
  8908. +
  8909. +/* Functions to handle most of pfe hw register initialization */
  8910. +int pfe_hw_init(struct pfe *pfe, int resume)
  8911. +{
  8912. + struct class_cfg class_cfg = {
  8913. + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
  8914. + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
  8915. + ROUTE_TABLE_BASEADDR,
  8916. + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
  8917. + };
  8918. +
  8919. + struct tmu_cfg tmu_cfg = {
  8920. + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
  8921. + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
  8922. + .llm_queue_len = TMU_LLM_QUEUE_LEN,
  8923. + };
  8924. +
  8925. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  8926. + struct util_cfg util_cfg = {
  8927. + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
  8928. + };
  8929. +#endif
  8930. +
  8931. + struct BMU_CFG bmu1_cfg = {
  8932. + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
  8933. + BMU1_LMEM_BASEADDR),
  8934. + .count = BMU1_BUF_COUNT,
  8935. + .size = BMU1_BUF_SIZE,
  8936. + .low_watermark = 10,
  8937. + .high_watermark = 15,
  8938. + };
  8939. +
  8940. + struct BMU_CFG bmu2_cfg = {
  8941. + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
  8942. + BMU2_DDR_BASEADDR),
  8943. + .count = BMU2_BUF_COUNT,
  8944. + .size = BMU2_BUF_SIZE,
  8945. + .low_watermark = 250,
  8946. + .high_watermark = 253,
  8947. + };
  8948. +
  8949. + struct gpi_cfg egpi1_cfg = {
  8950. + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
  8951. + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
  8952. + .aseq_len = EGPI1_ASEQ_LEN,
  8953. + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
  8954. + EMAC_TCNTRL_REG),
  8955. + };
  8956. +
  8957. + struct gpi_cfg egpi2_cfg = {
  8958. + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
  8959. + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
  8960. + .aseq_len = EGPI2_ASEQ_LEN,
  8961. + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
  8962. + EMAC_TCNTRL_REG),
  8963. + };
  8964. +
  8965. + struct gpi_cfg hgpi_cfg = {
  8966. + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
  8967. + .tmlf_txthres = HGPI_TMLF_TXTHRES,
  8968. + .aseq_len = HGPI_ASEQ_LEN,
  8969. + .mtip_pause_reg = 0,
  8970. + };
  8971. +
  8972. + pr_info("%s\n", __func__);
  8973. +
  8974. +#if !defined(LS1012A_PFE_RESET_WA)
  8975. + /* LS1012A needs this to make PE work correctly */
  8976. + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
  8977. + writel(0x3, TMU_PE_SYS_CLK_RATIO);
  8978. + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
  8979. + usleep_range(10, 20);
  8980. +#endif
  8981. +
  8982. + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
  8983. + pr_info("TMU version: %x\n", readl(TMU_VERSION));
  8984. +
  8985. + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
  8986. + BMU_VERSION));
  8987. + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
  8988. + BMU_VERSION));
  8989. +
  8990. + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
  8991. + GPI_VERSION));
  8992. + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
  8993. + GPI_VERSION));
  8994. + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
  8995. + GPI_VERSION));
  8996. +
  8997. + pr_info("HIF version: %x\n", readl(HIF_VERSION));
  8998. + pr_info("HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
  8999. +
  9000. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  9001. + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
  9002. +#endif
  9003. + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
  9004. + ;
  9005. +
  9006. + hif_rx_disable();
  9007. + hif_tx_disable();
  9008. +
  9009. + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
  9010. +
  9011. + pr_info("bmu_init(1) done\n");
  9012. +
  9013. + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
  9014. +
  9015. + pr_info("bmu_init(2) done\n");
  9016. +
  9017. + class_cfg.resume = resume ? 1 : 0;
  9018. +
  9019. + class_init(&class_cfg);
  9020. +
  9021. + pr_info("class_init() done\n");
  9022. +
  9023. + tmu_init(&tmu_cfg);
  9024. +
  9025. + pr_info("tmu_init() done\n");
  9026. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  9027. + util_init(&util_cfg);
  9028. +
  9029. + pr_info("util_init() done\n");
  9030. +#endif
  9031. + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
  9032. +
  9033. + pr_info("gpi_init(1) done\n");
  9034. +
  9035. + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
  9036. +
  9037. + pr_info("gpi_init(2) done\n");
  9038. +
  9039. + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
  9040. +
  9041. + pr_info("gpi_init(hif) done\n");
  9042. +
  9043. + bmu_enable(BMU1_BASE_ADDR);
  9044. +
  9045. + pr_info("bmu_enable(1) done\n");
  9046. +
  9047. + bmu_enable(BMU2_BASE_ADDR);
  9048. +
  9049. + pr_info("bmu_enable(2) done\n");
  9050. +
  9051. + return 0;
  9052. +}
  9053. +
  9054. +void pfe_hw_exit(struct pfe *pfe)
  9055. +{
  9056. + pr_info("%s\n", __func__);
  9057. +
  9058. + bmu_disable(BMU1_BASE_ADDR);
  9059. + bmu_reset(BMU1_BASE_ADDR);
  9060. +
  9061. + bmu_disable(BMU2_BASE_ADDR);
  9062. + bmu_reset(BMU2_BASE_ADDR);
  9063. +}
  9064. --- /dev/null
  9065. +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
  9066. @@ -0,0 +1,27 @@
  9067. +/*
  9068. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9069. + * Copyright 2017 NXP
  9070. + *
  9071. + * This program is free software; you can redistribute it and/or modify
  9072. + * it under the terms of the GNU General Public License as published by
  9073. + * the Free Software Foundation; either version 2 of the License, or
  9074. + * (at your option) any later version.
  9075. + *
  9076. + * This program is distributed in the hope that it will be useful,
  9077. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9078. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9079. + * GNU General Public License for more details.
  9080. + *
  9081. + * You should have received a copy of the GNU General Public License
  9082. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9083. + */
  9084. +
  9085. +#ifndef _PFE_HW_H_
  9086. +#define _PFE_HW_H_
  9087. +
  9088. +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
  9089. +
  9090. +int pfe_hw_init(struct pfe *pfe, int resume);
  9091. +void pfe_hw_exit(struct pfe *pfe);
  9092. +
  9093. +#endif /* _PFE_HW_H_ */
  9094. --- /dev/null
  9095. +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
  9096. @@ -0,0 +1,385 @@
  9097. +/*
  9098. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9099. + * Copyright 2017 NXP
  9100. + *
  9101. + * This program is free software; you can redistribute it and/or modify
  9102. + * it under the terms of the GNU General Public License as published by
  9103. + * the Free Software Foundation; either version 2 of the License, or
  9104. + * (at your option) any later version.
  9105. + *
  9106. + * This program is distributed in the hope that it will be useful,
  9107. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9108. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9109. + * GNU General Public License for more details.
  9110. + *
  9111. + * You should have received a copy of the GNU General Public License
  9112. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9113. + */
  9114. +
  9115. +#include <linux/module.h>
  9116. +#include <linux/device.h>
  9117. +#include <linux/of_net.h>
  9118. +#include <linux/of_address.h>
  9119. +#include <linux/platform_device.h>
  9120. +#include <linux/slab.h>
  9121. +#include <linux/clk.h>
  9122. +#include <linux/mfd/syscon.h>
  9123. +#include <linux/regmap.h>
  9124. +
  9125. +#include "pfe_mod.h"
  9126. +
  9127. +struct ls1012a_pfe_platform_data pfe_platform_data;
  9128. +
  9129. +static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int
  9130. + if_cnt,
  9131. + struct ls1012a_pfe_platform_data
  9132. + *pdata)
  9133. +{
  9134. + struct device_node *gem = NULL, *phy = NULL;
  9135. + int size;
  9136. + int ii = 0, phy_id = 0;
  9137. + const u32 *addr;
  9138. + const void *mac_addr;
  9139. +
  9140. + for (ii = 0; ii < if_cnt; ii++) {
  9141. + gem = of_get_next_child(parent, gem);
  9142. + if (!gem)
  9143. + goto err;
  9144. + addr = of_get_property(gem, "reg", &size);
  9145. + if (addr && (be32_to_cpup(addr) == port))
  9146. + break;
  9147. + }
  9148. +
  9149. + if (ii >= if_cnt) {
  9150. + pr_err("%s:%d Failed to find interface = %d\n",
  9151. + __func__, __LINE__, if_cnt);
  9152. + goto err;
  9153. + }
  9154. +
  9155. + pdata->ls1012a_eth_pdata[port].gem_id = port;
  9156. +
  9157. + mac_addr = of_get_mac_address(gem);
  9158. +
  9159. + if (mac_addr) {
  9160. + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
  9161. + ETH_ALEN);
  9162. + }
  9163. +
  9164. + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
  9165. +
  9166. + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
  9167. + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
  9168. + __LINE__);
  9169. +
  9170. + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
  9171. + if (!addr)
  9172. + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
  9173. + __LINE__);
  9174. + else
  9175. + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
  9176. +
  9177. + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
  9178. + if (!addr) {
  9179. + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
  9180. + __LINE__);
  9181. + } else {
  9182. + phy_id = be32_to_cpup(addr);
  9183. + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
  9184. + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
  9185. + }
  9186. +
  9187. + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
  9188. + if (!addr)
  9189. + pr_err("%s: Invalid mdio-mux-val....\n", __func__);
  9190. + else
  9191. + phy_id = be32_to_cpup(addr);
  9192. + pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
  9193. +
  9194. + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
  9195. + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
  9196. + pdata->ls1012a_eth_pdata[port].mdio_muxval;
  9197. +
  9198. + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
  9199. + if (!addr)
  9200. + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
  9201. + __func__, __LINE__);
  9202. + else
  9203. + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
  9204. +
  9205. + /* If PHY is enabled, read mdio properties */
  9206. + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
  9207. + goto done;
  9208. +
  9209. + phy = of_get_next_child(gem, NULL);
  9210. +
  9211. + addr = of_get_property(phy, "reg", &size);
  9212. +
  9213. + if (!addr)
  9214. + pr_err("%s:%d Invalid phy enable flag....\n",
  9215. + __func__, __LINE__);
  9216. + else
  9217. + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
  9218. +
  9219. + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
  9220. +
  9221. +done:
  9222. +
  9223. + return 0;
  9224. +
  9225. +err:
  9226. + return -1;
  9227. +}
  9228. +
  9229. +/*
  9230. + *
  9231. + * pfe_platform_probe -
  9232. + *
  9233. + *
  9234. + */
  9235. +static int pfe_platform_probe(struct platform_device *pdev)
  9236. +{
  9237. + struct resource res;
  9238. + int ii, rc, interface_count = 0, size = 0;
  9239. + const u32 *prop;
  9240. + struct device_node *np;
  9241. + struct clk *pfe_clk;
  9242. +
  9243. + np = pdev->dev.of_node;
  9244. +
  9245. + if (!np) {
  9246. + pr_err("Invalid device node\n");
  9247. + return -EINVAL;
  9248. + }
  9249. +
  9250. + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
  9251. + if (!pfe) {
  9252. + rc = -ENOMEM;
  9253. + goto err_alloc;
  9254. + }
  9255. +
  9256. + platform_set_drvdata(pdev, pfe);
  9257. +
  9258. + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  9259. +
  9260. + if (of_address_to_resource(np, 1, &res)) {
  9261. + rc = -ENOMEM;
  9262. + pr_err("failed to get ddr resource\n");
  9263. + goto err_ddr;
  9264. + }
  9265. +
  9266. + pfe->ddr_phys_baseaddr = res.start;
  9267. + pfe->ddr_size = resource_size(&res);
  9268. + pfe->ddr_baseaddr = phys_to_virt(res.start);
  9269. +
  9270. + pfe->scfg =
  9271. + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
  9272. + "fsl,pfe-scfg");
  9273. + if (IS_ERR(pfe->scfg)) {
  9274. + dev_err(&pdev->dev, "No syscfg phandle specified\n");
  9275. + return PTR_ERR(pfe->scfg);
  9276. + }
  9277. +
  9278. + pfe->cbus_baseaddr = of_iomap(np, 0);
  9279. + if (!pfe->cbus_baseaddr) {
  9280. + rc = -ENOMEM;
  9281. + pr_err("failed to get axi resource\n");
  9282. + goto err_axi;
  9283. + }
  9284. +
  9285. + pfe->hif_irq = platform_get_irq(pdev, 0);
  9286. + if (pfe->hif_irq < 0) {
  9287. + pr_err("platform_get_irq for hif failed\n");
  9288. + rc = pfe->hif_irq;
  9289. + goto err_hif_irq;
  9290. + }
  9291. +
  9292. + pfe->wol_irq = platform_get_irq(pdev, 2);
  9293. + if (pfe->wol_irq < 0) {
  9294. + pr_err("platform_get_irq for WoL failed\n");
  9295. + rc = pfe->wol_irq;
  9296. + goto err_hif_irq;
  9297. + }
  9298. +
  9299. + /* Read interface count */
  9300. + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
  9301. + if (!prop) {
  9302. + pr_err("Failed to read number of interfaces\n");
  9303. + rc = -ENXIO;
  9304. + goto err_prop;
  9305. + }
  9306. +
  9307. + interface_count = be32_to_cpup(prop);
  9308. + if (interface_count <= 0) {
  9309. + pr_err("No ethernet interface count : %d\n",
  9310. + interface_count);
  9311. + rc = -ENXIO;
  9312. + goto err_prop;
  9313. + }
  9314. +
  9315. + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
  9316. +
  9317. + for (ii = 0; ii < interface_count; ii++) {
  9318. + pfe_get_gemac_if_proprties(np, ii, interface_count,
  9319. + &pfe_platform_data);
  9320. + }
  9321. +
  9322. + pfe->dev = &pdev->dev;
  9323. +
  9324. + pfe->dev->platform_data = &pfe_platform_data;
  9325. +
  9326. + /* declare WoL capabilities */
  9327. + device_init_wakeup(&pdev->dev, true);
  9328. +
  9329. + /* find the clocks */
  9330. + pfe_clk = devm_clk_get(pfe->dev, "pfe");
  9331. + if (IS_ERR(pfe_clk))
  9332. + return PTR_ERR(pfe_clk);
  9333. +
  9334. + /* PFE clock is (platform clock / 2) */
  9335. + /* save sys_clk value as KHz */
  9336. + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
  9337. +
  9338. + rc = pfe_probe(pfe);
  9339. + if (rc < 0)
  9340. + goto err_probe;
  9341. +
  9342. + return 0;
  9343. +
  9344. +err_probe:
  9345. +err_prop:
  9346. +err_hif_irq:
  9347. + iounmap(pfe->cbus_baseaddr);
  9348. +
  9349. +err_axi:
  9350. +err_ddr:
  9351. + platform_set_drvdata(pdev, NULL);
  9352. +
  9353. + kfree(pfe);
  9354. +
  9355. +err_alloc:
  9356. + return rc;
  9357. +}
  9358. +
  9359. +/*
  9360. + * pfe_platform_remove -
  9361. + */
  9362. +static int pfe_platform_remove(struct platform_device *pdev)
  9363. +{
  9364. + struct pfe *pfe = platform_get_drvdata(pdev);
  9365. + int rc;
  9366. +
  9367. + pr_info("%s\n", __func__);
  9368. +
  9369. + rc = pfe_remove(pfe);
  9370. +
  9371. + iounmap(pfe->cbus_baseaddr);
  9372. +
  9373. + platform_set_drvdata(pdev, NULL);
  9374. +
  9375. + kfree(pfe);
  9376. +
  9377. + return rc;
  9378. +}
  9379. +
  9380. +#ifdef CONFIG_PM
  9381. +#ifdef CONFIG_PM_SLEEP
  9382. +static int pfe_platform_suspend(struct device *dev)
  9383. +{
  9384. + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
  9385. + struct net_device *netdev;
  9386. + int i;
  9387. +
  9388. + pfe->wake = 0;
  9389. +
  9390. + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
  9391. + netdev = pfe->eth.eth_priv[i]->ndev;
  9392. +
  9393. + netif_device_detach(netdev);
  9394. +
  9395. + if (netif_running(netdev))
  9396. + if (pfe_eth_suspend(netdev))
  9397. + pfe->wake = 1;
  9398. + }
  9399. +
  9400. + /* Shutdown PFE only if we're not waking up the system */
  9401. + if (!pfe->wake) {
  9402. +#if defined(LS1012A_PFE_RESET_WA)
  9403. + pfe_hif_rx_idle(&pfe->hif);
  9404. +#endif
  9405. + pfe_ctrl_suspend(&pfe->ctrl);
  9406. + pfe_firmware_exit(pfe);
  9407. +
  9408. + pfe_hif_exit(pfe);
  9409. + pfe_hif_lib_exit(pfe);
  9410. +
  9411. + pfe_hw_exit(pfe);
  9412. + }
  9413. +
  9414. + return 0;
  9415. +}
  9416. +
  9417. +static int pfe_platform_resume(struct device *dev)
  9418. +{
  9419. + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
  9420. + struct net_device *netdev;
  9421. + int i;
  9422. +
  9423. + if (!pfe->wake) {
  9424. + pfe_hw_init(pfe, 1);
  9425. + pfe_hif_lib_init(pfe);
  9426. + pfe_hif_init(pfe);
  9427. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  9428. + util_enable();
  9429. +#endif
  9430. + tmu_enable(0xf);
  9431. + class_enable();
  9432. + pfe_ctrl_resume(&pfe->ctrl);
  9433. + }
  9434. +
  9435. + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
  9436. + netdev = pfe->eth.eth_priv[i]->ndev;
  9437. +
  9438. + if (pfe->eth.eth_priv[i]->mii_bus)
  9439. + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
  9440. +
  9441. + if (netif_running(netdev))
  9442. + pfe_eth_resume(netdev);
  9443. +
  9444. + netif_device_attach(netdev);
  9445. + }
  9446. + return 0;
  9447. +}
  9448. +#else
  9449. +#define pfe_platform_suspend NULL
  9450. +#define pfe_platform_resume NULL
  9451. +#endif
  9452. +
  9453. +static const struct dev_pm_ops pfe_platform_pm_ops = {
  9454. + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
  9455. +};
  9456. +#endif
  9457. +
  9458. +static const struct of_device_id pfe_match[] = {
  9459. + {
  9460. + .compatible = "fsl,pfe",
  9461. + },
  9462. + {},
  9463. +};
  9464. +MODULE_DEVICE_TABLE(of, pfe_match);
  9465. +
  9466. +static struct platform_driver pfe_platform_driver = {
  9467. + .probe = pfe_platform_probe,
  9468. + .remove = pfe_platform_remove,
  9469. + .driver = {
  9470. + .name = "pfe",
  9471. + .of_match_table = pfe_match,
  9472. +#ifdef CONFIG_PM
  9473. + .pm = &pfe_platform_pm_ops,
  9474. +#endif
  9475. + },
  9476. +};
  9477. +
  9478. +module_platform_driver(pfe_platform_driver);
  9479. +MODULE_LICENSE("GPL");
  9480. +MODULE_DESCRIPTION("PFE Ethernet driver");
  9481. +MODULE_AUTHOR("NXP DNCPE");
  9482. --- /dev/null
  9483. +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
  9484. @@ -0,0 +1,156 @@
  9485. +/*
  9486. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9487. + * Copyright 2017 NXP
  9488. + *
  9489. + * This program is free software; you can redistribute it and/or modify
  9490. + * it under the terms of the GNU General Public License as published by
  9491. + * the Free Software Foundation; either version 2 of the License, or
  9492. + * (at your option) any later version.
  9493. + *
  9494. + * This program is distributed in the hope that it will be useful,
  9495. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9496. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9497. + * GNU General Public License for more details.
  9498. + *
  9499. + * You should have received a copy of the GNU General Public License
  9500. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9501. + */
  9502. +
  9503. +#include <linux/dma-mapping.h>
  9504. +#include "pfe_mod.h"
  9505. +
  9506. +unsigned int us;
  9507. +module_param(us, uint, 0444);
  9508. +MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
  9509. + "1: module enabled for userspace networking\n");
  9510. +struct pfe *pfe;
  9511. +
  9512. +/*
  9513. + * pfe_probe -
  9514. + */
  9515. +int pfe_probe(struct pfe *pfe)
  9516. +{
  9517. + int rc;
  9518. +
  9519. + if (pfe->ddr_size < DDR_MAX_SIZE) {
  9520. + pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
  9521. + __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
  9522. + rc = -ENOMEM;
  9523. + goto err_hw;
  9524. + }
  9525. +
  9526. + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
  9527. + (8 * SZ_1M - 1)) != 0) {
  9528. + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
  9529. + __func__, (int)pfe->ddr_phys_baseaddr +
  9530. + BMU2_DDR_BASEADDR);
  9531. + rc = -ENOMEM;
  9532. + goto err_hw;
  9533. + }
  9534. +
  9535. + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
  9536. + (unsigned long)pfe->cbus_baseaddr,
  9537. + (unsigned long)pfe->ddr_baseaddr,
  9538. + pfe->ddr_phys_baseaddr, pfe->ddr_size);
  9539. +
  9540. + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
  9541. + pfe->ddr_phys_baseaddr, pfe->ddr_size);
  9542. +
  9543. + rc = pfe_hw_init(pfe, 0);
  9544. + if (rc < 0)
  9545. + goto err_hw;
  9546. +
  9547. + if (us)
  9548. + goto firmware_init;
  9549. +
  9550. + rc = pfe_hif_lib_init(pfe);
  9551. + if (rc < 0)
  9552. + goto err_hif_lib;
  9553. +
  9554. + rc = pfe_hif_init(pfe);
  9555. + if (rc < 0)
  9556. + goto err_hif;
  9557. +
  9558. +firmware_init:
  9559. + rc = pfe_firmware_init(pfe);
  9560. + if (rc < 0)
  9561. + goto err_firmware;
  9562. +
  9563. + rc = pfe_ctrl_init(pfe);
  9564. + if (rc < 0)
  9565. + goto err_ctrl;
  9566. +
  9567. + rc = pfe_eth_init(pfe);
  9568. + if (rc < 0)
  9569. + goto err_eth;
  9570. +
  9571. + rc = pfe_sysfs_init(pfe);
  9572. + if (rc < 0)
  9573. + goto err_sysfs;
  9574. +
  9575. + rc = pfe_debugfs_init(pfe);
  9576. + if (rc < 0)
  9577. + goto err_debugfs;
  9578. +
  9579. + return 0;
  9580. +
  9581. +err_debugfs:
  9582. + pfe_sysfs_exit(pfe);
  9583. +
  9584. +err_sysfs:
  9585. + pfe_eth_exit(pfe);
  9586. +
  9587. +err_eth:
  9588. + pfe_ctrl_exit(pfe);
  9589. +
  9590. +err_ctrl:
  9591. + pfe_firmware_exit(pfe);
  9592. +
  9593. +err_firmware:
  9594. + if (us)
  9595. + goto err_hif_lib;
  9596. +
  9597. + pfe_hif_exit(pfe);
  9598. +
  9599. +err_hif:
  9600. + pfe_hif_lib_exit(pfe);
  9601. +
  9602. +err_hif_lib:
  9603. + pfe_hw_exit(pfe);
  9604. +
  9605. +err_hw:
  9606. + return rc;
  9607. +}
  9608. +
  9609. +/*
  9610. + * pfe_remove -
  9611. + */
  9612. +int pfe_remove(struct pfe *pfe)
  9613. +{
  9614. + pr_info("%s\n", __func__);
  9615. +
  9616. + pfe_debugfs_exit(pfe);
  9617. +
  9618. + pfe_sysfs_exit(pfe);
  9619. +
  9620. + pfe_eth_exit(pfe);
  9621. +
  9622. + pfe_ctrl_exit(pfe);
  9623. +
  9624. +#if defined(LS1012A_PFE_RESET_WA)
  9625. + pfe_hif_rx_idle(&pfe->hif);
  9626. +#endif
  9627. + pfe_firmware_exit(pfe);
  9628. +
  9629. + if (us)
  9630. + goto hw_exit;
  9631. +
  9632. + pfe_hif_exit(pfe);
  9633. +
  9634. + pfe_hif_lib_exit(pfe);
  9635. +
  9636. +hw_exit:
  9637. + pfe_hw_exit(pfe);
  9638. +
  9639. + return 0;
  9640. +}
  9641. --- /dev/null
  9642. +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
  9643. @@ -0,0 +1,114 @@
  9644. +/*
  9645. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9646. + * Copyright 2017 NXP
  9647. + *
  9648. + * This program is free software; you can redistribute it and/or modify
  9649. + * it under the terms of the GNU General Public License as published by
  9650. + * the Free Software Foundation; either version 2 of the License, or
  9651. + * (at your option) any later version.
  9652. + *
  9653. + * This program is distributed in the hope that it will be useful,
  9654. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9655. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9656. + * GNU General Public License for more details.
  9657. + *
  9658. + * You should have received a copy of the GNU General Public License
  9659. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9660. + */
  9661. +
  9662. +#ifndef _PFE_MOD_H_
  9663. +#define _PFE_MOD_H_
  9664. +
  9665. +#include <linux/device.h>
  9666. +#include <linux/elf.h>
  9667. +
  9668. +extern unsigned int us;
  9669. +
  9670. +struct pfe;
  9671. +
  9672. +#include "pfe_hw.h"
  9673. +#include "pfe_firmware.h"
  9674. +#include "pfe_ctrl.h"
  9675. +#include "pfe_hif.h"
  9676. +#include "pfe_hif_lib.h"
  9677. +#include "pfe_eth.h"
  9678. +#include "pfe_sysfs.h"
  9679. +#include "pfe_perfmon.h"
  9680. +#include "pfe_debugfs.h"
  9681. +
  9682. +#define PHYID_MAX_VAL 32
  9683. +
  9684. +struct pfe_tmu_credit {
  9685. + /* Number of allowed TX packet in-flight, matches TMU queue size */
  9686. + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
  9687. + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
  9688. + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
  9689. +};
  9690. +
  9691. +struct pfe {
  9692. + struct regmap *scfg;
  9693. + unsigned long ddr_phys_baseaddr;
  9694. + void *ddr_baseaddr;
  9695. + unsigned int ddr_size;
  9696. + void *cbus_baseaddr;
  9697. + void *apb_baseaddr;
  9698. + unsigned long iram_phys_baseaddr;
  9699. + void *iram_baseaddr;
  9700. + unsigned long ipsec_phys_baseaddr;
  9701. + void *ipsec_baseaddr;
  9702. + int hif_irq;
  9703. + int wol_irq;
  9704. + int hif_client_irq;
  9705. + struct device *dev;
  9706. + struct dentry *dentry;
  9707. + struct pfe_ctrl ctrl;
  9708. + struct pfe_hif hif;
  9709. + struct pfe_eth eth;
  9710. + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
  9711. +#if defined(CFG_DIAGS)
  9712. + struct pfe_diags diags;
  9713. +#endif
  9714. + struct pfe_tmu_credit tmu_credit;
  9715. + struct pfe_cpumon cpumon;
  9716. + struct pfe_memmon memmon;
  9717. + int wake;
  9718. + int mdio_muxval[PHYID_MAX_VAL];
  9719. + struct clk *hfe_clock;
  9720. +};
  9721. +
  9722. +extern struct pfe *pfe;
  9723. +
  9724. +int pfe_probe(struct pfe *pfe);
  9725. +int pfe_remove(struct pfe *pfe);
  9726. +
  9727. +/* DDR Mapping in reserved memory*/
  9728. +#define ROUTE_TABLE_BASEADDR 0
  9729. +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
  9730. +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
  9731. + * CLASS_ROUTE_SIZE)
  9732. +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
  9733. +#define BMU2_BUF_COUNT (4096 - 256)
  9734. +/* This is to get a total DDR size of 12MiB */
  9735. +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
  9736. +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
  9737. +#define UTIL_CODE_SIZE (128 * SZ_1K)
  9738. +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
  9739. +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
  9740. +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
  9741. +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
  9742. +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
  9743. +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
  9744. +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
  9745. +#define TMU_LLM_QUEUE_LEN (8 * 512)
  9746. +/* Must be power of two and at least 16 * 8 = 128 bytes */
  9747. +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
  9748. +/* (4 TMU's x 16 queues x queue_len) */
  9749. +
  9750. +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
  9751. +
  9752. +/* LMEM Mapping */
  9753. +#define BMU1_LMEM_BASEADDR 0
  9754. +#define BMU1_BUF_COUNT 256
  9755. +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
  9756. +
  9757. +#endif /* _PFE_MOD_H_ */
  9758. --- /dev/null
  9759. +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
  9760. @@ -0,0 +1,38 @@
  9761. +/*
  9762. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9763. + * Copyright 2017 NXP
  9764. + *
  9765. + * This program is free software; you can redistribute it and/or modify
  9766. + * it under the terms of the GNU General Public License as published by
  9767. + * the Free Software Foundation; either version 2 of the License, or
  9768. + * (at your option) any later version.
  9769. + *
  9770. + * This program is distributed in the hope that it will be useful,
  9771. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9772. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9773. + * GNU General Public License for more details.
  9774. + *
  9775. + * You should have received a copy of the GNU General Public License
  9776. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9777. + */
  9778. +
  9779. +#ifndef _PFE_PERFMON_H_
  9780. +#define _PFE_PERFMON_H_
  9781. +
  9782. +#include "pfe/pfe.h"
  9783. +
  9784. +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
  9785. +
  9786. +struct pfe_cpumon {
  9787. + u32 cpu_usage_pct[MAX_PE];
  9788. + u32 class_usage_pct;
  9789. +};
  9790. +
  9791. +struct pfe_memmon {
  9792. + u32 kernel_memory_allocated;
  9793. +};
  9794. +
  9795. +int pfe_perfmon_init(struct pfe *pfe);
  9796. +void pfe_perfmon_exit(struct pfe *pfe);
  9797. +
  9798. +#endif /* _PFE_PERFMON_H_ */
  9799. --- /dev/null
  9800. +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
  9801. @@ -0,0 +1,818 @@
  9802. +/*
  9803. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  9804. + * Copyright 2017 NXP
  9805. + *
  9806. + * This program is free software; you can redistribute it and/or modify
  9807. + * it under the terms of the GNU General Public License as published by
  9808. + * the Free Software Foundation; either version 2 of the License, or
  9809. + * (at your option) any later version.
  9810. + *
  9811. + * This program is distributed in the hope that it will be useful,
  9812. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9813. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9814. + * GNU General Public License for more details.
  9815. + *
  9816. + * You should have received a copy of the GNU General Public License
  9817. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  9818. + */
  9819. +
  9820. +#include <linux/module.h>
  9821. +#include <linux/platform_device.h>
  9822. +
  9823. +#include "pfe_mod.h"
  9824. +
  9825. +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
  9826. +#define NUM_QUEUES 16
  9827. +
  9828. +static char register_name[20][5] = {
  9829. + "EPC", "ECAS", "EID", "ED",
  9830. + "r0", "r1", "r2", "r3",
  9831. + "r4", "r5", "r6", "r7",
  9832. + "r8", "r9", "r10", "r11",
  9833. + "r12", "r13", "r14", "r15",
  9834. +};
  9835. +
  9836. +static char exception_name[14][20] = {
  9837. + "Reset",
  9838. + "HardwareFailure",
  9839. + "NMI",
  9840. + "InstBreakpoint",
  9841. + "DataBreakpoint",
  9842. + "Unsupported",
  9843. + "PrivilegeViolation",
  9844. + "InstBusError",
  9845. + "DataBusError",
  9846. + "AlignmentError",
  9847. + "ArithmeticError",
  9848. + "SystemCall",
  9849. + "MemoryManagement",
  9850. + "Interrupt",
  9851. +};
  9852. +
  9853. +static unsigned long class_do_clear;
  9854. +static unsigned long tmu_do_clear;
  9855. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  9856. +static unsigned long util_do_clear;
  9857. +#endif
  9858. +
  9859. +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
  9860. + do_clear)
  9861. +{
  9862. + ssize_t len = 0;
  9863. + u32 val;
  9864. + char statebuf[5];
  9865. + struct pfe_cpumon *cpumon = &pfe->cpumon;
  9866. + u32 debug_indicator;
  9867. + u32 debug[20];
  9868. +
  9869. + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
  9870. + dmem_addr += 4;
  9871. +
  9872. + statebuf[4] = '\0';
  9873. + len += sprintf(buf + len, "state=%4s ", statebuf);
  9874. +
  9875. + val = pe_dmem_read(id, dmem_addr, 4);
  9876. + dmem_addr += 4;
  9877. + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
  9878. +
  9879. + val = pe_dmem_read(id, dmem_addr, 4);
  9880. + if (do_clear && val)
  9881. + pe_dmem_write(id, 0, dmem_addr, 4);
  9882. + dmem_addr += 4;
  9883. + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
  9884. +
  9885. + val = pe_dmem_read(id, dmem_addr, 4);
  9886. + if (do_clear && val)
  9887. + pe_dmem_write(id, 0, dmem_addr, 4);
  9888. + dmem_addr += 4;
  9889. + if (id >= TMU0_ID && id <= TMU_MAX_ID)
  9890. + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
  9891. + else
  9892. + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
  9893. +
  9894. + val = pe_dmem_read(id, dmem_addr, 4);
  9895. + if (do_clear && val)
  9896. + pe_dmem_write(id, 0, dmem_addr, 4);
  9897. + dmem_addr += 4;
  9898. + if (val)
  9899. + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
  9900. +
  9901. + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
  9902. +
  9903. + len += sprintf(buf + len, "\n");
  9904. +
  9905. + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
  9906. + dmem_addr += 4;
  9907. + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
  9908. + int j, last = 0;
  9909. +
  9910. + for (j = 0; j < 16; j++) {
  9911. + debug[j] = pe_dmem_read(id, dmem_addr, 4);
  9912. + if (debug[j]) {
  9913. + if (do_clear)
  9914. + pe_dmem_write(id, 0, dmem_addr, 4);
  9915. + last = j + 1;
  9916. + }
  9917. + dmem_addr += 4;
  9918. + }
  9919. + for (j = 0; j < last; j++) {
  9920. + len += sprintf(buf + len, "%08x%s",
  9921. + cpu_to_be32(debug[j]),
  9922. + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
  9923. + }
  9924. + }
  9925. +
  9926. + if (!strncmp(statebuf, "DEAD", 4)) {
  9927. + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
  9928. +
  9929. + len += sprintf(buf + len, "Exception details:\n");
  9930. + for (i = 0; i < 20; i++) {
  9931. + debug[i] = pe_dmem_read(id, dump, 4);
  9932. + dump += 4;
  9933. + if (i == 2)
  9934. + len += sprintf(buf + len, "%4s = %08x (=%s) ",
  9935. + register_name[i], cpu_to_be32(debug[i]),
  9936. + exception_name[min((u32)
  9937. + cpu_to_be32(debug[i]), (u32)13)]);
  9938. + else
  9939. + len += sprintf(buf + len, "%4s = %08x%s",
  9940. + register_name[i], cpu_to_be32(debug[i]),
  9941. + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
  9942. + }
  9943. + }
  9944. +
  9945. + return len;
  9946. +}
  9947. +
  9948. +static ssize_t class_phy_stats(char *buf, int phy)
  9949. +{
  9950. + ssize_t len = 0;
  9951. + int off1 = phy * 0x28;
  9952. + int off2 = phy * 0x10;
  9953. +
  9954. + if (phy == 3)
  9955. + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
  9956. +
  9957. + len += sprintf(buf + len, "phy: %d\n", phy);
  9958. + len += sprintf(buf + len,
  9959. + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
  9960. + readl(CLASS_PHY1_RX_PKTS + off1),
  9961. + readl(CLASS_PHY1_TX_PKTS + off1),
  9962. + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
  9963. + readl(CLASS_PHY1_V4_PKTS + off1),
  9964. + readl(CLASS_PHY1_V6_PKTS + off1));
  9965. +
  9966. + len += sprintf(buf + len,
  9967. + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
  9968. + readl(CLASS_PHY1_ICMP_PKTS + off2),
  9969. + readl(CLASS_PHY1_IGMP_PKTS + off2),
  9970. + readl(CLASS_PHY1_TCP_PKTS + off2),
  9971. + readl(CLASS_PHY1_UDP_PKTS + off2));
  9972. +
  9973. + len += sprintf(buf + len, " err\n");
  9974. + len += sprintf(buf + len,
  9975. + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
  9976. + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
  9977. + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
  9978. + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
  9979. + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
  9980. + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
  9981. +
  9982. + return len;
  9983. +}
  9984. +
  9985. +/* qm_read_drop_stat
  9986. + * This function is used to read the drop statistics from the TMU
  9987. + * hw drop counter. Since the hw counter is always cleared after
  9988. + * reading, this function maintains the previous drop count, and
  9989. + * adds the new value to it. That value can be retrieved by
  9990. + * passing a pointer to it with the total_drops arg.
  9991. + *
  9992. + * @param tmu TMU number (0 - 3)
  9993. + * @param queue queue number (0 - 15)
  9994. + * @param total_drops pointer to location to store total drops (or NULL)
  9995. + * @param do_reset if TRUE, clear total drops after updating
  9996. + */
  9997. +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
  9998. +{
  9999. + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
  10000. + u32 val;
  10001. +
  10002. + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
  10003. + writel((tmu << 8) | queue, TMU_LLM_CTRL);
  10004. + val = readl(TMU_TEQ_DROP_STAT);
  10005. + qtotal[tmu][queue] += val;
  10006. + if (total_drops)
  10007. + *total_drops = qtotal[tmu][queue];
  10008. + if (do_reset)
  10009. + qtotal[tmu][queue] = 0;
  10010. + return val;
  10011. +}
  10012. +
  10013. +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
  10014. +{
  10015. + ssize_t len = 0;
  10016. + u32 drops;
  10017. +
  10018. + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
  10019. +
  10020. + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
  10021. +
  10022. + /* Select queue */
  10023. + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
  10024. + writel((tmu << 8) | queue, TMU_LLM_CTRL);
  10025. +
  10026. + len += sprintf(buf + len,
  10027. + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
  10028. + drops, readl(TMU_TEQ_TRANS_STAT),
  10029. + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
  10030. + readl(TMU_LLM_QUE_DROPCNT));
  10031. +
  10032. + return len;
  10033. +}
  10034. +
  10035. +static ssize_t tmu_queues(char *buf, int tmu)
  10036. +{
  10037. + ssize_t len = 0;
  10038. + int queue;
  10039. +
  10040. + for (queue = 0; queue < 16; queue++)
  10041. + len += tmu_queue_stats(buf + len, tmu, queue);
  10042. +
  10043. + return len;
  10044. +}
  10045. +
  10046. +static ssize_t block_version(char *buf, void *addr)
  10047. +{
  10048. + ssize_t len = 0;
  10049. + u32 val;
  10050. +
  10051. + val = readl(addr);
  10052. + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
  10053. + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
  10054. +
  10055. + return len;
  10056. +}
  10057. +
  10058. +static ssize_t bmu(char *buf, int id, void *base)
  10059. +{
  10060. + ssize_t len = 0;
  10061. +
  10062. + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
  10063. +
  10064. + len += block_version(buf + len, base + BMU_VERSION);
  10065. +
  10066. + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
  10067. + BMU_BUF_SIZE)));
  10068. + len += sprintf(buf + len, " buf count: %x\n", readl(base +
  10069. + BMU_BUF_CNT));
  10070. + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
  10071. + BMU_REM_BUF_CNT));
  10072. + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
  10073. + BMU_CURR_BUF_CNT));
  10074. + len += sprintf(buf + len, " free err: %x\n", readl(base +
  10075. + BMU_FREE_ERR_ADDR));
  10076. +
  10077. + return len;
  10078. +}
  10079. +
  10080. +static ssize_t gpi(char *buf, int id, void *base)
  10081. +{
  10082. + ssize_t len = 0;
  10083. + u32 val;
  10084. +
  10085. + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
  10086. + len += block_version(buf + len, base + GPI_VERSION);
  10087. +
  10088. + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
  10089. + GPI_FIFO_STATUS));
  10090. + val = readl(base + GPI_FIFO_DEBUG);
  10091. + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
  10092. + 0x3f);
  10093. + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
  10094. + 0x3f);
  10095. + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
  10096. + 0x1ff);
  10097. + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
  10098. + 0x1ff);
  10099. + len += sprintf(buf + len, " overrun: %x\n", readl(base +
  10100. + GPI_OVERRUN_DROPCNT));
  10101. +
  10102. + return len;
  10103. +}
  10104. +
  10105. +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
  10106. + const char *buf, size_t count)
  10107. +{
  10108. + class_do_clear = simple_strtoul(buf, NULL, 0);
  10109. + return count;
  10110. +}
  10111. +
  10112. +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
  10113. + char *buf)
  10114. +{
  10115. + ssize_t len = 0;
  10116. + int id;
  10117. + u32 val;
  10118. + struct pfe_cpumon *cpumon = &pfe->cpumon;
  10119. +
  10120. + len += block_version(buf + len, CLASS_VERSION);
  10121. +
  10122. + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
  10123. + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
  10124. +
  10125. + val = readl(CLASS_PE0_DEBUG + id * 4);
  10126. + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
  10127. +
  10128. + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
  10129. + class_do_clear);
  10130. + }
  10131. + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
  10132. + cpumon->class_usage_pct);
  10133. +
  10134. + len += sprintf(buf + len, "pe status: 0x%x\n",
  10135. + readl(CLASS_PE_STATUS));
  10136. + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
  10137. + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
  10138. + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
  10139. + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
  10140. + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
  10141. +
  10142. + len += class_phy_stats(buf + len, 0);
  10143. + len += class_phy_stats(buf + len, 1);
  10144. + len += class_phy_stats(buf + len, 2);
  10145. + len += class_phy_stats(buf + len, 3);
  10146. +
  10147. + return len;
  10148. +}
  10149. +
  10150. +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
  10151. + const char *buf, size_t count)
  10152. +{
  10153. + tmu_do_clear = simple_strtoul(buf, NULL, 0);
  10154. + return count;
  10155. +}
  10156. +
  10157. +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
  10158. + char *buf)
  10159. +{
  10160. + ssize_t len = 0;
  10161. + int id;
  10162. + u32 val;
  10163. +
  10164. + len += block_version(buf + len, TMU_VERSION);
  10165. +
  10166. + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
  10167. + if (id == TMU2_ID)
  10168. + continue;
  10169. + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
  10170. +
  10171. + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
  10172. + tmu_do_clear);
  10173. + }
  10174. +
  10175. + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
  10176. + len += sprintf(buf + len, "inq fifo cnt: %x\n",
  10177. + readl(TMU_PHY_INQ_FIFO_CNT));
  10178. + val = readl(TMU_INQ_STAT);
  10179. + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
  10180. + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
  10181. +
  10182. + return len;
  10183. +}
  10184. +
  10185. +static unsigned long drops_do_clear;
  10186. +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
  10187. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10188. +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
  10189. +#endif
  10190. +
  10191. +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
  10192. + "ICC",
  10193. + "Host Pkt Error",
  10194. + "Rx Error",
  10195. + "IPsec Outbound",
  10196. + "IPsec Inbound",
  10197. + "EXPT IPsec Error",
  10198. + "Reassembly",
  10199. + "Fragmenter",
  10200. + "NAT-T",
  10201. + "Socket",
  10202. + "Multicast",
  10203. + "NAT-PT",
  10204. + "Tx Disabled",
  10205. +};
  10206. +
  10207. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10208. +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
  10209. + "IPsec Outbound",
  10210. + "IPsec Inbound",
  10211. + "IPsec Rate Limiter",
  10212. + "Fragmenter",
  10213. + "Socket",
  10214. + "Tx Disabled",
  10215. + "Rx Error",
  10216. +};
  10217. +#endif
  10218. +
  10219. +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
  10220. + const char *buf, size_t count)
  10221. +{
  10222. + drops_do_clear = simple_strtoul(buf, NULL, 0);
  10223. + return count;
  10224. +}
  10225. +
  10226. +static u32 tmu_drops[4][16];
  10227. +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
  10228. + char *buf)
  10229. +{
  10230. + ssize_t len = 0;
  10231. + int id, dropnum;
  10232. + int tmu, queue;
  10233. + u32 val;
  10234. + u32 dmem_addr;
  10235. + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
  10236. + struct pfe_ctrl *ctrl = &pfe->ctrl;
  10237. +
  10238. + memset(class_drop_counter, 0, sizeof(class_drop_counter));
  10239. + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
  10240. + if (drops_do_clear)
  10241. + pe_sync_stop(ctrl, (1 << id));
  10242. + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
  10243. + dropnum++) {
  10244. + dmem_addr = CLASS_DM_DROP_CNTR;
  10245. + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
  10246. + class_drop_counter[dropnum] += val;
  10247. + num_class_drops += val;
  10248. + if (drops_do_clear)
  10249. + pe_dmem_write(id, 0, dmem_addr, 4);
  10250. + }
  10251. + if (drops_do_clear)
  10252. + pe_start(ctrl, (1 << id));
  10253. + }
  10254. +
  10255. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10256. + if (drops_do_clear)
  10257. + pe_sync_stop(ctrl, (1 << UTIL_ID));
  10258. + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
  10259. + dmem_addr = UTIL_DM_DROP_CNTR;
  10260. + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
  10261. + util_drop_counter[dropnum] = val;
  10262. + num_util_drops += val;
  10263. + if (drops_do_clear)
  10264. + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
  10265. + }
  10266. + if (drops_do_clear)
  10267. + pe_start(ctrl, (1 << UTIL_ID));
  10268. +#endif
  10269. + for (tmu = 0; tmu < 4; tmu++) {
  10270. + for (queue = 0; queue < 16; queue++) {
  10271. + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
  10272. + drops_do_clear);
  10273. + num_tmu_drops += tmu_drops[tmu][queue];
  10274. + }
  10275. + }
  10276. +
  10277. + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
  10278. + len += sprintf(buf + len, "No PE drops\n\n");
  10279. +
  10280. + if (num_class_drops > 0) {
  10281. + len += sprintf(buf + len, "Class PE drops --\n");
  10282. + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
  10283. + dropnum++) {
  10284. + if (class_drop_counter[dropnum] > 0)
  10285. + len += sprintf(buf + len, " %s: %d\n",
  10286. + class_drop_description[dropnum],
  10287. + class_drop_counter[dropnum]);
  10288. + }
  10289. + len += sprintf(buf + len, "\n");
  10290. + }
  10291. +
  10292. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10293. + if (num_util_drops > 0) {
  10294. + len += sprintf(buf + len, "Util PE drops --\n");
  10295. + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
  10296. + if (util_drop_counter[dropnum] > 0)
  10297. + len += sprintf(buf + len, " %s: %d\n",
  10298. + util_drop_description[dropnum],
  10299. + util_drop_counter[dropnum]);
  10300. + }
  10301. + len += sprintf(buf + len, "\n");
  10302. + }
  10303. +#endif
  10304. + if (num_tmu_drops > 0) {
  10305. + len += sprintf(buf + len, "TMU drops --\n");
  10306. + for (tmu = 0; tmu < 4; tmu++) {
  10307. + for (queue = 0; queue < 16; queue++) {
  10308. + if (tmu_drops[tmu][queue] > 0)
  10309. + len += sprintf(buf + len,
  10310. + " TMU%d-Q%d: %d\n"
  10311. + , tmu, queue, tmu_drops[tmu][queue]);
  10312. + }
  10313. + }
  10314. + len += sprintf(buf + len, "\n");
  10315. + }
  10316. +
  10317. + return len;
  10318. +}
  10319. +
  10320. +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
  10321. + *attr, char *buf)
  10322. +{
  10323. + return tmu_queues(buf, 0);
  10324. +}
  10325. +
  10326. +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
  10327. + *attr, char *buf)
  10328. +{
  10329. + return tmu_queues(buf, 1);
  10330. +}
  10331. +
  10332. +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
  10333. + *attr, char *buf)
  10334. +{
  10335. + return tmu_queues(buf, 2);
  10336. +}
  10337. +
  10338. +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
  10339. + *attr, char *buf)
  10340. +{
  10341. + return tmu_queues(buf, 3);
  10342. +}
  10343. +
  10344. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10345. +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
  10346. + const char *buf, size_t count)
  10347. +{
  10348. + util_do_clear = simple_strtoul(buf, NULL, 0);
  10349. + return count;
  10350. +}
  10351. +
  10352. +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
  10353. + char *buf)
  10354. +{
  10355. + ssize_t len = 0;
  10356. + struct pfe_ctrl *ctrl = &pfe->ctrl;
  10357. +
  10358. + len += block_version(buf + len, UTIL_VERSION);
  10359. +
  10360. + pe_sync_stop(ctrl, (1 << UTIL_ID));
  10361. + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
  10362. + util_do_clear);
  10363. + pe_start(ctrl, (1 << UTIL_ID));
  10364. +
  10365. + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
  10366. + len += sprintf(buf + len, "max buf cnt: %x\n",
  10367. + readl(UTIL_MAX_BUF_CNT));
  10368. + len += sprintf(buf + len, "tsq max cnt: %x\n",
  10369. + readl(UTIL_TSQ_MAX_CNT));
  10370. +
  10371. + return len;
  10372. +}
  10373. +#endif
  10374. +
  10375. +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
  10376. + char *buf)
  10377. +{
  10378. + ssize_t len = 0;
  10379. +
  10380. + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
  10381. + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
  10382. +
  10383. + return len;
  10384. +}
  10385. +
  10386. +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
  10387. + char *buf)
  10388. +{
  10389. + ssize_t len = 0;
  10390. +
  10391. + len += sprintf(buf + len, "hif:\n ");
  10392. + len += block_version(buf + len, HIF_VERSION);
  10393. +
  10394. + len += sprintf(buf + len, " tx curr bd: %x\n",
  10395. + readl(HIF_TX_CURR_BD_ADDR));
  10396. + len += sprintf(buf + len, " tx status: %x\n",
  10397. + readl(HIF_TX_STATUS));
  10398. + len += sprintf(buf + len, " tx dma status: %x\n",
  10399. + readl(HIF_TX_DMA_STATUS));
  10400. +
  10401. + len += sprintf(buf + len, " rx curr bd: %x\n",
  10402. + readl(HIF_RX_CURR_BD_ADDR));
  10403. + len += sprintf(buf + len, " rx status: %x\n",
  10404. + readl(HIF_RX_STATUS));
  10405. + len += sprintf(buf + len, " rx dma status: %x\n",
  10406. + readl(HIF_RX_DMA_STATUS));
  10407. +
  10408. + len += sprintf(buf + len, "hif nocopy:\n ");
  10409. + len += block_version(buf + len, HIF_NOCPY_VERSION);
  10410. +
  10411. + len += sprintf(buf + len, " tx curr bd: %x\n",
  10412. + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
  10413. + len += sprintf(buf + len, " tx status: %x\n",
  10414. + readl(HIF_NOCPY_TX_STATUS));
  10415. + len += sprintf(buf + len, " tx dma status: %x\n",
  10416. + readl(HIF_NOCPY_TX_DMA_STATUS));
  10417. +
  10418. + len += sprintf(buf + len, " rx curr bd: %x\n",
  10419. + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
  10420. + len += sprintf(buf + len, " rx status: %x\n",
  10421. + readl(HIF_NOCPY_RX_STATUS));
  10422. + len += sprintf(buf + len, " rx dma status: %x\n",
  10423. + readl(HIF_NOCPY_RX_DMA_STATUS));
  10424. +
  10425. + return len;
  10426. +}
  10427. +
  10428. +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
  10429. + char *buf)
  10430. +{
  10431. + ssize_t len = 0;
  10432. +
  10433. + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
  10434. + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
  10435. + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
  10436. +
  10437. + return len;
  10438. +}
  10439. +
  10440. +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
  10441. + *attr, char *buf)
  10442. +{
  10443. + ssize_t len = 0;
  10444. + struct pfe_memmon *memmon = &pfe->memmon;
  10445. +
  10446. + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
  10447. + memmon->kernel_memory_allocated,
  10448. + (memmon->kernel_memory_allocated + 1023) / 1024);
  10449. +
  10450. + return len;
  10451. +}
  10452. +
  10453. +#ifdef HIF_NAPI_STATS
  10454. +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
  10455. + struct device_attribute *attr,
  10456. + char *buf)
  10457. +{
  10458. + struct platform_device *pdev = to_platform_device(dev);
  10459. + struct pfe *pfe = platform_get_drvdata(pdev);
  10460. + ssize_t len = 0;
  10461. +
  10462. + len += sprintf(buf + len, "sched: %u\n",
  10463. + pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
  10464. + len += sprintf(buf + len, "poll: %u\n",
  10465. + pfe->hif.napi_counters[NAPI_POLL_COUNT]);
  10466. + len += sprintf(buf + len, "packet: %u\n",
  10467. + pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
  10468. + len += sprintf(buf + len, "budget: %u\n",
  10469. + pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
  10470. + len += sprintf(buf + len, "desc: %u\n",
  10471. + pfe->hif.napi_counters[NAPI_DESC_COUNT]);
  10472. + len += sprintf(buf + len, "full: %u\n",
  10473. + pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
  10474. +
  10475. + return len;
  10476. +}
  10477. +
  10478. +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
  10479. + struct device_attribute *attr,
  10480. + const char *buf, size_t count)
  10481. +{
  10482. + struct platform_device *pdev = to_platform_device(dev);
  10483. + struct pfe *pfe = platform_get_drvdata(pdev);
  10484. +
  10485. + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
  10486. +
  10487. + return count;
  10488. +}
  10489. +
  10490. +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
  10491. + pfe_set_hif_napi_stats);
  10492. +#endif
  10493. +
  10494. +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
  10495. +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
  10496. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10497. +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
  10498. +#endif
  10499. +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
  10500. +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
  10501. +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
  10502. +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
  10503. +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
  10504. +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
  10505. +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
  10506. +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
  10507. +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
  10508. +
  10509. +int pfe_sysfs_init(struct pfe *pfe)
  10510. +{
  10511. + if (device_create_file(pfe->dev, &dev_attr_class))
  10512. + goto err_class;
  10513. +
  10514. + if (device_create_file(pfe->dev, &dev_attr_tmu))
  10515. + goto err_tmu;
  10516. +
  10517. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10518. + if (device_create_file(pfe->dev, &dev_attr_util))
  10519. + goto err_util;
  10520. +#endif
  10521. +
  10522. + if (device_create_file(pfe->dev, &dev_attr_bmu))
  10523. + goto err_bmu;
  10524. +
  10525. + if (device_create_file(pfe->dev, &dev_attr_hif))
  10526. + goto err_hif;
  10527. +
  10528. + if (device_create_file(pfe->dev, &dev_attr_gpi))
  10529. + goto err_gpi;
  10530. +
  10531. + if (device_create_file(pfe->dev, &dev_attr_drops))
  10532. + goto err_drops;
  10533. +
  10534. + if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
  10535. + goto err_tmu0_queues;
  10536. +
  10537. + if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
  10538. + goto err_tmu1_queues;
  10539. +
  10540. + if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
  10541. + goto err_tmu2_queues;
  10542. +
  10543. + if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
  10544. + goto err_tmu3_queues;
  10545. +
  10546. + if (device_create_file(pfe->dev, &dev_attr_pfemem))
  10547. + goto err_pfemem;
  10548. +
  10549. +#ifdef HIF_NAPI_STATS
  10550. + if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
  10551. + goto err_hif_napi_stats;
  10552. +#endif
  10553. +
  10554. + return 0;
  10555. +
  10556. +#ifdef HIF_NAPI_STATS
  10557. +err_hif_napi_stats:
  10558. + device_remove_file(pfe->dev, &dev_attr_pfemem);
  10559. +#endif
  10560. +
  10561. +err_pfemem:
  10562. + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
  10563. +
  10564. +err_tmu3_queues:
  10565. + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
  10566. +
  10567. +err_tmu2_queues:
  10568. + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
  10569. +
  10570. +err_tmu1_queues:
  10571. + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
  10572. +
  10573. +err_tmu0_queues:
  10574. + device_remove_file(pfe->dev, &dev_attr_drops);
  10575. +
  10576. +err_drops:
  10577. + device_remove_file(pfe->dev, &dev_attr_gpi);
  10578. +
  10579. +err_gpi:
  10580. + device_remove_file(pfe->dev, &dev_attr_hif);
  10581. +
  10582. +err_hif:
  10583. + device_remove_file(pfe->dev, &dev_attr_bmu);
  10584. +
  10585. +err_bmu:
  10586. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10587. + device_remove_file(pfe->dev, &dev_attr_util);
  10588. +
  10589. +err_util:
  10590. +#endif
  10591. + device_remove_file(pfe->dev, &dev_attr_tmu);
  10592. +
  10593. +err_tmu:
  10594. + device_remove_file(pfe->dev, &dev_attr_class);
  10595. +
  10596. +err_class:
  10597. + return -1;
  10598. +}
  10599. +
  10600. +void pfe_sysfs_exit(struct pfe *pfe)
  10601. +{
  10602. +#ifdef HIF_NAPI_STATS
  10603. + device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
  10604. +#endif
  10605. + device_remove_file(pfe->dev, &dev_attr_pfemem);
  10606. + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
  10607. + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
  10608. + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
  10609. + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
  10610. + device_remove_file(pfe->dev, &dev_attr_drops);
  10611. + device_remove_file(pfe->dev, &dev_attr_gpi);
  10612. + device_remove_file(pfe->dev, &dev_attr_hif);
  10613. + device_remove_file(pfe->dev, &dev_attr_bmu);
  10614. +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
  10615. + device_remove_file(pfe->dev, &dev_attr_util);
  10616. +#endif
  10617. + device_remove_file(pfe->dev, &dev_attr_tmu);
  10618. + device_remove_file(pfe->dev, &dev_attr_class);
  10619. +}
  10620. --- /dev/null
  10621. +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
  10622. @@ -0,0 +1,29 @@
  10623. +/*
  10624. + * Copyright 2015-2016 Freescale Semiconductor, Inc.
  10625. + * Copyright 2017 NXP
  10626. + *
  10627. + * This program is free software; you can redistribute it and/or modify
  10628. + * it under the terms of the GNU General Public License as published by
  10629. + * the Free Software Foundation; either version 2 of the License, or
  10630. + * (at your option) any later version.
  10631. + *
  10632. + * This program is distributed in the hope that it will be useful,
  10633. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10634. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10635. + * GNU General Public License for more details.
  10636. + *
  10637. + * You should have received a copy of the GNU General Public License
  10638. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  10639. + */
  10640. +
#ifndef _PFE_SYSFS_H_
#define _PFE_SYSFS_H_

#include <linux/proc_fs.h>

/* Read the drop statistic for one TMU queue.
 * NOTE(review): exact semantics not visible from this header — presumably
 * accumulates into *total_drops and clears the hardware counter when
 * do_reset is nonzero; confirm against the definition in pfe_sysfs.c.
 */
u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);

/* Create/remove the PFE statistics sysfs files on pfe->dev.
 * pfe_sysfs_init() returns 0 on success or a negative value on failure.
 */
int pfe_sysfs_init(struct pfe *pfe);
void pfe_sysfs_exit(struct pfe *pfe);

#endif /* _PFE_SYSFS_H_ */