0050-stmmac-form-4-10.patch 111 KB

  1. --- a/Documentation/devicetree/bindings/net/stmmac.txt
  2. +++ b/Documentation/devicetree/bindings/net/stmmac.txt
  3. @@ -1,7 +1,7 @@
  4. * STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
  5. Required properties:
  6. -- compatible: Should be "snps,dwmac-<ip_version>" "snps,dwmac"
  7. +- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
  8. For backwards compatibility: "st,spear600-gmac" is also supported.
  9. - reg: Address and length of the register set for the device
  10. - interrupt-parent: Should be the phandle for the interrupt controller
  11. @@ -34,7 +34,13 @@ Optional properties:
  12. platforms.
  13. - tx-fifo-depth: See ethernet.txt file in the same directory
  14. - rx-fifo-depth: See ethernet.txt file in the same directory
  15. -- snps,pbl Programmable Burst Length
  16. +- snps,pbl Programmable Burst Length (tx and rx)
  17. +- snps,txpbl Tx Programmable Burst Length. Only for GMAC and newer.
  18. + If set, DMA tx will use this value rather than snps,pbl.
  19. +- snps,rxpbl Rx Programmable Burst Length. Only for GMAC and newer.
  20. + If set, DMA rx will use this value rather than snps,pbl.
  21. +- snps,no-pbl-x8 Don't multiply the pbl/txpbl/rxpbl values by 8.
  22. + For core rev < 3.50, don't multiply the values by 4.
  23. - snps,aal Address-Aligned Beats
  24. - snps,fixed-burst Program the DMA to use the fixed burst mode
  25. - snps,mixed-burst Program the DMA to use the mixed burst mode
  26. @@ -50,6 +56,8 @@ Optional properties:
  27. - snps,ps-speed: port selection speed that can be passed to the core when
  28. PCS is supported. For example, this is used in case of SGMII
  29. and MAC2MAC connection.
  30. +- snps,tso: this enables the TSO feature otherwise it will be managed by
  31. + MAC HW capability register. Only for GMAC4 and newer.
  32. - AXI BUS Mode parameters: below the list of all the parameters to program the
  33. AXI register inside the DMA module:
  34. - snps,lpi_en: enable Low Power Interface
  35. @@ -62,8 +70,6 @@ Optional properties:
  36. - snps,fb: fixed-burst
  37. - snps,mb: mixed-burst
  38. - snps,rb: rebuild INCRx Burst
  39. - - snps,tso: this enables the TSO feature otherwise it will be managed by
  40. - MAC HW capability register.
  41. - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
  42. Examples:
  43. --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
  44. +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
  45. @@ -69,6 +69,17 @@ config DWMAC_MESON
  46. the stmmac device driver. This driver is used for Meson6,
  47. Meson8, Meson8b and GXBB SoCs.
  48. +config DWMAC_OXNAS
  49. + tristate "Oxford Semiconductor OXNAS dwmac support"
  50. + default ARCH_OXNAS
  51. + depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
  52. + select MFD_SYSCON
  53. + help
  54. + Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs.
  55. +
  56. + This selects the Oxford Semiconductor OXNAS SoC glue layer support for
  57. + the stmmac device driver. This driver is used for OX820.
  58. +
  59. config DWMAC_ROCKCHIP
  60. tristate "Rockchip dwmac support"
  61. default ARCH_ROCKCHIP
  62. --- a/drivers/net/ethernet/stmicro/stmmac/Makefile
  63. +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
  64. @@ -10,6 +10,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-
  65. obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
  66. obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
  67. obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
  68. +obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
  69. obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
  70. obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
  71. obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
  72. --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
  73. +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
  74. @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
  75. unsigned int entry = priv->cur_tx;
  76. struct dma_desc *desc = priv->dma_tx + entry;
  77. unsigned int nopaged_len = skb_headlen(skb);
  78. - unsigned int bmax;
  79. + unsigned int bmax, des2;
  80. unsigned int i = 1, len;
  81. if (priv->plat->enh_desc)
  82. @@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, str
  83. len = nopaged_len - bmax;
  84. - desc->des2 = dma_map_single(priv->device, skb->data,
  85. - bmax, DMA_TO_DEVICE);
  86. - if (dma_mapping_error(priv->device, desc->des2))
  87. + des2 = dma_map_single(priv->device, skb->data,
  88. + bmax, DMA_TO_DEVICE);
  89. + desc->des2 = cpu_to_le32(des2);
  90. + if (dma_mapping_error(priv->device, des2))
  91. return -1;
  92. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  93. + priv->tx_skbuff_dma[entry].buf = des2;
  94. priv->tx_skbuff_dma[entry].len = bmax;
  95. /* do not close the descriptor and do not set own bit */
  96. priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
  97. @@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, str
  98. desc = priv->dma_tx + entry;
  99. if (len > bmax) {
  100. - desc->des2 = dma_map_single(priv->device,
  101. - (skb->data + bmax * i),
  102. - bmax, DMA_TO_DEVICE);
  103. - if (dma_mapping_error(priv->device, desc->des2))
  104. + des2 = dma_map_single(priv->device,
  105. + (skb->data + bmax * i),
  106. + bmax, DMA_TO_DEVICE);
  107. + desc->des2 = cpu_to_le32(des2);
  108. + if (dma_mapping_error(priv->device, des2))
  109. return -1;
  110. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  111. + priv->tx_skbuff_dma[entry].buf = des2;
  112. priv->tx_skbuff_dma[entry].len = bmax;
  113. priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
  114. STMMAC_CHAIN_MODE, 1,
  115. @@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, str
  116. len -= bmax;
  117. i++;
  118. } else {
  119. - desc->des2 = dma_map_single(priv->device,
  120. - (skb->data + bmax * i), len,
  121. - DMA_TO_DEVICE);
  122. - if (dma_mapping_error(priv->device, desc->des2))
  123. + des2 = dma_map_single(priv->device,
  124. + (skb->data + bmax * i), len,
  125. + DMA_TO_DEVICE);
  126. + desc->des2 = cpu_to_le32(des2);
  127. + if (dma_mapping_error(priv->device, des2))
  128. return -1;
  129. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  130. + priv->tx_skbuff_dma[entry].buf = des2;
  131. priv->tx_skbuff_dma[entry].len = len;
  132. /* last descriptor can be set now */
  133. priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
  134. @@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void *
  135. struct dma_extended_desc *p = (struct dma_extended_desc *)des;
  136. for (i = 0; i < (size - 1); i++) {
  137. dma_phy += sizeof(struct dma_extended_desc);
  138. - p->basic.des3 = (unsigned int)dma_phy;
  139. + p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
  140. p++;
  141. }
  142. - p->basic.des3 = (unsigned int)phy_addr;
  143. + p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
  144. } else {
  145. struct dma_desc *p = (struct dma_desc *)des;
  146. for (i = 0; i < (size - 1); i++) {
  147. dma_phy += sizeof(struct dma_desc);
  148. - p->des3 = (unsigned int)dma_phy;
  149. + p->des3 = cpu_to_le32((unsigned int)dma_phy);
  150. p++;
  151. }
  152. - p->des3 = (unsigned int)phy_addr;
  153. + p->des3 = cpu_to_le32((unsigned int)phy_addr);
  154. }
  155. }
  156. @@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *pr
  157. * 1588-2002 time stamping is enabled, hence reinitialize it
  158. * to keep explicit chaining in the descriptor.
  159. */
  160. - p->des3 = (unsigned int)(priv->dma_rx_phy +
  161. - (((priv->dirty_rx) + 1) %
  162. - DMA_RX_SIZE) *
  163. - sizeof(struct dma_desc));
  164. + p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
  165. + (((priv->dirty_rx) + 1) %
  166. + DMA_RX_SIZE) *
  167. + sizeof(struct dma_desc)));
  168. }
  169. static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
  170. @@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *pri
  171. * 1588-2002 time stamping is enabled, hence reinitialize it
  172. * to keep explicit chaining in the descriptor.
  173. */
  174. - p->des3 = (unsigned int)((priv->dma_tx_phy +
  175. - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
  176. - * sizeof(struct dma_desc));
  177. + p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
  178. + ((priv->dirty_tx + 1) % DMA_TX_SIZE))
  179. + * sizeof(struct dma_desc)));
  180. }
  181. const struct stmmac_mode_ops chain_mode_ops = {
  182. --- a/drivers/net/ethernet/stmicro/stmmac/common.h
  183. +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
  184. @@ -44,6 +44,7 @@
  185. #define DWMAC_CORE_4_00 0x40
  186. #define STMMAC_CHAN0 0 /* Always supported and default for all chips */
  187. +/* These need to be power of two, and >= 4 */
  188. #define DMA_TX_SIZE 512
  189. #define DMA_RX_SIZE 512
  190. #define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
  191. @@ -411,8 +412,8 @@ extern const struct stmmac_desc_ops ndes
  192. struct stmmac_dma_ops {
  193. /* DMA core initialization */
  194. int (*reset)(void __iomem *ioaddr);
  195. - void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
  196. - int aal, u32 dma_tx, u32 dma_rx, int atds);
  197. + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
  198. + u32 dma_tx, u32 dma_rx, int atds);
  199. /* Configure the AXI Bus Mode Register */
  200. void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
  201. /* Dump DMA registers */
  202. @@ -506,6 +507,12 @@ struct mac_link {
  203. struct mii_regs {
  204. unsigned int addr; /* MII Address */
  205. unsigned int data; /* MII Data */
  206. + unsigned int addr_shift; /* MII address shift */
  207. + unsigned int reg_shift; /* MII reg shift */
  208. + unsigned int addr_mask; /* MII address mask */
  209. + unsigned int reg_mask; /* MII reg mask */
  210. + unsigned int clk_csr_shift;
  211. + unsigned int clk_csr_mask;
  212. };
  213. /* Helpers to manage the descriptors for chain and ring modes */
  214. --- a/drivers/net/ethernet/stmicro/stmmac/descs.h
  215. +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
  216. @@ -87,7 +87,7 @@
  217. #define TDES0_ERROR_SUMMARY BIT(15)
  218. #define TDES0_IP_HEADER_ERROR BIT(16)
  219. #define TDES0_TIME_STAMP_STATUS BIT(17)
  220. -#define TDES0_OWN BIT(31)
  221. +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */
  222. /* TDES1 */
  223. #define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
  224. #define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
  225. @@ -130,7 +130,7 @@
  226. #define ETDES0_FIRST_SEGMENT BIT(28)
  227. #define ETDES0_LAST_SEGMENT BIT(29)
  228. #define ETDES0_INTERRUPT BIT(30)
  229. -#define ETDES0_OWN BIT(31)
  230. +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */
  231. /* TDES1 */
  232. #define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
  233. #define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
  234. @@ -170,19 +170,19 @@
  235. /* Basic descriptor structure for normal and alternate descriptors */
  236. struct dma_desc {
  237. - unsigned int des0;
  238. - unsigned int des1;
  239. - unsigned int des2;
  240. - unsigned int des3;
  241. + __le32 des0;
  242. + __le32 des1;
  243. + __le32 des2;
  244. + __le32 des3;
  245. };
  246. /* Extended descriptor structure (e.g. >= databook 3.50a) */
  247. struct dma_extended_desc {
  248. struct dma_desc basic; /* Basic descriptors */
  249. - unsigned int des4; /* Extended Status */
  250. - unsigned int des5; /* Reserved */
  251. - unsigned int des6; /* Tx/Rx Timestamp Low */
  252. - unsigned int des7; /* Tx/Rx Timestamp High */
  253. + __le32 des4; /* Extended Status */
  254. + __le32 des5; /* Reserved */
  255. + __le32 des6; /* Tx/Rx Timestamp Low */
  256. + __le32 des7; /* Tx/Rx Timestamp High */
  257. };
  258. /* Transmit checksum insertion control */
  259. --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
  260. +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
  261. @@ -35,47 +35,50 @@
  262. /* Enhanced descriptors */
  263. static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
  264. {
  265. - p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
  266. - & ERDES1_BUFFER2_SIZE_MASK;
  267. + p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
  268. + << ERDES1_BUFFER2_SIZE_SHIFT)
  269. + & ERDES1_BUFFER2_SIZE_MASK);
  270. if (end)
  271. - p->des1 |= ERDES1_END_RING;
  272. + p->des1 |= cpu_to_le32(ERDES1_END_RING);
  273. }
  274. static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
  275. {
  276. if (end)
  277. - p->des0 |= ETDES0_END_RING;
  278. + p->des0 |= cpu_to_le32(ETDES0_END_RING);
  279. else
  280. - p->des0 &= ~ETDES0_END_RING;
  281. + p->des0 &= cpu_to_le32(~ETDES0_END_RING);
  282. }
  283. static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
  284. {
  285. if (unlikely(len > BUF_SIZE_4KiB)) {
  286. - p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
  287. + p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
  288. + << ETDES1_BUFFER2_SIZE_SHIFT)
  289. & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
  290. - & ETDES1_BUFFER1_SIZE_MASK);
  291. + & ETDES1_BUFFER1_SIZE_MASK));
  292. } else
  293. - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
  294. + p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
  295. }
  296. /* Normal descriptors */
  297. static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
  298. {
  299. - p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
  300. - & RDES1_BUFFER2_SIZE_MASK;
  301. + p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
  302. + << RDES1_BUFFER2_SIZE_SHIFT)
  303. + & RDES1_BUFFER2_SIZE_MASK);
  304. if (end)
  305. - p->des1 |= RDES1_END_RING;
  306. + p->des1 |= cpu_to_le32(RDES1_END_RING);
  307. }
  308. static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
  309. {
  310. if (end)
  311. - p->des1 |= TDES1_END_RING;
  312. + p->des1 |= cpu_to_le32(TDES1_END_RING);
  313. else
  314. - p->des1 &= ~TDES1_END_RING;
  315. + p->des1 &= cpu_to_le32(~TDES1_END_RING);
  316. }
  317. static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
  318. @@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_
  319. if (unlikely(len > BUF_SIZE_2KiB)) {
  320. unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
  321. & TDES1_BUFFER1_SIZE_MASK;
  322. - p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
  323. - & TDES1_BUFFER2_SIZE_MASK) | buffer1);
  324. + p->des1 |= cpu_to_le32((((len - buffer1)
  325. + << TDES1_BUFFER2_SIZE_SHIFT)
  326. + & TDES1_BUFFER2_SIZE_MASK) | buffer1);
  327. } else
  328. - p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
  329. + p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
  330. }
  331. /* Specific functions used for Chain mode */
  332. @@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_
  333. /* Enhanced descriptors */
  334. static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
  335. {
  336. - p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
  337. + p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
  338. }
  339. static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
  340. {
  341. - p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
  342. + p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
  343. }
  344. static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
  345. {
  346. - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
  347. + p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
  348. }
  349. /* Normal descriptors */
  350. static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
  351. {
  352. - p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
  353. + p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
  354. }
  355. static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
  356. {
  357. - p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
  358. + p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
  359. }
  360. static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
  361. {
  362. - p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
  363. + p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
  364. }
  365. #endif /* __DESC_COM_H__ */
  366. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
  367. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
  368. @@ -71,9 +71,12 @@ err_remove_config_dt:
  369. static const struct of_device_id dwmac_generic_match[] = {
  370. { .compatible = "st,spear600-gmac"},
  371. + { .compatible = "snps,dwmac-3.50a"},
  372. { .compatible = "snps,dwmac-3.610"},
  373. { .compatible = "snps,dwmac-3.70a"},
  374. { .compatible = "snps,dwmac-3.710"},
  375. + { .compatible = "snps,dwmac-4.00"},
  376. + { .compatible = "snps,dwmac-4.10a"},
  377. { .compatible = "snps,dwmac"},
  378. { }
  379. };
  380. --- /dev/null
  381. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
  382. @@ -0,0 +1,194 @@
  383. +/*
  384. + * Oxford Semiconductor OXNAS DWMAC glue layer
  385. + *
  386. + * Copyright (C) 2016 Neil Armstrong <[email protected]>
  387. + * Copyright (C) 2014 Daniel Golle <[email protected]>
  388. + * Copyright (C) 2013 Ma Haijun <[email protected]>
  389. + * Copyright (C) 2012 John Crispin <[email protected]>
  390. + *
  391. + * This program is free software; you can redistribute it and/or modify
  392. + * it under the terms of the GNU General Public License version 2 as
  393. + * published by the Free Software Foundation.
  394. + *
  395. + * You should have received a copy of the GNU General Public License
  396. + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  397. + */
  398. +
  399. +#include <linux/device.h>
  400. +#include <linux/io.h>
  401. +#include <linux/module.h>
  402. +#include <linux/of.h>
  403. +#include <linux/platform_device.h>
  404. +#include <linux/regmap.h>
  405. +#include <linux/mfd/syscon.h>
  406. +#include <linux/stmmac.h>
  407. +
  408. +#include "stmmac_platform.h"
  409. +
  410. +/* System Control regmap offsets */
  411. +#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78
  412. +#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100
  413. +
  414. +/* Control Register */
  415. +#define DWMAC_CKEN_RX_IN 14
  416. +#define DWMAC_CKEN_RXN_OUT 13
  417. +#define DWMAC_CKEN_RX_OUT 12
  418. +#define DWMAC_CKEN_TX_IN 10
  419. +#define DWMAC_CKEN_TXN_OUT 9
  420. +#define DWMAC_CKEN_TX_OUT 8
  421. +#define DWMAC_RX_SOURCE 7
  422. +#define DWMAC_TX_SOURCE 6
  423. +#define DWMAC_LOW_TX_SOURCE 4
  424. +#define DWMAC_AUTO_TX_SOURCE 3
  425. +#define DWMAC_RGMII 2
  426. +#define DWMAC_SIMPLE_MUX 1
  427. +#define DWMAC_CKEN_GTX 0
  428. +
  429. +/* Delay register */
  430. +#define DWMAC_TX_VARDELAY_SHIFT 0
  431. +#define DWMAC_TXN_VARDELAY_SHIFT 8
  432. +#define DWMAC_RX_VARDELAY_SHIFT 16
  433. +#define DWMAC_RXN_VARDELAY_SHIFT 24
  434. +#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT)
  435. +#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT)
  436. +#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
  437. +#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
  438. +
  439. +struct oxnas_dwmac {
  440. + struct device *dev;
  441. + struct clk *clk;
  442. + struct regmap *regmap;
  443. +};
  444. +
  445. +static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
  446. +{
  447. + struct oxnas_dwmac *dwmac = priv;
  448. + unsigned int value;
  449. + int ret;
  450. +
  451. + /* Reset HW here before changing the glue configuration */
  452. + ret = device_reset(dwmac->dev);
  453. + if (ret)
  454. + return ret;
  455. +
  456. + ret = clk_prepare_enable(dwmac->clk);
  457. + if (ret)
  458. + return ret;
  459. +
  460. + ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
  461. + if (ret < 0) {
  462. + clk_disable_unprepare(dwmac->clk);
  463. + return ret;
  464. + }
  465. +
  466. + /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
  467. + value |= BIT(DWMAC_CKEN_GTX) |
  468. + /* Use simple mux for 25/125 Mhz clock switching */
  469. + BIT(DWMAC_SIMPLE_MUX) |
  470. + /* set auto switch tx clock source */
  471. + BIT(DWMAC_AUTO_TX_SOURCE) |
  472. + /* enable tx & rx vardelay */
  473. + BIT(DWMAC_CKEN_TX_OUT) |
  474. + BIT(DWMAC_CKEN_TXN_OUT) |
  475. + BIT(DWMAC_CKEN_TX_IN) |
  476. + BIT(DWMAC_CKEN_RX_OUT) |
  477. + BIT(DWMAC_CKEN_RXN_OUT) |
  478. + BIT(DWMAC_CKEN_RX_IN);
  479. + regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
  480. +
  481. + /* set tx & rx vardelay */
  482. + value = DWMAC_TX_VARDELAY(4) |
  483. + DWMAC_TXN_VARDELAY(2) |
  484. + DWMAC_RX_VARDELAY(10) |
  485. + DWMAC_RXN_VARDELAY(8);
  486. + regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
  487. +
  488. + return 0;
  489. +}
  490. +
  491. +static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
  492. +{
  493. + struct oxnas_dwmac *dwmac = priv;
  494. +
  495. + clk_disable_unprepare(dwmac->clk);
  496. +}
  497. +
  498. +static int oxnas_dwmac_probe(struct platform_device *pdev)
  499. +{
  500. + struct plat_stmmacenet_data *plat_dat;
  501. + struct stmmac_resources stmmac_res;
  502. + struct oxnas_dwmac *dwmac;
  503. + int ret;
  504. +
  505. + ret = stmmac_get_platform_resources(pdev, &stmmac_res);
  506. + if (ret)
  507. + return ret;
  508. +
  509. + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
  510. + if (IS_ERR(plat_dat))
  511. + return PTR_ERR(plat_dat);
  512. +
  513. + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
  514. + if (!dwmac) {
  515. + ret = -ENOMEM;
  516. + goto err_remove_config_dt;
  517. + }
  518. +
  519. + dwmac->dev = &pdev->dev;
  520. + plat_dat->bsp_priv = dwmac;
  521. + plat_dat->init = oxnas_dwmac_init;
  522. + plat_dat->exit = oxnas_dwmac_exit;
  523. +
  524. + dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
  525. + "oxsemi,sys-ctrl");
  526. + if (IS_ERR(dwmac->regmap)) {
  527. + dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
  528. + ret = PTR_ERR(dwmac->regmap);
  529. + goto err_remove_config_dt;
  530. + }
  531. +
  532. + dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
  533. + if (IS_ERR(dwmac->clk)) {
  534. + ret = PTR_ERR(dwmac->clk);
  535. + goto err_remove_config_dt;
  536. + }
  537. +
  538. + ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
  539. + if (ret)
  540. + goto err_remove_config_dt;
  541. +
  542. + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
  543. + if (ret)
  544. + goto err_dwmac_exit;
  545. +
  546. +
  547. + return 0;
  548. +
  549. +err_dwmac_exit:
  550. + oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
  551. +err_remove_config_dt:
  552. + stmmac_remove_config_dt(pdev, plat_dat);
  553. +
  554. + return ret;
  555. +}
  556. +
  557. +static const struct of_device_id oxnas_dwmac_match[] = {
  558. + { .compatible = "oxsemi,ox820-dwmac" },
  559. + { }
  560. +};
  561. +MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
  562. +
  563. +static struct platform_driver oxnas_dwmac_driver = {
  564. + .probe = oxnas_dwmac_probe,
  565. + .remove = stmmac_pltfr_remove,
  566. + .driver = {
  567. + .name = "oxnas-dwmac",
  568. + .pm = &stmmac_pltfr_pm_ops,
  569. + .of_match_table = oxnas_dwmac_match,
  570. + },
  571. +};
  572. +module_platform_driver(oxnas_dwmac_driver);
  573. +
  574. +MODULE_AUTHOR("Neil Armstrong <[email protected]>");
  575. +MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
  576. +MODULE_LICENSE("GPL v2");
  577. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
  578. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
  579. @@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_pri
  580. int ret;
  581. struct device *dev = &bsp_priv->pdev->dev;
  582. + ret = gmac_clk_enable(bsp_priv, true);
  583. + if (ret)
  584. + return ret;
  585. +
  586. /*rmii or rgmii*/
  587. if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
  588. dev_info(dev, "init for RGMII\n");
  589. @@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_pri
  590. if (ret)
  591. return ret;
  592. - ret = gmac_clk_enable(bsp_priv, true);
  593. - if (ret)
  594. - return ret;
  595. -
  596. pm_runtime_enable(dev);
  597. pm_runtime_get_sync(dev);
  598. @@ -901,44 +901,6 @@ static void rk_gmac_powerdown(struct rk_
  599. gmac_clk_enable(gmac, false);
  600. }
  601. -static int rk_gmac_init(struct platform_device *pdev, void *priv)
  602. -{
  603. - struct rk_priv_data *bsp_priv = priv;
  604. -
  605. - return rk_gmac_powerup(bsp_priv);
  606. -}
  607. -
  608. -static void rk_gmac_exit(struct platform_device *pdev, void *priv)
  609. -{
  610. - struct rk_priv_data *bsp_priv = priv;
  611. -
  612. - rk_gmac_powerdown(bsp_priv);
  613. -}
  614. -
  615. -static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
  616. -{
  617. - struct rk_priv_data *bsp_priv = priv;
  618. -
  619. - /* Keep the PHY up if we use Wake-on-Lan. */
  620. - if (device_may_wakeup(&pdev->dev))
  621. - return;
  622. -
  623. - rk_gmac_powerdown(bsp_priv);
  624. - bsp_priv->suspended = true;
  625. -}
  626. -
  627. -static void rk_gmac_resume(struct platform_device *pdev, void *priv)
  628. -{
  629. - struct rk_priv_data *bsp_priv = priv;
  630. -
  631. - /* The PHY was up for Wake-on-Lan. */
  632. - if (!bsp_priv->suspended)
  633. - return;
  634. -
  635. - rk_gmac_powerup(bsp_priv);
  636. - bsp_priv->suspended = false;
  637. -}
  638. -
  639. static void rk_fix_speed(void *priv, unsigned int speed)
  640. {
  641. struct rk_priv_data *bsp_priv = priv;
  642. @@ -974,11 +936,7 @@ static int rk_gmac_probe(struct platform
  643. return PTR_ERR(plat_dat);
  644. plat_dat->has_gmac = true;
  645. - plat_dat->init = rk_gmac_init;
  646. - plat_dat->exit = rk_gmac_exit;
  647. plat_dat->fix_mac_speed = rk_fix_speed;
  648. - plat_dat->suspend = rk_gmac_suspend;
  649. - plat_dat->resume = rk_gmac_resume;
  650. plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
  651. if (IS_ERR(plat_dat->bsp_priv)) {
  652. @@ -986,24 +944,65 @@ static int rk_gmac_probe(struct platform
  653. goto err_remove_config_dt;
  654. }
  655. - ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
  656. + ret = rk_gmac_powerup(plat_dat->bsp_priv);
  657. if (ret)
  658. goto err_remove_config_dt;
  659. ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
  660. if (ret)
  661. - goto err_gmac_exit;
  662. + goto err_gmac_powerdown;
  663. return 0;
  664. -err_gmac_exit:
  665. - rk_gmac_exit(pdev, plat_dat->bsp_priv);
  666. +err_gmac_powerdown:
  667. + rk_gmac_powerdown(plat_dat->bsp_priv);
  668. err_remove_config_dt:
  669. stmmac_remove_config_dt(pdev, plat_dat);
  670. return ret;
  671. }
  672. +static int rk_gmac_remove(struct platform_device *pdev)
  673. +{
  674. + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
  675. + int ret = stmmac_dvr_remove(&pdev->dev);
  676. +
  677. + rk_gmac_powerdown(bsp_priv);
  678. +
  679. + return ret;
  680. +}
  681. +
  682. +#ifdef CONFIG_PM_SLEEP
  683. +static int rk_gmac_suspend(struct device *dev)
  684. +{
  685. + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
  686. + int ret = stmmac_suspend(dev);
  687. +
  688. + /* Keep the PHY up if we use Wake-on-Lan. */
  689. + if (!device_may_wakeup(dev)) {
  690. + rk_gmac_powerdown(bsp_priv);
  691. + bsp_priv->suspended = true;
  692. + }
  693. +
  694. + return ret;
  695. +}
  696. +
  697. +static int rk_gmac_resume(struct device *dev)
  698. +{
  699. + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
  700. +
  701. + /* The PHY was up for Wake-on-Lan. */
  702. + if (bsp_priv->suspended) {
  703. + rk_gmac_powerup(bsp_priv);
  704. + bsp_priv->suspended = false;
  705. + }
  706. +
  707. + return stmmac_resume(dev);
  708. +}
  709. +#endif /* CONFIG_PM_SLEEP */
  710. +
  711. +static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
  712. +
  713. static const struct of_device_id rk_gmac_dwmac_match[] = {
  714. { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
  715. { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
  716. @@ -1016,10 +1015,10 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_ma
  717. static struct platform_driver rk_gmac_dwmac_driver = {
  718. .probe = rk_gmac_probe,
  719. - .remove = stmmac_pltfr_remove,
  720. + .remove = rk_gmac_remove,
  721. .driver = {
  722. .name = "rk_gmac-dwmac",
  723. - .pm = &stmmac_pltfr_pm_ops,
  724. + .pm = &rk_gmac_pm_ops,
  725. .of_match_table = rk_gmac_dwmac_match,
  726. },
  727. };
  728. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
  729. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
  730. @@ -380,8 +380,8 @@ static int socfpga_dwmac_resume(struct d
  731. * control register 0, and can be modified by the phy driver
  732. * framework.
  733. */
  734. - if (priv->phydev)
  735. - phy_resume(priv->phydev);
  736. + if (ndev->phydev)
  737. + phy_resume(ndev->phydev);
  738. return stmmac_resume(dev);
  739. }
  740. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
  741. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
  742. @@ -126,8 +126,8 @@ struct sti_dwmac {
  743. struct clk *clk; /* PHY clock */
  744. u32 ctrl_reg; /* GMAC glue-logic control register */
  745. int clk_sel_reg; /* GMAC ext clk selection register */
  746. - struct device *dev;
  747. struct regmap *regmap;
  748. + bool gmac_en;
  749. u32 speed;
  750. void (*fix_retime_src)(void *priv, unsigned int speed);
  751. };
  752. @@ -191,7 +191,7 @@ static void stih4xx_fix_retime_src(void
  753. }
  754. }
  755. - if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
  756. + if (src == TX_RETIME_SRC_CLKGEN && freq)
  757. clk_set_rate(dwmac->clk, freq);
  758. regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
  759. @@ -222,26 +222,20 @@ static void stid127_fix_retime_src(void
  760. freq = DWMAC_2_5MHZ;
  761. }
  762. - if (dwmac->clk && freq)
  763. + if (freq)
  764. clk_set_rate(dwmac->clk, freq);
  765. regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
  766. }
  767. -static int sti_dwmac_init(struct platform_device *pdev, void *priv)
  768. +static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
  769. {
  770. - struct sti_dwmac *dwmac = priv;
  771. struct regmap *regmap = dwmac->regmap;
  772. int iface = dwmac->interface;
  773. - struct device *dev = dwmac->dev;
  774. - struct device_node *np = dev->of_node;
  775. u32 reg = dwmac->ctrl_reg;
  776. u32 val;
  777. - if (dwmac->clk)
  778. - clk_prepare_enable(dwmac->clk);
  779. -
  780. - if (of_property_read_bool(np, "st,gmac_en"))
  781. + if (dwmac->gmac_en)
  782. regmap_update_bits(regmap, reg, EN_MASK, EN);
  783. regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
  784. @@ -249,18 +243,11 @@ static int sti_dwmac_init(struct platfor
  785. val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
  786. regmap_update_bits(regmap, reg, ENMII_MASK, val);
  787. - dwmac->fix_retime_src(priv, dwmac->speed);
  788. + dwmac->fix_retime_src(dwmac, dwmac->speed);
  789. return 0;
  790. }
  791. -static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
  792. -{
  793. - struct sti_dwmac *dwmac = priv;
  794. -
  795. - if (dwmac->clk)
  796. - clk_disable_unprepare(dwmac->clk);
  797. -}
  798. static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
  799. struct platform_device *pdev)
  800. {
  801. @@ -270,9 +257,6 @@ static int sti_dwmac_parse_data(struct s
  802. struct regmap *regmap;
  803. int err;
  804. - if (!np)
  805. - return -EINVAL;
  806. -
  807. /* clk selection from extra syscfg register */
  808. dwmac->clk_sel_reg = -ENXIO;
  809. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
  810. @@ -289,9 +273,9 @@ static int sti_dwmac_parse_data(struct s
  811. return err;
  812. }
  813. - dwmac->dev = dev;
  814. dwmac->interface = of_get_phy_mode(np);
  815. dwmac->regmap = regmap;
  816. + dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
  817. dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
  818. dwmac->tx_retime_src = TX_RETIME_SRC_NA;
  819. dwmac->speed = SPEED_100;
  820. @@ -359,28 +343,65 @@ static int sti_dwmac_probe(struct platfo
  821. dwmac->fix_retime_src = data->fix_retime_src;
  822. plat_dat->bsp_priv = dwmac;
  823. - plat_dat->init = sti_dwmac_init;
  824. - plat_dat->exit = sti_dwmac_exit;
  825. plat_dat->fix_mac_speed = data->fix_retime_src;
  826. - ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
  827. + ret = clk_prepare_enable(dwmac->clk);
  828. if (ret)
  829. goto err_remove_config_dt;
  830. + ret = sti_dwmac_set_mode(dwmac);
  831. + if (ret)
  832. + goto disable_clk;
  833. +
  834. ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
  835. if (ret)
  836. - goto err_dwmac_exit;
  837. + goto disable_clk;
  838. return 0;
  839. -err_dwmac_exit:
  840. - sti_dwmac_exit(pdev, plat_dat->bsp_priv);
  841. +disable_clk:
  842. + clk_disable_unprepare(dwmac->clk);
  843. err_remove_config_dt:
  844. stmmac_remove_config_dt(pdev, plat_dat);
  845. return ret;
  846. }
  847. +static int sti_dwmac_remove(struct platform_device *pdev)
  848. +{
  849. + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
  850. + int ret = stmmac_dvr_remove(&pdev->dev);
  851. +
  852. + clk_disable_unprepare(dwmac->clk);
  853. +
  854. + return ret;
  855. +}
  856. +
  857. +#ifdef CONFIG_PM_SLEEP
  858. +static int sti_dwmac_suspend(struct device *dev)
  859. +{
  860. + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
  861. + int ret = stmmac_suspend(dev);
  862. +
  863. + clk_disable_unprepare(dwmac->clk);
  864. +
  865. + return ret;
  866. +}
  867. +
  868. +static int sti_dwmac_resume(struct device *dev)
  869. +{
  870. + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
  871. +
  872. + clk_prepare_enable(dwmac->clk);
  873. + sti_dwmac_set_mode(dwmac);
  874. +
  875. + return stmmac_resume(dev);
  876. +}
  877. +#endif /* CONFIG_PM_SLEEP */
  878. +
  879. +static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
  880. + sti_dwmac_resume);
  881. +
  882. static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
  883. .fix_retime_src = stih4xx_fix_retime_src,
  884. };
  885. @@ -400,10 +421,10 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match)
  886. static struct platform_driver sti_dwmac_driver = {
  887. .probe = sti_dwmac_probe,
  888. - .remove = stmmac_pltfr_remove,
  889. + .remove = sti_dwmac_remove,
  890. .driver = {
  891. .name = "sti-dwmac",
  892. - .pm = &stmmac_pltfr_pm_ops,
  893. + .pm = &sti_dwmac_pm_ops,
  894. .of_match_table = sti_dwmac_match,
  895. },
  896. };
  897. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
  898. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
  899. @@ -225,7 +225,7 @@ enum rx_tx_priority_ratio {
  900. #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
  901. #define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
  902. -#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
  903. +#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
  904. #define DMA_BUS_MODE_RPBL_SHIFT 17
  905. #define DMA_BUS_MODE_USP 0x00800000
  906. #define DMA_BUS_MODE_MAXPBL 0x01000000
  907. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
  908. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
  909. @@ -538,6 +538,12 @@ struct mac_device_info *dwmac1000_setup(
  910. mac->link.speed = GMAC_CONTROL_FES;
  911. mac->mii.addr = GMAC_MII_ADDR;
  912. mac->mii.data = GMAC_MII_DATA;
  913. + mac->mii.addr_shift = 11;
  914. + mac->mii.addr_mask = 0x0000F800;
  915. + mac->mii.reg_shift = 6;
  916. + mac->mii.reg_mask = 0x000007C0;
  917. + mac->mii.clk_csr_shift = 2;
  918. + mac->mii.clk_csr_mask = GENMASK(5, 2);
  919. /* Get and dump the chip ID */
  920. *synopsys_id = stmmac_get_synopsys_id(hwid);
  921. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
  922. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
  923. @@ -84,37 +84,39 @@ static void dwmac1000_dma_axi(void __iom
  924. writel(value, ioaddr + DMA_AXI_BUS_MODE);
  925. }
  926. -static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
  927. - int aal, u32 dma_tx, u32 dma_rx, int atds)
  928. +static void dwmac1000_dma_init(void __iomem *ioaddr,
  929. + struct stmmac_dma_cfg *dma_cfg,
  930. + u32 dma_tx, u32 dma_rx, int atds)
  931. {
  932. u32 value = readl(ioaddr + DMA_BUS_MODE);
  933. + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
  934. + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
  935. /*
  936. * Set the DMA PBL (Programmable Burst Length) mode.
  937. *
  938. * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
  939. * post 3.5 mode bit acts as 8*PBL.
  940. - *
  941. - * This configuration doesn't take care about the Separate PBL
  942. - * so only the bits: 13-8 are programmed with the PBL passed from the
  943. - * platform.
  944. */
  945. - value |= DMA_BUS_MODE_MAXPBL;
  946. - value &= ~DMA_BUS_MODE_PBL_MASK;
  947. - value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
  948. + if (dma_cfg->pblx8)
  949. + value |= DMA_BUS_MODE_MAXPBL;
  950. + value |= DMA_BUS_MODE_USP;
  951. + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
  952. + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
  953. + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
  954. /* Set the Fixed burst mode */
  955. - if (fb)
  956. + if (dma_cfg->fixed_burst)
  957. value |= DMA_BUS_MODE_FB;
  958. /* Mixed Burst has no effect when fb is set */
  959. - if (mb)
  960. + if (dma_cfg->mixed_burst)
  961. value |= DMA_BUS_MODE_MB;
  962. if (atds)
  963. value |= DMA_BUS_MODE_ATDS;
  964. - if (aal)
  965. + if (dma_cfg->aal)
  966. value |= DMA_BUS_MODE_AAL;
  967. writel(value, ioaddr + DMA_BUS_MODE);
  968. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
  969. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
  970. @@ -192,6 +192,13 @@ struct mac_device_info *dwmac100_setup(v
  971. mac->link.speed = 0;
  972. mac->mii.addr = MAC_MII_ADDR;
  973. mac->mii.data = MAC_MII_DATA;
  974. + mac->mii.addr_shift = 11;
  975. + mac->mii.addr_mask = 0x0000F800;
  976. + mac->mii.reg_shift = 6;
  977. + mac->mii.reg_mask = 0x000007C0;
  978. + mac->mii.clk_csr_shift = 2;
  979. + mac->mii.clk_csr_mask = GENMASK(5, 2);
  980. +
  981. /* Synopsys Id is not available on old chips */
  982. *synopsys_id = 0;
  983. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
  984. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
  985. @@ -32,11 +32,12 @@
  986. #include "dwmac100.h"
  987. #include "dwmac_dma.h"
  988. -static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
  989. - int aal, u32 dma_tx, u32 dma_rx, int atds)
  990. +static void dwmac100_dma_init(void __iomem *ioaddr,
  991. + struct stmmac_dma_cfg *dma_cfg,
  992. + u32 dma_tx, u32 dma_rx, int atds)
  993. {
  994. /* Enable Application Access by writing to DMA CSR0 */
  995. - writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
  996. + writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
  997. ioaddr + DMA_BUS_MODE);
  998. /* Mask interrupts by writing to CSR7 */
  999. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
  1000. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
  1001. @@ -155,8 +155,11 @@ enum power_event {
  1002. #define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
  1003. #define MTL_OP_MODE_RSF BIT(5)
  1004. +#define MTL_OP_MODE_TXQEN BIT(3)
  1005. #define MTL_OP_MODE_TSF BIT(1)
  1006. +#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
  1007. +
  1008. #define MTL_OP_MODE_TTC_MASK 0x70
  1009. #define MTL_OP_MODE_TTC_SHIFT 4
  1010. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
  1011. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
  1012. @@ -430,6 +430,12 @@ struct mac_device_info *dwmac4_setup(voi
  1013. mac->link.speed = GMAC_CONFIG_FES;
  1014. mac->mii.addr = GMAC_MDIO_ADDR;
  1015. mac->mii.data = GMAC_MDIO_DATA;
  1016. + mac->mii.addr_shift = 21;
  1017. + mac->mii.addr_mask = GENMASK(25, 21);
  1018. + mac->mii.reg_shift = 16;
  1019. + mac->mii.reg_mask = GENMASK(20, 16);
  1020. + mac->mii.clk_csr_shift = 8;
  1021. + mac->mii.clk_csr_mask = GENMASK(11, 8);
  1022. /* Get and dump the chip ID */
  1023. *synopsys_id = stmmac_get_synopsys_id(hwid);
  1024. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
  1025. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
  1026. @@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(v
  1027. unsigned int tdes3;
  1028. int ret = tx_done;
  1029. - tdes3 = p->des3;
  1030. + tdes3 = le32_to_cpu(p->des3);
  1031. /* Get tx owner first */
  1032. if (unlikely(tdes3 & TDES3_OWN))
  1033. @@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(v
  1034. struct dma_desc *p)
  1035. {
  1036. struct net_device_stats *stats = (struct net_device_stats *)data;
  1037. - unsigned int rdes1 = p->des1;
  1038. - unsigned int rdes2 = p->des2;
  1039. - unsigned int rdes3 = p->des3;
  1040. + unsigned int rdes1 = le32_to_cpu(p->des1);
  1041. + unsigned int rdes2 = le32_to_cpu(p->des2);
  1042. + unsigned int rdes3 = le32_to_cpu(p->des3);
  1043. int message_type;
  1044. int ret = good_frame;
  1045. @@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(v
  1046. static int dwmac4_rd_get_tx_len(struct dma_desc *p)
  1047. {
  1048. - return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
  1049. + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
  1050. }
  1051. static int dwmac4_get_tx_owner(struct dma_desc *p)
  1052. {
  1053. - return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
  1054. + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
  1055. }
  1056. static void dwmac4_set_tx_owner(struct dma_desc *p)
  1057. {
  1058. - p->des3 |= TDES3_OWN;
  1059. + p->des3 |= cpu_to_le32(TDES3_OWN);
  1060. }
  1061. static void dwmac4_set_rx_owner(struct dma_desc *p)
  1062. {
  1063. - p->des3 |= RDES3_OWN;
  1064. + p->des3 |= cpu_to_le32(RDES3_OWN);
  1065. }
  1066. static int dwmac4_get_tx_ls(struct dma_desc *p)
  1067. {
  1068. - return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
  1069. + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
  1070. + >> TDES3_LAST_DESCRIPTOR_SHIFT;
  1071. }
  1072. static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
  1073. {
  1074. - return (p->des3 & RDES3_PACKET_SIZE_MASK);
  1075. + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
  1076. }
  1077. static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
  1078. {
  1079. - p->des2 |= TDES2_TIMESTAMP_ENABLE;
  1080. + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
  1081. }
  1082. static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
  1083. {
  1084. /* Context type from W/B descriptor must be zero */
  1085. - if (p->des3 & TDES3_CONTEXT_TYPE)
  1086. + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
  1087. return -EINVAL;
  1088. /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
  1089. - if (p->des3 & TDES3_TIMESTAMP_STATUS)
  1090. + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
  1091. return 0;
  1092. return 1;
  1093. @@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(v
  1094. struct dma_desc *p = (struct dma_desc *)desc;
  1095. u64 ns;
  1096. - ns = p->des0;
  1097. + ns = le32_to_cpu(p->des0);
  1098. /* convert high/sec time stamp value to nanosecond */
  1099. - ns += p->des1 * 1000000000ULL;
  1100. + ns += le32_to_cpu(p->des1) * 1000000000ULL;
  1101. return ns;
  1102. }
  1103. @@ -264,7 +265,7 @@ static int dwmac4_wrback_get_rx_timestam
  1104. /* Get the status from normal w/b descriptor */
  1105. if (likely(p->des3 & TDES3_RS1V)) {
  1106. - if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
  1107. + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
  1108. int i = 0;
  1109. /* Check if timestamp is OK from context descriptor */
  1110. @@ -287,10 +288,10 @@ exit:
  1111. static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
  1112. int mode, int end)
  1113. {
  1114. - p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
  1115. + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
  1116. if (!disable_rx_ic)
  1117. - p->des3 |= RDES3_INT_ON_COMPLETION_EN;
  1118. + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
  1119. }
  1120. static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
  1121. @@ -305,9 +306,9 @@ static void dwmac4_rd_prepare_tx_desc(st
  1122. bool csum_flag, int mode, bool tx_own,
  1123. bool ls)
  1124. {
  1125. - unsigned int tdes3 = p->des3;
  1126. + unsigned int tdes3 = le32_to_cpu(p->des3);
  1127. - p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
  1128. + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
  1129. if (is_fs)
  1130. tdes3 |= TDES3_FIRST_DESCRIPTOR;
  1131. @@ -333,9 +334,9 @@ static void dwmac4_rd_prepare_tx_desc(st
  1132. * descriptors for the same frame has to be set before, to
  1133. * avoid race condition.
  1134. */
  1135. - wmb();
  1136. + dma_wmb();
  1137. - p->des3 = tdes3;
  1138. + p->des3 = cpu_to_le32(tdes3);
  1139. }
  1140. static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
  1141. @@ -343,14 +344,14 @@ static void dwmac4_rd_prepare_tso_tx_des
  1142. bool ls, unsigned int tcphdrlen,
  1143. unsigned int tcppayloadlen)
  1144. {
  1145. - unsigned int tdes3 = p->des3;
  1146. + unsigned int tdes3 = le32_to_cpu(p->des3);
  1147. if (len1)
  1148. - p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
  1149. + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
  1150. if (len2)
  1151. - p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
  1152. - & TDES2_BUFFER2_SIZE_MASK;
  1153. + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
  1154. + & TDES2_BUFFER2_SIZE_MASK);
  1155. if (is_fs) {
  1156. tdes3 |= TDES3_FIRST_DESCRIPTOR |
  1157. @@ -376,9 +377,9 @@ static void dwmac4_rd_prepare_tso_tx_des
  1158. * descriptors for the same frame has to be set before, to
  1159. * avoid race condition.
  1160. */
  1161. - wmb();
  1162. + dma_wmb();
  1163. - p->des3 = tdes3;
  1164. + p->des3 = cpu_to_le32(tdes3);
  1165. }
  1166. static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
  1167. @@ -389,7 +390,7 @@ static void dwmac4_release_tx_desc(struc
  1168. static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
  1169. {
  1170. - p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
  1171. + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
  1172. }
  1173. static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
  1174. @@ -402,7 +403,8 @@ static void dwmac4_display_ring(void *he
  1175. for (i = 0; i < size; i++) {
  1176. pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
  1177. i, (unsigned int)virt_to_phys(p),
  1178. - p->des0, p->des1, p->des2, p->des3);
  1179. + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
  1180. + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
  1181. p++;
  1182. }
  1183. }
  1184. @@ -411,8 +413,8 @@ static void dwmac4_set_mss_ctxt(struct d
  1185. {
  1186. p->des0 = 0;
  1187. p->des1 = 0;
  1188. - p->des2 = mss;
  1189. - p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
  1190. + p->des2 = cpu_to_le32(mss);
  1191. + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
  1192. }
  1193. const struct stmmac_desc_ops dwmac4_desc_ops = {
  1194. --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
  1195. +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
  1196. @@ -71,25 +71,29 @@ static void dwmac4_dma_axi(void __iomem
  1197. writel(value, ioaddr + DMA_SYS_BUS_MODE);
  1198. }
  1199. -static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
  1200. +static void dwmac4_dma_init_channel(void __iomem *ioaddr,
  1201. + struct stmmac_dma_cfg *dma_cfg,
  1202. u32 dma_tx_phy, u32 dma_rx_phy,
  1203. u32 channel)
  1204. {
  1205. u32 value;
  1206. + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
  1207. + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
  1208. /* set PBL for each channels. Currently we affect same configuration
  1209. * on each channel
  1210. */
  1211. value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
  1212. - value = value | DMA_BUS_MODE_PBL;
  1213. + if (dma_cfg->pblx8)
  1214. + value = value | DMA_BUS_MODE_PBL;
  1215. writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
  1216. value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
  1217. - value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
  1218. + value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
  1219. writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
  1220. value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
  1221. - value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
  1222. + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
  1223. writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
  1224. /* Mask interrupts by writing to CSR7 */
  1225. @@ -99,27 +103,28 @@ static void dwmac4_dma_init_channel(void
  1226. writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
  1227. }
  1228. -static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
  1229. - int aal, u32 dma_tx, u32 dma_rx, int atds)
  1230. +static void dwmac4_dma_init(void __iomem *ioaddr,
  1231. + struct stmmac_dma_cfg *dma_cfg,
  1232. + u32 dma_tx, u32 dma_rx, int atds)
  1233. {
  1234. u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
  1235. int i;
  1236. /* Set the Fixed burst mode */
  1237. - if (fb)
  1238. + if (dma_cfg->fixed_burst)
  1239. value |= DMA_SYS_BUS_FB;
  1240. /* Mixed Burst has no effect when fb is set */
  1241. - if (mb)
  1242. + if (dma_cfg->mixed_burst)
  1243. value |= DMA_SYS_BUS_MB;
  1244. - if (aal)
  1245. + if (dma_cfg->aal)
  1246. value |= DMA_SYS_BUS_AAL;
  1247. writel(value, ioaddr + DMA_SYS_BUS_MODE);
  1248. for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
  1249. - dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
  1250. + dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
  1251. }
  1252. static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
  1253. @@ -215,7 +220,17 @@ static void dwmac4_dma_chan_op_mode(void
  1254. else
  1255. mtl_tx_op |= MTL_OP_MODE_TTC_512;
  1256. }
  1257. -
  1258. + /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
  1259. + * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
  1260. + * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
  1261. + * with reset values: TXQEN off, TQS 256 bytes.
  1262. + *
  1263. + * Write the bits in both cases, since it will have no effect when RO.
  1264. + * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
  1265. + * be RO, however, writing the whole TQS field will result in a value
  1266. + * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
  1267. + */
  1268. + mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
  1269. writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
  1270. mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
  1271. --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
  1272. +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
  1273. @@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void *
  1274. struct dma_desc *p, void __iomem *ioaddr)
  1275. {
  1276. struct net_device_stats *stats = (struct net_device_stats *)data;
  1277. - unsigned int tdes0 = p->des0;
  1278. + unsigned int tdes0 = le32_to_cpu(p->des0);
  1279. int ret = tx_done;
  1280. /* Get tx owner first */
  1281. @@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void *
  1282. static int enh_desc_get_tx_len(struct dma_desc *p)
  1283. {
  1284. - return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
  1285. + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
  1286. }
  1287. static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
  1288. @@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_er
  1289. static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
  1290. struct dma_extended_desc *p)
  1291. {
  1292. - unsigned int rdes0 = p->basic.des0;
  1293. - unsigned int rdes4 = p->des4;
  1294. + unsigned int rdes0 = le32_to_cpu(p->basic.des0);
  1295. + unsigned int rdes4 = le32_to_cpu(p->des4);
  1296. if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
  1297. int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
  1298. @@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void *
  1299. struct dma_desc *p)
  1300. {
  1301. struct net_device_stats *stats = (struct net_device_stats *)data;
  1302. - unsigned int rdes0 = p->des0;
  1303. + unsigned int rdes0 = le32_to_cpu(p->des0);
  1304. int ret = good_frame;
  1305. if (unlikely(rdes0 & RDES0_OWN))
  1306. @@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void *
  1307. static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
  1308. int mode, int end)
  1309. {
  1310. - p->des0 |= RDES0_OWN;
  1311. - p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
  1312. + p->des0 |= cpu_to_le32(RDES0_OWN);
  1313. + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
  1314. if (mode == STMMAC_CHAIN_MODE)
  1315. ehn_desc_rx_set_on_chain(p);
  1316. @@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct
  1317. ehn_desc_rx_set_on_ring(p, end);
  1318. if (disable_rx_ic)
  1319. - p->des1 |= ERDES1_DISABLE_IC;
  1320. + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
  1321. }
  1322. static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
  1323. {
  1324. - p->des0 &= ~ETDES0_OWN;
  1325. + p->des0 &= cpu_to_le32(~ETDES0_OWN);
  1326. if (mode == STMMAC_CHAIN_MODE)
  1327. enh_desc_end_tx_desc_on_chain(p);
  1328. else
  1329. @@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct
  1330. static int enh_desc_get_tx_owner(struct dma_desc *p)
  1331. {
  1332. - return (p->des0 & ETDES0_OWN) >> 31;
  1333. + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
  1334. }
  1335. static void enh_desc_set_tx_owner(struct dma_desc *p)
  1336. {
  1337. - p->des0 |= ETDES0_OWN;
  1338. + p->des0 |= cpu_to_le32(ETDES0_OWN);
  1339. }
  1340. static void enh_desc_set_rx_owner(struct dma_desc *p)
  1341. {
  1342. - p->des0 |= RDES0_OWN;
  1343. + p->des0 |= cpu_to_le32(RDES0_OWN);
  1344. }
  1345. static int enh_desc_get_tx_ls(struct dma_desc *p)
  1346. {
  1347. - return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
  1348. + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
  1349. }
  1350. static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
  1351. {
  1352. - int ter = (p->des0 & ETDES0_END_RING) >> 21;
  1353. + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
  1354. memset(p, 0, offsetof(struct dma_desc, des2));
  1355. if (mode == STMMAC_CHAIN_MODE)
  1356. @@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(str
  1357. bool csum_flag, int mode, bool tx_own,
  1358. bool ls)
  1359. {
  1360. - unsigned int tdes0 = p->des0;
  1361. + unsigned int tdes0 = le32_to_cpu(p->des0);
  1362. if (mode == STMMAC_CHAIN_MODE)
  1363. enh_set_tx_desc_len_on_chain(p, len);
  1364. @@ -350,14 +350,14 @@ static void enh_desc_prepare_tx_desc(str
  1365. * descriptors for the same frame has to be set before, to
  1366. * avoid race condition.
  1367. */
  1368. - wmb();
  1369. + dma_wmb();
  1370. - p->des0 = tdes0;
  1371. + p->des0 = cpu_to_le32(tdes0);
  1372. }
  1373. static void enh_desc_set_tx_ic(struct dma_desc *p)
  1374. {
  1375. - p->des0 |= ETDES0_INTERRUPT;
  1376. + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
  1377. }
  1378. static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
  1379. @@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(str
  1380. if (rx_coe_type == STMMAC_RX_COE_TYPE1)
  1381. csum = 2;
  1382. - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
  1383. - csum);
  1384. + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
  1385. + >> RDES0_FRAME_LEN_SHIFT) - csum);
  1386. }
  1387. static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
  1388. {
  1389. - p->des0 |= ETDES0_TIME_STAMP_ENABLE;
  1390. + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
  1391. }
  1392. static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
  1393. {
  1394. - return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
  1395. + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
  1396. }
  1397. static u64 enh_desc_get_timestamp(void *desc, u32 ats)
  1398. @@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void *
  1399. if (ats) {
  1400. struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
  1401. - ns = p->des6;
  1402. + ns = le32_to_cpu(p->des6);
  1403. /* convert high/sec time stamp value to nanosecond */
  1404. - ns += p->des7 * 1000000000ULL;
  1405. + ns += le32_to_cpu(p->des7) * 1000000000ULL;
  1406. } else {
  1407. struct dma_desc *p = (struct dma_desc *)desc;
  1408. - ns = p->des2;
  1409. - ns += p->des3 * 1000000000ULL;
  1410. + ns = le32_to_cpu(p->des2);
  1411. + ns += le32_to_cpu(p->des3) * 1000000000ULL;
  1412. }
  1413. return ns;
  1414. @@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_sta
  1415. {
  1416. if (ats) {
  1417. struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
  1418. - return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
  1419. + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
  1420. } else {
  1421. struct dma_desc *p = (struct dma_desc *)desc;
  1422. - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
  1423. + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
  1424. + (le32_to_cpu(p->des3) == 0xffffffff))
  1425. /* timestamp is corrupted, hence don't store it */
  1426. return 0;
  1427. else
  1428. --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
  1429. +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
  1430. @@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *dat
  1431. struct dma_desc *p, void __iomem *ioaddr)
  1432. {
  1433. struct net_device_stats *stats = (struct net_device_stats *)data;
  1434. - unsigned int tdes0 = p->des0;
  1435. - unsigned int tdes1 = p->des1;
  1436. + unsigned int tdes0 = le32_to_cpu(p->des0);
  1437. + unsigned int tdes1 = le32_to_cpu(p->des1);
  1438. int ret = tx_done;
  1439. /* Get tx owner first */
  1440. @@ -77,7 +77,7 @@ static int ndesc_get_tx_status(void *dat
  1441. static int ndesc_get_tx_len(struct dma_desc *p)
  1442. {
  1443. - return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
  1444. + return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
  1445. }
  1446. /* This function verifies if each incoming frame has some errors
  1447. @@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *dat
  1448. struct dma_desc *p)
  1449. {
  1450. int ret = good_frame;
  1451. - unsigned int rdes0 = p->des0;
  1452. + unsigned int rdes0 = le32_to_cpu(p->des0);
  1453. struct net_device_stats *stats = (struct net_device_stats *)data;
  1454. if (unlikely(rdes0 & RDES0_OWN))
  1455. @@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *dat
  1456. static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
  1457. int end)
  1458. {
  1459. - p->des0 |= RDES0_OWN;
  1460. - p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
  1461. + p->des0 |= cpu_to_le32(RDES0_OWN);
  1462. + p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
  1463. if (mode == STMMAC_CHAIN_MODE)
  1464. ndesc_rx_set_on_chain(p, end);
  1465. @@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dm
  1466. ndesc_rx_set_on_ring(p, end);
  1467. if (disable_rx_ic)
  1468. - p->des1 |= RDES1_DISABLE_IC;
  1469. + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
  1470. }
  1471. static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
  1472. {
  1473. - p->des0 &= ~TDES0_OWN;
  1474. + p->des0 &= cpu_to_le32(~TDES0_OWN);
  1475. if (mode == STMMAC_CHAIN_MODE)
  1476. ndesc_tx_set_on_chain(p);
  1477. else
  1478. @@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dm
  1479. static int ndesc_get_tx_owner(struct dma_desc *p)
  1480. {
  1481. - return (p->des0 & TDES0_OWN) >> 31;
  1482. + return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
  1483. }
  1484. static void ndesc_set_tx_owner(struct dma_desc *p)
  1485. {
  1486. - p->des0 |= TDES0_OWN;
  1487. + p->des0 |= cpu_to_le32(TDES0_OWN);
  1488. }
  1489. static void ndesc_set_rx_owner(struct dma_desc *p)
  1490. {
  1491. - p->des0 |= RDES0_OWN;
  1492. + p->des0 |= cpu_to_le32(RDES0_OWN);
  1493. }
  1494. static int ndesc_get_tx_ls(struct dma_desc *p)
  1495. {
  1496. - return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
  1497. + return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
  1498. }
  1499. static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
  1500. {
  1501. - int ter = (p->des1 & TDES1_END_RING) >> 25;
  1502. + int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
  1503. memset(p, 0, offsetof(struct dma_desc, des2));
  1504. if (mode == STMMAC_CHAIN_MODE)
  1505. @@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct
  1506. bool csum_flag, int mode, bool tx_own,
  1507. bool ls)
  1508. {
  1509. - unsigned int tdes1 = p->des1;
  1510. + unsigned int tdes1 = le32_to_cpu(p->des1);
  1511. if (is_fs)
  1512. tdes1 |= TDES1_FIRST_SEGMENT;
  1513. @@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct
  1514. if (ls)
  1515. tdes1 |= TDES1_LAST_SEGMENT;
  1516. - p->des1 = tdes1;
  1517. + p->des1 = cpu_to_le32(tdes1);
  1518. if (mode == STMMAC_CHAIN_MODE)
  1519. norm_set_tx_desc_len_on_chain(p, len);
  1520. @@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct
  1521. norm_set_tx_desc_len_on_ring(p, len);
  1522. if (tx_own)
  1523. - p->des0 |= TDES0_OWN;
  1524. + p->des0 |= cpu_to_le32(TDES0_OWN);
  1525. }
  1526. static void ndesc_set_tx_ic(struct dma_desc *p)
  1527. {
  1528. - p->des1 |= TDES1_INTERRUPT;
  1529. + p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
  1530. }
  1531. static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
  1532. @@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct
  1533. if (rx_coe_type == STMMAC_RX_COE_TYPE1)
  1534. csum = 2;
  1535. - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
  1536. + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
  1537. + >> RDES0_FRAME_LEN_SHIFT) -
  1538. csum);
  1539. }
  1540. static void ndesc_enable_tx_timestamp(struct dma_desc *p)
  1541. {
  1542. - p->des1 |= TDES1_TIME_STAMP_ENABLE;
  1543. + p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
  1544. }
  1545. static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
  1546. {
  1547. - return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
  1548. + return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
  1549. }
  1550. static u64 ndesc_get_timestamp(void *desc, u32 ats)
  1551. @@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *des
  1552. struct dma_desc *p = (struct dma_desc *)desc;
  1553. u64 ns;
  1554. - ns = p->des2;
  1555. + ns = le32_to_cpu(p->des2);
  1556. /* convert high/sec time stamp value to nanosecond */
  1557. - ns += p->des3 * 1000000000ULL;
  1558. + ns += le32_to_cpu(p->des3) * 1000000000ULL;
  1559. return ns;
  1560. }
  1561. @@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status
  1562. {
  1563. struct dma_desc *p = (struct dma_desc *)desc;
  1564. - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
  1565. + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
  1566. + (le32_to_cpu(p->des3) == 0xffffffff))
  1567. /* timestamp is corrupted, hence don't store it */
  1568. return 0;
  1569. else
  1570. --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
  1571. +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
  1572. @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
  1573. unsigned int entry = priv->cur_tx;
  1574. struct dma_desc *desc;
  1575. unsigned int nopaged_len = skb_headlen(skb);
  1576. - unsigned int bmax, len;
  1577. + unsigned int bmax, len, des2;
  1578. if (priv->extend_desc)
  1579. desc = (struct dma_desc *)(priv->dma_etx + entry);
  1580. @@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, str
  1581. if (nopaged_len > BUF_SIZE_8KiB) {
  1582. - desc->des2 = dma_map_single(priv->device, skb->data,
  1583. - bmax, DMA_TO_DEVICE);
  1584. - if (dma_mapping_error(priv->device, desc->des2))
  1585. + des2 = dma_map_single(priv->device, skb->data, bmax,
  1586. + DMA_TO_DEVICE);
  1587. + desc->des2 = cpu_to_le32(des2);
  1588. + if (dma_mapping_error(priv->device, des2))
  1589. return -1;
  1590. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  1591. + priv->tx_skbuff_dma[entry].buf = des2;
  1592. priv->tx_skbuff_dma[entry].len = bmax;
  1593. priv->tx_skbuff_dma[entry].is_jumbo = true;
  1594. - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
  1595. + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
  1596. priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
  1597. STMMAC_RING_MODE, 0, false);
  1598. priv->tx_skbuff[entry] = NULL;
  1599. @@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, str
  1600. else
  1601. desc = priv->dma_tx + entry;
  1602. - desc->des2 = dma_map_single(priv->device, skb->data + bmax,
  1603. - len, DMA_TO_DEVICE);
  1604. - if (dma_mapping_error(priv->device, desc->des2))
  1605. + des2 = dma_map_single(priv->device, skb->data + bmax, len,
  1606. + DMA_TO_DEVICE);
  1607. + desc->des2 = cpu_to_le32(des2);
  1608. + if (dma_mapping_error(priv->device, des2))
  1609. return -1;
  1610. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  1611. + priv->tx_skbuff_dma[entry].buf = des2;
  1612. priv->tx_skbuff_dma[entry].len = len;
  1613. priv->tx_skbuff_dma[entry].is_jumbo = true;
  1614. - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
  1615. + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
  1616. priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
  1617. STMMAC_RING_MODE, 1, true);
  1618. } else {
  1619. - desc->des2 = dma_map_single(priv->device, skb->data,
  1620. - nopaged_len, DMA_TO_DEVICE);
  1621. - if (dma_mapping_error(priv->device, desc->des2))
  1622. + des2 = dma_map_single(priv->device, skb->data,
  1623. + nopaged_len, DMA_TO_DEVICE);
  1624. + desc->des2 = cpu_to_le32(des2);
  1625. + if (dma_mapping_error(priv->device, des2))
  1626. return -1;
  1627. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  1628. + priv->tx_skbuff_dma[entry].buf = des2;
  1629. priv->tx_skbuff_dma[entry].len = nopaged_len;
  1630. priv->tx_skbuff_dma[entry].is_jumbo = true;
  1631. - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
  1632. + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
  1633. priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
  1634. STMMAC_RING_MODE, 0, true);
  1635. }
  1636. @@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *pr
  1637. /* Fill DES3 in case of RING mode */
  1638. if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
  1639. - p->des3 = p->des2 + BUF_SIZE_8KiB;
  1640. + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
  1641. }
  1642. /* In ring mode we need to fill the desc3 because it is used as buffer */
  1643. static void stmmac_init_desc3(struct dma_desc *p)
  1644. {
  1645. - p->des3 = p->des2 + BUF_SIZE_8KiB;
  1646. + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
  1647. }
  1648. static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
  1649. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
  1650. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
  1651. @@ -64,7 +64,6 @@ struct stmmac_priv {
  1652. dma_addr_t dma_tx_phy;
  1653. int tx_coalesce;
  1654. int hwts_tx_en;
  1655. - spinlock_t tx_lock;
  1656. bool tx_path_in_lpi_mode;
  1657. struct timer_list txtimer;
  1658. bool tso;
  1659. @@ -90,7 +89,6 @@ struct stmmac_priv {
  1660. struct mac_device_info *hw;
  1661. spinlock_t lock;
  1662. - struct phy_device *phydev ____cacheline_aligned_in_smp;
  1663. int oldlink;
  1664. int speed;
  1665. int oldduplex;
  1666. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
  1667. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
  1668. @@ -263,7 +263,7 @@ static void stmmac_ethtool_getdrvinfo(st
  1669. {
  1670. struct stmmac_priv *priv = netdev_priv(dev);
  1671. - if (priv->plat->has_gmac)
  1672. + if (priv->plat->has_gmac || priv->plat->has_gmac4)
  1673. strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
  1674. else
  1675. strlcpy(info->driver, MAC100_ETHTOOL_NAME,
  1676. @@ -272,25 +272,26 @@ static void stmmac_ethtool_getdrvinfo(st
  1677. strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
  1678. }
  1679. -static int stmmac_ethtool_getsettings(struct net_device *dev,
  1680. - struct ethtool_cmd *cmd)
  1681. +static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
  1682. + struct ethtool_link_ksettings *cmd)
  1683. {
  1684. struct stmmac_priv *priv = netdev_priv(dev);
  1685. - struct phy_device *phy = priv->phydev;
  1686. + struct phy_device *phy = dev->phydev;
  1687. int rc;
  1688. if (priv->hw->pcs & STMMAC_PCS_RGMII ||
  1689. priv->hw->pcs & STMMAC_PCS_SGMII) {
  1690. struct rgmii_adv adv;
  1691. + u32 supported, advertising, lp_advertising;
  1692. if (!priv->xstats.pcs_link) {
  1693. - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
  1694. - cmd->duplex = DUPLEX_UNKNOWN;
  1695. + cmd->base.speed = SPEED_UNKNOWN;
  1696. + cmd->base.duplex = DUPLEX_UNKNOWN;
  1697. return 0;
  1698. }
  1699. - cmd->duplex = priv->xstats.pcs_duplex;
  1700. + cmd->base.duplex = priv->xstats.pcs_duplex;
  1701. - ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
  1702. + cmd->base.speed = priv->xstats.pcs_speed;
  1703. /* Get and convert ADV/LP_ADV from the HW AN registers */
  1704. if (!priv->hw->mac->pcs_get_adv_lp)
  1705. @@ -300,45 +301,59 @@ static int stmmac_ethtool_getsettings(st
  1706. /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
  1707. + ethtool_convert_link_mode_to_legacy_u32(
  1708. + &supported, cmd->link_modes.supported);
  1709. + ethtool_convert_link_mode_to_legacy_u32(
  1710. + &advertising, cmd->link_modes.advertising);
  1711. + ethtool_convert_link_mode_to_legacy_u32(
  1712. + &lp_advertising, cmd->link_modes.lp_advertising);
  1713. +
  1714. if (adv.pause & STMMAC_PCS_PAUSE)
  1715. - cmd->advertising |= ADVERTISED_Pause;
  1716. + advertising |= ADVERTISED_Pause;
  1717. if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
  1718. - cmd->advertising |= ADVERTISED_Asym_Pause;
  1719. + advertising |= ADVERTISED_Asym_Pause;
  1720. if (adv.lp_pause & STMMAC_PCS_PAUSE)
  1721. - cmd->lp_advertising |= ADVERTISED_Pause;
  1722. + lp_advertising |= ADVERTISED_Pause;
  1723. if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
  1724. - cmd->lp_advertising |= ADVERTISED_Asym_Pause;
  1725. + lp_advertising |= ADVERTISED_Asym_Pause;
  1726. /* Reg49[3] always set because ANE is always supported */
  1727. - cmd->autoneg = ADVERTISED_Autoneg;
  1728. - cmd->supported |= SUPPORTED_Autoneg;
  1729. - cmd->advertising |= ADVERTISED_Autoneg;
  1730. - cmd->lp_advertising |= ADVERTISED_Autoneg;
  1731. + cmd->base.autoneg = ADVERTISED_Autoneg;
  1732. + supported |= SUPPORTED_Autoneg;
  1733. + advertising |= ADVERTISED_Autoneg;
  1734. + lp_advertising |= ADVERTISED_Autoneg;
  1735. if (adv.duplex) {
  1736. - cmd->supported |= (SUPPORTED_1000baseT_Full |
  1737. - SUPPORTED_100baseT_Full |
  1738. - SUPPORTED_10baseT_Full);
  1739. - cmd->advertising |= (ADVERTISED_1000baseT_Full |
  1740. - ADVERTISED_100baseT_Full |
  1741. - ADVERTISED_10baseT_Full);
  1742. + supported |= (SUPPORTED_1000baseT_Full |
  1743. + SUPPORTED_100baseT_Full |
  1744. + SUPPORTED_10baseT_Full);
  1745. + advertising |= (ADVERTISED_1000baseT_Full |
  1746. + ADVERTISED_100baseT_Full |
  1747. + ADVERTISED_10baseT_Full);
  1748. } else {
  1749. - cmd->supported |= (SUPPORTED_1000baseT_Half |
  1750. - SUPPORTED_100baseT_Half |
  1751. - SUPPORTED_10baseT_Half);
  1752. - cmd->advertising |= (ADVERTISED_1000baseT_Half |
  1753. - ADVERTISED_100baseT_Half |
  1754. - ADVERTISED_10baseT_Half);
  1755. + supported |= (SUPPORTED_1000baseT_Half |
  1756. + SUPPORTED_100baseT_Half |
  1757. + SUPPORTED_10baseT_Half);
  1758. + advertising |= (ADVERTISED_1000baseT_Half |
  1759. + ADVERTISED_100baseT_Half |
  1760. + ADVERTISED_10baseT_Half);
  1761. }
  1762. if (adv.lp_duplex)
  1763. - cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
  1764. - ADVERTISED_100baseT_Full |
  1765. - ADVERTISED_10baseT_Full);
  1766. + lp_advertising |= (ADVERTISED_1000baseT_Full |
  1767. + ADVERTISED_100baseT_Full |
  1768. + ADVERTISED_10baseT_Full);
  1769. else
  1770. - cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
  1771. - ADVERTISED_100baseT_Half |
  1772. - ADVERTISED_10baseT_Half);
  1773. - cmd->port = PORT_OTHER;
  1774. + lp_advertising |= (ADVERTISED_1000baseT_Half |
  1775. + ADVERTISED_100baseT_Half |
  1776. + ADVERTISED_10baseT_Half);
  1777. + cmd->base.port = PORT_OTHER;
  1778. +
  1779. + ethtool_convert_legacy_u32_to_link_mode(
  1780. + cmd->link_modes.supported, supported);
  1781. + ethtool_convert_legacy_u32_to_link_mode(
  1782. + cmd->link_modes.advertising, advertising);
  1783. + ethtool_convert_legacy_u32_to_link_mode(
  1784. + cmd->link_modes.lp_advertising, lp_advertising);
  1785. return 0;
  1786. }
  1787. @@ -353,16 +368,16 @@ static int stmmac_ethtool_getsettings(st
  1788. "link speed / duplex setting\n", dev->name);
  1789. return -EBUSY;
  1790. }
  1791. - cmd->transceiver = XCVR_INTERNAL;
  1792. - rc = phy_ethtool_gset(phy, cmd);
  1793. + rc = phy_ethtool_ksettings_get(phy, cmd);
  1794. return rc;
  1795. }
  1796. -static int stmmac_ethtool_setsettings(struct net_device *dev,
  1797. - struct ethtool_cmd *cmd)
  1798. +static int
  1799. +stmmac_ethtool_set_link_ksettings(struct net_device *dev,
  1800. + const struct ethtool_link_ksettings *cmd)
  1801. {
  1802. struct stmmac_priv *priv = netdev_priv(dev);
  1803. - struct phy_device *phy = priv->phydev;
  1804. + struct phy_device *phy = dev->phydev;
  1805. int rc;
  1806. if (priv->hw->pcs & STMMAC_PCS_RGMII ||
  1807. @@ -370,7 +385,7 @@ static int stmmac_ethtool_setsettings(st
  1808. u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
  1809. /* Only support ANE */
  1810. - if (cmd->autoneg != AUTONEG_ENABLE)
  1811. + if (cmd->base.autoneg != AUTONEG_ENABLE)
  1812. return -EINVAL;
  1813. mask &= (ADVERTISED_1000baseT_Half |
  1814. @@ -391,9 +406,7 @@ static int stmmac_ethtool_setsettings(st
  1815. return 0;
  1816. }
  1817. - spin_lock(&priv->lock);
  1818. - rc = phy_ethtool_sset(phy, cmd);
  1819. - spin_unlock(&priv->lock);
  1820. + rc = phy_ethtool_ksettings_set(phy, cmd);
  1821. return rc;
  1822. }
  1823. @@ -433,7 +446,7 @@ static void stmmac_ethtool_gregs(struct
  1824. memset(reg_space, 0x0, REG_SPACE_SIZE);
  1825. - if (!priv->plat->has_gmac) {
  1826. + if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
  1827. /* MAC registers */
  1828. for (i = 0; i < 12; i++)
  1829. reg_space[i] = readl(priv->ioaddr + (i * 4));
  1830. @@ -471,12 +484,12 @@ stmmac_get_pauseparam(struct net_device
  1831. if (!adv_lp.pause)
  1832. return;
  1833. } else {
  1834. - if (!(priv->phydev->supported & SUPPORTED_Pause) ||
  1835. - !(priv->phydev->supported & SUPPORTED_Asym_Pause))
  1836. + if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
  1837. + !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
  1838. return;
  1839. }
  1840. - pause->autoneg = priv->phydev->autoneg;
  1841. + pause->autoneg = netdev->phydev->autoneg;
  1842. if (priv->flow_ctrl & FLOW_RX)
  1843. pause->rx_pause = 1;
  1844. @@ -490,7 +503,7 @@ stmmac_set_pauseparam(struct net_device
  1845. struct ethtool_pauseparam *pause)
  1846. {
  1847. struct stmmac_priv *priv = netdev_priv(netdev);
  1848. - struct phy_device *phy = priv->phydev;
  1849. + struct phy_device *phy = netdev->phydev;
  1850. int new_pause = FLOW_OFF;
  1851. if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
  1852. @@ -550,7 +563,7 @@ static void stmmac_get_ethtool_stats(str
  1853. }
  1854. }
  1855. if (priv->eee_enabled) {
  1856. - int val = phy_get_eee_err(priv->phydev);
  1857. + int val = phy_get_eee_err(dev->phydev);
  1858. if (val)
  1859. priv->xstats.phy_eee_wakeup_error_n = val;
  1860. }
  1861. @@ -669,7 +682,7 @@ static int stmmac_ethtool_op_get_eee(str
  1862. edata->eee_active = priv->eee_active;
  1863. edata->tx_lpi_timer = priv->tx_lpi_timer;
  1864. - return phy_ethtool_get_eee(priv->phydev, edata);
  1865. + return phy_ethtool_get_eee(dev->phydev, edata);
  1866. }
  1867. static int stmmac_ethtool_op_set_eee(struct net_device *dev,
  1868. @@ -694,7 +707,7 @@ static int stmmac_ethtool_op_set_eee(str
  1869. priv->tx_lpi_timer = edata->tx_lpi_timer;
  1870. }
  1871. - return phy_ethtool_set_eee(priv->phydev, edata);
  1872. + return phy_ethtool_set_eee(dev->phydev, edata);
  1873. }
  1874. static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
  1875. @@ -853,8 +866,6 @@ static int stmmac_set_tunable(struct net
  1876. static const struct ethtool_ops stmmac_ethtool_ops = {
  1877. .begin = stmmac_check_if_running,
  1878. .get_drvinfo = stmmac_ethtool_getdrvinfo,
  1879. - .get_settings = stmmac_ethtool_getsettings,
  1880. - .set_settings = stmmac_ethtool_setsettings,
  1881. .get_msglevel = stmmac_ethtool_getmsglevel,
  1882. .set_msglevel = stmmac_ethtool_setmsglevel,
  1883. .get_regs = stmmac_ethtool_gregs,
  1884. @@ -874,6 +885,8 @@ static const struct ethtool_ops stmmac_e
  1885. .set_coalesce = stmmac_set_coalesce,
  1886. .get_tunable = stmmac_get_tunable,
  1887. .set_tunable = stmmac_set_tunable,
  1888. + .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
  1889. + .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
  1890. };
  1891. void stmmac_set_ethtool_ops(struct net_device *netdev)
  1892. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
  1893. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
  1894. @@ -105,8 +105,8 @@ module_param(eee_timer, int, S_IRUGO | S
  1895. MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
  1896. #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
  1897. -/* By default the driver will use the ring mode to manage tx and rx descriptors
  1898. - * but passing this value so user can force to use the chain instead of the ring
  1899. +/* By default the driver will use the ring mode to manage tx and rx descriptors,
  1900. + * but allow user to force to use the chain instead of the ring
  1901. */
  1902. static unsigned int chain_mode;
  1903. module_param(chain_mode, int, S_IRUGO);
  1904. @@ -221,7 +221,8 @@ static inline u32 stmmac_rx_dirty(struct
  1905. */
  1906. static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
  1907. {
  1908. - struct phy_device *phydev = priv->phydev;
  1909. + struct net_device *ndev = priv->dev;
  1910. + struct phy_device *phydev = ndev->phydev;
  1911. if (likely(priv->plat->fix_mac_speed))
  1912. priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
  1913. @@ -279,6 +280,7 @@ static void stmmac_eee_ctrl_timer(unsign
  1914. */
  1915. bool stmmac_eee_init(struct stmmac_priv *priv)
  1916. {
  1917. + struct net_device *ndev = priv->dev;
  1918. unsigned long flags;
  1919. int interface = priv->plat->interface;
  1920. bool ret = false;
  1921. @@ -301,7 +303,7 @@ bool stmmac_eee_init(struct stmmac_priv
  1922. int tx_lpi_timer = priv->tx_lpi_timer;
  1923. /* Check if the PHY supports EEE */
  1924. - if (phy_init_eee(priv->phydev, 1)) {
  1925. + if (phy_init_eee(ndev->phydev, 1)) {
  1926. /* To manage at run-time if the EEE cannot be supported
  1927. * anymore (for example because the lp caps have been
  1928. * changed).
  1929. @@ -309,7 +311,7 @@ bool stmmac_eee_init(struct stmmac_priv
  1930. */
  1931. spin_lock_irqsave(&priv->lock, flags);
  1932. if (priv->eee_active) {
  1933. - pr_debug("stmmac: disable EEE\n");
  1934. + netdev_dbg(priv->dev, "disable EEE\n");
  1935. del_timer_sync(&priv->eee_ctrl_timer);
  1936. priv->hw->mac->set_eee_timer(priv->hw, 0,
  1937. tx_lpi_timer);
  1938. @@ -333,12 +335,12 @@ bool stmmac_eee_init(struct stmmac_priv
  1939. tx_lpi_timer);
  1940. }
  1941. /* Set HW EEE according to the speed */
  1942. - priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
  1943. + priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
  1944. ret = true;
  1945. spin_unlock_irqrestore(&priv->lock, flags);
  1946. - pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
  1947. + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
  1948. }
  1949. out:
  1950. return ret;
  1951. @@ -456,8 +458,8 @@ static int stmmac_hwtstamp_ioctl(struct
  1952. sizeof(struct hwtstamp_config)))
  1953. return -EFAULT;
  1954. - pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
  1955. - __func__, config.flags, config.tx_type, config.rx_filter);
  1956. + netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
  1957. + __func__, config.flags, config.tx_type, config.rx_filter);
  1958. /* reserved for future extensions */
  1959. if (config.flags)
  1960. @@ -712,7 +714,7 @@ static void stmmac_release_ptp(struct st
  1961. static void stmmac_adjust_link(struct net_device *dev)
  1962. {
  1963. struct stmmac_priv *priv = netdev_priv(dev);
  1964. - struct phy_device *phydev = priv->phydev;
  1965. + struct phy_device *phydev = dev->phydev;
  1966. unsigned long flags;
  1967. int new_state = 0;
  1968. unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
  1969. @@ -765,9 +767,9 @@ static void stmmac_adjust_link(struct ne
  1970. stmmac_hw_fix_mac_speed(priv);
  1971. break;
  1972. default:
  1973. - if (netif_msg_link(priv))
  1974. - pr_warn("%s: Speed (%d) not 10/100\n",
  1975. - dev->name, phydev->speed);
  1976. + netif_warn(priv, link, priv->dev,
  1977. + "Speed (%d) not 10/100\n",
  1978. + phydev->speed);
  1979. break;
  1980. }
  1981. @@ -820,10 +822,10 @@ static void stmmac_check_pcs_mode(struct
  1982. (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
  1983. (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
  1984. (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
  1985. - pr_debug("STMMAC: PCS RGMII support enable\n");
  1986. + netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
  1987. priv->hw->pcs = STMMAC_PCS_RGMII;
  1988. } else if (interface == PHY_INTERFACE_MODE_SGMII) {
  1989. - pr_debug("STMMAC: PCS SGMII support enable\n");
  1990. + netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
  1991. priv->hw->pcs = STMMAC_PCS_SGMII;
  1992. }
  1993. }
  1994. @@ -858,15 +860,15 @@ static int stmmac_init_phy(struct net_de
  1995. snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
  1996. priv->plat->phy_addr);
  1997. - pr_debug("stmmac_init_phy: trying to attach to %s\n",
  1998. - phy_id_fmt);
  1999. + netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
  2000. + phy_id_fmt);
  2001. phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
  2002. interface);
  2003. }
  2004. if (IS_ERR_OR_NULL(phydev)) {
  2005. - pr_err("%s: Could not attach to PHY\n", dev->name);
  2006. + netdev_err(priv->dev, "Could not attach to PHY\n");
  2007. if (!phydev)
  2008. return -ENODEV;
  2009. @@ -899,10 +901,8 @@ static int stmmac_init_phy(struct net_de
  2010. if (phydev->is_pseudo_fixed_link)
  2011. phydev->irq = PHY_POLL;
  2012. - pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
  2013. - " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
  2014. -
  2015. - priv->phydev = phydev;
  2016. + netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
  2017. + __func__, phydev->phy_id, phydev->link);
  2018. return 0;
  2019. }
  2020. @@ -988,7 +988,8 @@ static int stmmac_init_rx_buffers(struct
  2021. skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
  2022. if (!skb) {
  2023. - pr_err("%s: Rx init fails; skb is NULL\n", __func__);
  2024. + netdev_err(priv->dev,
  2025. + "%s: Rx init fails; skb is NULL\n", __func__);
  2026. return -ENOMEM;
  2027. }
  2028. priv->rx_skbuff[i] = skb;
  2029. @@ -996,15 +997,15 @@ static int stmmac_init_rx_buffers(struct
  2030. priv->dma_buf_sz,
  2031. DMA_FROM_DEVICE);
  2032. if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
  2033. - pr_err("%s: DMA mapping error\n", __func__);
  2034. + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
  2035. dev_kfree_skb_any(skb);
  2036. return -EINVAL;
  2037. }
  2038. if (priv->synopsys_id >= DWMAC_CORE_4_00)
  2039. - p->des0 = priv->rx_skbuff_dma[i];
  2040. + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
  2041. else
  2042. - p->des2 = priv->rx_skbuff_dma[i];
  2043. + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
  2044. if ((priv->hw->mode->init_desc3) &&
  2045. (priv->dma_buf_sz == BUF_SIZE_16KiB))
  2046. @@ -1046,13 +1047,14 @@ static int init_dma_desc_rings(struct ne
  2047. priv->dma_buf_sz = bfsize;
  2048. - if (netif_msg_probe(priv)) {
  2049. - pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
  2050. - (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
  2051. + netif_dbg(priv, probe, priv->dev,
  2052. + "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
  2053. + __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
  2054. +
  2055. + /* RX INITIALIZATION */
  2056. + netif_dbg(priv, probe, priv->dev,
  2057. + "SKB addresses:\nskb\t\tskb data\tdma data\n");
  2058. - /* RX INITIALIZATION */
  2059. - pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
  2060. - }
  2061. for (i = 0; i < DMA_RX_SIZE; i++) {
  2062. struct dma_desc *p;
  2063. if (priv->extend_desc)
  2064. @@ -1064,10 +1066,9 @@ static int init_dma_desc_rings(struct ne
  2065. if (ret)
  2066. goto err_init_rx_buffers;
  2067. - if (netif_msg_probe(priv))
  2068. - pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
  2069. - priv->rx_skbuff[i]->data,
  2070. - (unsigned int)priv->rx_skbuff_dma[i]);
  2071. + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
  2072. + priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
  2073. + (unsigned int)priv->rx_skbuff_dma[i]);
  2074. }
  2075. priv->cur_rx = 0;
  2076. priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
  2077. @@ -1322,7 +1323,7 @@ static void stmmac_tx_clean(struct stmma
  2078. unsigned int bytes_compl = 0, pkts_compl = 0;
  2079. unsigned int entry = priv->dirty_tx;
  2080. - spin_lock(&priv->tx_lock);
  2081. + netif_tx_lock(priv->dev);
  2082. priv->xstats.tx_clean++;
  2083. @@ -1398,22 +1399,17 @@ static void stmmac_tx_clean(struct stmma
  2084. netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
  2085. if (unlikely(netif_queue_stopped(priv->dev) &&
  2086. - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
  2087. - netif_tx_lock(priv->dev);
  2088. - if (netif_queue_stopped(priv->dev) &&
  2089. - stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
  2090. - if (netif_msg_tx_done(priv))
  2091. - pr_debug("%s: restart transmit\n", __func__);
  2092. - netif_wake_queue(priv->dev);
  2093. - }
  2094. - netif_tx_unlock(priv->dev);
  2095. + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
  2096. + netif_dbg(priv, tx_done, priv->dev,
  2097. + "%s: restart transmit\n", __func__);
  2098. + netif_wake_queue(priv->dev);
  2099. }
  2100. if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
  2101. stmmac_enable_eee_mode(priv);
  2102. mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
  2103. }
  2104. - spin_unlock(&priv->tx_lock);
  2105. + netif_tx_unlock(priv->dev);
  2106. }
  2107. static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
  2108. @@ -1517,7 +1513,7 @@ static void stmmac_mmc_setup(struct stmm
  2109. dwmac_mmc_ctrl(priv->mmcaddr, mode);
  2110. memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
  2111. } else
  2112. - pr_info(" No MAC Management Counters available\n");
  2113. + netdev_info(priv->dev, "No MAC Management Counters available\n");
  2114. }
  2115. /**
  2116. @@ -1530,18 +1526,18 @@ static void stmmac_mmc_setup(struct stmm
  2117. static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
  2118. {
  2119. if (priv->plat->enh_desc) {
  2120. - pr_info(" Enhanced/Alternate descriptors\n");
  2121. + dev_info(priv->device, "Enhanced/Alternate descriptors\n");
  2122. /* GMAC older than 3.50 has no extended descriptors */
  2123. if (priv->synopsys_id >= DWMAC_CORE_3_50) {
  2124. - pr_info("\tEnabled extended descriptors\n");
  2125. + dev_info(priv->device, "Enabled extended descriptors\n");
  2126. priv->extend_desc = 1;
  2127. } else
  2128. - pr_warn("Extended descriptors not supported\n");
  2129. + dev_warn(priv->device, "Extended descriptors not supported\n");
  2130. priv->hw->desc = &enh_desc_ops;
  2131. } else {
  2132. - pr_info(" Normal descriptors\n");
  2133. + dev_info(priv->device, "Normal descriptors\n");
  2134. priv->hw->desc = &ndesc_ops;
  2135. }
  2136. }
  2137. @@ -1582,8 +1578,8 @@ static void stmmac_check_ether_addr(stru
  2138. priv->dev->dev_addr, 0);
  2139. if (!is_valid_ether_addr(priv->dev->dev_addr))
  2140. eth_hw_addr_random(priv->dev);
  2141. - pr_info("%s: device MAC address %pM\n", priv->dev->name,
  2142. - priv->dev->dev_addr);
  2143. + netdev_info(priv->dev, "device MAC address %pM\n",
  2144. + priv->dev->dev_addr);
  2145. }
  2146. }
  2147. @@ -1597,16 +1593,12 @@ static void stmmac_check_ether_addr(stru
  2148. */
  2149. static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  2150. {
  2151. - int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
  2152. - int mixed_burst = 0;
  2153. int atds = 0;
  2154. int ret = 0;
  2155. - if (priv->plat->dma_cfg) {
  2156. - pbl = priv->plat->dma_cfg->pbl;
  2157. - fixed_burst = priv->plat->dma_cfg->fixed_burst;
  2158. - mixed_burst = priv->plat->dma_cfg->mixed_burst;
  2159. - aal = priv->plat->dma_cfg->aal;
  2160. + if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
  2161. + dev_err(priv->device, "Invalid DMA configuration\n");
  2162. + return -EINVAL;
  2163. }
  2164. if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
  2165. @@ -1618,8 +1610,8 @@ static int stmmac_init_dma_engine(struct
  2166. return ret;
  2167. }
  2168. - priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
  2169. - aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
  2170. + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
  2171. + priv->dma_tx_phy, priv->dma_rx_phy, atds);
  2172. if (priv->synopsys_id >= DWMAC_CORE_4_00) {
  2173. priv->rx_tail_addr = priv->dma_rx_phy +
  2174. @@ -1691,7 +1683,8 @@ static int stmmac_hw_setup(struct net_de
  2175. /* DMA initialization and SW reset */
  2176. ret = stmmac_init_dma_engine(priv);
  2177. if (ret < 0) {
  2178. - pr_err("%s: DMA engine initialization failed\n", __func__);
  2179. + netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
  2180. + __func__);
  2181. return ret;
  2182. }
  2183. @@ -1720,7 +1713,7 @@ static int stmmac_hw_setup(struct net_de
  2184. ret = priv->hw->mac->rx_ipc(priv->hw);
  2185. if (!ret) {
  2186. - pr_warn(" RX IPC Checksum Offload disabled\n");
  2187. + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
  2188. priv->plat->rx_coe = STMMAC_RX_COE_NONE;
  2189. priv->hw->rx_csum = 0;
  2190. }
  2191. @@ -1745,10 +1738,11 @@ static int stmmac_hw_setup(struct net_de
  2192. #ifdef CONFIG_DEBUG_FS
  2193. ret = stmmac_init_fs(dev);
  2194. if (ret < 0)
  2195. - pr_warn("%s: failed debugFS registration\n", __func__);
  2196. + netdev_warn(priv->dev, "%s: failed debugFS registration\n",
  2197. + __func__);
  2198. #endif
  2199. /* Start the ball rolling... */
  2200. - pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
  2201. + netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
  2202. priv->hw->dma->start_tx(priv->ioaddr);
  2203. priv->hw->dma->start_rx(priv->ioaddr);
  2204. @@ -1803,8 +1797,9 @@ static int stmmac_open(struct net_device
  2205. priv->hw->pcs != STMMAC_PCS_RTBI) {
  2206. ret = stmmac_init_phy(dev);
  2207. if (ret) {
  2208. - pr_err("%s: Cannot attach to PHY (error: %d)\n",
  2209. - __func__, ret);
  2210. + netdev_err(priv->dev,
  2211. + "%s: Cannot attach to PHY (error: %d)\n",
  2212. + __func__, ret);
  2213. return ret;
  2214. }
  2215. }
  2216. @@ -1819,33 +1814,36 @@ static int stmmac_open(struct net_device
  2217. ret = alloc_dma_desc_resources(priv);
  2218. if (ret < 0) {
  2219. - pr_err("%s: DMA descriptors allocation failed\n", __func__);
  2220. + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
  2221. + __func__);
  2222. goto dma_desc_error;
  2223. }
  2224. ret = init_dma_desc_rings(dev, GFP_KERNEL);
  2225. if (ret < 0) {
  2226. - pr_err("%s: DMA descriptors initialization failed\n", __func__);
  2227. + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
  2228. + __func__);
  2229. goto init_error;
  2230. }
  2231. ret = stmmac_hw_setup(dev, true);
  2232. if (ret < 0) {
  2233. - pr_err("%s: Hw setup failed\n", __func__);
  2234. + netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
  2235. goto init_error;
  2236. }
  2237. stmmac_init_tx_coalesce(priv);
  2238. - if (priv->phydev)
  2239. - phy_start(priv->phydev);
  2240. + if (dev->phydev)
  2241. + phy_start(dev->phydev);
  2242. /* Request the IRQ lines */
  2243. ret = request_irq(dev->irq, stmmac_interrupt,
  2244. IRQF_SHARED, dev->name, dev);
  2245. if (unlikely(ret < 0)) {
  2246. - pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
  2247. - __func__, dev->irq, ret);
  2248. + netdev_err(priv->dev,
  2249. + "%s: ERROR: allocating the IRQ %d (error: %d)\n",
  2250. + __func__, dev->irq, ret);
  2251. goto init_error;
  2252. }
  2253. @@ -1854,8 +1852,9 @@ static int stmmac_open(struct net_device
  2254. ret = request_irq(priv->wol_irq, stmmac_interrupt,
  2255. IRQF_SHARED, dev->name, dev);
  2256. if (unlikely(ret < 0)) {
  2257. - pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
  2258. - __func__, priv->wol_irq, ret);
  2259. + netdev_err(priv->dev,
  2260. + "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
  2261. + __func__, priv->wol_irq, ret);
  2262. goto wolirq_error;
  2263. }
  2264. }
  2265. @@ -1865,8 +1864,9 @@ static int stmmac_open(struct net_device
  2266. ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
  2267. dev->name, dev);
  2268. if (unlikely(ret < 0)) {
  2269. - pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
  2270. - __func__, priv->lpi_irq, ret);
  2271. + netdev_err(priv->dev,
  2272. + "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
  2273. + __func__, priv->lpi_irq, ret);
  2274. goto lpiirq_error;
  2275. }
  2276. }
  2277. @@ -1885,8 +1885,8 @@ wolirq_error:
  2278. init_error:
  2279. free_dma_desc_resources(priv);
  2280. dma_desc_error:
  2281. - if (priv->phydev)
  2282. - phy_disconnect(priv->phydev);
  2283. + if (dev->phydev)
  2284. + phy_disconnect(dev->phydev);
  2285. return ret;
  2286. }
  2287. @@ -1905,10 +1905,9 @@ static int stmmac_release(struct net_dev
  2288. del_timer_sync(&priv->eee_ctrl_timer);
  2289. /* Stop and disconnect the PHY */
  2290. - if (priv->phydev) {
  2291. - phy_stop(priv->phydev);
  2292. - phy_disconnect(priv->phydev);
  2293. - priv->phydev = NULL;
  2294. + if (dev->phydev) {
  2295. + phy_stop(dev->phydev);
  2296. + phy_disconnect(dev->phydev);
  2297. }
  2298. netif_stop_queue(dev);
  2299. @@ -1968,13 +1967,13 @@ static void stmmac_tso_allocator(struct
  2300. priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
  2301. desc = priv->dma_tx + priv->cur_tx;
  2302. - desc->des0 = des + (total_len - tmp_len);
  2303. + desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
  2304. buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
  2305. TSO_MAX_BUFF_SIZE : tmp_len;
  2306. priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
  2307. 0, 1,
  2308. - (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
  2309. + (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
  2310. 0, 0);
  2311. tmp_len -= TSO_MAX_BUFF_SIZE;
  2312. @@ -2019,8 +2018,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2313. u8 proto_hdr_len;
  2314. int i;
  2315. - spin_lock(&priv->tx_lock);
  2316. -
  2317. /* Compute header lengths */
  2318. proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  2319. @@ -2030,9 +2027,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2320. if (!netif_queue_stopped(dev)) {
  2321. netif_stop_queue(dev);
  2322. /* This is a hard error, log it. */
  2323. - pr_err("%s: Tx Ring full when queue awake\n", __func__);
  2324. + netdev_err(priv->dev,
  2325. + "%s: Tx Ring full when queue awake\n",
  2326. + __func__);
  2327. }
  2328. - spin_unlock(&priv->tx_lock);
  2329. return NETDEV_TX_BUSY;
  2330. }
  2331. @@ -2070,11 +2068,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2332. priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
  2333. priv->tx_skbuff[first_entry] = skb;
  2334. - first->des0 = des;
  2335. + first->des0 = cpu_to_le32(des);
  2336. /* Fill start of payload in buff2 of first descriptor */
  2337. if (pay_len)
  2338. - first->des1 = des + proto_hdr_len;
  2339. + first->des1 = cpu_to_le32(des + proto_hdr_len);
  2340. /* If needed take extra descriptors to fill the remaining payload */
  2341. tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
  2342. @@ -2103,8 +2101,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2343. priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
  2344. if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
  2345. - if (netif_msg_hw(priv))
  2346. - pr_debug("%s: stop transmitted packets\n", __func__);
  2347. + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
  2348. + __func__);
  2349. netif_stop_queue(dev);
  2350. }
  2351. @@ -2155,7 +2153,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2352. * descriptor and then barrier is needed to make sure that
  2353. * all is coherent before granting the DMA engine.
  2354. */
  2355. - smp_wmb();
  2356. + dma_wmb();
  2357. if (netif_msg_pktdata(priv)) {
  2358. pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
  2359. @@ -2174,11 +2172,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
  2360. priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
  2361. STMMAC_CHAN0);
  2362. - spin_unlock(&priv->tx_lock);
  2363. return NETDEV_TX_OK;
  2364. dma_map_err:
  2365. - spin_unlock(&priv->tx_lock);
  2366. dev_err(priv->device, "Tx dma map failed\n");
  2367. dev_kfree_skb(skb);
  2368. priv->dev->stats.tx_dropped++;
  2369. @@ -2210,14 +2206,13 @@ static netdev_tx_t stmmac_xmit(struct sk
  2370. return stmmac_tso_xmit(skb, dev);
  2371. }
  2372. - spin_lock(&priv->tx_lock);
  2373. -
  2374. if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
  2375. - spin_unlock(&priv->tx_lock);
  2376. if (!netif_queue_stopped(dev)) {
  2377. netif_stop_queue(dev);
  2378. /* This is a hard error, log it. */
  2379. - pr_err("%s: Tx Ring full when queue awake\n", __func__);
  2380. + netdev_err(priv->dev,
  2381. + "%s: Tx Ring full when queue awake\n",
  2382. + __func__);
  2383. }
  2384. return NETDEV_TX_BUSY;
  2385. }
  2386. @@ -2270,13 +2265,11 @@ static netdev_tx_t stmmac_xmit(struct sk
  2387. priv->tx_skbuff[entry] = NULL;
  2388. - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
  2389. - desc->des0 = des;
  2390. - priv->tx_skbuff_dma[entry].buf = desc->des0;
  2391. - } else {
  2392. - desc->des2 = des;
  2393. - priv->tx_skbuff_dma[entry].buf = desc->des2;
  2394. - }
  2395. + priv->tx_skbuff_dma[entry].buf = des;
  2396. + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
  2397. + desc->des0 = cpu_to_le32(des);
  2398. + else
  2399. + desc->des2 = cpu_to_le32(des);
  2400. priv->tx_skbuff_dma[entry].map_as_page = true;
  2401. priv->tx_skbuff_dma[entry].len = len;
  2402. @@ -2294,9 +2287,10 @@ static netdev_tx_t stmmac_xmit(struct sk
  2403. if (netif_msg_pktdata(priv)) {
  2404. void *tx_head;
  2405. - pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
  2406. - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
  2407. - entry, first, nfrags);
  2408. + netdev_dbg(priv->dev,
  2409. + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
  2410. + __func__, priv->cur_tx, priv->dirty_tx, first_entry,
  2411. + entry, first, nfrags);
  2412. if (priv->extend_desc)
  2413. tx_head = (void *)priv->dma_etx;
  2414. @@ -2305,13 +2299,13 @@ static netdev_tx_t stmmac_xmit(struct sk
  2415. priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
  2416. - pr_debug(">>> frame to be transmitted: ");
  2417. + netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
  2418. print_pkt(skb->data, skb->len);
  2419. }
  2420. if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
  2421. - if (netif_msg_hw(priv))
  2422. - pr_debug("%s: stop transmitted packets\n", __func__);
  2423. + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
  2424. + __func__);
  2425. netif_stop_queue(dev);
  2426. }
  2427. @@ -2347,13 +2341,11 @@ static netdev_tx_t stmmac_xmit(struct sk
  2428. if (dma_mapping_error(priv->device, des))
  2429. goto dma_map_err;
  2430. - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
  2431. - first->des0 = des;
  2432. - priv->tx_skbuff_dma[first_entry].buf = first->des0;
  2433. - } else {
  2434. - first->des2 = des;
  2435. - priv->tx_skbuff_dma[first_entry].buf = first->des2;
  2436. - }
  2437. + priv->tx_skbuff_dma[first_entry].buf = des;
  2438. + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
  2439. + first->des0 = cpu_to_le32(des);
  2440. + else
  2441. + first->des2 = cpu_to_le32(des);
  2442. priv->tx_skbuff_dma[first_entry].len = nopaged_len;
  2443. priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
  2444. @@ -2374,7 +2366,7 @@ static netdev_tx_t stmmac_xmit(struct sk
  2445. * descriptor and then barrier is needed to make sure that
  2446. * all is coherent before granting the DMA engine.
  2447. */
  2448. - smp_wmb();
  2449. + dma_wmb();
  2450. }
  2451. netdev_sent_queue(dev, skb->len);
  2452. @@ -2385,12 +2377,10 @@ static netdev_tx_t stmmac_xmit(struct sk
  2453. priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
  2454. STMMAC_CHAN0);
  2455. - spin_unlock(&priv->tx_lock);
  2456. return NETDEV_TX_OK;
  2457. dma_map_err:
  2458. - spin_unlock(&priv->tx_lock);
  2459. - dev_err(priv->device, "Tx dma map failed\n");
  2460. + netdev_err(priv->dev, "Tx DMA map failed\n");
  2461. dev_kfree_skb(skb);
  2462. priv->dev->stats.tx_dropped++;
  2463. return NETDEV_TX_OK;
  2464. @@ -2461,16 +2451,16 @@ static inline void stmmac_rx_refill(stru
  2465. DMA_FROM_DEVICE);
  2466. if (dma_mapping_error(priv->device,
  2467. priv->rx_skbuff_dma[entry])) {
  2468. - dev_err(priv->device, "Rx dma map failed\n");
  2469. + netdev_err(priv->dev, "Rx DMA map failed\n");
  2470. dev_kfree_skb(skb);
  2471. break;
  2472. }
  2473. if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
  2474. - p->des0 = priv->rx_skbuff_dma[entry];
  2475. + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
  2476. p->des1 = 0;
  2477. } else {
  2478. - p->des2 = priv->rx_skbuff_dma[entry];
  2479. + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
  2480. }
  2481. if (priv->hw->mode->refill_desc3)
  2482. priv->hw->mode->refill_desc3(priv, p);
  2483. @@ -2478,17 +2468,17 @@ static inline void stmmac_rx_refill(stru
  2484. if (priv->rx_zeroc_thresh > 0)
  2485. priv->rx_zeroc_thresh--;
  2486. - if (netif_msg_rx_status(priv))
  2487. - pr_debug("\trefill entry #%d\n", entry);
  2488. + netif_dbg(priv, rx_status, priv->dev,
  2489. + "refill entry #%d\n", entry);
  2490. }
  2491. - wmb();
  2492. + dma_wmb();
  2493. if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
  2494. priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
  2495. else
  2496. priv->hw->desc->set_rx_owner(p);
  2497. - wmb();
  2498. + dma_wmb();
  2499. entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
  2500. }
  2501. @@ -2512,7 +2502,7 @@ static int stmmac_rx(struct stmmac_priv
  2502. if (netif_msg_rx_status(priv)) {
  2503. void *rx_head;
  2504. - pr_info(">>>>>> %s: descriptor ring:\n", __func__);
  2505. + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
  2506. if (priv->extend_desc)
  2507. rx_head = (void *)priv->dma_erx;
  2508. else
  2509. @@ -2574,9 +2564,9 @@ static int stmmac_rx(struct stmmac_priv
  2510. unsigned int des;
  2511. if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
  2512. - des = p->des0;
  2513. + des = le32_to_cpu(p->des0);
  2514. else
  2515. - des = p->des2;
  2516. + des = le32_to_cpu(p->des2);
  2517. frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
  2518. @@ -2585,9 +2575,9 @@ static int stmmac_rx(struct stmmac_priv
  2519. * ignored
  2520. */
  2521. if (frame_len > priv->dma_buf_sz) {
  2522. - pr_err("%s: len %d larger than size (%d)\n",
  2523. - priv->dev->name, frame_len,
  2524. - priv->dma_buf_sz);
  2525. + netdev_err(priv->dev,
  2526. + "len %d larger than size (%d)\n",
  2527. + frame_len, priv->dma_buf_sz);
  2528. priv->dev->stats.rx_length_errors++;
  2529. break;
  2530. }
  2531. @@ -2599,11 +2589,11 @@ static int stmmac_rx(struct stmmac_priv
  2532. frame_len -= ETH_FCS_LEN;
  2533. if (netif_msg_rx_status(priv)) {
  2534. - pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
  2535. - p, entry, des);
  2536. + netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
  2537. + p, entry, des);
  2538. if (frame_len > ETH_FRAME_LEN)
  2539. - pr_debug("\tframe size %d, COE: %d\n",
  2540. - frame_len, status);
  2541. + netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
  2542. + frame_len, status);
  2543. }
  2544. /* The zero-copy is always used for all the sizes
  2545. @@ -2640,8 +2630,9 @@ static int stmmac_rx(struct stmmac_priv
  2546. } else {
  2547. skb = priv->rx_skbuff[entry];
  2548. if (unlikely(!skb)) {
  2549. - pr_err("%s: Inconsistent Rx chain\n",
  2550. - priv->dev->name);
  2551. + netdev_err(priv->dev,
  2552. + "%s: Inconsistent Rx chain\n",
  2553. + priv->dev->name);
  2554. priv->dev->stats.rx_dropped++;
  2555. break;
  2556. }
  2557. @@ -2657,7 +2648,8 @@ static int stmmac_rx(struct stmmac_priv
  2558. }
  2559. if (netif_msg_pktdata(priv)) {
  2560. - pr_debug("frame received (%dbytes)", frame_len);
  2561. + netdev_dbg(priv->dev, "frame received (%dbytes)",
  2562. + frame_len);
  2563. print_pkt(skb->data, frame_len);
  2564. }
  2565. @@ -2760,7 +2752,7 @@ static int stmmac_change_mtu(struct net_
  2566. int max_mtu;
  2567. if (netif_running(dev)) {
  2568. - pr_err("%s: must be stopped to change its MTU\n", dev->name);
  2569. + netdev_err(priv->dev, "must be stopped to change its MTU\n");
  2570. return -EBUSY;
  2571. }
  2572. @@ -2852,7 +2844,7 @@ static irqreturn_t stmmac_interrupt(int
  2573. pm_wakeup_event(priv->device, 0);
  2574. if (unlikely(!dev)) {
  2575. - pr_err("%s: invalid dev pointer\n", __func__);
  2576. + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
  2577. return IRQ_NONE;
  2578. }
  2579. @@ -2910,7 +2902,6 @@ static void stmmac_poll_controller(struc
  2580. */
  2581. static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  2582. {
  2583. - struct stmmac_priv *priv = netdev_priv(dev);
  2584. int ret = -EOPNOTSUPP;
  2585. if (!netif_running(dev))
  2586. @@ -2920,9 +2911,9 @@ static int stmmac_ioctl(struct net_devic
  2587. case SIOCGMIIPHY:
  2588. case SIOCGMIIREG:
  2589. case SIOCSMIIREG:
  2590. - if (!priv->phydev)
  2591. + if (!dev->phydev)
  2592. return -EINVAL;
  2593. - ret = phy_mii_ioctl(priv->phydev, rq, cmd);
  2594. + ret = phy_mii_ioctl(dev->phydev, rq, cmd);
  2595. break;
  2596. case SIOCSHWTSTAMP:
  2597. ret = stmmac_hwtstamp_ioctl(dev, rq);
  2598. @@ -2950,14 +2941,17 @@ static void sysfs_display_ring(void *hea
  2599. x = *(u64 *) ep;
  2600. seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
  2601. i, (unsigned int)virt_to_phys(ep),
  2602. - ep->basic.des0, ep->basic.des1,
  2603. - ep->basic.des2, ep->basic.des3);
  2604. + le32_to_cpu(ep->basic.des0),
  2605. + le32_to_cpu(ep->basic.des1),
  2606. + le32_to_cpu(ep->basic.des2),
  2607. + le32_to_cpu(ep->basic.des3));
  2608. ep++;
  2609. } else {
  2610. x = *(u64 *) p;
  2611. seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
  2612. i, (unsigned int)virt_to_phys(ep),
  2613. - p->des0, p->des1, p->des2, p->des3);
  2614. + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
  2615. + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
  2616. p++;
  2617. }
  2618. seq_printf(seq, "\n");
  2619. @@ -2989,6 +2983,8 @@ static int stmmac_sysfs_ring_open(struct
  2620. return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
  2621. }
  2622. +/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
  2623. +
  2624. static const struct file_operations stmmac_rings_status_fops = {
  2625. .owner = THIS_MODULE,
  2626. .open = stmmac_sysfs_ring_open,
  2627. @@ -3011,11 +3007,11 @@ static int stmmac_sysfs_dma_cap_read(str
  2628. seq_printf(seq, "\tDMA HW features\n");
  2629. seq_printf(seq, "==============================\n");
  2630. - seq_printf(seq, "\t10/100 Mbps %s\n",
  2631. + seq_printf(seq, "\t10/100 Mbps: %s\n",
  2632. (priv->dma_cap.mbps_10_100) ? "Y" : "N");
  2633. - seq_printf(seq, "\t1000 Mbps %s\n",
  2634. + seq_printf(seq, "\t1000 Mbps: %s\n",
  2635. (priv->dma_cap.mbps_1000) ? "Y" : "N");
  2636. - seq_printf(seq, "\tHalf duple %s\n",
  2637. + seq_printf(seq, "\tHalf duplex: %s\n",
  2638. (priv->dma_cap.half_duplex) ? "Y" : "N");
  2639. seq_printf(seq, "\tHash Filter: %s\n",
  2640. (priv->dma_cap.hash_filter) ? "Y" : "N");
  2641. @@ -3033,9 +3029,9 @@ static int stmmac_sysfs_dma_cap_read(str
  2642. (priv->dma_cap.rmon) ? "Y" : "N");
  2643. seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
  2644. (priv->dma_cap.time_stamp) ? "Y" : "N");
  2645. - seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
  2646. + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
  2647. (priv->dma_cap.atime_stamp) ? "Y" : "N");
  2648. - seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
  2649. + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
  2650. (priv->dma_cap.eee) ? "Y" : "N");
  2651. seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
  2652. seq_printf(seq, "\tChecksum Offload in TX: %s\n",
  2653. @@ -3082,8 +3078,7 @@ static int stmmac_init_fs(struct net_dev
  2654. priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
  2655. if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
  2656. - pr_err("ERROR %s/%s, debugfs create directory failed\n",
  2657. - STMMAC_RESOURCE_NAME, dev->name);
  2658. + netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
  2659. return -ENOMEM;
  2660. }
  2661. @@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev
  2662. &stmmac_rings_status_fops);
  2663. if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
  2664. - pr_info("ERROR creating stmmac ring debugfs file\n");
  2665. + netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
  2666. debugfs_remove_recursive(priv->dbgfs_dir);
  2667. return -ENOMEM;
  2668. @@ -3107,7 +3102,7 @@ static int stmmac_init_fs(struct net_dev
  2669. dev, &stmmac_dma_cap_fops);
  2670. if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
  2671. - pr_info("ERROR creating stmmac MMC debugfs file\n");
  2672. + netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
  2673. debugfs_remove_recursive(priv->dbgfs_dir);
  2674. return -ENOMEM;
  2675. @@ -3179,11 +3174,11 @@ static int stmmac_hw_init(struct stmmac_
  2676. } else {
  2677. if (chain_mode) {
  2678. priv->hw->mode = &chain_mode_ops;
  2679. - pr_info(" Chain mode enabled\n");
  2680. + dev_info(priv->device, "Chain mode enabled\n");
  2681. priv->mode = STMMAC_CHAIN_MODE;
  2682. } else {
  2683. priv->hw->mode = &ring_mode_ops;
  2684. - pr_info(" Ring mode enabled\n");
  2685. + dev_info(priv->device, "Ring mode enabled\n");
  2686. priv->mode = STMMAC_RING_MODE;
  2687. }
  2688. }
  2689. @@ -3191,7 +3186,7 @@ static int stmmac_hw_init(struct stmmac_
  2690. /* Get the HW capability (new GMAC newer than 3.50a) */
  2691. priv->hw_cap_support = stmmac_get_hw_features(priv);
  2692. if (priv->hw_cap_support) {
  2693. - pr_info(" DMA HW capability register supported");
  2694. + dev_info(priv->device, "DMA HW capability register supported\n");
  2695. /* We can override some gmac/dma configuration fields: e.g.
  2696. * enh_desc, tx_coe (e.g. that are passed through the
  2697. @@ -3216,8 +3211,9 @@ static int stmmac_hw_init(struct stmmac_
  2698. else if (priv->dma_cap.rx_coe_type1)
  2699. priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
  2700. - } else
  2701. - pr_info(" No HW DMA feature register supported");
  2702. + } else {
  2703. + dev_info(priv->device, "No HW DMA feature register supported\n");
  2704. + }
  2705. /* To use alternate (extended), normal or GMAC4 descriptor structures */
  2706. if (priv->synopsys_id >= DWMAC_CORE_4_00)
  2707. @@ -3227,20 +3223,20 @@ static int stmmac_hw_init(struct stmmac_
  2708. if (priv->plat->rx_coe) {
  2709. priv->hw->rx_csum = priv->plat->rx_coe;
  2710. - pr_info(" RX Checksum Offload Engine supported\n");
  2711. + dev_info(priv->device, "RX Checksum Offload Engine supported\n");
  2712. if (priv->synopsys_id < DWMAC_CORE_4_00)
  2713. - pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
  2714. + dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
  2715. }
  2716. if (priv->plat->tx_coe)
  2717. - pr_info(" TX Checksum insertion supported\n");
  2718. + dev_info(priv->device, "TX Checksum insertion supported\n");
  2719. if (priv->plat->pmt) {
  2720. - pr_info(" Wake-Up On Lan supported\n");
  2721. + dev_info(priv->device, "Wake-Up On Lan supported\n");
  2722. device_set_wakeup_capable(priv->device, 1);
  2723. }
  2724. if (priv->dma_cap.tsoen)
  2725. - pr_info(" TSO supported\n");
  2726. + dev_info(priv->device, "TSO supported\n");
  2727. return 0;
  2728. }
  2729. @@ -3299,8 +3295,8 @@ int stmmac_dvr_probe(struct device *devi
  2730. priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
  2731. if (IS_ERR(priv->stmmac_clk)) {
  2732. - dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
  2733. - __func__);
  2734. + netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
  2735. + __func__);
  2736. /* If failed to obtain stmmac_clk and specific clk_csr value
  2737. * is NOT passed from the platform, probe fail.
  2738. */
  2739. @@ -3349,7 +3345,7 @@ int stmmac_dvr_probe(struct device *devi
  2740. if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
  2741. ndev->hw_features |= NETIF_F_TSO;
  2742. priv->tso = true;
  2743. - pr_info(" TSO feature enabled\n");
  2744. + dev_info(priv->device, "TSO feature enabled\n");
  2745. }
  2746. ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
  2747. ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
  2748. @@ -3369,13 +3365,13 @@ int stmmac_dvr_probe(struct device *devi
  2749. */
  2750. if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
  2751. priv->use_riwt = 1;
  2752. - pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
  2753. + dev_info(priv->device,
  2754. + "Enable RX Mitigation via HW Watchdog Timer\n");
  2755. }
  2756. netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
  2757. spin_lock_init(&priv->lock);
  2758. - spin_lock_init(&priv->tx_lock);
  2759. /* If a specific clk_csr value is passed from the platform
  2760. * this means that the CSR Clock Range selection cannot be
  2761. @@ -3396,15 +3392,17 @@ int stmmac_dvr_probe(struct device *devi
  2762. /* MDIO bus Registration */
  2763. ret = stmmac_mdio_register(ndev);
  2764. if (ret < 0) {
  2765. - pr_debug("%s: MDIO bus (id: %d) registration failed",
  2766. - __func__, priv->plat->bus_id);
  2767. - goto error_napi_register;
  2768. + dev_err(priv->device,
  2769. + "%s: MDIO bus (id: %d) registration failed",
  2770. + __func__, priv->plat->bus_id);
  2771. + goto error_mdio_register;
  2772. }
  2773. }
  2774. ret = register_netdev(ndev);
  2775. if (ret) {
  2776. - pr_err("%s: ERROR %i registering the device\n", __func__, ret);
  2777. + dev_err(priv->device, "%s: ERROR %i registering the device\n",
  2778. + __func__, ret);
  2779. goto error_netdev_register;
  2780. }
  2781. @@ -3415,7 +3413,7 @@ error_netdev_register:
  2782. priv->hw->pcs != STMMAC_PCS_TBI &&
  2783. priv->hw->pcs != STMMAC_PCS_RTBI)
  2784. stmmac_mdio_unregister(ndev);
  2785. -error_napi_register:
  2786. +error_mdio_register:
  2787. netif_napi_del(&priv->napi);
  2788. error_hw_init:
  2789. clk_disable_unprepare(priv->pclk);
  2790. @@ -3439,7 +3437,7 @@ int stmmac_dvr_remove(struct device *dev
  2791. struct net_device *ndev = dev_get_drvdata(dev);
  2792. struct stmmac_priv *priv = netdev_priv(ndev);
  2793. - pr_info("%s:\n\tremoving driver", __func__);
  2794. + netdev_info(priv->dev, "%s: removing driver", __func__);
  2795. priv->hw->dma->stop_rx(priv->ioaddr);
  2796. priv->hw->dma->stop_tx(priv->ioaddr);
  2797. @@ -3477,8 +3475,8 @@ int stmmac_suspend(struct device *dev)
  2798. if (!ndev || !netif_running(ndev))
  2799. return 0;
  2800. - if (priv->phydev)
  2801. - phy_stop(priv->phydev);
  2802. + if (ndev->phydev)
  2803. + phy_stop(ndev->phydev);
  2804. spin_lock_irqsave(&priv->lock, flags);
  2805. @@ -3572,8 +3570,8 @@ int stmmac_resume(struct device *dev)
  2806. spin_unlock_irqrestore(&priv->lock, flags);
  2807. - if (priv->phydev)
  2808. - phy_start(priv->phydev);
  2809. + if (ndev->phydev)
  2810. + phy_start(ndev->phydev);
  2811. return 0;
  2812. }
  2813. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
  2814. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
  2815. @@ -42,13 +42,6 @@
  2816. #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
  2817. #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
  2818. -#define MII_PHY_ADDR_GMAC4_SHIFT 21
  2819. -#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
  2820. -#define MII_PHY_REG_GMAC4_SHIFT 16
  2821. -#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
  2822. -#define MII_CSR_CLK_GMAC4_SHIFT 8
  2823. -#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
  2824. -
  2825. static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
  2826. {
  2827. unsigned long curr;
  2828. @@ -68,8 +61,8 @@ static int stmmac_mdio_busy_wait(void __
  2829. /**
  2830. * stmmac_mdio_read
  2831. * @bus: points to the mii_bus structure
  2832. - * @phyaddr: MII addr reg bits 15-11
  2833. - * @phyreg: MII addr reg bits 10-6
  2834. + * @phyaddr: MII addr
  2835. + * @phyreg: MII reg
  2836. * Description: it reads data from the MII register from within the phy device.
  2837. * For the 7111 GMAC, we must set the bit 0 in the MII address register while
  2838. * accessing the PHY registers.
  2839. @@ -83,14 +76,20 @@ static int stmmac_mdio_read(struct mii_b
  2840. unsigned int mii_data = priv->hw->mii.data;
  2841. int data;
  2842. - u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
  2843. - ((phyreg << 6) & (0x000007C0)));
  2844. - regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
  2845. + u32 value = MII_BUSY;
  2846. +
  2847. + value |= (phyaddr << priv->hw->mii.addr_shift)
  2848. + & priv->hw->mii.addr_mask;
  2849. + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
  2850. + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
  2851. + & priv->hw->mii.clk_csr_mask;
  2852. + if (priv->plat->has_gmac4)
  2853. + value |= MII_GMAC4_READ;
  2854. if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2855. return -EBUSY;
  2856. - writel(regValue, priv->ioaddr + mii_address);
  2857. + writel(value, priv->ioaddr + mii_address);
  2858. if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2859. return -EBUSY;
  2860. @@ -104,8 +103,8 @@ static int stmmac_mdio_read(struct mii_b
  2861. /**
  2862. * stmmac_mdio_write
  2863. * @bus: points to the mii_bus structure
  2864. - * @phyaddr: MII addr reg bits 15-11
  2865. - * @phyreg: MII addr reg bits 10-6
  2866. + * @phyaddr: MII addr
  2867. + * @phyreg: MII reg
  2868. * @phydata: phy data
  2869. * Description: it writes the data into the MII register from within the device.
  2870. */
  2871. @@ -117,85 +116,18 @@ static int stmmac_mdio_write(struct mii_
  2872. unsigned int mii_address = priv->hw->mii.addr;
  2873. unsigned int mii_data = priv->hw->mii.data;
  2874. - u16 value =
  2875. - (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
  2876. - | MII_WRITE;
  2877. -
  2878. - value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
  2879. -
  2880. - /* Wait until any existing MII operation is complete */
  2881. - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2882. - return -EBUSY;
  2883. -
  2884. - /* Set the MII address register to write */
  2885. - writel(phydata, priv->ioaddr + mii_data);
  2886. - writel(value, priv->ioaddr + mii_address);
  2887. -
  2888. - /* Wait until any existing MII operation is complete */
  2889. - return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
  2890. -}
  2891. -
  2892. -/**
  2893. - * stmmac_mdio_read_gmac4
  2894. - * @bus: points to the mii_bus structure
  2895. - * @phyaddr: MII addr reg bits 25-21
  2896. - * @phyreg: MII addr reg bits 20-16
  2897. - * Description: it reads data from the MII register of GMAC4 from within
  2898. - * the phy device.
  2899. - */
  2900. -static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
  2901. -{
  2902. - struct net_device *ndev = bus->priv;
  2903. - struct stmmac_priv *priv = netdev_priv(ndev);
  2904. - unsigned int mii_address = priv->hw->mii.addr;
  2905. - unsigned int mii_data = priv->hw->mii.data;
  2906. - int data;
  2907. - u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
  2908. - (MII_PHY_ADDR_GMAC4_MASK)) |
  2909. - ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
  2910. - (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
  2911. -
  2912. - value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
  2913. - << MII_CSR_CLK_GMAC4_SHIFT);
  2914. -
  2915. - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2916. - return -EBUSY;
  2917. -
  2918. - writel(value, priv->ioaddr + mii_address);
  2919. -
  2920. - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2921. - return -EBUSY;
  2922. -
  2923. - /* Read the data from the MII data register */
  2924. - data = (int)readl(priv->ioaddr + mii_data);
  2925. -
  2926. - return data;
  2927. -}
  2928. -
  2929. -/**
  2930. - * stmmac_mdio_write_gmac4
  2931. - * @bus: points to the mii_bus structure
  2932. - * @phyaddr: MII addr reg bits 25-21
  2933. - * @phyreg: MII addr reg bits 20-16
  2934. - * @phydata: phy data
  2935. - * Description: it writes the data into the MII register of GMAC4 from within
  2936. - * the device.
  2937. - */
  2938. -static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
  2939. - u16 phydata)
  2940. -{
  2941. - struct net_device *ndev = bus->priv;
  2942. - struct stmmac_priv *priv = netdev_priv(ndev);
  2943. - unsigned int mii_address = priv->hw->mii.addr;
  2944. - unsigned int mii_data = priv->hw->mii.data;
  2945. -
  2946. - u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
  2947. - (MII_PHY_ADDR_GMAC4_MASK)) |
  2948. - ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
  2949. - (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
  2950. + u32 value = MII_BUSY;
  2951. - value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
  2952. - << MII_CSR_CLK_GMAC4_SHIFT);
  2953. + value |= (phyaddr << priv->hw->mii.addr_shift)
  2954. + & priv->hw->mii.addr_mask;
  2955. + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
  2956. +
  2957. + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
  2958. + & priv->hw->mii.clk_csr_mask;
  2959. + if (priv->plat->has_gmac4)
  2960. + value |= MII_GMAC4_WRITE;
  2961. + else
  2962. + value |= MII_WRITE;
  2963. /* Wait until any existing MII operation is complete */
  2964. if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
  2965. @@ -260,7 +192,7 @@ int stmmac_mdio_reset(struct mii_bus *bu
  2966. #endif
  2967. if (data->phy_reset) {
  2968. - pr_debug("stmmac_mdio_reset: calling phy_reset\n");
  2969. + netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n");
  2970. data->phy_reset(priv->plat->bsp_priv);
  2971. }
  2972. @@ -305,13 +237,8 @@ int stmmac_mdio_register(struct net_devi
  2973. #endif
  2974. new_bus->name = "stmmac";
  2975. - if (priv->plat->has_gmac4) {
  2976. - new_bus->read = &stmmac_mdio_read_gmac4;
  2977. - new_bus->write = &stmmac_mdio_write_gmac4;
  2978. - } else {
  2979. - new_bus->read = &stmmac_mdio_read;
  2980. - new_bus->write = &stmmac_mdio_write;
  2981. - }
  2982. + new_bus->read = &stmmac_mdio_read;
  2983. + new_bus->write = &stmmac_mdio_write;
  2984. new_bus->reset = &stmmac_mdio_reset;
  2985. snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  2986. @@ -325,7 +252,7 @@ int stmmac_mdio_register(struct net_devi
  2987. else
  2988. err = mdiobus_register(new_bus);
  2989. if (err != 0) {
  2990. - pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
  2991. + netdev_err(ndev, "Cannot register the MDIO bus\n");
  2992. goto bus_register_fail;
  2993. }
  2994. @@ -372,16 +299,16 @@ int stmmac_mdio_register(struct net_devi
  2995. irq_str = irq_num;
  2996. break;
  2997. }
  2998. - pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
  2999. - ndev->name, phydev->phy_id, addr,
  3000. - irq_str, phydev_name(phydev),
  3001. - act ? " active" : "");
  3002. + netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
  3003. + phydev->phy_id, addr,
  3004. + irq_str, phydev_name(phydev),
  3005. + act ? " active" : "");
  3006. found = 1;
  3007. }
  3008. }
  3009. if (!found && !mdio_node) {
  3010. - pr_warn("%s: No PHY found\n", ndev->name);
  3011. + netdev_warn(ndev, "No PHY found\n");
  3012. mdiobus_unregister(new_bus);
  3013. mdiobus_free(new_bus);
  3014. return -ENODEV;
  3015. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
  3016. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
  3017. @@ -81,6 +81,7 @@ static void stmmac_default_data(struct p
  3018. plat->mdio_bus_data->phy_mask = 0;
  3019. plat->dma_cfg->pbl = 32;
  3020. + plat->dma_cfg->pblx8 = true;
  3021. /* TODO: AXI */
  3022. /* Set default value for multicast hash bins */
  3023. @@ -88,6 +89,9 @@ static void stmmac_default_data(struct p
  3024. /* Set default value for unicast filter entries */
  3025. plat->unicast_filter_entries = 1;
  3026. +
  3027. + /* Set the maxmtu to a default of JUMBO_LEN */
  3028. + plat->maxmtu = JUMBO_LEN;
  3029. }
  3030. static int quark_default_data(struct plat_stmmacenet_data *plat,
  3031. @@ -115,6 +119,7 @@ static int quark_default_data(struct pla
  3032. plat->mdio_bus_data->phy_mask = 0;
  3033. plat->dma_cfg->pbl = 16;
  3034. + plat->dma_cfg->pblx8 = true;
  3035. plat->dma_cfg->fixed_burst = 1;
  3036. /* AXI (TODO) */
  3037. @@ -124,6 +129,9 @@ static int quark_default_data(struct pla
  3038. /* Set default value for unicast filter entries */
  3039. plat->unicast_filter_entries = 1;
  3040. + /* Set the maxmtu to a default of JUMBO_LEN */
  3041. + plat->maxmtu = JUMBO_LEN;
  3042. +
  3043. return 0;
  3044. }
  3045. --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
  3046. +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
  3047. @@ -292,6 +292,7 @@ stmmac_probe_config_dt(struct platform_d
  3048. if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
  3049. of_device_is_compatible(np, "snps,dwmac-4.10a")) {
  3050. plat->has_gmac4 = 1;
  3051. + plat->has_gmac = 0;
  3052. plat->pmt = 1;
  3053. plat->tso_en = of_property_read_bool(np, "snps,tso");
  3054. }
  3055. @@ -303,21 +304,25 @@ stmmac_probe_config_dt(struct platform_d
  3056. plat->force_sf_dma_mode = 1;
  3057. }
  3058. - if (of_find_property(np, "snps,pbl", NULL)) {
  3059. - dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
  3060. - GFP_KERNEL);
  3061. - if (!dma_cfg) {
  3062. - stmmac_remove_config_dt(pdev, plat);
  3063. - return ERR_PTR(-ENOMEM);
  3064. - }
  3065. - plat->dma_cfg = dma_cfg;
  3066. - of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
  3067. - dma_cfg->aal = of_property_read_bool(np, "snps,aal");
  3068. - dma_cfg->fixed_burst =
  3069. - of_property_read_bool(np, "snps,fixed-burst");
  3070. - dma_cfg->mixed_burst =
  3071. - of_property_read_bool(np, "snps,mixed-burst");
  3072. - }
  3073. + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
  3074. + GFP_KERNEL);
  3075. + if (!dma_cfg) {
  3076. + stmmac_remove_config_dt(pdev, plat);
  3077. + return ERR_PTR(-ENOMEM);
  3078. + }
  3079. + plat->dma_cfg = dma_cfg;
  3080. +
  3081. + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
  3082. + if (!dma_cfg->pbl)
  3083. + dma_cfg->pbl = DEFAULT_DMA_PBL;
  3084. + of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
  3085. + of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
  3086. + dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
  3087. +
  3088. + dma_cfg->aal = of_property_read_bool(np, "snps,aal");
  3089. + dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
  3090. + dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
  3091. +
  3092. plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
  3093. if (plat->force_thresh_dma_mode) {
  3094. plat->force_sf_dma_mode = 0;
  3095. @@ -445,9 +450,7 @@ static int stmmac_pltfr_suspend(struct d
  3096. struct platform_device *pdev = to_platform_device(dev);
  3097. ret = stmmac_suspend(dev);
  3098. - if (priv->plat->suspend)
  3099. - priv->plat->suspend(pdev, priv->plat->bsp_priv);
  3100. - else if (priv->plat->exit)
  3101. + if (priv->plat->exit)
  3102. priv->plat->exit(pdev, priv->plat->bsp_priv);
  3103. return ret;
  3104. @@ -466,9 +469,7 @@ static int stmmac_pltfr_resume(struct de
  3105. struct stmmac_priv *priv = netdev_priv(ndev);
  3106. struct platform_device *pdev = to_platform_device(dev);
  3107. - if (priv->plat->resume)
  3108. - priv->plat->resume(pdev, priv->plat->bsp_priv);
  3109. - else if (priv->plat->init)
  3110. + if (priv->plat->init)
  3111. priv->plat->init(pdev, priv->plat->bsp_priv);
  3112. return stmmac_resume(dev);
  3113. --- a/include/linux/stmmac.h
  3114. +++ b/include/linux/stmmac.h
  3115. @@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data {
  3116. struct stmmac_dma_cfg {
  3117. int pbl;
  3118. + int txpbl;
  3119. + int rxpbl;
  3120. + bool pblx8;
  3121. int fixed_burst;
  3122. int mixed_burst;
  3123. bool aal;
  3124. @@ -135,8 +138,6 @@ struct plat_stmmacenet_data {
  3125. void (*bus_setup)(void __iomem *ioaddr);
  3126. int (*init)(struct platform_device *pdev, void *priv);
  3127. void (*exit)(struct platform_device *pdev, void *priv);
  3128. - void (*suspend)(struct platform_device *pdev, void *priv);
  3129. - void (*resume)(struct platform_device *pdev, void *priv);
  3130. void *bsp_priv;
  3131. struct stmmac_axi *axi;
  3132. int has_gmac4;