rtl838x_eth.c 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/drivers/net/ethernet/rtl838x_eth.c
  4. * Copyright (C) 2020 B. Koblitz
  5. */
  6. #include <linux/dma-mapping.h>
  7. #include <linux/etherdevice.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/io.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/sched.h>
  12. #include <linux/slab.h>
  13. #include <linux/of.h>
  14. #include <linux/of_net.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/module.h>
  17. #include <linux/phylink.h>
  18. #include <linux/pkt_sched.h>
  19. #include <net/dsa.h>
  20. #include <net/switchdev.h>
  21. #include <asm/cacheflush.h>
  22. #include <asm/mach-rtl838x/mach-rtl83xx.h>
  23. #include "rtl838x_eth.h"
  24. extern struct rtl83xx_soc_info soc_info;
  25. /*
  26. * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
  27. * The ring is assigned by switch based on packet/port priortity
  28. * Maximum number of TX rings is 2, Ring 2 being the high priority
  29. * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
  30. * for an RX ring, MAX_ENTRIES the maximum number of entries
  31. * available in total for all queues.
  32. */
  33. #define MAX_RXRINGS 32
  34. #define MAX_RXLEN 300
  35. #define MAX_ENTRIES (300 * 8)
  36. #define TXRINGS 2
  37. #define TXRINGLEN 160
  38. #define NOTIFY_EVENTS 10
  39. #define NOTIFY_BLOCKS 10
  40. #define TX_EN 0x8
  41. #define RX_EN 0x4
  42. #define TX_EN_93XX 0x20
  43. #define RX_EN_93XX 0x10
  44. #define TX_DO 0x2
  45. #define WRAP 0x2
  46. #define MAX_PORTS 57
  47. #define MAX_SMI_BUSSES 4
  48. #define RING_BUFFER 1600
/* DMA packet descriptor header shared with the switch ASIC.
 * The layout is hardware-defined: do not reorder or pad fields.
 */
struct p_hdr {
	uint8_t *buf;		/* pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];	/* CPU tag words; layout differs per SoC family */
} __packed __aligned(1);
/* L2 forwarding-database notification event as written by the RTL839x
 * hardware into the notification ring. Bitfield layout is hardware-defined.
 */
struct n_event {
	uint32_t type:2;	/* non-zero is treated as an "add" event by the consumer */
	uint32_t fidVid:12;	/* FID/VID of the entry -- TODO confirm exact encoding */
	uint64_t mac:48;	/* MAC address of the learned/aged entry */
	uint32_t slp:6;		/* presumably source logical port -- confirm against docs */
	uint32_t valid:1;	/* entry carries a valid event */
	uint32_t reserved:27;
} __packed __aligned(1);
/* In-memory layout of all DMA descriptor rings and packet headers.
 * An instance lives at priv->membase and is shared with the ASIC.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor words (addr | flag bits) */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];	/* per-descriptor RX packet headers */
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];	/* per-descriptor TX packet headers */
	uint32_t c_rx[MAX_RXRINGS];			/* next RX index the CPU will consume, per ring */
	uint32_t c_tx[TXRINGS];				/* next TX index, per ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffer area */
	uint8_t *rx_space;				/* RX packet buffer area (allocated elsewhere) */
};
/* One block of L2 notification events in the RTL839x notification ring */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
/* Ring of L2 notification blocks exchanged with the RTL839x hardware.
 * Bit 0 of ring[e] set means block e is owned by the switch; the CPU
 * consumes entries with bit 0 clear and sets the bit to hand them back.
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
  85. static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  86. {
  87. // cpu_tag[0] is reserved on the RTL83XX SoCs
  88. h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on
  89. h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
  90. h->cpu_tag[3] = 0x0000;
  91. h->cpu_tag[4] = BIT(dest_port) >> 16;
  92. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  93. /* Set internal priority (PRI) and enable (AS_PRI) */
  94. if (prio >= 0)
  95. h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
  96. }
  97. static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  98. {
  99. // cpu_tag[0] is reserved on the RTL83XX SoCs
  100. h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
  101. h->cpu_tag[2] = BIT(4) | BIT(7); /* AS_DPM (4) and L2LEARNING (7) flags */
  102. h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
  103. // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
  104. if (dest_port >= 32) {
  105. dest_port -= 32;
  106. h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
  107. h->cpu_tag[3] = BIT(dest_port) & 0xffff;
  108. } else {
  109. h->cpu_tag[4] = BIT(dest_port) >> 16;
  110. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  111. }
  112. /* Set internal priority (PRI) and enable (AS_PRI) */
  113. if (prio >= 0)
  114. h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
  115. }
  116. static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  117. {
  118. h->cpu_tag[0] = 0x8000; // CPU tag marker
  119. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  120. h->cpu_tag[3] = 0;
  121. h->cpu_tag[4] = 0;
  122. h->cpu_tag[5] = 0;
  123. h->cpu_tag[6] = BIT(dest_port) >> 16;
  124. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  125. /* Enable (AS_QID) and set priority queue (QID) */
  126. if (prio >= 0)
  127. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  128. }
  129. static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  130. {
  131. h->cpu_tag[0] = 0x8000; // CPU tag marker
  132. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  133. h->cpu_tag[3] = 0;
  134. h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
  135. if (dest_port >= 32) {
  136. dest_port -= 32;
  137. h->cpu_tag[4] = BIT(dest_port) >> 16;
  138. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  139. } else {
  140. h->cpu_tag[6] = BIT(dest_port) >> 16;
  141. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  142. }
  143. /* Enable (AS_QID) and set priority queue (QID) */
  144. if (prio >= 0)
  145. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  146. }
  147. static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
  148. {
  149. h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
  150. h->cpu_tag[2] |= (vlan >> 8) & 0xf;
  151. h->cpu_tag[3] |= (vlan & 0xff) << 8;
  152. }
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* ring number */
	struct rtl838x_eth_priv *priv;	/* back-pointer to the owning device */
	struct napi_struct napi;
};
/* Driver private state, one instance per ethernet device */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA ring memory (struct ring_b, then notify_b) */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* per-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* SoC family (RTL8380/8390/9300/9310) */
	const struct rtl838x_eth_reg *r;	/* per-family register map and accessors */
	u8 cpu_port;
	u32 lastEvent;			/* next L2 notification ring entry to consume */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];		/* SMI bus number per port */
	u8 smi_addr[MAX_PORTS];		/* SMI address per port */
	u32 sds_id[MAX_PORTS];		/* SerDes ID per port */
	bool smi_bus_isc45[MAX_SMI_BUSSES];	/* bus uses clause-45 MDIO */
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
  181. extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
  182. extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
  183. extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
  184. extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
  185. extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
  186. extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  187. extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
  188. extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  189. extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  190. extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
  191. extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  192. extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
/*
 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR registers track the fill
 * level of the rings. Writing x into these registers subtracts x from the
 * content. When the content reaches the ring size, the ASIC no longer adds
 * packets to this receive queue.
 */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
/* Subtract @released processed entries from ring @r's RX fill counter.
 * Each CNTR register packs three 10-bit per-ring counters.
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this second write stores the old counter value back
	 * after the masked write above; looks redundant -- confirm intent.
	 */
	sw_w32(v, reg);
}
/* Subtract @released processed entries from ring @r's RX fill counter.
 * Same layout as on RTL930x: three 10-bit counters per register.
 */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): writes the old counter value back after the masked
	 * write, same pattern as rtl930x_update_cntr() -- confirm intent.
	 */
	sw_w32(v, reg);
}
/* CPU tag of a received packet, decoded from struct p_hdr by the
 * per-family decode_tag functions below.
 */
struct dsa_tag {
	u8 reason;	/* forwarding/trap reason code reported by the ASIC */
	u8 queue;	/* RX queue the packet arrived on */
	u16 port;	/* source switch port */
	u8 l2_offloaded;	/* non-zero if the packet was L2-forwarded in HW */
	u8 prio;
	bool crc_error;
};
  234. bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  235. {
  236. /* cpu_tag[0] is reserved. Fields are off-by-one */
  237. t->reason = h->cpu_tag[4] & 0xf;
  238. t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
  239. t->port = h->cpu_tag[1] & 0x1f;
  240. t->crc_error = t->reason == 13;
  241. pr_debug("Reason: %d\n", t->reason);
  242. if (t->reason != 6) // NIC_RX_REASON_SPECIAL_TRAP
  243. t->l2_offloaded = 1;
  244. else
  245. t->l2_offloaded = 0;
  246. return t->l2_offloaded;
  247. }
  248. bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  249. {
  250. /* cpu_tag[0] is reserved. Fields are off-by-one */
  251. t->reason = h->cpu_tag[5] & 0x1f;
  252. t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
  253. t->port = h->cpu_tag[1] & 0x3f;
  254. t->crc_error = h->cpu_tag[4] & BIT(6);
  255. pr_debug("Reason: %d\n", t->reason);
  256. if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
  257. (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
  258. t->l2_offloaded = 0;
  259. else
  260. t->l2_offloaded = 1;
  261. return t->l2_offloaded;
  262. }
  263. bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  264. {
  265. t->reason = h->cpu_tag[7] & 0x3f;
  266. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  267. t->port = (h->cpu_tag[0] >> 8) & 0x1f;
  268. t->crc_error = h->cpu_tag[1] & BIT(6);
  269. pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
  270. if (t->reason >= 19 && t->reason <= 27)
  271. t->l2_offloaded = 0;
  272. else
  273. t->l2_offloaded = 1;
  274. return t->l2_offloaded;
  275. }
  276. bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  277. {
  278. t->reason = h->cpu_tag[7] & 0x3f;
  279. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  280. t->port = (h->cpu_tag[0] >> 8) & 0x3f;
  281. t->crc_error = h->cpu_tag[1] & BIT(6);
  282. if (t->reason != 63)
  283. pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
  284. if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA
  285. t->l2_offloaded = 0;
  286. else
  287. t->l2_offloaded = 1;
  288. return t->l2_offloaded;
  289. }
/*
 * Discard the RX ring-buffers, called as part of the net-ISR
 * when the buffer runs over. Re-initializes every CPU-owned descriptor
 * and hands it back to the ASIC. The @status argument is currently unused.
 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	int r;
	u32 *last;
	struct p_hdr *h;
	struct ring_b *ring = priv->membase;

	for (r = 0; r < priv->rxrings; r++) {
		pr_debug("In %s working on r: %d\n", __func__, r);
		/* Descriptor the ASIC will fill next, as uncached KSEG1 address */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Bit 0 set: descriptor is owned by the ASIC, stop here */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			/* Re-attach this slot's packet buffer (uncached view) */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space
					+ r * priv->rxringlen * RING_BUFFER
					+ ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();
			/* Hand the descriptor back (bit 0) and mark the ring wrap
			 * on the last entry
			 */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
				| (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
/* Work item carrying a 0-terminated list of FDB events; bit 63 of each
 * entry encodes add (set) vs delete (clear), low 48 bits hold the MAC.
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 for the terminating 0 entry */
};
  326. void rtl838x_fdb_sync(struct work_struct *work)
  327. {
  328. const struct fdb_update_work *uw =
  329. container_of(work, struct fdb_update_work, work);
  330. struct switchdev_notifier_fdb_info info;
  331. u8 addr[ETH_ALEN];
  332. int i = 0;
  333. int action;
  334. while (uw->macs[i]) {
  335. action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
  336. : SWITCHDEV_FDB_DEL_TO_BRIDGE;
  337. u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
  338. info.addr = &addr[0];
  339. info.vid = 0;
  340. info.offloaded = 1;
  341. pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
  342. call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
  343. i++;
  344. }
  345. kfree(work);
  346. }
/*
 * Drain the RTL839x L2 notification ring: collect valid MAC add/delete
 * events from each CPU-owned block into a work item and schedule
 * rtl838x_fdb_sync() to forward them to switchdev. Called from the IRQ
 * handler, hence GFP_ATOMIC.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* The notification area follows the DMA rings in membase */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Bit 0 clear: block is ready for the CPU */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);
		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			/* Bit 63 flags an "add" event for rtl838x_fdb_sync() */
			if (event->type)
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}
		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;
		/* Terminate the list. NOTE(review): an invalid event mid-block
		 * leaves a 0 gap (kzalloc) that stops the consumer early --
		 * confirm this is intended.
		 */
		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
/*
 * Interrupt handler for RTL838x/RTL839x: a single combined status
 * register carries TX done (bits 16-19), RX done (bits 8-15), RX runout
 * (bits 0-7) and, on RTL839x, L2 notification events (bits 20-22).
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; the per-ring
		 * mask bit is presumably re-enabled after NAPI polling --
		 * not visible in this chunk
		 */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* L2 notification events (RTL839x only), one status bit each */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
/*
 * Interrupt handler for RTL930x/RTL931x: unlike the RTL83xx, these SoCs
 * use separate status registers for TX done, RX done and RX runout.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; presumably
		 * re-enabled after NAPI polling -- not visible in this chunk
		 */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
/* Register map and per-family accessors for RTL838x SoCs */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
/* Register map and per-family accessors for RTL839x SoCs */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
/* Register map and per-family accessors for RTL930x SoCs
 * (split interrupt status/mask registers, see rtl93xx_net_irq)
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
/* Register map and per-family accessors for RTL931x SoCs
 * (split interrupt status/mask registers, see rtl93xx_net_irq)
 */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
  571. static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
  572. {
  573. u32 int_saved, nbuf;
  574. u32 reset_mask;
  575. int i, pos;
  576. pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
  577. sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
  578. mdelay(100);
  579. /* Disable and clear interrupts */
  580. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
  581. sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
  582. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
  583. sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
  584. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
  585. sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
  586. sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
  587. } else {
  588. sw_w32(0x00000000, priv->r->dma_if_intr_msk);
  589. sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
  590. }
  591. if (priv->family_id == RTL8390_FAMILY_ID) {
  592. /* Preserve L2 notification and NBUF settings */
  593. int_saved = sw_r32(priv->r->dma_if_intr_msk);
  594. nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
  595. /* Disable link change interrupt on RTL839x */
  596. sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
  597. sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
  598. sw_w32(0x00000000, priv->r->dma_if_intr_msk);
  599. sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
  600. }
  601. /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
  602. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
  603. reset_mask = 0x6;
  604. else
  605. reset_mask = 0xc;
  606. sw_w32(reset_mask, priv->r->rst_glb_ctrl);
  607. do { /* Wait for reset of NIC and Queues done */
  608. udelay(20);
  609. } while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
  610. mdelay(100);
  611. /* Setup Head of Line */
  612. if (priv->family_id == RTL8380_FAMILY_ID)
  613. sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
  614. if (priv->family_id == RTL8390_FAMILY_ID)
  615. sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
  616. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
  617. for (i = 0; i < priv->rxrings; i++) {
  618. pos = (i % 3) * 10;
  619. sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
  620. sw_w32_mask(0x3ff << pos, priv->rxringlen,
  621. priv->r->dma_if_rx_ring_cntr(i));
  622. }
  623. }
  624. /* Re-enable link change interrupt */
  625. if (priv->family_id == RTL8390_FAMILY_ID) {
  626. sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
  627. sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
  628. sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
  629. sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
  630. /* Restore notification settings: on RTL838x these bits are null */
  631. sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
  632. sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
  633. }
  634. }
  635. static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
  636. {
  637. int i;
  638. struct ring_b *ring = priv->membase;
  639. for (i = 0; i < priv->rxrings; i++)
  640. sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
  641. for (i = 0; i < TXRINGS; i++)
  642. sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
  643. }
/* Enable DMA and the CPU port on RTL838x after a reset.
 * The register write order follows the vendor bring-up sequence;
 * do not rearrange.
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
/* Enable DMA and the CPU port on RTL839x after a reset.
 * Besides starting DMA, the CPU port is added to the L2 lookup-miss
 * flooding portmask via an indirect table read-modify-write.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	// TODO: The code below should also work for the RTL838x
	/* 0x28000: read table entry, 0x38000: write it back with the bit set.
	 * NOTE(review): command encodings taken as-is — verify against the
	 * RTL839x table-access documentation.
	 */
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
  683. static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
  684. {
  685. int i, pos;
  686. u32 v;
  687. /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
  688. sw_w32(0x06400040, priv->r->dma_if_ctrl);
  689. for (i = 0; i < priv->rxrings; i++) {
  690. pos = (i % 3) * 10;
  691. sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));
  692. // Some SoCs have issues with missing underflow protection
  693. v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
  694. sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
  695. }
  696. /* Enable Notify, RX done, RX overflow and TX done interrupts */
  697. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
  698. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
  699. sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);
  700. /* Enable DMA */
  701. sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);
  702. /* Restart TX/RX to CPU port, enable CRC checking */
  703. sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
  704. if (priv->family_id == RTL9300_FAMILY_ID)
  705. sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
  706. else
  707. sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);
  708. if (priv->family_id == RTL9300_FAMILY_ID)
  709. sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  710. else
  711. sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  712. }
  713. static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
  714. {
  715. int i, j;
  716. struct p_hdr *h;
  717. for (i = 0; i < priv->rxrings; i++) {
  718. for (j = 0; j < priv->rxringlen; j++) {
  719. h = &ring->rx_header[i][j];
  720. memset(h, 0, sizeof(struct p_hdr));
  721. h->buf = (u8 *)KSEG1ADDR(ring->rx_space
  722. + i * priv->rxringlen * RING_BUFFER
  723. + j * RING_BUFFER);
  724. h->size = RING_BUFFER;
  725. /* All rings owned by switch, last one wraps */
  726. ring->rx_r[i][j] = KSEG1ADDR(h) | 1
  727. | (j == (priv->rxringlen - 1) ? WRAP : 0);
  728. }
  729. ring->c_rx[i] = 0;
  730. }
  731. for (i = 0; i < TXRINGS; i++) {
  732. for (j = 0; j < TXRINGLEN; j++) {
  733. h = &ring->tx_header[i][j];
  734. memset(h, 0, sizeof(struct p_hdr));
  735. h->buf = (u8 *)KSEG1ADDR(ring->tx_space
  736. + i * TXRINGLEN * RING_BUFFER
  737. + j * RING_BUFFER);
  738. h->size = RING_BUFFER;
  739. ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
  740. }
  741. /* Last header is wrapping around */
  742. ring->tx_r[i][j-1] |= WRAP;
  743. ring->c_tx[i] = 0;
  744. }
  745. }
/* Set up the RTL839x L2 notification ring.
 * The notification blocks live directly behind the packet ring area in
 * priv->membase. Each ring entry is handed to the switch (bit 0 set);
 * the last entry wraps back to the start.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* Notification threshold of 100 entries (10-bit field at bits 11:2) */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
/* ndo_open: reset the NIC, build the rings, start phylink/NAPI and
 * enable DMA with the family-specific bring-up sequence.
 * Returns 0 (no failure paths in the hardware setup itself).
 * NOTE(review): phylink_start() and napi_enable() are called with
 * priv->lock held and IRQs off — confirm this is safe on the target
 * kernel version.
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;
	int i;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
		 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	/* Family-specific DMA enable and trap/flush configuration */
	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;
	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;
	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;
	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);
		// Disable External CPU access to switch, clear EXT_CPU_EN
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);
		// Set PCIE_PWR_DOWN
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/* Quiesce the NIC: stop CPU-port traffic, disable DMA, flush the L2
 * address cache, take the CPU-port link down and mask all interrupts.
 * The teardown order mirrors the bring-up sequence and should not be
 * rearranged.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Family-specific force-mode value / IRQ-ack mask for 838x vs 839x */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache
	 * NOTE(review): these completion polls are unbounded busy-waits —
	 * confirm the flush always terminates or add a timeout.
	 */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
  874. static int rtl838x_eth_stop(struct net_device *ndev)
  875. {
  876. unsigned long flags;
  877. int i;
  878. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  879. pr_info("in %s\n", __func__);
  880. phylink_stop(priv->phylink);
  881. rtl838x_hw_stop(priv);
  882. for (i = 0; i < priv->rxrings; i++)
  883. napi_disable(&priv->rx_qs[i].napi);
  884. netif_tx_stop_all_queues(ndev);
  885. return 0;
  886. }
  887. static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
  888. {
  889. /*
  890. * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  891. * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
  892. */
  893. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  894. sw_w32(0x0, RTL838X_RMA_CTRL_0);
  895. sw_w32(0x0, RTL838X_RMA_CTRL_1);
  896. }
  897. if (ndev->flags & IFF_ALLMULTI)
  898. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  899. if (ndev->flags & IFF_PROMISC) {
  900. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  901. sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
  902. }
  903. }
  904. static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
  905. {
  906. /*
  907. * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  908. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  909. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  910. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  911. */
  912. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  913. sw_w32(0x0, RTL839X_RMA_CTRL_0);
  914. sw_w32(0x0, RTL839X_RMA_CTRL_1);
  915. sw_w32(0x0, RTL839X_RMA_CTRL_2);
  916. sw_w32(0x0, RTL839X_RMA_CTRL_3);
  917. }
  918. if (ndev->flags & IFF_ALLMULTI) {
  919. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  920. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  921. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  922. }
  923. if (ndev->flags & IFF_PROMISC) {
  924. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  925. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  926. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  927. sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
  928. }
  929. }
  930. static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
  931. {
  932. /*
  933. * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  934. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  935. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  936. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  937. */
  938. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  939. sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
  940. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
  941. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
  942. } else {
  943. sw_w32(0x0, RTL930X_RMA_CTRL_0);
  944. sw_w32(0x0, RTL930X_RMA_CTRL_1);
  945. sw_w32(0x0, RTL930X_RMA_CTRL_2);
  946. }
  947. }
  948. static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
  949. {
  950. /*
  951. * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  952. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  953. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
  954. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  955. */
  956. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  957. sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
  958. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
  959. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
  960. } else {
  961. sw_w32(0x0, RTL931X_RMA_CTRL_0);
  962. sw_w32(0x0, RTL931X_RMA_CTRL_1);
  963. sw_w32(0x0, RTL931X_RMA_CTRL_2);
  964. }
  965. }
/* ndo_tx_timeout: recover from a stuck TX queue by fully stopping the
 * NIC, re-publishing the rings and re-enabling DMA.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	/* NOTE(review): only the RTL838x bring-up is used here regardless
	 * of family — confirm that is intended for 839x/93xx devices.
	 */
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* ndo_start_xmit: place one skb into the TX ring owned by the CPU.
 * If the frame carries a trailer DSA tag (0x80, port, 0x10, 0x00 in
 * the last four bytes), the tag is stripped and re-encoded into the
 * descriptor header via create_tx_header(); the freed bytes are reused
 * for the CRC. Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the switch
 * still owns the descriptor.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer
	 * NOTE(review): assumes len >= 4 for DSA frames — true for tagged
	 * frames from the DSA layer, but worth confirming for raw sockets.
	 */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80
			&& skb->data[len-3] < priv->cpu_port
			&& skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len-3];
		skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	if (skb_padto(skb, len)) {
		/* skb already freed by skb_padto() on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {
		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		/* NOTE(review): freeing with IRQs disabled —
		 * dev_kfree_skb_any() would be the safer call; confirm.
		 */
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
  1060. /*
  1061. * Return queue number for TX. On the RTL83XX, these queues have equal priority
  1062. * so we do round-robin
  1063. */
  1064. u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1065. struct net_device *sb_dev)
  1066. {
  1067. static u8 last = 0;
  1068. last++;
  1069. return last % TXRINGS;
  1070. }
  1071. /*
  1072. * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
  1073. */
  1074. u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1075. struct net_device *sb_dev)
  1076. {
  1077. if (skb->priority >= TC_PRIO_CONTROL)
  1078. return 1;
  1079. return 0;
  1080. }
  1081. static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
  1082. {
  1083. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1084. struct ring_b *ring = priv->membase;
  1085. struct sk_buff *skb;
  1086. LIST_HEAD(rx_list);
  1087. unsigned long flags;
  1088. int i, len, work_done = 0;
  1089. u8 *data, *skb_data;
  1090. unsigned int val;
  1091. u32 *last;
  1092. struct p_hdr *h;
  1093. bool dsa = netdev_uses_dsa(dev);
  1094. struct dsa_tag tag;
  1095. pr_debug("---------------------------------------------------------- RX - %d\n", r);
  1096. spin_lock_irqsave(&priv->lock, flags);
  1097. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1098. do {
  1099. if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
  1100. if (&ring->rx_r[r][ring->c_rx[r]] != last) {
  1101. netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
  1102. r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
  1103. }
  1104. break;
  1105. }
  1106. h = &ring->rx_header[r][ring->c_rx[r]];
  1107. data = (u8 *)KSEG1ADDR(h->buf);
  1108. len = h->len;
  1109. if (!len)
  1110. break;
  1111. work_done++;
  1112. len -= 4; /* strip the CRC */
  1113. /* Add 4 bytes for cpu_tag */
  1114. if (dsa)
  1115. len += 4;
  1116. skb = netdev_alloc_skb(dev, len + 4);
  1117. skb_reserve(skb, NET_IP_ALIGN);
  1118. if (likely(skb)) {
  1119. /* BUG: Prevent bug on RTL838x SoCs*/
  1120. if (priv->family_id == RTL8380_FAMILY_ID) {
  1121. sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
  1122. for (i = 0; i < priv->rxrings; i++) {
  1123. /* Update each ring cnt */
  1124. val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
  1125. sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
  1126. }
  1127. }
  1128. skb_data = skb_put(skb, len);
  1129. /* Make sure data is visible */
  1130. mb();
  1131. memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
  1132. /* Overwrite CRC with cpu_tag */
  1133. if (dsa) {
  1134. priv->r->decode_tag(h, &tag);
  1135. skb->data[len-4] = 0x80;
  1136. skb->data[len-3] = tag.port;
  1137. skb->data[len-2] = 0x10;
  1138. skb->data[len-1] = 0x00;
  1139. if (tag.l2_offloaded)
  1140. skb->data[len-3] |= 0x40;
  1141. }
  1142. if (tag.queue >= 0)
  1143. pr_debug("Queue: %d, len: %d, reason %d port %d\n",
  1144. tag.queue, len, tag.reason, tag.port);
  1145. skb->protocol = eth_type_trans(skb, dev);
  1146. if (dev->features & NETIF_F_RXCSUM) {
  1147. if (tag.crc_error)
  1148. skb_checksum_none_assert(skb);
  1149. else
  1150. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1151. }
  1152. dev->stats.rx_packets++;
  1153. dev->stats.rx_bytes += len;
  1154. list_add_tail(&skb->list, &rx_list);
  1155. } else {
  1156. if (net_ratelimit())
  1157. dev_warn(&dev->dev, "low on memory - packet dropped\n");
  1158. dev->stats.rx_dropped++;
  1159. }
  1160. /* Reset header structure */
  1161. memset(h, 0, sizeof(struct p_hdr));
  1162. h->buf = data;
  1163. h->size = RING_BUFFER;
  1164. ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
  1165. | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
  1166. ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
  1167. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1168. } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
  1169. netif_receive_skb_list(&rx_list);
  1170. // Update counters
  1171. priv->r->update_cntr(r, 0);
  1172. spin_unlock_irqrestore(&priv->lock, flags);
  1173. return work_done;
  1174. }
  1175. static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
  1176. {
  1177. struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
  1178. struct rtl838x_eth_priv *priv = rx_q->priv;
  1179. int work_done = 0;
  1180. int r = rx_q->id;
  1181. int work;
  1182. while (work_done < budget) {
  1183. work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
  1184. if (!work)
  1185. break;
  1186. work_done += work;
  1187. }
  1188. if (work_done < budget) {
  1189. napi_complete_done(napi, work_done);
  1190. /* Enable RX interrupt */
  1191. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
  1192. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
  1193. else
  1194. sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
  1195. }
  1196. return work_done;
  1197. }
  1198. static void rtl838x_validate(struct phylink_config *config,
  1199. unsigned long *supported,
  1200. struct phylink_link_state *state)
  1201. {
  1202. __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
  1203. pr_debug("In %s\n", __func__);
  1204. if (!phy_interface_mode_is_rgmii(state->interface) &&
  1205. state->interface != PHY_INTERFACE_MODE_1000BASEX &&
  1206. state->interface != PHY_INTERFACE_MODE_MII &&
  1207. state->interface != PHY_INTERFACE_MODE_REVMII &&
  1208. state->interface != PHY_INTERFACE_MODE_GMII &&
  1209. state->interface != PHY_INTERFACE_MODE_QSGMII &&
  1210. state->interface != PHY_INTERFACE_MODE_INTERNAL &&
  1211. state->interface != PHY_INTERFACE_MODE_SGMII) {
  1212. bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
  1213. pr_err("Unsupported interface: %d\n", state->interface);
  1214. return;
  1215. }
  1216. /* Allow all the expected bits */
  1217. phylink_set(mask, Autoneg);
  1218. phylink_set_port_modes(mask);
  1219. phylink_set(mask, Pause);
  1220. phylink_set(mask, Asym_Pause);
  1221. /* With the exclusion of MII and Reverse MII, we support Gigabit,
  1222. * including Half duplex
  1223. */
  1224. if (state->interface != PHY_INTERFACE_MODE_MII &&
  1225. state->interface != PHY_INTERFACE_MODE_REVMII) {
  1226. phylink_set(mask, 1000baseT_Full);
  1227. phylink_set(mask, 1000baseT_Half);
  1228. }
  1229. phylink_set(mask, 10baseT_Half);
  1230. phylink_set(mask, 10baseT_Full);
  1231. phylink_set(mask, 100baseT_Half);
  1232. phylink_set(mask, 100baseT_Full);
  1233. bitmap_and(supported, supported, mask,
  1234. __ETHTOOL_LINK_MODE_MASK_NBITS);
  1235. bitmap_and(state->advertising, state->advertising, mask,
  1236. __ETHTOOL_LINK_MODE_MASK_NBITS);
  1237. }
/* phylink mac_config callback — intentionally a no-op. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */
	pr_info("In %s, mode %x\n", __func__, mode);
}
  1247. static void rtl838x_mac_an_restart(struct phylink_config *config)
  1248. {
  1249. struct net_device *dev = container_of(config->dev, struct net_device, dev);
  1250. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1251. /* This works only on RTL838x chips */
  1252. if (priv->family_id != RTL8380_FAMILY_ID)
  1253. return;
  1254. pr_debug("In %s\n", __func__);
  1255. /* Restart by disabling and re-enabling link */
  1256. sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  1257. mdelay(20);
  1258. sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  1259. }
  1260. static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
  1261. struct phylink_link_state *state)
  1262. {
  1263. u32 speed;
  1264. struct net_device *dev = container_of(config->dev, struct net_device, dev);
  1265. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1266. int port = priv->cpu_port;
  1267. pr_info("In %s\n", __func__);
  1268. state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
  1269. state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
  1270. pr_info("%s link status is %d\n", __func__, state->link);
  1271. speed = priv->r->get_mac_link_spd_sts(port);
  1272. switch (speed) {
  1273. case 0:
  1274. state->speed = SPEED_10;
  1275. break;
  1276. case 1:
  1277. state->speed = SPEED_100;
  1278. break;
  1279. case 2:
  1280. state->speed = SPEED_1000;
  1281. break;
  1282. case 5:
  1283. state->speed = SPEED_2500;
  1284. break;
  1285. case 6:
  1286. state->speed = SPEED_5000;
  1287. break;
  1288. case 4:
  1289. state->speed = SPEED_10000;
  1290. break;
  1291. default:
  1292. state->speed = SPEED_UNKNOWN;
  1293. break;
  1294. }
  1295. state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
  1296. if (priv->r->get_mac_rx_pause_sts(port))
  1297. state->pause |= MLO_PAUSE_RX;
  1298. if (priv->r->get_mac_tx_pause_sts(port))
  1299. state->pause |= MLO_PAUSE_TX;
  1300. }
  1301. static void rtl838x_mac_link_down(struct phylink_config *config,
  1302. unsigned int mode,
  1303. phy_interface_t interface)
  1304. {
  1305. struct net_device *dev = container_of(config->dev, struct net_device, dev);
  1306. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1307. pr_debug("In %s\n", __func__);
  1308. /* Stop TX/RX to port */
  1309. sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1310. }
  1311. static void rtl838x_mac_link_up(struct phylink_config *config,
  1312. struct phy_device *phy, unsigned int mode,
  1313. phy_interface_t interface, int speed, int duplex,
  1314. bool tx_pause, bool rx_pause)
  1315. {
  1316. struct net_device *dev = container_of(config->dev, struct net_device, dev);
  1317. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1318. pr_debug("In %s\n", __func__);
  1319. /* Restart TX/RX to port */
  1320. sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
  1321. }
  1322. static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
  1323. {
  1324. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1325. unsigned long flags;
  1326. spin_lock_irqsave(&priv->lock, flags);
  1327. pr_debug("In %s\n", __func__);
  1328. sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
  1329. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
  1330. if (priv->family_id == RTL8380_FAMILY_ID) {
  1331. /* 2 more registers, ALE/MAC block */
  1332. sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
  1333. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
  1334. (RTL838X_MAC_ALE + 4));
  1335. sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
  1336. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
  1337. RTL838X_MAC2 + 4);
  1338. }
  1339. spin_unlock_irqrestore(&priv->lock, flags);
  1340. }
  1341. static int rtl838x_set_mac_address(struct net_device *dev, void *p)
  1342. {
  1343. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1344. const struct sockaddr *addr = p;
  1345. u8 *mac = (u8 *) (addr->sa_data);
  1346. if (!is_valid_ether_addr(addr->sa_data))
  1347. return -EADDRNOTAVAIL;
  1348. memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
  1349. rtl838x_set_mac_hw(dev, mac);
  1350. pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
  1351. return 0;
  1352. }
  1353. static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
  1354. {
  1355. // We will need to set-up EEE and the egress-rate limitation
  1356. return 0;
  1357. }
  1358. static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
  1359. {
  1360. int i;
  1361. if (priv->family_id == 0x8390)
  1362. return rtl8390_init_mac(priv);
  1363. // At present we do not know how to set up EEE on any other SoC than RTL8380
  1364. if (priv->family_id != 0x8380)
  1365. return 0;
  1366. pr_info("%s\n", __func__);
  1367. /* fix timer for EEE */
  1368. sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
  1369. sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
  1370. /* Init VLAN. TODO: Understand what is being done, here */
  1371. if (priv->id == 0x8382) {
  1372. for (i = 0; i <= 28; i++)
  1373. sw_w32(0, 0xd57c + i * 0x80);
  1374. }
  1375. if (priv->id == 0x8380) {
  1376. for (i = 8; i <= 28; i++)
  1377. sw_w32(0, 0xd57c + i * 0x80);
  1378. }
  1379. return 0;
  1380. }
  1381. static int rtl838x_get_link_ksettings(struct net_device *ndev,
  1382. struct ethtool_link_ksettings *cmd)
  1383. {
  1384. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1385. pr_debug("%s called\n", __func__);
  1386. return phylink_ethtool_ksettings_get(priv->phylink, cmd);
  1387. }
  1388. static int rtl838x_set_link_ksettings(struct net_device *ndev,
  1389. const struct ethtool_link_ksettings *cmd)
  1390. {
  1391. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1392. pr_debug("%s called\n", __func__);
  1393. return phylink_ethtool_ksettings_set(priv->phylink, cmd);
  1394. }
  1395. static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1396. {
  1397. u32 val;
  1398. int err;
  1399. struct rtl838x_eth_priv *priv = bus->priv;
  1400. if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
  1401. return rtl838x_read_sds_phy(mii_id, regnum);
  1402. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1403. err = rtl838x_read_mmd_phy(mii_id,
  1404. mdiobus_c45_devad(regnum),
  1405. regnum, &val);
  1406. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1407. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1408. val, err);
  1409. } else {
  1410. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1411. err = rtl838x_read_phy(mii_id, page, regnum, &val);
  1412. }
  1413. if (err)
  1414. return err;
  1415. return val;
  1416. }
  1417. static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1418. {
  1419. return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
  1420. }
  1421. static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1422. {
  1423. u32 val;
  1424. int err;
  1425. struct rtl838x_eth_priv *priv = bus->priv;
  1426. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1427. return rtl839x_read_sds_phy(mii_id, regnum);
  1428. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1429. err = rtl839x_read_mmd_phy(mii_id,
  1430. mdiobus_c45_devad(regnum),
  1431. regnum, &val);
  1432. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1433. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1434. val, err);
  1435. } else {
  1436. err = rtl839x_read_phy(mii_id, page, regnum, &val);
  1437. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1438. }
  1439. if (err)
  1440. return err;
  1441. return val;
  1442. }
  1443. static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1444. {
  1445. return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
  1446. }
  1447. static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1448. {
  1449. u32 val;
  1450. int err;
  1451. struct rtl838x_eth_priv *priv = bus->priv;
  1452. if (priv->phy_is_internal[mii_id])
  1453. return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1454. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1455. err = rtl930x_read_mmd_phy(mii_id,
  1456. mdiobus_c45_devad(regnum),
  1457. regnum, &val);
  1458. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1459. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1460. val, err);
  1461. } else {
  1462. err = rtl930x_read_phy(mii_id, page, regnum, &val);
  1463. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1464. }
  1465. if (err)
  1466. return err;
  1467. return val;
  1468. }
  1469. static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1470. {
  1471. return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
  1472. }
  1473. static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1474. {
  1475. u32 val;
  1476. int err, v;
  1477. struct rtl838x_eth_priv *priv = bus->priv;
  1478. pr_debug("%s: In here, port %d\n", __func__, mii_id);
  1479. if (priv->phy_is_internal[mii_id]) {
  1480. v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1481. if (v < 0) {
  1482. err = v;
  1483. } else {
  1484. err = 0;
  1485. val = v;
  1486. }
  1487. } else {
  1488. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1489. err = rtl931x_read_mmd_phy(mii_id,
  1490. mdiobus_c45_devad(regnum),
  1491. regnum, &val);
  1492. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1493. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1494. val, err);
  1495. } else {
  1496. err = rtl931x_read_phy(mii_id, page, regnum, &val);
  1497. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1498. }
  1499. }
  1500. if (err)
  1501. return err;
  1502. return val;
  1503. }
  1504. static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1505. {
  1506. return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
  1507. }
  1508. static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1509. int regnum, u16 value)
  1510. {
  1511. u32 offset = 0;
  1512. struct rtl838x_eth_priv *priv = bus->priv;
  1513. int err;
  1514. if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
  1515. if (mii_id == 26)
  1516. offset = 0x100;
  1517. sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
  1518. return 0;
  1519. }
  1520. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1521. err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1522. regnum, value);
  1523. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1524. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1525. value, err);
  1526. return err;
  1527. }
  1528. err = rtl838x_write_phy(mii_id, page, regnum, value);
  1529. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1530. return err;
  1531. }
  1532. static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
  1533. int regnum, u16 value)
  1534. {
  1535. return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1536. }
  1537. static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1538. int regnum, u16 value)
  1539. {
  1540. struct rtl838x_eth_priv *priv = bus->priv;
  1541. int err;
  1542. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1543. return rtl839x_write_sds_phy(mii_id, regnum, value);
  1544. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1545. err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1546. regnum, value);
  1547. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1548. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1549. value, err);
  1550. return err;
  1551. }
  1552. err = rtl839x_write_phy(mii_id, page, regnum, value);
  1553. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1554. return err;
  1555. }
  1556. static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
  1557. int regnum, u16 value)
  1558. {
  1559. return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1560. }
  1561. static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1562. int regnum, u16 value)
  1563. {
  1564. struct rtl838x_eth_priv *priv = bus->priv;
  1565. int err;
  1566. if (priv->phy_is_internal[mii_id])
  1567. return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1568. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
  1569. return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1570. regnum, value);
  1571. err = rtl930x_write_phy(mii_id, page, regnum, value);
  1572. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1573. return err;
  1574. }
  1575. static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
  1576. int regnum, u16 value)
  1577. {
  1578. return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1579. }
  1580. static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1581. int regnum, u16 value)
  1582. {
  1583. struct rtl838x_eth_priv *priv = bus->priv;
  1584. int err;
  1585. if (priv->phy_is_internal[mii_id])
  1586. return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1587. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1588. err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1589. regnum, value);
  1590. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1591. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1592. value, err);
  1593. return err;
  1594. }
  1595. err = rtl931x_write_phy(mii_id, page, regnum, value);
  1596. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1597. return err;
  1598. }
  1599. static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
  1600. int regnum, u16 value)
  1601. {
  1602. return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1603. }
  1604. static int rtl838x_mdio_reset(struct mii_bus *bus)
  1605. {
  1606. pr_debug("%s called\n", __func__);
  1607. /* Disable MAC polling the PHY so that we can start configuration */
  1608. sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
  1609. /* Enable PHY control via SoC */
  1610. sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
  1611. // Probably should reset all PHYs here...
  1612. return 0;
  1613. }
  1614. static int rtl839x_mdio_reset(struct mii_bus *bus)
  1615. {
  1616. return 0;
  1617. pr_debug("%s called\n", __func__);
  1618. /* BUG: The following does not work, but should! */
  1619. /* Disable MAC polling the PHY so that we can start configuration */
  1620. sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
  1621. sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
  1622. /* Disable PHY polling via SoC */
  1623. sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);
  1624. // Probably should reset all PHYs here...
  1625. return 0;
  1626. }
  1627. u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
  1628. 8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
  1629. static int rtl930x_mdio_reset(struct mii_bus *bus)
  1630. {
  1631. int i;
  1632. int pos;
  1633. struct rtl838x_eth_priv *priv = bus->priv;
  1634. u32 c45_mask = 0;
  1635. u32 poll_sel[2];
  1636. u32 poll_ctrl = 0;
  1637. u32 private_poll_mask = 0;
  1638. u32 v;
  1639. bool uses_usxgmii = false; // For the Aquantia PHYs
  1640. bool uses_hisgmii = false; // For the RTL8221/8226
  1641. // Mapping of port to phy-addresses on an SMI bus
  1642. poll_sel[0] = poll_sel[1] = 0;
  1643. for (i = 0; i < RTL930X_CPU_PORT; i++) {
  1644. if (priv->smi_bus[i] > 3)
  1645. continue;
  1646. pos = (i % 6) * 5;
  1647. sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
  1648. RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
  1649. pos = (i * 2) % 32;
  1650. poll_sel[i / 16] |= priv->smi_bus[i] << pos;
  1651. poll_ctrl |= BIT(20 + priv->smi_bus[i]);
  1652. }
  1653. // Configure which SMI bus is behind which port number
  1654. sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
  1655. sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
  1656. // Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+)
  1657. sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
  1658. // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
  1659. for (i = 0; i < 4; i++)
  1660. if (priv->smi_bus_isc45[i])
  1661. c45_mask |= BIT(i + 16);
  1662. pr_info("c45_mask: %08x\n", c45_mask);
  1663. sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
  1664. // Set the MAC type of each port according to the PHY-interface
  1665. // Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0
  1666. v = 0;
  1667. for (i = 0; i < RTL930X_CPU_PORT; i++) {
  1668. switch (priv->interfaces[i]) {
  1669. case PHY_INTERFACE_MODE_10GBASER:
  1670. break; // Serdes: Value = 0
  1671. case PHY_INTERFACE_MODE_HSGMII:
  1672. private_poll_mask |= BIT(i);
  1673. // fallthrough
  1674. case PHY_INTERFACE_MODE_USXGMII:
  1675. v |= BIT(mac_type_bit[i]);
  1676. uses_usxgmii = true;
  1677. break;
  1678. case PHY_INTERFACE_MODE_QSGMII:
  1679. private_poll_mask |= BIT(i);
  1680. v |= 3 << mac_type_bit[i];
  1681. break;
  1682. default:
  1683. break;
  1684. }
  1685. }
  1686. sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);
  1687. // Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones)
  1688. sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);
  1689. /* The following magic values are found in the port configuration, they seem to
  1690. * define different ways of polling a PHY. The below is for the Aquantia PHYs of
  1691. * the XGS1250 and the RTL8226 of the XGS1210 */
  1692. if (uses_usxgmii) {
  1693. sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
  1694. sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
  1695. sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
  1696. }
  1697. if (uses_hisgmii) {
  1698. sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
  1699. sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
  1700. sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
  1701. }
  1702. pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
  1703. sw_r32(RTL930X_SMI_GLB_CTRL));
  1704. pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
  1705. sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
  1706. pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
  1707. sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
  1708. pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
  1709. sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
  1710. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
  1711. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
  1712. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
  1713. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
  1714. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
  1715. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
  1716. pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
  1717. sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));
  1718. return 0;
  1719. }
  1720. static int rtl931x_mdio_reset(struct mii_bus *bus)
  1721. {
  1722. int i;
  1723. int pos;
  1724. struct rtl838x_eth_priv *priv = bus->priv;
  1725. u32 c45_mask = 0;
  1726. u32 poll_sel[4];
  1727. u32 poll_ctrl = 0;
  1728. bool mdc_on[4];
  1729. pr_info("%s called\n", __func__);
  1730. // Disable port polling for configuration purposes
  1731. sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
  1732. sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
  1733. msleep(100);
  1734. mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
  1735. // Mapping of port to phy-addresses on an SMI bus
  1736. poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
  1737. for (i = 0; i < 56; i++) {
  1738. pos = (i % 6) * 5;
  1739. sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
  1740. pos = (i * 2) % 32;
  1741. poll_sel[i / 16] |= priv->smi_bus[i] << pos;
  1742. poll_ctrl |= BIT(20 + priv->smi_bus[i]);
  1743. mdc_on[priv->smi_bus[i]] = true;
  1744. }
  1745. // Configure which SMI bus is behind which port number
  1746. for (i = 0; i < 4; i++) {
  1747. pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
  1748. sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
  1749. }
  1750. // Configure which SMI busses
  1751. pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
  1752. pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
  1753. for (i = 0; i < 4; i++) {
  1754. // bus is polled in c45
  1755. if (priv->smi_bus_isc45[i])
  1756. c45_mask |= 0x2 << (i * 2); // Std. C45, non-standard is 0x3
  1757. // Enable bus access via MDC
  1758. if (mdc_on[i])
  1759. sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
  1760. }
  1761. pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
  1762. pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
  1763. /* We have a 10G PHY enable polling
  1764. sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
  1765. sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
  1766. sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
  1767. */
  1768. sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);
  1769. return 0;
  1770. }
  1771. static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
  1772. {
  1773. pr_info("In %s\n", __func__);
  1774. // Initialize Encapsulation memory and wait until finished
  1775. sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
  1776. do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
  1777. pr_info("%s: init ENCAP done\n", __func__);
  1778. // Initialize Managemen Information Base memory and wait until finished
  1779. sw_w32(0x1, RTL931X_MEM_MIB_INIT);
  1780. do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
  1781. pr_info("%s: init MIB done\n", __func__);
  1782. // Initialize ACL (PIE) memory and wait until finished
  1783. sw_w32(0x1, RTL931X_MEM_ACL_INIT);
  1784. do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
  1785. pr_info("%s: init ACL done\n", __func__);
  1786. // Initialize ALE memory and wait until finished
  1787. sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
  1788. do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
  1789. sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
  1790. sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
  1791. do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
  1792. pr_info("%s: init ALE done\n", __func__);
  1793. // Enable ESD auto recovery
  1794. sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);
  1795. // Init SPI, is this for thermal control or what?
  1796. sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);
  1797. return 0;
  1798. }
  1799. static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
  1800. {
  1801. struct device_node *mii_np, *dn;
  1802. u32 pn;
  1803. int ret;
  1804. pr_debug("%s called\n", __func__);
  1805. mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
  1806. if (!mii_np) {
  1807. dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
  1808. return -ENODEV;
  1809. }
  1810. if (!of_device_is_available(mii_np)) {
  1811. ret = -ENODEV;
  1812. goto err_put_node;
  1813. }
  1814. priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
  1815. if (!priv->mii_bus) {
  1816. ret = -ENOMEM;
  1817. goto err_put_node;
  1818. }
  1819. switch(priv->family_id) {
  1820. case RTL8380_FAMILY_ID:
  1821. priv->mii_bus->name = "rtl838x-eth-mdio";
  1822. priv->mii_bus->read = rtl838x_mdio_read;
  1823. priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
  1824. priv->mii_bus->write = rtl838x_mdio_write;
  1825. priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
  1826. priv->mii_bus->reset = rtl838x_mdio_reset;
  1827. break;
  1828. case RTL8390_FAMILY_ID:
  1829. priv->mii_bus->name = "rtl839x-eth-mdio";
  1830. priv->mii_bus->read = rtl839x_mdio_read;
  1831. priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
  1832. priv->mii_bus->write = rtl839x_mdio_write;
  1833. priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
  1834. priv->mii_bus->reset = rtl839x_mdio_reset;
  1835. break;
  1836. case RTL9300_FAMILY_ID:
  1837. priv->mii_bus->name = "rtl930x-eth-mdio";
  1838. priv->mii_bus->read = rtl930x_mdio_read;
  1839. priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
  1840. priv->mii_bus->write = rtl930x_mdio_write;
  1841. priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
  1842. priv->mii_bus->reset = rtl930x_mdio_reset;
  1843. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1844. break;
  1845. case RTL9310_FAMILY_ID:
  1846. priv->mii_bus->name = "rtl931x-eth-mdio";
  1847. priv->mii_bus->read = rtl931x_mdio_read;
  1848. priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
  1849. priv->mii_bus->write = rtl931x_mdio_write;
  1850. priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
  1851. priv->mii_bus->reset = rtl931x_mdio_reset;
  1852. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1853. break;
  1854. }
  1855. priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
  1856. priv->mii_bus->priv = priv;
  1857. priv->mii_bus->parent = &priv->pdev->dev;
  1858. for_each_node_by_name(dn, "ethernet-phy") {
  1859. u32 smi_addr[2];
  1860. if (of_property_read_u32(dn, "reg", &pn))
  1861. continue;
  1862. if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
  1863. smi_addr[0] = 0;
  1864. smi_addr[1] = pn;
  1865. }
  1866. if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
  1867. priv->sds_id[pn] = -1;
  1868. else {
  1869. pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
  1870. }
  1871. if (pn < MAX_PORTS) {
  1872. priv->smi_bus[pn] = smi_addr[0];
  1873. priv->smi_addr[pn] = smi_addr[1];
  1874. } else {
  1875. pr_err("%s: illegal port number %d\n", __func__, pn);
  1876. }
  1877. if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
  1878. priv->smi_bus_isc45[smi_addr[0]] = true;
  1879. if (of_property_read_bool(dn, "phy-is-integrated")) {
  1880. priv->phy_is_internal[pn] = true;
  1881. }
  1882. }
  1883. dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
  1884. if (!dn) {
  1885. dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
  1886. return -ENODEV;
  1887. }
  1888. for_each_node_by_name(dn, "port") {
  1889. if (of_property_read_u32(dn, "reg", &pn))
  1890. continue;
  1891. pr_debug("%s Looking at port %d\n", __func__, pn);
  1892. if (pn > priv->cpu_port)
  1893. continue;
  1894. if (of_get_phy_mode(dn, &priv->interfaces[pn]))
  1895. priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
  1896. pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
  1897. }
  1898. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
  1899. ret = of_mdiobus_register(priv->mii_bus, mii_np);
  1900. err_put_node:
  1901. of_node_put(mii_np);
  1902. return ret;
  1903. }
  1904. static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
  1905. {
  1906. pr_debug("%s called\n", __func__);
  1907. if (!priv->mii_bus)
  1908. return 0;
  1909. mdiobus_unregister(priv->mii_bus);
  1910. mdiobus_free(priv->mii_bus);
  1911. return 0;
  1912. }
  1913. static netdev_features_t rtl838x_fix_features(struct net_device *dev,
  1914. netdev_features_t features)
  1915. {
  1916. return features;
  1917. }
  1918. static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
  1919. {
  1920. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1921. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1922. if (!(features & NETIF_F_RXCSUM))
  1923. sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1924. else
  1925. sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
  1926. }
  1927. return 0;
  1928. }
  1929. static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
  1930. {
  1931. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1932. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1933. if (!(features & NETIF_F_RXCSUM))
  1934. sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1935. else
  1936. sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
  1937. }
  1938. return 0;
  1939. }
  1940. static const struct net_device_ops rtl838x_eth_netdev_ops = {
  1941. .ndo_open = rtl838x_eth_open,
  1942. .ndo_stop = rtl838x_eth_stop,
  1943. .ndo_start_xmit = rtl838x_eth_tx,
  1944. .ndo_select_queue = rtl83xx_pick_tx_queue,
  1945. .ndo_set_mac_address = rtl838x_set_mac_address,
  1946. .ndo_validate_addr = eth_validate_addr,
  1947. .ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
  1948. .ndo_tx_timeout = rtl838x_eth_tx_timeout,
  1949. .ndo_set_features = rtl83xx_set_features,
  1950. .ndo_fix_features = rtl838x_fix_features,
  1951. .ndo_setup_tc = rtl83xx_setup_tc,
  1952. };
  1953. static const struct net_device_ops rtl839x_eth_netdev_ops = {
  1954. .ndo_open = rtl838x_eth_open,
  1955. .ndo_stop = rtl838x_eth_stop,
  1956. .ndo_start_xmit = rtl838x_eth_tx,
  1957. .ndo_select_queue = rtl83xx_pick_tx_queue,
  1958. .ndo_set_mac_address = rtl838x_set_mac_address,
  1959. .ndo_validate_addr = eth_validate_addr,
  1960. .ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
  1961. .ndo_tx_timeout = rtl838x_eth_tx_timeout,
  1962. .ndo_set_features = rtl83xx_set_features,
  1963. .ndo_fix_features = rtl838x_fix_features,
  1964. .ndo_setup_tc = rtl83xx_setup_tc,
  1965. };
  1966. static const struct net_device_ops rtl930x_eth_netdev_ops = {
  1967. .ndo_open = rtl838x_eth_open,
  1968. .ndo_stop = rtl838x_eth_stop,
  1969. .ndo_start_xmit = rtl838x_eth_tx,
  1970. .ndo_select_queue = rtl93xx_pick_tx_queue,
  1971. .ndo_set_mac_address = rtl838x_set_mac_address,
  1972. .ndo_validate_addr = eth_validate_addr,
  1973. .ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
  1974. .ndo_tx_timeout = rtl838x_eth_tx_timeout,
  1975. .ndo_set_features = rtl93xx_set_features,
  1976. .ndo_fix_features = rtl838x_fix_features,
  1977. .ndo_setup_tc = rtl83xx_setup_tc,
  1978. };
  1979. static const struct net_device_ops rtl931x_eth_netdev_ops = {
  1980. .ndo_open = rtl838x_eth_open,
  1981. .ndo_stop = rtl838x_eth_stop,
  1982. .ndo_start_xmit = rtl838x_eth_tx,
  1983. .ndo_select_queue = rtl93xx_pick_tx_queue,
  1984. .ndo_set_mac_address = rtl838x_set_mac_address,
  1985. .ndo_validate_addr = eth_validate_addr,
  1986. .ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
  1987. .ndo_tx_timeout = rtl838x_eth_tx_timeout,
  1988. .ndo_set_features = rtl93xx_set_features,
  1989. .ndo_fix_features = rtl838x_fix_features,
  1990. };
  1991. static const struct phylink_mac_ops rtl838x_phylink_ops = {
  1992. .validate = rtl838x_validate,
  1993. .mac_pcs_get_state = rtl838x_mac_pcs_get_state,
  1994. .mac_an_restart = rtl838x_mac_an_restart,
  1995. .mac_config = rtl838x_mac_config,
  1996. .mac_link_down = rtl838x_mac_link_down,
  1997. .mac_link_up = rtl838x_mac_link_up,
  1998. };
  1999. static const struct ethtool_ops rtl838x_ethtool_ops = {
  2000. .get_link_ksettings = rtl838x_get_link_ksettings,
  2001. .set_link_ksettings = rtl838x_set_link_ksettings,
  2002. };
  2003. static int __init rtl838x_eth_probe(struct platform_device *pdev)
  2004. {
  2005. struct net_device *dev;
  2006. struct device_node *dn = pdev->dev.of_node;
  2007. struct rtl838x_eth_priv *priv;
  2008. struct resource *res, *mem;
  2009. phy_interface_t phy_mode;
  2010. struct phylink *phylink;
  2011. int err = 0, i, rxrings, rxringlen;
  2012. struct ring_b *ring;
  2013. pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
  2014. (u32)pdev, (u32)(&(pdev->dev)));
  2015. if (!dn) {
  2016. dev_err(&pdev->dev, "No DT found\n");
  2017. return -EINVAL;
  2018. }
  2019. rxrings = (soc_info.family == RTL8380_FAMILY_ID
  2020. || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
  2021. rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
  2022. rxringlen = MAX_ENTRIES / rxrings;
  2023. rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
  2024. dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
  2025. if (!dev) {
  2026. err = -ENOMEM;
  2027. goto err_free;
  2028. }
  2029. SET_NETDEV_DEV(dev, &pdev->dev);
  2030. priv = netdev_priv(dev);
  2031. /* obtain buffer memory space */
  2032. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2033. if (res) {
  2034. mem = devm_request_mem_region(&pdev->dev, res->start,
  2035. resource_size(res), res->name);
  2036. if (!mem) {
  2037. dev_err(&pdev->dev, "cannot request memory space\n");
  2038. err = -ENXIO;
  2039. goto err_free;
  2040. }
  2041. dev->mem_start = mem->start;
  2042. dev->mem_end = mem->end;
  2043. } else {
  2044. dev_err(&pdev->dev, "cannot request IO resource\n");
  2045. err = -ENXIO;
  2046. goto err_free;
  2047. }
  2048. /* Allocate buffer memory */
  2049. priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
  2050. + sizeof(struct ring_b) + sizeof(struct notify_b),
  2051. (void *)&dev->mem_start, GFP_KERNEL);
  2052. if (!priv->membase) {
  2053. dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
  2054. err = -ENOMEM;
  2055. goto err_free;
  2056. }
  2057. // Allocate ring-buffer space at the end of the allocated memory
  2058. ring = priv->membase;
  2059. ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
  2060. spin_lock_init(&priv->lock);
  2061. dev->ethtool_ops = &rtl838x_ethtool_ops;
  2062. dev->min_mtu = ETH_ZLEN;
  2063. dev->max_mtu = 1536;
  2064. dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
  2065. dev->hw_features = NETIF_F_RXCSUM;
  2066. priv->id = soc_info.id;
  2067. priv->family_id = soc_info.family;
  2068. if (priv->id) {
  2069. pr_info("Found SoC ID: %4x: %s, family %x\n",
  2070. priv->id, soc_info.name, priv->family_id);
  2071. } else {
  2072. pr_err("Unknown chip id (%04x)\n", priv->id);
  2073. return -ENODEV;
  2074. }
  2075. switch (priv->family_id) {
  2076. case RTL8380_FAMILY_ID:
  2077. priv->cpu_port = RTL838X_CPU_PORT;
  2078. priv->r = &rtl838x_reg;
  2079. dev->netdev_ops = &rtl838x_eth_netdev_ops;
  2080. break;
  2081. case RTL8390_FAMILY_ID:
  2082. priv->cpu_port = RTL839X_CPU_PORT;
  2083. priv->r = &rtl839x_reg;
  2084. dev->netdev_ops = &rtl839x_eth_netdev_ops;
  2085. break;
  2086. case RTL9300_FAMILY_ID:
  2087. priv->cpu_port = RTL930X_CPU_PORT;
  2088. priv->r = &rtl930x_reg;
  2089. dev->netdev_ops = &rtl930x_eth_netdev_ops;
  2090. break;
  2091. case RTL9310_FAMILY_ID:
  2092. priv->cpu_port = RTL931X_CPU_PORT;
  2093. priv->r = &rtl931x_reg;
  2094. dev->netdev_ops = &rtl931x_eth_netdev_ops;
  2095. rtl931x_chip_init(priv);
  2096. break;
  2097. default:
  2098. pr_err("Unknown SoC family\n");
  2099. return -ENODEV;
  2100. }
  2101. priv->rxringlen = rxringlen;
  2102. priv->rxrings = rxrings;
  2103. /* Obtain device IRQ number */
  2104. dev->irq = platform_get_irq(pdev, 0);
  2105. if (dev->irq < 0) {
  2106. dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
  2107. goto err_free;
  2108. }
  2109. err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
  2110. IRQF_SHARED, dev->name, dev);
  2111. if (err) {
  2112. dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
  2113. __func__, err);
  2114. goto err_free;
  2115. }
  2116. rtl8380_init_mac(priv);
  2117. /* try to get mac address in the following order:
  2118. * 1) from device tree data
  2119. * 2) from internal registers set by bootloader
  2120. */
  2121. of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
  2122. if (is_valid_ether_addr(dev->dev_addr)) {
  2123. rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
  2124. } else {
  2125. dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
  2126. dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
  2127. dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
  2128. dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
  2129. dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
  2130. dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
  2131. }
  2132. /* if the address is invalid, use a random value */
  2133. if (!is_valid_ether_addr(dev->dev_addr)) {
  2134. struct sockaddr sa = { AF_UNSPEC };
  2135. netdev_warn(dev, "Invalid MAC address, using random\n");
  2136. eth_hw_addr_random(dev);
  2137. memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
  2138. if (rtl838x_set_mac_address(dev, &sa))
  2139. netdev_warn(dev, "Failed to set MAC address.\n");
  2140. }
  2141. pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
  2142. sw_r32(priv->r->mac + 4));
  2143. strcpy(dev->name, "eth%d");
  2144. priv->pdev = pdev;
  2145. priv->netdev = dev;
  2146. err = rtl838x_mdio_init(priv);
  2147. if (err)
  2148. goto err_free;
  2149. err = register_netdev(dev);
  2150. if (err)
  2151. goto err_free;
  2152. for (i = 0; i < priv->rxrings; i++) {
  2153. priv->rx_qs[i].id = i;
  2154. priv->rx_qs[i].priv = priv;
  2155. netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
  2156. }
  2157. platform_set_drvdata(pdev, dev);
  2158. phy_mode = PHY_INTERFACE_MODE_NA;
  2159. err = of_get_phy_mode(dn, &phy_mode);
  2160. if (err < 0) {
  2161. dev_err(&pdev->dev, "incorrect phy-mode\n");
  2162. err = -EINVAL;
  2163. goto err_free;
  2164. }
  2165. priv->phylink_config.dev = &dev->dev;
  2166. priv->phylink_config.type = PHYLINK_NETDEV;
  2167. phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
  2168. phy_mode, &rtl838x_phylink_ops);
  2169. if (IS_ERR(phylink)) {
  2170. err = PTR_ERR(phylink);
  2171. goto err_free;
  2172. }
  2173. priv->phylink = phylink;
  2174. return 0;
  2175. err_free:
  2176. pr_err("Error setting up netdev, freeing it again.\n");
  2177. free_netdev(dev);
  2178. return err;
  2179. }
  2180. static int rtl838x_eth_remove(struct platform_device *pdev)
  2181. {
  2182. struct net_device *dev = platform_get_drvdata(pdev);
  2183. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  2184. int i;
  2185. if (dev) {
  2186. pr_info("Removing platform driver for rtl838x-eth\n");
  2187. rtl838x_mdio_remove(priv);
  2188. rtl838x_hw_stop(priv);
  2189. netif_tx_stop_all_queues(dev);
  2190. for (i = 0; i < priv->rxrings; i++)
  2191. netif_napi_del(&priv->rx_qs[i].napi);
  2192. unregister_netdev(dev);
  2193. free_netdev(dev);
  2194. }
  2195. return 0;
  2196. }
/* Device-tree match table; also exported as a module alias so the
 * driver can be autoloaded from the DT "compatible" property.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
{ .compatible = "realtek,rtl838x-eth"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
  2202. static struct platform_driver rtl838x_eth_driver = {
  2203. .probe = rtl838x_eth_probe,
  2204. .remove = rtl838x_eth_remove,
  2205. .driver = {
  2206. .name = "rtl838x-eth",
  2207. .pm = NULL,
  2208. .of_match_table = rtl838x_eth_of_ids,
  2209. },
  2210. };
  2211. module_platform_driver(rtl838x_eth_driver);
  2212. MODULE_AUTHOR("B. Koblitz");
  2213. MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
  2214. MODULE_LICENSE("GPL");