rtl838x_eth.c 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* linux/drivers/net/ethernet/rtl838x_eth.c
  3. * Copyright (C) 2020 B. Koblitz
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/interrupt.h>
  8. #include <linux/io.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/sched.h>
  11. #include <linux/slab.h>
  12. #include <linux/of.h>
  13. #include <linux/of_net.h>
  14. #include <linux/of_mdio.h>
  15. #include <linux/module.h>
  16. #include <linux/phylink.h>
  17. #include <linux/pkt_sched.h>
  18. #include <net/dsa.h>
  19. #include <net/switchdev.h>
  20. #include <asm/cacheflush.h>
  21. #include <asm/mach-rtl838x/mach-rtl83xx.h>
  22. #include "rtl838x_eth.h"
  23. extern struct rtl83xx_soc_info soc_info;
  24. /* Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
  25. * The ring is assigned by switch based on packet/port priortity
  26. * Maximum number of TX rings is 2, Ring 2 being the high priority
  27. * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
  28. * for an RX ring, MAX_ENTRIES the maximum number of entries
  29. * available in total for all queues.
  30. */
#define MAX_RXRINGS 32			/* max RX rings (32 on RTL93xx, 8 on RTL83xx) */
#define MAX_RXLEN 300			/* maximum length of one RX ring */
#define MAX_ENTRIES (300 * 8)		/* max RX entries in total for all queues */
#define TXRINGS 2			/* TX rings; ring 2 is high-prio on RTL93xx */
#define TXRINGLEN 160			/* entries per TX ring */
#define NOTIFY_EVENTS 10		/* L2 events per notification block */
#define NOTIFY_BLOCKS 10		/* blocks in the L2 notification ring */
#define TX_EN 0x8			/* TX enable bit, RTL83xx (presumably DMA_IF_CTRL) */
#define RX_EN 0x4			/* RX enable bit, RTL83xx */
#define TX_EN_93XX 0x20			/* TX enable bit, RTL93xx */
#define RX_EN_93XX 0x10			/* RX enable bit, RTL93xx */
#define TX_DO 0x2			/* TX kick-off bit - TODO confirm against datasheet */
#define WRAP 0x2			/* RX descriptor flag: last entry of the ring */
#define MAX_PORTS 57
#define MAX_SMI_BUSSES 4
#define RING_BUFFER 1600		/* packet buffer bytes per ring entry */
/* DMA packet descriptor header shared with the switch ASIC.
 * One p_hdr exists per RX/TX ring entry; cpu_tag[] carries the
 * SoC-specific CPU tag (port mask, priority, trap reason, ...).
 */
struct p_hdr {
	uint8_t *buf;		/* pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];
} __packed __aligned(1);
/* One L2 table notification event, hardware layout (RTL839x).
 * Consumed by rtl839x_l2_notification_handler().
 */
struct n_event {
	uint32_t type:2;	/* non-zero means "add", 0 means "delete" (see handler) */
	uint32_t fidVid:12;	/* FID/VID of the FDB entry - not evaluated here */
	uint64_t mac:48;	/* MAC address of the FDB entry */
	uint32_t slp:6;		/* presumably source logical port - unused here */
	uint32_t valid:1;	/* event slot holds usable data */
	uint32_t reserved:27;
} __packed __aligned(1);
/* In-memory layout of all DMA rings, descriptor headers and TX buffers.
 * Lives at priv->membase; the ASIC is handed KSEG1 (uncached) addresses
 * into this block (see rtl838x_rb_cleanup()).
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];	/* RX descriptor words: header addr | flags */
	uint32_t tx_r[TXRINGS][TXRINGLEN];	/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];	/* current position in each RX ring */
	uint32_t c_tx[TXRINGS];		/* current position in each TX ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;	/* RX packet buffers, allocated separately */
};
/* One block of L2 notification events as filled in by the RTL839x */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
/* L2 notification ring, placed directly behind struct ring_b in
 * priv->membase.  ring[] bit 0 is the ownership flag: the handler
 * processes a block while the bit is clear and sets it to hand the
 * block back to the switch.
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
  83. static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  84. {
  85. /* cpu_tag[0] is reserved on the RTL83XX SoCs */
  86. h->cpu_tag[1] = 0x0400; /* BIT 10: RTL8380_CPU_TAG */
  87. h->cpu_tag[2] = 0x0200; /* Set only AS_DPM, to enable DPM settings below */
  88. h->cpu_tag[3] = 0x0000;
  89. h->cpu_tag[4] = BIT(dest_port) >> 16;
  90. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  91. /* Set internal priority (PRI) and enable (AS_PRI) */
  92. if (prio >= 0)
  93. h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
  94. }
/* Build the RTL839x CPU TX tag for direct transmission to @dest_port.
 * @prio >= 0 additionally selects an internal priority (AS_PRI).
 */
static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
{
	/* cpu_tag[0] is reserved on the RTL83XX SoCs */
	h->cpu_tag[1] = 0x0100; /* RTL8390_CPU_TAG marker */
	h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
	h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
	/* h->cpu_tag[1] |= BIT(1) | BIT(0); */ /* Bypass filter 1/2 */
	/* The destination port mask spans several tag words: tag[5]/tag[4]
	 * hold ports 0-31, tag[3] ports 32-47, and the low nibble of tag[2]
	 * the remaining high ports.
	 */
	if (dest_port >= 32) {
		dest_port -= 32;
		h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
		h->cpu_tag[3] = BIT(dest_port) & 0xffff;
	} else {
		h->cpu_tag[4] = BIT(dest_port) >> 16;
		h->cpu_tag[5] = BIT(dest_port) & 0xffff;
	}
	/* Set internal priority (PRI) and enable (AS_PRI) */
	if (prio >= 0)
		h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
}
  114. static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  115. {
  116. h->cpu_tag[0] = 0x8000; /* CPU tag marker */
  117. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  118. h->cpu_tag[3] = 0;
  119. h->cpu_tag[4] = 0;
  120. h->cpu_tag[5] = 0;
  121. h->cpu_tag[6] = BIT(dest_port) >> 16;
  122. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  123. /* Enable (AS_QID) and set priority queue (QID) */
  124. if (prio >= 0)
  125. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  126. }
  127. static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  128. {
  129. h->cpu_tag[0] = 0x8000; /* CPU tag marker */
  130. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  131. h->cpu_tag[3] = 0;
  132. h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
  133. if (dest_port >= 32) {
  134. dest_port -= 32;
  135. h->cpu_tag[4] = BIT(dest_port) >> 16;
  136. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  137. } else {
  138. h->cpu_tag[6] = BIT(dest_port) >> 16;
  139. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  140. }
  141. /* Enable (AS_QID) and set priority queue (QID) */
  142. if (prio >= 0)
  143. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  144. }
  145. static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
  146. {
  147. h->cpu_tag[2] |= BIT(4); /* Enable VLAN forwarding offload */
  148. h->cpu_tag[2] |= (vlan >> 8) & 0xf;
  149. h->cpu_tag[3] |= (vlan & 0xff) << 8;
  150. }
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* ring number */
	struct rtl838x_eth_priv *priv;	/* owning device */
	struct napi_struct napi;
};
/* Driver-private state of the CPU ethernet/DMA interface */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA memory: struct ring_b + struct notify_b */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* per-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* SoC family: RTL8380/8390/9300/9310 */
	const struct rtl838x_eth_reg *r;	/* SoC-specific registers and callbacks */
	u8 cpu_port;
	u32 lastEvent;			/* next L2 notification block to consume */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];		/* SMI bus number per port */
	u8 smi_addr[MAX_PORTS];		/* SMI address per port */
	u32 sds_id[MAX_PORTS];		/* SerDes id per port */
	bool smi_bus_isc45[MAX_SMI_BUSSES];	/* bus speaks clause-45 MDIO */
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
  179. extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
  180. extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
  181. extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
  182. extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
  183. extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
  184. extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  185. extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
  186. extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  187. extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  188. extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
  189. extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  190. extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
  191. /* On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
  192. * the rings. Writing x into these registers substracts x from its content.
  193. * When the content reaches the ring size, the ASIC no longer adds
  194. * packets to this receive queue.
  195. */
/* Return @released consumed RX descriptors of ring @r to the fill-level
 * counter - no-op here, see below.
 */
void rtl838x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL838x SoCs */
}
/* Return @released consumed RX descriptors of ring @r to the fill-level
 * counter - no-op here, see below.
 */
void rtl839x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL839x SoCs */
}
/* Subtract @released from the RTL930x fill-level counter of RX ring @r.
 * Three 10-bit counters are packed per 32-bit register (see the comment
 * above on the write-to-subtract semantics).
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;		/* bit offset of ring r's counter */
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);
	v = (v >> pos) & 0x3ff;		/* current fill level of ring r */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this writes ring r's old counter value at bit 0 of
	 * the shared register, which looks like it clobbers the counter of
	 * the first ring in this group - verify against the datasheet /
	 * upstream driver before touching.
	 */
	sw_w32(v, reg);
}
/* Subtract @released from the RTL931x fill-level counter of RX ring @r.
 * Same packed-counter layout as the RTL930x variant above.
 */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;		/* bit offset of ring r's counter */
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);
	v = (v >> pos) & 0x3ff;		/* current fill level of ring r */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): as in rtl930x_update_cntr, this final write stores
	 * the old value at bit 0 of the shared register - confirm intent.
	 */
	sw_w32(v, reg);
}
/* Decoded fields of a received CPU tag */
struct dsa_tag {
	u8 reason;		/* trap/forwarding reason code */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* ingress switch port */
	u8 l2_offloaded;	/* 1 if the switch already did the L2 forwarding */
	u8 prio;
	bool crc_error;
};
  231. bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  232. {
  233. /* cpu_tag[0] is reserved. Fields are off-by-one */
  234. t->reason = h->cpu_tag[4] & 0xf;
  235. t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
  236. t->port = h->cpu_tag[1] & 0x1f;
  237. t->crc_error = t->reason == 13;
  238. pr_debug("Reason: %d\n", t->reason);
  239. if (t->reason != 6) /* NIC_RX_REASON_SPECIAL_TRAP */
  240. t->l2_offloaded = 1;
  241. else
  242. t->l2_offloaded = 0;
  243. return t->l2_offloaded;
  244. }
  245. bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  246. {
  247. /* cpu_tag[0] is reserved. Fields are off-by-one */
  248. t->reason = h->cpu_tag[5] & 0x1f;
  249. t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
  250. t->port = h->cpu_tag[1] & 0x3f;
  251. t->crc_error = h->cpu_tag[4] & BIT(6);
  252. pr_debug("Reason: %d\n", t->reason);
  253. if ((t->reason >= 7 && t->reason <= 13) || /* NIC_RX_REASON_RMA */
  254. (t->reason >= 23 && t->reason <= 25)) /* NIC_RX_REASON_SPECIAL_TRAP */
  255. t->l2_offloaded = 0;
  256. else
  257. t->l2_offloaded = 1;
  258. return t->l2_offloaded;
  259. }
  260. bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  261. {
  262. t->reason = h->cpu_tag[7] & 0x3f;
  263. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  264. t->port = (h->cpu_tag[0] >> 8) & 0x1f;
  265. t->crc_error = h->cpu_tag[1] & BIT(6);
  266. pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
  267. if (t->reason >= 19 && t->reason <= 27)
  268. t->l2_offloaded = 0;
  269. else
  270. t->l2_offloaded = 1;
  271. return t->l2_offloaded;
  272. }
  273. bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  274. {
  275. t->reason = h->cpu_tag[7] & 0x3f;
  276. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  277. t->port = (h->cpu_tag[0] >> 8) & 0x3f;
  278. t->crc_error = h->cpu_tag[1] & BIT(6);
  279. if (t->reason != 63)
  280. pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
  281. if (t->reason >= 19 && t->reason <= 27) /* NIC_RX_REASON_RMA */
  282. t->l2_offloaded = 0;
  283. else
  284. t->l2_offloaded = 1;
  285. return t->l2_offloaded;
  286. }
/* Discard the RX ring-buffers, called as part of the net-ISR
 * when the buffer runs over.  For every ring, re-arm the descriptors
 * up to the ASIC's current position so reception can continue; the
 * pending packets are dropped.
 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	for (int r = 0; r < priv->rxrings; r++) {
		struct ring_b *ring = priv->membase;
		struct p_hdr *h;
		u32 *last;
		pr_debug("In %s working on r: %d\n", __func__, r);
		/* Descriptor the ASIC will fill next, as uncached address */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Stop at a descriptor whose owned bit (0x1) is still set */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			/* Re-attach this entry's buffer (uncached address) */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			r * priv->rxringlen * RING_BUFFER +
			ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();
			/* Hand the descriptor back; flag the last ring entry with WRAP */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
			WRAP :
			0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
/* Work item carrying FDB events into process context.
 * macs[] is a zero-terminated list; bit 63 of an entry marks an "add"
 * event, the low 48 bits are the MAC address.
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 for the terminating zero entry */
};
  322. void rtl838x_fdb_sync(struct work_struct *work)
  323. {
  324. const struct fdb_update_work *uw = container_of(work, struct fdb_update_work, work);
  325. for (int i = 0; uw->macs[i]; i++) {
  326. struct switchdev_notifier_fdb_info info;
  327. u8 addr[ETH_ALEN];
  328. int action;
  329. action = (uw->macs[i] & (1ULL << 63)) ?
  330. SWITCHDEV_FDB_ADD_TO_BRIDGE :
  331. SWITCHDEV_FDB_DEL_TO_BRIDGE;
  332. u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
  333. info.addr = &addr[0];
  334. info.vid = 0;
  335. info.offloaded = 1;
  336. pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
  337. call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
  338. }
  339. kfree(work);
  340. }
  341. static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
  342. {
  343. struct notify_b *nb = priv->membase + sizeof(struct ring_b);
  344. u32 e = priv->lastEvent;
  345. while (!(nb->ring[e] & 1)) {
  346. struct fdb_update_work *w;
  347. struct n_event *event;
  348. u64 mac;
  349. int i;
  350. w = kzalloc(sizeof(*w), GFP_ATOMIC);
  351. if (!w) {
  352. pr_err("Out of memory: %s", __func__);
  353. return;
  354. }
  355. INIT_WORK(&w->work, rtl838x_fdb_sync);
  356. for (i = 0; i < NOTIFY_EVENTS; i++) {
  357. event = &nb->blocks[e].events[i];
  358. if (!event->valid)
  359. continue;
  360. mac = event->mac;
  361. if (event->type)
  362. mac |= 1ULL << 63;
  363. w->ndev = priv->netdev;
  364. w->macs[i] = mac;
  365. }
  366. /* Hand the ring entry back to the switch */
  367. nb->ring[e] = nb->ring[e] | 1;
  368. e = (e + 1) % NOTIFY_BLOCKS;
  369. w->macs[i] = 0ULL;
  370. schedule_work(&w->work);
  371. }
  372. priv->lastEvent = e;
  373. }
/* Interrupt handler for the RTL838x/RTL839x NIC DMA interface: ack TX
 * completions, kick NAPI for RX-done rings, refill rings on overrun and
 * forward RTL839x L2 notification events.
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	pr_debug("IRQ: %08x\n", status);
	/* Ignore TX interrupt (bits 16-19) */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}
	/* RX interrupt (bits 8-15, one per ring) */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; NAPI polling
		 * re-enables it when done.
		 */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}
	/* RX buffer overrun (bits 0-7, one per ring) */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}
	/* RTL839x-only L2 notification events (bits 20-22): ack, then drain */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}
	return IRQ_HANDLED;
}
/* Interrupt handler for the RTL930x/RTL931x NIC DMA interface, which
 * splits TX-done, RX-done and RX-runout into separate status registers.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
	__func__, status_tx, status_rx, status_rx_r);
	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}
	/* RX interrupt, one bit per ring */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; NAPI polling
		 * re-enables them when done.
		 */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}
	/* RX buffer overrun: ack, then re-arm the affected rings */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}
	return IRQ_HANDLED;
}
/* Register addresses and SoC-specific callbacks for RTL838x */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
/* Register addresses and SoC-specific callbacks for RTL839x */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
/* Register addresses and SoC-specific callbacks for RTL930x; the 93xx
 * SoCs split interrupt status/mask per event type.
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
/* Register addresses and SoC-specific callbacks for RTL931x */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
/* Bring the NIC part of the SoC into a known state: stop CPU-port traffic,
 * mask and acknowledge all DMA interrupts, pulse the NIC/queue reset bits
 * and re-initialize Head-of-Line handling. On RTL839x the L2-notification
 * interrupt mask and NBUF configuration are preserved across the reset.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;	/* only written/used on RTL8390_FAMILY_ID */
	u32 reset_mask;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	/* Stop TX/RX on the CPU port before touching the DMA engine */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		/* RTL93xx has separate runout/rx-done/tx-done mask+status registers */
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		reset_mask = 0x6;
	else
		reset_mask = 0xc;

	sw_w32_mask(0, reset_mask, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); /* Disabled on RTL8380 */
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		/* Ring sizes/counters are packed three per register, 10 bits each */
		for (int i = 0; i < priv->rxrings; i++) {
			int pos = (i % 3) * 10;

			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			/* NOTE(review): the value written below is not shifted by
			 * 'pos' although the mask is — confirm this is intended.
			 */
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
				    priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Ack any pending link-change status first, then unmask */
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
  626. static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
  627. {
  628. struct ring_b *ring = priv->membase;
  629. for (int i = 0; i < priv->rxrings; i++)
  630. sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
  631. for (int i = 0; i < TXRINGS; i++)
  632. sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
  633. }
/* Enable RX/TX after a reset on RTL838x: configure buffer handling, unmask
 * the DMA interrupts, start the DMA engine and force the CPU port MAC up.
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
/* Enable RX/TX after a reset on RTL839x: configure the RX buffer, unmask
 * the DMA and L2-notification interrupts, start DMA, add the CPU port to
 * the lookup-miss flooding portmask and force the CPU port link up.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); /* Notify IRQ! */

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	/* TODO: The code below should also work for the RTL838x */
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
  673. static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
  674. {
  675. /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
  676. sw_w32(0x06400040, priv->r->dma_if_ctrl);
  677. for (int i = 0; i < priv->rxrings; i++) {
  678. int pos = (i % 3) * 10;
  679. u32 v;
  680. sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));
  681. /* Some SoCs have issues with missing underflow protection */
  682. v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
  683. sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
  684. }
  685. /* Enable Notify, RX done, RX overflow and TX done interrupts */
  686. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
  687. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
  688. sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);
  689. /* Enable DMA */
  690. sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);
  691. /* Restart TX/RX to CPU port, enable CRC checking */
  692. sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
  693. if (priv->family_id == RTL9300_FAMILY_ID)
  694. sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
  695. else
  696. sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);
  697. if (priv->family_id == RTL9300_FAMILY_ID)
  698. sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  699. else
  700. sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
  701. }
/* Initialize all RX and TX descriptor rings and their packet headers.
 * RX descriptors are handed to the switch (owner bit set), TX descriptors
 * stay owned by the CPU; the last descriptor of every ring carries WRAP.
 * Addresses stored in the rings are uncached KSEG1 addresses.
 */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
	for (int i = 0; i < priv->rxrings; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < priv->rxringlen; j++) {
			h = &ring->rx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			/* Each RX buffer is a fixed RING_BUFFER-sized slot in rx_space */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         i * priv->rxringlen * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;

			/* All rings owned by switch, last one wraps */
			ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (priv->rxringlen - 1) ?
			                   WRAP :
			                   0);
		}
		ring->c_rx[i] = 0;	/* start consuming at descriptor 0 */
	}

	for (int i = 0; i < TXRINGS; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < TXRINGLEN; j++) {
			h = &ring->tx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->tx_space +
			                         i * TXRINGLEN * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* TX descriptors start CPU-owned (owner bit clear) */
			ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
		}
		/* Last header is wrapping around */
		ring->tx_r[i][j - 1] |= WRAP;
		ring->c_tx[i] = 0;
	}
}
/* Set up the RTL839x L2-notification ring (placed right after the packet
 * rings in membase), configure the notification threshold and enable
 * flush/suspend notification events.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* All blocks owned by the switch (bit 0), last one wraps */
	for (int i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* Notification threshold: 100 events */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); /* RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN */
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); /* SUSPEND_NOTIFICATION_EN */

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
/* ndo_open callback: reset the NIC, rebuild all rings, start phylink and
 * NAPI, then apply the family-specific RX/TX enable sequence and traps.
 * Always returns 0.
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
		 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (int i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;
	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;
	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2,  RTL930X_VLAN_APP_PKT_CTRL);
		break;
	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);
		/* Disable External CPU access to switch, clear EXT_CPU_EN */
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);
		/* Set PCIE_PWR_DOWN */
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/* Quiesce the hardware: stop CPU-port traffic and DMA, flush the L2 address
 * cache (RTL838x/839x only), force the CPU port link down and mask/ack all
 * TX/RX interrupts before finally switching the DMA engine off.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Family-specific forced-MAC (link down) and IRQ-clear values */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;

	/* Disable RX/TX from/to CPU-port */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); /* Test, whether this is needed */

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			/* Busy-wait until the flush-execute bit clears */
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	/* TODO: L2 flush register is 64 bit on RTL931X and 930X */

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
  863. static int rtl838x_eth_stop(struct net_device *ndev)
  864. {
  865. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  866. pr_info("in %s\n", __func__);
  867. phylink_stop(priv->phylink);
  868. rtl838x_hw_stop(priv);
  869. for (int i = 0; i < priv->rxrings; i++)
  870. napi_disable(&priv->rx_qs[i].napi);
  871. netif_tx_stop_all_queues(ndev);
  872. return 0;
  873. }
  874. static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
  875. {
  876. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  877. * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
  878. */
  879. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  880. sw_w32(0x0, RTL838X_RMA_CTRL_0);
  881. sw_w32(0x0, RTL838X_RMA_CTRL_1);
  882. }
  883. if (ndev->flags & IFF_ALLMULTI)
  884. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  885. if (ndev->flags & IFF_PROMISC) {
  886. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  887. sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
  888. }
  889. }
  890. static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
  891. {
  892. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  893. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  894. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  895. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  896. */
  897. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  898. sw_w32(0x0, RTL839X_RMA_CTRL_0);
  899. sw_w32(0x0, RTL839X_RMA_CTRL_1);
  900. sw_w32(0x0, RTL839X_RMA_CTRL_2);
  901. sw_w32(0x0, RTL839X_RMA_CTRL_3);
  902. }
  903. if (ndev->flags & IFF_ALLMULTI) {
  904. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  905. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  906. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  907. }
  908. if (ndev->flags & IFF_PROMISC) {
  909. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  910. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  911. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  912. sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
  913. }
  914. }
  915. static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
  916. {
  917. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  918. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  919. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  920. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  921. */
  922. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  923. sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
  924. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
  925. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
  926. } else {
  927. sw_w32(0x0, RTL930X_RMA_CTRL_0);
  928. sw_w32(0x0, RTL930X_RMA_CTRL_1);
  929. sw_w32(0x0, RTL930X_RMA_CTRL_2);
  930. }
  931. }
  932. static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
  933. {
  934. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  935. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  936. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
  937. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  938. */
  939. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  940. sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
  941. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
  942. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
  943. } else {
  944. sw_w32(0x0, RTL931X_RMA_CTRL_0);
  945. sw_w32(0x0, RTL931X_RMA_CTRL_1);
  946. sw_w32(0x0, RTL931X_RMA_CTRL_2);
  947. }
  948. }
/* ndo_tx_timeout callback: recover from a stuck TX queue by stopping the
 * hardware, re-programming the ring bases and re-enabling RX/TX.
 * NOTE(review): calls the RTL838x-specific rtl838x_hw_en_rxtx() for all
 * families — confirm this path is only taken/valid on RTL838x.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* ndo_start_xmit callback: copy the packet into the next CPU-owned TX
 * descriptor of the selected ring, optionally converting a trailing DSA
 * tag into an out-of-band destination port for the switch-supplied CPU
 * header, then kick the DMA engine.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the descriptor is still
 * owned by the switch.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;	/* -1: no DSA tag found, let the switch forward */
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) /* Check for high prio queue */
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) &&
	    skb->data[len - 4] == 0x80 &&
	    skb->data[len - 3] < priv->cpu_port &&
	    skb->data[len - 2] == 0x10 &&
	    skb->data[len - 1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len - 3];
		skb->data[len - 4] = skb->data[len - 3] = skb->data[len - 2] = skb->data[len - 1] = 0x00;
		len -= 4;
	}

	len += 4; /* Add space for CRC */

	if (skb_padto(skb, len)) {
		/* skb was freed by skb_padto on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {
		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		/* On RTL8380 SoCs, small packet lengths being sent need adjustments */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		/* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			/* Poll up to 10 times for both TX_DONE/TX_DO-style bits */
			for (int i = 0; i < 10; i++) {
				u32 val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
			/* Ring ID q == 0: Low priority, Ring ID = 1: High prio queue */
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		/* NOTE(review): runs with IRQs disabled under the spinlock;
		 * dev_kfree_skb_any() would be the conventional choice here
		 * — confirm.
		 */
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}

txdone:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
  1042. /* Return queue number for TX. On the RTL83XX, these queues have equal priority
  1043. * so we do round-robin
  1044. */
  1045. u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1046. struct net_device *sb_dev)
  1047. {
  1048. static u8 last = 0;
  1049. last++;
  1050. return last % TXRINGS;
  1051. }
  1052. /* Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
  1053. */
  1054. u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1055. struct net_device *sb_dev)
  1056. {
  1057. if (skb->priority >= TC_PRIO_CONTROL)
  1058. return 1;
  1059. return 0;
  1060. }
  1061. static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
  1062. {
  1063. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1064. struct ring_b *ring = priv->membase;
  1065. LIST_HEAD(rx_list);
  1066. unsigned long flags;
  1067. int work_done = 0;
  1068. u32 *last;
  1069. bool dsa = netdev_uses_dsa(dev);
  1070. pr_debug("---------------------------------------------------------- RX - %d\n", r);
  1071. spin_lock_irqsave(&priv->lock, flags);
  1072. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1073. do {
  1074. struct sk_buff *skb;
  1075. struct dsa_tag tag;
  1076. struct p_hdr *h;
  1077. u8 *skb_data;
  1078. u8 *data;
  1079. int len;
  1080. if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
  1081. if (&ring->rx_r[r][ring->c_rx[r]] != last) {
  1082. netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
  1083. r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
  1084. }
  1085. break;
  1086. }
  1087. h = &ring->rx_header[r][ring->c_rx[r]];
  1088. data = (u8 *)KSEG1ADDR(h->buf);
  1089. len = h->len;
  1090. if (!len)
  1091. break;
  1092. work_done++;
  1093. len -= 4; /* strip the CRC */
  1094. /* Add 4 bytes for cpu_tag */
  1095. if (dsa)
  1096. len += 4;
  1097. skb = netdev_alloc_skb(dev, len + 4);
  1098. skb_reserve(skb, NET_IP_ALIGN);
  1099. if (likely(skb)) {
  1100. /* BUG: Prevent bug on RTL838x SoCs */
  1101. if (priv->family_id == RTL8380_FAMILY_ID) {
  1102. sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
  1103. for (int i = 0; i < priv->rxrings; i++) {
  1104. unsigned int val;
  1105. /* Update each ring cnt */
  1106. val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
  1107. sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
  1108. }
  1109. }
  1110. skb_data = skb_put(skb, len);
  1111. /* Make sure data is visible */
  1112. mb();
  1113. memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
  1114. /* Overwrite CRC with cpu_tag */
  1115. if (dsa) {
  1116. priv->r->decode_tag(h, &tag);
  1117. skb->data[len - 4] = 0x80;
  1118. skb->data[len - 3] = tag.port;
  1119. skb->data[len - 2] = 0x10;
  1120. skb->data[len - 1] = 0x00;
  1121. if (tag.l2_offloaded)
  1122. skb->data[len - 3] |= 0x40;
  1123. }
  1124. if (tag.queue >= 0)
  1125. pr_debug("Queue: %d, len: %d, reason %d port %d\n",
  1126. tag.queue, len, tag.reason, tag.port);
  1127. skb->protocol = eth_type_trans(skb, dev);
  1128. if (dev->features & NETIF_F_RXCSUM) {
  1129. if (tag.crc_error)
  1130. skb_checksum_none_assert(skb);
  1131. else
  1132. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1133. }
  1134. dev->stats.rx_packets++;
  1135. dev->stats.rx_bytes += len;
  1136. list_add_tail(&skb->list, &rx_list);
  1137. } else {
  1138. if (net_ratelimit())
  1139. dev_warn(&dev->dev, "low on memory - packet dropped\n");
  1140. dev->stats.rx_dropped++;
  1141. }
  1142. /* Reset header structure */
  1143. memset(h, 0, sizeof(struct p_hdr));
  1144. h->buf = data;
  1145. h->size = RING_BUFFER;
  1146. ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
  1147. WRAP :
  1148. 0x1);
  1149. ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
  1150. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1151. } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
  1152. netif_receive_skb_list(&rx_list);
  1153. /* Update counters */
  1154. priv->r->update_cntr(r, 0);
  1155. spin_unlock_irqrestore(&priv->lock, flags);
  1156. return work_done;
  1157. }
/* NAPI poll callback for one RX queue: drain packets until the budget is
 * exhausted or the ring is empty, then complete NAPI and re-enable the RX
 * interrupt for this ring. Returns the number of packets processed.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* Per-ring done bit lives at bit (r + 8) on RTL83xx */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}

	return work_done;
}
/* phylink validate callback: restrict the supported/advertised link modes
 * to what the CPU port supports for the given PHY interface mode. Clears
 * the supported mask entirely for unsupported interface modes.
 */
static void rtl838x_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	pr_debug("In %s\n", __func__);

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_1000BASEX &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_SGMII) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		pr_err("Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	/* Intersect the caller's masks with what we can actually do */
	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
/* phylink mac_config callback — intentionally a no-op apart from logging. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
/* phylink mac_an_restart callback: restart auto-negotiation on the CPU port
 * by toggling the LINK_EN bit in the forced-MAC register (RTL838x only).
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
/* phylink mac_pcs_get_state callback: read link, duplex, speed and pause
 * status of the CPU port from the MAC status registers into 'state'.
 */
static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	u32 speed;
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	int port = priv->cpu_port;

	pr_info("In %s\n", __func__);

	state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
	state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;

	pr_info("%s link status is %d\n", __func__, state->link);
	speed = priv->r->get_mac_link_spd_sts(port);
	/* Map the hardware speed encoding to ethtool SPEED_* values.
	 * Note the encoding is not monotonic: 4 = 10G, 5 = 2.5G, 6 = 5G.
	 */
	switch (speed) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	case 5:
		state->speed = SPEED_2500;
		break;
	case 6:
		state->speed = SPEED_5000;
		break;
	case 4:
		state->speed = SPEED_10000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (priv->r->get_mac_rx_pause_sts(port))
		state->pause |= MLO_PAUSE_RX;
	if (priv->r->get_mac_tx_pause_sts(port))
		state->pause |= MLO_PAUSE_TX;
}
/* phylink mac_link_down callback: stop TX/RX on the CPU port by clearing
 * the two enable bits in its port control register.
 */
static void rtl838x_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Stop TX/RX to port */
	sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
}
/* phylink mac_link_up callback: re-enable TX/RX on the CPU port by setting
 * the two enable bits in its port control register.
 */
static void rtl838x_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Restart TX/RX to port */
	sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
}
/* Write the 6-byte MAC address into the switch registers: two 32-bit
 * registers hold 2 + 4 bytes. On RTL838x the address is mirrored into
 * the ALE and a second MAC register block as well.
 */
static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
{
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	pr_debug("In %s\n", __func__);
	sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
	sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);

	if (priv->family_id == RTL8380_FAMILY_ID) {
		/* 2 more registers, ALE/MAC block */
		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       (RTL838X_MAC_ALE + 4));

		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       RTL838X_MAC2 + 4);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* ndo_set_mac_address callback: validate the new address, store it in the
 * netdev and program it into the hardware.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int rtl838x_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	const struct sockaddr *addr = p;
	u8 *mac = (u8 *) (addr->sa_data);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rtl838x_set_mac_hw(dev, mac);

	pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));

	return 0;
}
/* MAC initialization for the RTL839x family — currently a no-op stub. */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	/* We will need to set-up EEE and the egress-rate limitation */
	return 0;
}
  1341. static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
  1342. {
  1343. if (priv->family_id == 0x8390)
  1344. return rtl8390_init_mac(priv);
  1345. /* At present we do not know how to set up EEE on any other SoC than RTL8380 */
  1346. if (priv->family_id != 0x8380)
  1347. return 0;
  1348. pr_info("%s\n", __func__);
  1349. /* fix timer for EEE */
  1350. sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
  1351. sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
  1352. /* Init VLAN. TODO: Understand what is being done, here */
  1353. if (priv->id == 0x8382) {
  1354. for (int i = 0; i <= 28; i++)
  1355. sw_w32(0, 0xd57c + i * 0x80);
  1356. }
  1357. if (priv->id == 0x8380) {
  1358. for (int i = 8; i <= 28; i++)
  1359. sw_w32(0, 0xd57c + i * 0x80);
  1360. }
  1361. return 0;
  1362. }
  1363. static int rtl838x_get_link_ksettings(struct net_device *ndev,
  1364. struct ethtool_link_ksettings *cmd)
  1365. {
  1366. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1367. pr_debug("%s called\n", __func__);
  1368. return phylink_ethtool_ksettings_get(priv->phylink, cmd);
  1369. }
  1370. static int rtl838x_set_link_ksettings(struct net_device *ndev,
  1371. const struct ethtool_link_ksettings *cmd)
  1372. {
  1373. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1374. pr_debug("%s called\n", __func__);
  1375. return phylink_ethtool_ksettings_set(priv->phylink, cmd);
  1376. }
  1377. static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1378. {
  1379. u32 val;
  1380. int err;
  1381. struct rtl838x_eth_priv *priv = bus->priv;
  1382. if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
  1383. return rtl838x_read_sds_phy(mii_id, regnum);
  1384. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1385. err = rtl838x_read_mmd_phy(mii_id,
  1386. mdiobus_c45_devad(regnum),
  1387. regnum, &val);
  1388. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1389. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1390. val, err);
  1391. } else {
  1392. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1393. err = rtl838x_read_phy(mii_id, page, regnum, &val);
  1394. }
  1395. if (err)
  1396. return err;
  1397. return val;
  1398. }
/* mii_bus->read: un-paged read is a paged read on page 0 */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
  1403. static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1404. {
  1405. u32 val;
  1406. int err;
  1407. struct rtl838x_eth_priv *priv = bus->priv;
  1408. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1409. return rtl839x_read_sds_phy(mii_id, regnum);
  1410. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1411. err = rtl839x_read_mmd_phy(mii_id,
  1412. mdiobus_c45_devad(regnum),
  1413. regnum, &val);
  1414. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1415. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1416. val, err);
  1417. } else {
  1418. err = rtl839x_read_phy(mii_id, page, regnum, &val);
  1419. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1420. }
  1421. if (err)
  1422. return err;
  1423. return val;
  1424. }
/* mii_bus->read: un-paged read is a paged read on page 0 */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
  1429. static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1430. {
  1431. u32 val;
  1432. int err;
  1433. struct rtl838x_eth_priv *priv = bus->priv;
  1434. if (priv->phy_is_internal[mii_id])
  1435. return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1436. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1437. err = rtl930x_read_mmd_phy(mii_id,
  1438. mdiobus_c45_devad(regnum),
  1439. regnum, &val);
  1440. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1441. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1442. val, err);
  1443. } else {
  1444. err = rtl930x_read_phy(mii_id, page, regnum, &val);
  1445. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1446. }
  1447. if (err)
  1448. return err;
  1449. return val;
  1450. }
/* mii_bus->read: un-paged read is a paged read on page 0 */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
  1455. static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1456. {
  1457. u32 val;
  1458. int err, v;
  1459. struct rtl838x_eth_priv *priv = bus->priv;
  1460. pr_debug("%s: In here, port %d\n", __func__, mii_id);
  1461. if (priv->phy_is_internal[mii_id]) {
  1462. v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1463. if (v < 0) {
  1464. err = v;
  1465. } else {
  1466. err = 0;
  1467. val = v;
  1468. }
  1469. } else {
  1470. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1471. err = rtl931x_read_mmd_phy(mii_id,
  1472. mdiobus_c45_devad(regnum),
  1473. regnum, &val);
  1474. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1475. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1476. val, err);
  1477. } else {
  1478. err = rtl931x_read_phy(mii_id, page, regnum, &val);
  1479. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1480. }
  1481. }
  1482. if (err)
  1483. return err;
  1484. return val;
  1485. }
/* mii_bus->read: un-paged read is a paged read on page 0 */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
  1490. static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1491. int regnum, u16 value)
  1492. {
  1493. u32 offset = 0;
  1494. struct rtl838x_eth_priv *priv = bus->priv;
  1495. int err;
  1496. if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
  1497. if (mii_id == 26)
  1498. offset = 0x100;
  1499. sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
  1500. return 0;
  1501. }
  1502. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1503. err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1504. regnum, value);
  1505. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1506. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1507. value, err);
  1508. return err;
  1509. }
  1510. err = rtl838x_write_phy(mii_id, page, regnum, value);
  1511. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1512. return err;
  1513. }
/* mii_bus->write: un-paged write is a paged write on page 0 */
static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
  1519. static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1520. int regnum, u16 value)
  1521. {
  1522. struct rtl838x_eth_priv *priv = bus->priv;
  1523. int err;
  1524. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1525. return rtl839x_write_sds_phy(mii_id, regnum, value);
  1526. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1527. err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1528. regnum, value);
  1529. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1530. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1531. value, err);
  1532. return err;
  1533. }
  1534. err = rtl839x_write_phy(mii_id, page, regnum, value);
  1535. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1536. return err;
  1537. }
/* mii_bus->write: un-paged write is a paged write on page 0 */
static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
  1543. static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1544. int regnum, u16 value)
  1545. {
  1546. struct rtl838x_eth_priv *priv = bus->priv;
  1547. int err;
  1548. if (priv->phy_is_internal[mii_id])
  1549. return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1550. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
  1551. return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1552. regnum, value);
  1553. err = rtl930x_write_phy(mii_id, page, regnum, value);
  1554. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1555. return err;
  1556. }
/* mii_bus->write: un-paged write is a paged write on page 0 */
static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
  1562. static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1563. int regnum, u16 value)
  1564. {
  1565. struct rtl838x_eth_priv *priv = bus->priv;
  1566. int err;
  1567. if (priv->phy_is_internal[mii_id])
  1568. return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1569. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1570. err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1571. regnum, value);
  1572. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1573. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1574. value, err);
  1575. return err;
  1576. }
  1577. err = rtl931x_write_phy(mii_id, page, regnum, value);
  1578. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1579. return err;
  1580. }
/* mii_bus->write: un-paged write is a paged write on page 0 */
static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
/* mii_bus->reset for RTL838x: take SMI mastership away from the MAC so the
 * kernel can drive the PHYs itself. Always returns 0.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
	/* Probably should reset all PHYs here... */
	return 0;
}
/* mii_bus->reset for RTL839x. Deliberately a no-op: the early return skips
 * the polling-disable sequence below, which is known not to work on this
 * family (see the BUG comment). The dead code is kept as documentation of
 * the intended sequence.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	return 0;

	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);
	/* Probably should reset all PHYs here... */
	return 0;
}
/* Per-port start bit of the MAC-type field in RTL930X_SMI_MAC_TYPE_CTRL.
 * Groups of four low ports share a field; ports 24-27 get individual fields
 * at bits 12/15/18/21. NOTE(review): field widths inferred from the users
 * below (BIT() and "3 <<") — confirm against the RTL9300 register manual.
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
  1611. static int rtl930x_mdio_reset(struct mii_bus *bus)
  1612. {
  1613. struct rtl838x_eth_priv *priv = bus->priv;
  1614. u32 c45_mask = 0;
  1615. u32 poll_sel[2];
  1616. u32 poll_ctrl = 0;
  1617. u32 private_poll_mask = 0;
  1618. u32 v;
  1619. bool uses_usxgmii = false; /* For the Aquantia PHYs */
  1620. bool uses_hisgmii = false; /* For the RTL8221/8226 */
  1621. /* Mapping of port to phy-addresses on an SMI bus */
  1622. poll_sel[0] = poll_sel[1] = 0;
  1623. for (int i = 0; i < RTL930X_CPU_PORT; i++) {
  1624. int pos;
  1625. if (priv->smi_bus[i] > 3)
  1626. continue;
  1627. pos = (i % 6) * 5;
  1628. sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
  1629. RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
  1630. pos = (i * 2) % 32;
  1631. poll_sel[i / 16] |= priv->smi_bus[i] << pos;
  1632. poll_ctrl |= BIT(20 + priv->smi_bus[i]);
  1633. }
  1634. /* Configure which SMI bus is behind which port number */
  1635. sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
  1636. sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
  1637. /* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
  1638. sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
  1639. /* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
  1640. for (int i = 0; i < 4; i++)
  1641. if (priv->smi_bus_isc45[i])
  1642. c45_mask |= BIT(i + 16);
  1643. pr_info("c45_mask: %08x\n", c45_mask);
  1644. sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
  1645. /* Set the MAC type of each port according to the PHY-interface */
  1646. /* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
  1647. v = 0;
  1648. for (int i = 0; i < RTL930X_CPU_PORT; i++) {
  1649. switch (priv->interfaces[i]) {
  1650. case PHY_INTERFACE_MODE_10GBASER:
  1651. break; /* Serdes: Value = 0 */
  1652. case PHY_INTERFACE_MODE_HSGMII:
  1653. private_poll_mask |= BIT(i);
  1654. /* fallthrough */
  1655. case PHY_INTERFACE_MODE_USXGMII:
  1656. v |= BIT(mac_type_bit[i]);
  1657. uses_usxgmii = true;
  1658. break;
  1659. case PHY_INTERFACE_MODE_QSGMII:
  1660. private_poll_mask |= BIT(i);
  1661. v |= 3 << mac_type_bit[i];
  1662. break;
  1663. default:
  1664. break;
  1665. }
  1666. }
  1667. sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);
  1668. /* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
  1669. sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);
  1670. /* The following magic values are found in the port configuration, they seem to
  1671. * define different ways of polling a PHY. The below is for the Aquantia PHYs of
  1672. * the XGS1250 and the RTL8226 of the XGS1210
  1673. */
  1674. if (uses_usxgmii) {
  1675. sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
  1676. sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
  1677. sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
  1678. }
  1679. if (uses_hisgmii) {
  1680. sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
  1681. sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
  1682. sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
  1683. }
  1684. pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
  1685. sw_r32(RTL930X_SMI_GLB_CTRL));
  1686. pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
  1687. sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
  1688. pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
  1689. sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
  1690. pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
  1691. sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
  1692. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
  1693. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
  1694. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
  1695. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
  1696. pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
  1697. sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
  1698. pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
  1699. sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));
  1700. return 0;
  1701. }
/* mii_bus->reset for RTL931x: stop port polling, map the 56 ports to their
 * SMI bus/address, enable MDC on the busses in use and select c45 polling
 * per bus. Always returns 0.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	/* Disable port polling for configuration purposes */
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (int i = 0; i < 56; i++) {
		u32 pos;

		/* 5 address bits per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
		/* 2 bus-select bits per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		mdc_on[priv->smi_bus[i]] = true;
	}

	/* Configure which SMI bus is behind which port number */
	for (int i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	/* Configure which SMI busses */
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	/* c45_mask is still 0 here: this logs the pre-update state ("was") */
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (int i = 0; i < 4; i++) {
		/* bus is polled in c45 */
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
		/* Enable bus access via MDC */
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	 * sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	 * sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	 * sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	 */
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
/* One-time RTL931x switch memory initialization: triggers the ENCAP, MIB,
 * ACL and ALE table init engines and busy-waits for each to complete.
 * NOTE(review): the do/while polling loops have no timeout — if the
 * hardware never clears the init bit, this spins forever. Confirm whether
 * a bounded wait is wanted here. Always returns 0.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	/* Initialize Encapsulation memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	/* Initialize Managemen Information Base memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	/* Initialize ACL (PIE) memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	/* Initialize ALE memory and wait until finished */
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	/* Enable ESD auto recovery */
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	/* Init SPI, is this for thermal control or what? */
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
  1780. static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
  1781. {
  1782. struct device_node *mii_np, *dn;
  1783. u32 pn;
  1784. int ret;
  1785. pr_debug("%s called\n", __func__);
  1786. mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
  1787. if (!mii_np) {
  1788. dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
  1789. return -ENODEV;
  1790. }
  1791. if (!of_device_is_available(mii_np)) {
  1792. ret = -ENODEV;
  1793. goto err_put_node;
  1794. }
  1795. priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
  1796. if (!priv->mii_bus) {
  1797. ret = -ENOMEM;
  1798. goto err_put_node;
  1799. }
  1800. switch(priv->family_id) {
  1801. case RTL8380_FAMILY_ID:
  1802. priv->mii_bus->name = "rtl838x-eth-mdio";
  1803. priv->mii_bus->read = rtl838x_mdio_read;
  1804. priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
  1805. priv->mii_bus->write = rtl838x_mdio_write;
  1806. priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
  1807. priv->mii_bus->reset = rtl838x_mdio_reset;
  1808. break;
  1809. case RTL8390_FAMILY_ID:
  1810. priv->mii_bus->name = "rtl839x-eth-mdio";
  1811. priv->mii_bus->read = rtl839x_mdio_read;
  1812. priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
  1813. priv->mii_bus->write = rtl839x_mdio_write;
  1814. priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
  1815. priv->mii_bus->reset = rtl839x_mdio_reset;
  1816. break;
  1817. case RTL9300_FAMILY_ID:
  1818. priv->mii_bus->name = "rtl930x-eth-mdio";
  1819. priv->mii_bus->read = rtl930x_mdio_read;
  1820. priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
  1821. priv->mii_bus->write = rtl930x_mdio_write;
  1822. priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
  1823. priv->mii_bus->reset = rtl930x_mdio_reset;
  1824. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1825. break;
  1826. case RTL9310_FAMILY_ID:
  1827. priv->mii_bus->name = "rtl931x-eth-mdio";
  1828. priv->mii_bus->read = rtl931x_mdio_read;
  1829. priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
  1830. priv->mii_bus->write = rtl931x_mdio_write;
  1831. priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
  1832. priv->mii_bus->reset = rtl931x_mdio_reset;
  1833. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1834. break;
  1835. }
  1836. priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
  1837. priv->mii_bus->priv = priv;
  1838. priv->mii_bus->parent = &priv->pdev->dev;
  1839. for_each_node_by_name(dn, "ethernet-phy") {
  1840. u32 smi_addr[2];
  1841. if (of_property_read_u32(dn, "reg", &pn))
  1842. continue;
  1843. if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
  1844. smi_addr[0] = 0;
  1845. smi_addr[1] = pn;
  1846. }
  1847. if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
  1848. priv->sds_id[pn] = -1;
  1849. else {
  1850. pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
  1851. }
  1852. if (pn < MAX_PORTS) {
  1853. priv->smi_bus[pn] = smi_addr[0];
  1854. priv->smi_addr[pn] = smi_addr[1];
  1855. } else {
  1856. pr_err("%s: illegal port number %d\n", __func__, pn);
  1857. }
  1858. if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
  1859. priv->smi_bus_isc45[smi_addr[0]] = true;
  1860. if (of_property_read_bool(dn, "phy-is-integrated")) {
  1861. priv->phy_is_internal[pn] = true;
  1862. }
  1863. }
  1864. dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
  1865. if (!dn) {
  1866. dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
  1867. return -ENODEV;
  1868. }
  1869. for_each_node_by_name(dn, "port") {
  1870. if (of_property_read_u32(dn, "reg", &pn))
  1871. continue;
  1872. pr_debug("%s Looking at port %d\n", __func__, pn);
  1873. if (pn > priv->cpu_port)
  1874. continue;
  1875. if (of_get_phy_mode(dn, &priv->interfaces[pn]))
  1876. priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
  1877. pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
  1878. }
  1879. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
  1880. ret = of_mdiobus_register(priv->mii_bus, mii_np);
  1881. err_put_node:
  1882. of_node_put(mii_np);
  1883. return ret;
  1884. }
/* Tear down the MDIO bus registered by rtl838x_mdio_init().
 * NOTE(review): the bus was allocated with devm_mdiobus_alloc(), which
 * frees it on device detach — the explicit mdiobus_free() here looks like
 * a potential double free; confirm against the devm mdiobus API.
 */
static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
{
	pr_debug("%s called\n", __func__);
	if (!priv->mii_bus)
		return 0;

	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);

	return 0;
}
  1894. static netdev_features_t rtl838x_fix_features(struct net_device *dev,
  1895. netdev_features_t features)
  1896. {
  1897. return features;
  1898. }
  1899. static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
  1900. {
  1901. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1902. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1903. if (!(features & NETIF_F_RXCSUM))
  1904. sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1905. else
  1906. sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
  1907. }
  1908. return 0;
  1909. }
  1910. static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
  1911. {
  1912. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1913. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1914. if (!(features & NETIF_F_RXCSUM))
  1915. sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1916. else
  1917. sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
  1918. }
  1919. return 0;
  1920. }
/* netdev ops for the RTL838x family: shared open/stop/tx handlers with the
 * 8-queue rtl83xx queue picker and the 838x multicast-list handler.
 */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL839x family: identical to rtl838x_eth_netdev_ops
 * except for the family-specific multicast-list handler.
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL930x family: uses the rtl93xx queue picker and
 * feature handler plus the 930x multicast-list handler.
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL931x family. NOTE(review): unlike the other three
 * families this table has no .ndo_setup_tc — confirm that is intentional.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
/* phylink MAC operations shared by all supported families */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
/* ethtool operations — link settings are forwarded to phylink */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
  1984. static int __init rtl838x_eth_probe(struct platform_device *pdev)
  1985. {
  1986. struct net_device *dev;
  1987. struct device_node *dn = pdev->dev.of_node;
  1988. struct rtl838x_eth_priv *priv;
  1989. struct resource *res, *mem;
  1990. phy_interface_t phy_mode;
  1991. struct phylink *phylink;
  1992. int err = 0, rxrings, rxringlen;
  1993. struct ring_b *ring;
  1994. pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
  1995. (u32)pdev, (u32)(&(pdev->dev)));
  1996. if (!dn) {
  1997. dev_err(&pdev->dev, "No DT found\n");
  1998. return -EINVAL;
  1999. }
  2000. rxrings = (soc_info.family == RTL8380_FAMILY_ID
  2001. || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
  2002. rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
  2003. rxringlen = MAX_ENTRIES / rxrings;
  2004. rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
  2005. dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
  2006. if (!dev) {
  2007. err = -ENOMEM;
  2008. goto err_free;
  2009. }
  2010. SET_NETDEV_DEV(dev, &pdev->dev);
  2011. priv = netdev_priv(dev);
  2012. /* obtain buffer memory space */
  2013. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2014. if (res) {
  2015. mem = devm_request_mem_region(&pdev->dev, res->start,
  2016. resource_size(res), res->name);
  2017. if (!mem) {
  2018. dev_err(&pdev->dev, "cannot request memory space\n");
  2019. err = -ENXIO;
  2020. goto err_free;
  2021. }
  2022. dev->mem_start = mem->start;
  2023. dev->mem_end = mem->end;
  2024. } else {
  2025. dev_err(&pdev->dev, "cannot request IO resource\n");
  2026. err = -ENXIO;
  2027. goto err_free;
  2028. }
  2029. /* Allocate buffer memory */
  2030. priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER +
  2031. sizeof(struct ring_b) + sizeof(struct notify_b),
  2032. (void *)&dev->mem_start, GFP_KERNEL);
  2033. if (!priv->membase) {
  2034. dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
  2035. err = -ENOMEM;
  2036. goto err_free;
  2037. }
  2038. /* Allocate ring-buffer space at the end of the allocated memory */
  2039. ring = priv->membase;
  2040. ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
  2041. spin_lock_init(&priv->lock);
  2042. dev->ethtool_ops = &rtl838x_ethtool_ops;
  2043. dev->min_mtu = ETH_ZLEN;
  2044. dev->max_mtu = 1536;
  2045. dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
  2046. dev->hw_features = NETIF_F_RXCSUM;
  2047. priv->id = soc_info.id;
  2048. priv->family_id = soc_info.family;
  2049. if (priv->id) {
  2050. pr_info("Found SoC ID: %4x: %s, family %x\n",
  2051. priv->id, soc_info.name, priv->family_id);
  2052. } else {
  2053. pr_err("Unknown chip id (%04x)\n", priv->id);
  2054. return -ENODEV;
  2055. }
  2056. switch (priv->family_id) {
  2057. case RTL8380_FAMILY_ID:
  2058. priv->cpu_port = RTL838X_CPU_PORT;
  2059. priv->r = &rtl838x_reg;
  2060. dev->netdev_ops = &rtl838x_eth_netdev_ops;
  2061. break;
  2062. case RTL8390_FAMILY_ID:
  2063. priv->cpu_port = RTL839X_CPU_PORT;
  2064. priv->r = &rtl839x_reg;
  2065. dev->netdev_ops = &rtl839x_eth_netdev_ops;
  2066. break;
  2067. case RTL9300_FAMILY_ID:
  2068. priv->cpu_port = RTL930X_CPU_PORT;
  2069. priv->r = &rtl930x_reg;
  2070. dev->netdev_ops = &rtl930x_eth_netdev_ops;
  2071. break;
  2072. case RTL9310_FAMILY_ID:
  2073. priv->cpu_port = RTL931X_CPU_PORT;
  2074. priv->r = &rtl931x_reg;
  2075. dev->netdev_ops = &rtl931x_eth_netdev_ops;
  2076. rtl931x_chip_init(priv);
  2077. break;
  2078. default:
  2079. pr_err("Unknown SoC family\n");
  2080. return -ENODEV;
  2081. }
  2082. priv->rxringlen = rxringlen;
  2083. priv->rxrings = rxrings;
  2084. /* Obtain device IRQ number */
  2085. dev->irq = platform_get_irq(pdev, 0);
  2086. if (dev->irq < 0) {
  2087. dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
  2088. goto err_free;
  2089. }
  2090. err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
  2091. IRQF_SHARED, dev->name, dev);
  2092. if (err) {
  2093. dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
  2094. __func__, err);
  2095. goto err_free;
  2096. }
  2097. rtl8380_init_mac(priv);
  2098. /* Try to get mac address in the following order:
  2099. * 1) from device tree data
  2100. * 2) from internal registers set by bootloader
  2101. */
  2102. of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
  2103. if (is_valid_ether_addr(dev->dev_addr)) {
  2104. rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
  2105. } else {
  2106. dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
  2107. dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
  2108. dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
  2109. dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
  2110. dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
  2111. dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
  2112. }
  2113. /* if the address is invalid, use a random value */
  2114. if (!is_valid_ether_addr(dev->dev_addr)) {
  2115. struct sockaddr sa = { AF_UNSPEC };
  2116. netdev_warn(dev, "Invalid MAC address, using random\n");
  2117. eth_hw_addr_random(dev);
  2118. memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
  2119. if (rtl838x_set_mac_address(dev, &sa))
  2120. netdev_warn(dev, "Failed to set MAC address.\n");
  2121. }
  2122. pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
  2123. sw_r32(priv->r->mac + 4));
  2124. strcpy(dev->name, "eth%d");
  2125. priv->pdev = pdev;
  2126. priv->netdev = dev;
  2127. err = rtl838x_mdio_init(priv);
  2128. if (err)
  2129. goto err_free;
  2130. err = register_netdev(dev);
  2131. if (err)
  2132. goto err_free;
  2133. for (int i = 0; i < priv->rxrings; i++) {
  2134. priv->rx_qs[i].id = i;
  2135. priv->rx_qs[i].priv = priv;
  2136. netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
  2137. }
  2138. platform_set_drvdata(pdev, dev);
  2139. phy_mode = PHY_INTERFACE_MODE_NA;
  2140. err = of_get_phy_mode(dn, &phy_mode);
  2141. if (err < 0) {
  2142. dev_err(&pdev->dev, "incorrect phy-mode\n");
  2143. err = -EINVAL;
  2144. goto err_free;
  2145. }
  2146. priv->phylink_config.dev = &dev->dev;
  2147. priv->phylink_config.type = PHYLINK_NETDEV;
  2148. phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
  2149. phy_mode, &rtl838x_phylink_ops);
  2150. if (IS_ERR(phylink)) {
  2151. err = PTR_ERR(phylink);
  2152. goto err_free;
  2153. }
  2154. priv->phylink = phylink;
  2155. return 0;
  2156. err_free:
  2157. pr_err("Error setting up netdev, freeing it again.\n");
  2158. free_netdev(dev);
  2159. return err;
  2160. }
  2161. static int rtl838x_eth_remove(struct platform_device *pdev)
  2162. {
  2163. struct net_device *dev = platform_get_drvdata(pdev);
  2164. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  2165. if (dev) {
  2166. pr_info("Removing platform driver for rtl838x-eth\n");
  2167. rtl838x_mdio_remove(priv);
  2168. rtl838x_hw_stop(priv);
  2169. netif_tx_stop_all_queues(dev);
  2170. for (int i = 0; i < priv->rxrings; i++)
  2171. netif_napi_del(&priv->rx_qs[i].napi);
  2172. unregister_netdev(dev);
  2173. free_netdev(dev);
  2174. }
  2175. return 0;
  2176. }
/* Devicetree match table: this driver binds to nodes with
 * compatible = "realtek,rtl838x-eth".  The empty entry is the
 * required sentinel terminating the list.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
/* Platform driver glue: wires probe/remove to the driver core and matches
 * against the devicetree table above.  No PM callbacks are provided
 * (.pm = NULL), so suspend/resume is not supported by this driver.
 */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};

/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");