  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* linux/drivers/net/ethernet/rtl838x_eth.c
  3. * Copyright (C) 2020 B. Koblitz
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/interrupt.h>
  8. #include <linux/io.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/sched.h>
  11. #include <linux/slab.h>
  12. #include <linux/of.h>
  13. #include <linux/of_net.h>
  14. #include <linux/of_mdio.h>
  15. #include <linux/module.h>
  16. #include <linux/phylink.h>
  17. #include <linux/pkt_sched.h>
  18. #include <net/dsa.h>
  19. #include <net/switchdev.h>
  20. #include <asm/cacheflush.h>
  21. #include <asm/mach-rtl838x/mach-rtl83xx.h>
  22. #include "rtl838x_eth.h"
  23. extern struct rtl83xx_soc_info soc_info;
/* Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX.
 * The ring is assigned by the switch based on packet/port priority.
 * Maximum number of TX rings is 2, ring 2 being the high priority
 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
 * for an RX ring, MAX_ENTRIES the maximum number of entries
 * available in total for all queues.
 */
#define MAX_RXRINGS	32
#define MAX_RXLEN	300
#define MAX_ENTRIES	(300 * 8)
#define TXRINGS		2
#define TXRINGLEN	160
#define NOTIFY_EVENTS	10	/* L2 FDB events per notification block */
#define NOTIFY_BLOCKS	10	/* notification blocks in the notify ring */
/* DMA_IF_CTRL enable bits; per the names the RTL93xx layout differs from
 * the RTL83xx one — used by code later in this file.
 */
#define TX_EN		0x8
#define RX_EN		0x4
#define TX_EN_93XX	0x20
#define RX_EN_93XX	0x10
#define TX_DO		0x2
#define WRAP		0x2	/* last-descriptor (ring wrap) flag in rx_r[]/tx_r[] words */
#define MAX_PORTS	57
#define MAX_SMI_BUSSES	4
#define RING_BUFFER	1600	/* bytes of packet buffer per descriptor */
/* DMA packet descriptor header shared with the ASIC. buf points at the
 * packet data (set to an uncached KSEG1 address by the ring code below);
 * cpu_tag carries the per-SoC CPU tag (destination port mask, priority,
 * trap reason, ...) encoded/decoded by the create_tx_header/decode_tag
 * callbacks.
 */
struct p_hdr {
	uint8_t *buf;		/* packet buffer (KSEG1/uncached address) */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];
} __packed __aligned(1);
/* One L2 forwarding-database notification event (RTL839x). The handler
 * below treats a non-zero type as an "add" event and maps it to
 * SWITCHDEV_FDB_ADD_TO_BRIDGE.
 */
struct n_event {
	uint32_t type:2;	/* non-zero: MAC learned (add); zero: aged out (del) */
	uint32_t fidVid:12;	/* filtering ID / VLAN ID — TODO confirm encoding */
	uint64_t mac:48;	/* MAC address of the event */
	uint32_t slp:6;		/* presumably source logical port — verify */
	uint32_t valid:1;	/* entry contains a live event */
	uint32_t reserved:27;
} __packed __aligned(1);
/* DMA ring area: descriptor words, packet headers and TX buffer space for
 * all RX and TX rings, laid out contiguously at priv->membase.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];	/* RX descriptor words (hdr ptr | flags) */
	uint32_t tx_r[TXRINGS][TXRINGLEN];	/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];	/* next descriptor index to handle, per RX ring */
	uint32_t c_tx[TXRINGS];		/* next descriptor index to use, per TX ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;		/* RX packet buffers, allocated separately */
};
/* L2 notification area (RTL839x): NOTIFY_BLOCKS blocks of NOTIFY_EVENTS
 * events each. ring[] holds one handshake word per block; the handler
 * below consumes a block while bit 0 is clear and sets bit 0 to hand the
 * block back to the switch.
 */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};

struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];	/* per-block ownership/handshake words */
	u32 reserved2[8];
};
  83. static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  84. {
  85. /* cpu_tag[0] is reserved on the RTL83XX SoCs */
  86. h->cpu_tag[1] = 0x0400; /* BIT 10: RTL8380_CPU_TAG */
  87. h->cpu_tag[2] = 0x0200; /* Set only AS_DPM, to enable DPM settings below */
  88. h->cpu_tag[3] = 0x0000;
  89. h->cpu_tag[4] = BIT(dest_port) >> 16;
  90. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  91. /* Set internal priority (PRI) and enable (AS_PRI) */
  92. if (prio >= 0)
  93. h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
  94. }
  95. static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  96. {
  97. /* cpu_tag[0] is reserved on the RTL83XX SoCs */
  98. h->cpu_tag[1] = 0x0100; /* RTL8390_CPU_TAG marker */
  99. h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
  100. h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
  101. /* h->cpu_tag[1] |= BIT(1) | BIT(0); */ /* Bypass filter 1/2 */
  102. if (dest_port >= 32) {
  103. dest_port -= 32;
  104. h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
  105. h->cpu_tag[3] = BIT(dest_port) & 0xffff;
  106. } else {
  107. h->cpu_tag[4] = BIT(dest_port) >> 16;
  108. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  109. }
  110. /* Set internal priority (PRI) and enable (AS_PRI) */
  111. if (prio >= 0)
  112. h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
  113. }
  114. static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  115. {
  116. h->cpu_tag[0] = 0x8000; /* CPU tag marker */
  117. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  118. h->cpu_tag[3] = 0;
  119. h->cpu_tag[4] = 0;
  120. h->cpu_tag[5] = 0;
  121. h->cpu_tag[6] = BIT(dest_port) >> 16;
  122. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  123. /* Enable (AS_QID) and set priority queue (QID) */
  124. if (prio >= 0)
  125. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  126. }
  127. static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
  128. {
  129. h->cpu_tag[0] = 0x8000; /* CPU tag marker */
  130. h->cpu_tag[1] = h->cpu_tag[2] = 0;
  131. h->cpu_tag[3] = 0;
  132. h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
  133. if (dest_port >= 32) {
  134. dest_port -= 32;
  135. h->cpu_tag[4] = BIT(dest_port) >> 16;
  136. h->cpu_tag[5] = BIT(dest_port) & 0xffff;
  137. } else {
  138. h->cpu_tag[6] = BIT(dest_port) >> 16;
  139. h->cpu_tag[7] = BIT(dest_port) & 0xffff;
  140. }
  141. /* Enable (AS_QID) and set priority queue (QID) */
  142. if (prio >= 0)
  143. h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
  144. }
  145. // Currently unused
  146. // static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
  147. // {
  148. // h->cpu_tag[2] |= BIT(4); /* Enable VLAN forwarding offload */
  149. // h->cpu_tag[2] |= (vlan >> 8) & 0xf;
  150. // h->cpu_tag[3] |= (vlan & 0xff) << 8;
  151. // }
/* Per-RX-ring NAPI context; one instance per hardware RX ring */
struct rtl838x_rx_q {
	int id;				/* ring number */
	struct rtl838x_eth_priv *priv;	/* back-pointer to the owning device */
	struct napi_struct napi;
};
/* Driver private state for the switch's CPU ethernet port */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA area: struct ring_b, on RTL839x followed by notify_b */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* per-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* SoC family (RTL8380/8390/9300/9310) */
	const struct rtl838x_eth_reg *r;	/* per-family register/callback table */
	u8 cpu_port;			/* switch port number of the CPU port */
	u32 lastEvent;			/* next L2 notification block to consume */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];		/* SMI bus number per port */
	u8 smi_addr[MAX_PORTS];		/* SMI/PHY address per port */
	u32 sds_id[MAX_PORTS];		/* SerDes id per port */
	bool smi_bus_isc45[MAX_SMI_BUSSES];	/* bus speaks clause-45 MDIO */
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
  180. extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
  181. extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
  182. extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
  183. extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
  184. extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
  185. extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  186. extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
  187. extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
  188. extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  189. extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
  190. extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
  191. extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
/* On the RTL93XX SoCs, the RTL93XX_DMA_IF_RX_RING_CNTR registers track the
 * fill level of the RX rings. Writing x into one of these registers
 * subtracts x from its content. When the content reaches the ring size,
 * the ASIC no longer adds packets to that receive queue.
 */
  197. void rtl838x_update_cntr(int r, int released)
  198. {
  199. /* This feature is not available on RTL838x SoCs */
  200. }
  201. void rtl839x_update_cntr(int r, int released)
  202. {
  203. /* This feature is not available on RTL839x SoCs */
  204. }
/* Subtract "released" from RX ring r's fill-level counter (see comment
 * above: these registers subtract what is written). Three 10-bit counters
 * are packed per 32-bit RING_CNTR register.
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;	/* bit offset of ring r's counter within the register */
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* old fill level of ring r */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this writes the old fill level to the whole register
	 * after the masked write above; on a subtract-on-write register that
	 * would also decrement the other two counters sharing this register.
	 * Looks suspicious — confirm against the datasheet.
	 */
	sw_w32(v, reg);
}
/* Subtract "released" from RX ring r's fill-level counter (see comment
 * above). Same packing as on RTL930x: three 10-bit counters per register.
 */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;	/* bit offset of ring r's counter within the register */
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* old fill level of ring r */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): writing the old count back to the full register looks
	 * suspicious for a subtract-on-write register — same pattern as the
	 * RTL930x variant; confirm intended.
	 */
	sw_w32(v, reg);
}
/* CPU-tag fields decoded from a received packet's p_hdr */
struct dsa_tag {
	u8 reason;	/* trap/forwarding reason code */
	u8 queue;	/* RX queue the packet arrived on */
	u16 port;	/* ingress switch port */
	u8 l2_offloaded;	/* 1 if the ASIC already L2-forwarded the packet */
	u8 prio;
	bool crc_error;
};
  232. bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  233. {
  234. /* cpu_tag[0] is reserved. Fields are off-by-one */
  235. t->reason = h->cpu_tag[4] & 0xf;
  236. t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
  237. t->port = h->cpu_tag[1] & 0x1f;
  238. t->crc_error = t->reason == 13;
  239. pr_debug("Reason: %d\n", t->reason);
  240. if (t->reason != 6) /* NIC_RX_REASON_SPECIAL_TRAP */
  241. t->l2_offloaded = 1;
  242. else
  243. t->l2_offloaded = 0;
  244. return t->l2_offloaded;
  245. }
  246. bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  247. {
  248. /* cpu_tag[0] is reserved. Fields are off-by-one */
  249. t->reason = h->cpu_tag[5] & 0x1f;
  250. t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
  251. t->port = h->cpu_tag[1] & 0x3f;
  252. t->crc_error = h->cpu_tag[4] & BIT(6);
  253. pr_debug("Reason: %d\n", t->reason);
  254. if ((t->reason >= 7 && t->reason <= 13) || /* NIC_RX_REASON_RMA */
  255. (t->reason >= 23 && t->reason <= 25)) /* NIC_RX_REASON_SPECIAL_TRAP */
  256. t->l2_offloaded = 0;
  257. else
  258. t->l2_offloaded = 1;
  259. return t->l2_offloaded;
  260. }
  261. bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  262. {
  263. t->reason = h->cpu_tag[7] & 0x3f;
  264. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  265. t->port = (h->cpu_tag[0] >> 8) & 0x1f;
  266. t->crc_error = h->cpu_tag[1] & BIT(6);
  267. pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
  268. if (t->reason >= 19 && t->reason <= 27)
  269. t->l2_offloaded = 0;
  270. else
  271. t->l2_offloaded = 1;
  272. return t->l2_offloaded;
  273. }
  274. bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
  275. {
  276. t->reason = h->cpu_tag[7] & 0x3f;
  277. t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
  278. t->port = (h->cpu_tag[0] >> 8) & 0x3f;
  279. t->crc_error = h->cpu_tag[1] & BIT(6);
  280. if (t->reason != 63)
  281. pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
  282. if (t->reason >= 19 && t->reason <= 27) /* NIC_RX_REASON_RMA */
  283. t->l2_offloaded = 0;
  284. else
  285. t->l2_offloaded = 1;
  286. return t->l2_offloaded;
  287. }
/* Discard the RX ring-buffers, called as part of the net-ISR when the
 * buffer runs over: walk each ring up to the ASIC's current position,
 * re-initialize every CPU-owned descriptor and hand it back to the ASIC
 * so reception can continue.
 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	for (int r = 0; r < priv->rxrings; r++) {
		struct ring_b *ring = priv->membase;
		struct p_hdr *h;
		u32 *last;

		pr_debug("In %s working on r: %d\n", __func__, r);
		/* Descriptor the ASIC will fill next (uncached KSEG1 view) */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Stop at the first descriptor still owned by the ASIC (bit 0 set) */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			/* Point the header at this descriptor's slice of rx_space */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
						 r * priv->rxringlen * RING_BUFFER +
						 ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();
			/* Hand the descriptor back: header address | owned (0x1),
			 * with WRAP on the last ring entry.
			 * NOTE(review): the ternary's else-value 0x1 is redundant
			 * with the unconditional | 0x1 — presumably meant to be 0;
			 * behavior is unchanged either way. */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
									     WRAP :
									     0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
/* Deferred-work container for pushing L2 FDB events into switchdev.
 * macs[] is a zero-terminated list (hence the +1): bits 0..47 hold the
 * MAC address, bit 63 distinguishes add (set) from delete (clear).
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];
};
  323. void rtl838x_fdb_sync(struct work_struct *work)
  324. {
  325. const struct fdb_update_work *uw = container_of(work, struct fdb_update_work, work);
  326. for (int i = 0; uw->macs[i]; i++) {
  327. struct switchdev_notifier_fdb_info info;
  328. u8 addr[ETH_ALEN];
  329. int action;
  330. action = (uw->macs[i] & (1ULL << 63)) ?
  331. SWITCHDEV_FDB_ADD_TO_BRIDGE :
  332. SWITCHDEV_FDB_DEL_TO_BRIDGE;
  333. u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
  334. info.addr = &addr[0];
  335. info.vid = 0;
  336. info.offloaded = 1;
  337. pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
  338. call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
  339. }
  340. kfree(work);
  341. }
/* Drain the RTL839x L2 notification ring: for every block not yet handed
 * back (bit 0 of its ring word clear), collect the valid events into a
 * fdb_update_work item and schedule rtl838x_fdb_sync to push them into
 * switchdev. Runs in IRQ context, hence GFP_ATOMIC.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* The notification area sits directly behind the ring area in DMA memory */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;

	while (!(nb->ring[e] & 1)) {
		struct fdb_update_work *w;
		struct n_event *event;
		u64 mac;
		int i;

		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);
		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			if (event->type)
				mac |= 1ULL << 63;	/* bit 63 marks an "add" event */
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}
		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;
		/* Terminate the MAC list (i == NOTIFY_EVENTS here).
		 * NOTE(review): invalid events leave zero gaps in macs[] (w was
		 * kzalloc'ed), and rtl838x_fdb_sync stops at the first zero —
		 * events after a gap would be dropped; confirm intended. */
		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
/* Interrupt handler for the RTL838x/RTL839x NIC DMA interface.
 * Status layout (per the masks used below): bits 16..19 TX done,
 * bits 8..15 RX done, bits 0..7 RX ring run-out, bits 20..22 (RTL839x)
 * L2 notification events.
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring (presumably
		 * re-enabled from the NAPI poll — not visible here) */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: re-arm the affected rings' descriptors */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 FDB notification interrupts: ack each bit (20..22)
	 * individually, then drain the notification ring */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
/* Interrupt handler for the RTL930x/RTL931x NIC DMA interface. Unlike the
 * RTL83xx parts, TX-done, RX-done and RX-run-out each have their own
 * status/mask register pair.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings (presumably
		 * re-enabled from the NAPI poll — not visible here) */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: re-arm the affected rings' descriptors */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
/* Register addresses and accessor callbacks for the RTL838x family */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
/* Register addresses and accessor callbacks for the RTL839x family */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
/* Register addresses and accessor callbacks for the RTL930x family;
 * note the split interrupt registers and the L2 notification interface.
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
/* Register addresses and accessor callbacks for the RTL931x family;
 * same layout style as the RTL930x table above.
 */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
  564. static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
  565. {
  566. u32 int_saved, nbuf;
  567. u32 reset_mask;
  568. pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
  569. sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
  570. mdelay(100);
  571. /* Disable and clear interrupts */
  572. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
  573. sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
  574. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
  575. sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
  576. sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
  577. sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
  578. sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
  579. } else {
  580. sw_w32(0x00000000, priv->r->dma_if_intr_msk);
  581. sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
  582. }
  583. if (priv->family_id == RTL8390_FAMILY_ID) {
  584. /* Preserve L2 notification and NBUF settings */
  585. int_saved = sw_r32(priv->r->dma_if_intr_msk);
  586. nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
  587. /* Disable link change interrupt on RTL839x */
  588. sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
  589. sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
  590. sw_w32(0x00000000, priv->r->dma_if_intr_msk);
  591. sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
  592. }
  593. /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
  594. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
  595. reset_mask = 0x6;
  596. else
  597. reset_mask = 0xc;
  598. sw_w32_mask(0, reset_mask, priv->r->rst_glb_ctrl);
  599. do { /* Wait for reset of NIC and Queues done */
  600. udelay(20);
  601. } while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
  602. mdelay(100);
  603. /* Setup Head of Line */
  604. if (priv->family_id == RTL8380_FAMILY_ID)
  605. sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); /* Disabled on RTL8380 */
  606. if (priv->family_id == RTL8390_FAMILY_ID)
  607. sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
  608. if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
  609. for (int i = 0; i < priv->rxrings; i++) {
  610. int pos = (i % 3) * 10;
  611. sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
  612. sw_w32_mask(0x3ff << pos, priv->rxringlen,
  613. priv->r->dma_if_rx_ring_cntr(i));
  614. }
  615. }
  616. /* Re-enable link change interrupt */
  617. if (priv->family_id == RTL8390_FAMILY_ID) {
  618. sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
  619. sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
  620. sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
  621. sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
  622. /* Restore notification settings: on RTL838x these bits are null */
  623. sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
  624. sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
  625. }
  626. }
  627. static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
  628. {
  629. struct ring_b *ring = priv->membase;
  630. for (int i = 0; i < priv->rxrings; i++)
  631. sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
  632. for (int i = 0; i < TXRINGS; i++)
  633. sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
  634. }
/* Enable RX/TX DMA and the CPU port on RTL838x SoCs.
 * The register write order matters: DMA configuration and interrupt
 * unmasking happen before the DMA enable bits and the port is opened last.
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
/* Enable RX/TX DMA and the CPU port on RTL839x SoCs.
 * Also adds the CPU port to the L2 lookup-miss flooding portmask via an
 * indirect read-modify-write through the L2 table access registers
 * (0x28000 = read command, 0x38000 = write command — TODO confirm the
 * exact command encoding against the RTL839x datasheet).
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); /* Notify IRQ! */

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	/* TODO: The code below should also work for the RTL838x */
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
/* Enable RX/TX DMA and the CPU port on RTL930x/931x SoCs.
 * RX ring sizes live in packed 10-bit fields, three rings per 32-bit
 * register; the counter register is rewritten with its current value
 * (write-to-clear semantics — presumably clears the counter; confirm
 * against the datasheet).
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (int i = 0; i < priv->rxrings; i++) {
		/* Bit offset of ring i's 10-bit field within its register */
		int pos = (i % 3) * 10;
		u32 v;

		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		/* Some SoCs have issues with missing underflow protection */
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking (BIT(4) here,
	 * unlike BIT(3) on the 83xx families)
	 */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* Add the CPU port to the unknown-unicast flooding portmask */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force the CPU port's MAC configuration (family-specific encoding) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
/* Initialize the RX and TX descriptor rings and their packet headers.
 * Descriptor entries hold KSEG1 (uncached) addresses of the p_hdr
 * structures. Bit 0 of a descriptor is the ownership bit (1 = owned by
 * the switch), WRAP marks the last entry of a ring.
 */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
	for (int i = 0; i < priv->rxrings; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < priv->rxringlen; j++) {
			h = &ring->rx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			/* Each header points at its slice of the contiguous
			 * rx_space area, RING_BUFFER bytes per packet
			 */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         i * priv->rxringlen * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;

			/* All rings owned by switch, last one wraps */
			ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (priv->rxringlen - 1) ?
			                   WRAP :
			                   0);
		}
		ring->c_rx[i] = 0;	/* CPU's current RX position */
	}

	for (int i = 0; i < TXRINGS; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < TXRINGLEN; j++) {
			h = &ring->tx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->tx_space +
			                         i * TXRINGLEN * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* TX descriptors start CPU-owned (bit 0 clear) */
			ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
		}
		/* Last header is wrapping around */
		ring->tx_r[i][j - 1] |= WRAP;
		ring->c_tx[i] = 0;	/* CPU's current TX position */
	}
}
/* Set up the RTL839x L2 notification ring, which reports FDB (learning /
 * aging / suspend) events to the CPU. The notify_b block lives directly
 * behind the packet ring area in priv->membase. Ring entries use the same
 * ownership (bit 0) and WRAP conventions as the packet rings.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	for (int i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* Notification threshold/timeout field (bits 11:2) set to 100 —
	 * TODO confirm unit against the datasheet
	 */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); /* RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN */
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); /* SUSPEND_NOTIFICATION_EN */

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
/* ndo_open callback: reset the hardware, build the rings, start phylink
 * and NAPI, apply family-specific trap/flood settings and open the TX
 * queues. The whole sequence runs under priv->lock with IRQs disabled.
 * Always returns 0.
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
	         __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (int i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;
	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;
	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;
	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);
		/* Disable External CPU access to switch, clear EXT_CPU_EN */
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);
		/* Set PCIE_PWR_DOWN */
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/* Quiesce the hardware: stop the CPU port, disable DMA, block traffic,
 * flush the L2 address caches, force the CPU-port link down and mask all
 * interrupts. Counterpart of the *_hw_en_rxtx() functions.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Family-specific force-mode value and IRQ-ack mask (combined
	 * interrupt register layout differs between 838x and 839x)
	 */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;

	/* Disable RX/TX from/to CPU-port */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); /* Test, whether this is needed */

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache; busy-wait on the command bit per port.
	 * NOTE(review): these loops have no timeout — a stuck flush engine
	 * would hang here.
	 */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	/* TODO: L2 flush register is 64 bit on RTL931X and 930X */

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
  864. static int rtl838x_eth_stop(struct net_device *ndev)
  865. {
  866. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  867. pr_info("in %s\n", __func__);
  868. phylink_stop(priv->phylink);
  869. rtl838x_hw_stop(priv);
  870. for (int i = 0; i < priv->rxrings; i++)
  871. napi_disable(&priv->rx_qs[i].napi);
  872. netif_tx_stop_all_queues(ndev);
  873. return 0;
  874. }
  875. static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
  876. {
  877. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  878. * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
  879. */
  880. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  881. sw_w32(0x0, RTL838X_RMA_CTRL_0);
  882. sw_w32(0x0, RTL838X_RMA_CTRL_1);
  883. }
  884. if (ndev->flags & IFF_ALLMULTI)
  885. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  886. if (ndev->flags & IFF_PROMISC) {
  887. sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
  888. sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
  889. }
  890. }
  891. static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
  892. {
  893. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  894. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  895. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  896. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  897. */
  898. if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
  899. sw_w32(0x0, RTL839X_RMA_CTRL_0);
  900. sw_w32(0x0, RTL839X_RMA_CTRL_1);
  901. sw_w32(0x0, RTL839X_RMA_CTRL_2);
  902. sw_w32(0x0, RTL839X_RMA_CTRL_3);
  903. }
  904. if (ndev->flags & IFF_ALLMULTI) {
  905. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  906. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  907. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  908. }
  909. if (ndev->flags & IFF_PROMISC) {
  910. sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
  911. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
  912. sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
  913. sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
  914. }
  915. }
  916. static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
  917. {
  918. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  919. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  920. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
  921. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  922. */
  923. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  924. sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
  925. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
  926. sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
  927. } else {
  928. sw_w32(0x0, RTL930X_RMA_CTRL_0);
  929. sw_w32(0x0, RTL930X_RMA_CTRL_1);
  930. sw_w32(0x0, RTL930X_RMA_CTRL_2);
  931. }
  932. }
  933. static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
  934. {
  935. /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
  936. * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
  937. * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
  938. * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
  939. */
  940. if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
  941. sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
  942. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
  943. sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
  944. } else {
  945. sw_w32(0x0, RTL931X_RMA_CTRL_0);
  946. sw_w32(0x0, RTL931X_RMA_CTRL_1);
  947. sw_w32(0x0, RTL931X_RMA_CTRL_2);
  948. }
  949. }
/* ndo_tx_timeout callback: recover from a stuck TX path by a full
 * hardware stop / ring re-setup / re-enable cycle under priv->lock.
 * NOTE(review): this always calls the RTL838x-specific en_rxtx routine —
 * presumably tx_timeout recovery is only exercised on 838x; confirm for
 * the other families.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* ndo_start_xmit callback: copy the skb into the TX ring buffer and kick
 * the DMA engine.
 * A trailing 4-byte DSA tag (0x80, port, 0x10, 0x00) appended by the tag
 * driver is stripped here and converted into the hardware CPU-tag header
 * via create_tx_header(); its bytes are zeroed so the space can be reused
 * for the CRC.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the descriptor is still
 * owned by the switch.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) /* Check for high prio queue */
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);

	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) &&
	    skb->data[len - 4] == 0x80 &&
	    skb->data[len - 3] < priv->cpu_port &&
	    skb->data[len - 2] == 0x10 &&
	    skb->data[len - 1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len - 3];
		skb->data[len - 4] = skb->data[len - 3] = skb->data[len - 2] = skb->data[len - 1] = 0x00;
		len -= 4;
	}

	len += 4; /* Add space for CRC */

	if (skb_padto(skb, len)) {
		/* skb was freed by skb_padto on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {
		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		/* On RTL8380 SoCs, small packet lengths being sent need adjustments */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		/* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			/* Bounded poll for bits 2+3 of dma_if_ctrl; gives up
			 * silently after 10 reads
			 */
			for (int i = 0; i < 10; i++) {
				u32 val = sw_r32(priv->r->dma_if_ctrl);

				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
			/* Ring ID q == 0: Low priority, Ring ID = 1: High prio queue */
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}

txdone:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
  1043. /* Return queue number for TX. On the RTL83XX, these queues have equal priority
  1044. * so we do round-robin
  1045. */
  1046. u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1047. struct net_device *sb_dev)
  1048. {
  1049. static u8 last = 0;
  1050. last++;
  1051. return last % TXRINGS;
  1052. }
  1053. /* Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
  1054. */
  1055. u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
  1056. struct net_device *sb_dev)
  1057. {
  1058. if (skb->priority >= TC_PRIO_CONTROL)
  1059. return 1;
  1060. return 0;
  1061. }
  1062. static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
  1063. {
  1064. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1065. struct ring_b *ring = priv->membase;
  1066. LIST_HEAD(rx_list);
  1067. unsigned long flags;
  1068. int work_done = 0;
  1069. u32 *last;
  1070. bool dsa = netdev_uses_dsa(dev);
  1071. pr_debug("---------------------------------------------------------- RX - %d\n", r);
  1072. spin_lock_irqsave(&priv->lock, flags);
  1073. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1074. do {
  1075. struct sk_buff *skb;
  1076. struct dsa_tag tag;
  1077. struct p_hdr *h;
  1078. u8 *skb_data;
  1079. u8 *data;
  1080. int len;
  1081. if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
  1082. if (&ring->rx_r[r][ring->c_rx[r]] != last) {
  1083. netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
  1084. r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
  1085. }
  1086. break;
  1087. }
  1088. h = &ring->rx_header[r][ring->c_rx[r]];
  1089. data = (u8 *)KSEG1ADDR(h->buf);
  1090. len = h->len;
  1091. if (!len)
  1092. break;
  1093. work_done++;
  1094. len -= 4; /* strip the CRC */
  1095. /* Add 4 bytes for cpu_tag */
  1096. if (dsa)
  1097. len += 4;
  1098. skb = netdev_alloc_skb(dev, len + 4);
  1099. skb_reserve(skb, NET_IP_ALIGN);
  1100. if (likely(skb)) {
  1101. /* BUG: Prevent bug on RTL838x SoCs */
  1102. if (priv->family_id == RTL8380_FAMILY_ID) {
  1103. sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
  1104. for (int i = 0; i < priv->rxrings; i++) {
  1105. unsigned int val;
  1106. /* Update each ring cnt */
  1107. val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
  1108. sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
  1109. }
  1110. }
  1111. skb_data = skb_put(skb, len);
  1112. /* Make sure data is visible */
  1113. mb();
  1114. memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
  1115. /* Overwrite CRC with cpu_tag */
  1116. if (dsa) {
  1117. priv->r->decode_tag(h, &tag);
  1118. skb->data[len - 4] = 0x80;
  1119. skb->data[len - 3] = tag.port;
  1120. skb->data[len - 2] = 0x10;
  1121. skb->data[len - 1] = 0x00;
  1122. if (tag.l2_offloaded)
  1123. skb->data[len - 3] |= 0x40;
  1124. }
  1125. if (tag.queue >= 0)
  1126. pr_debug("Queue: %d, len: %d, reason %d port %d\n",
  1127. tag.queue, len, tag.reason, tag.port);
  1128. skb->protocol = eth_type_trans(skb, dev);
  1129. if (dev->features & NETIF_F_RXCSUM) {
  1130. if (tag.crc_error)
  1131. skb_checksum_none_assert(skb);
  1132. else
  1133. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1134. }
  1135. dev->stats.rx_packets++;
  1136. dev->stats.rx_bytes += len;
  1137. list_add_tail(&skb->list, &rx_list);
  1138. } else {
  1139. if (net_ratelimit())
  1140. dev_warn(&dev->dev, "low on memory - packet dropped\n");
  1141. dev->stats.rx_dropped++;
  1142. }
  1143. /* Reset header structure */
  1144. memset(h, 0, sizeof(struct p_hdr));
  1145. h->buf = data;
  1146. h->size = RING_BUFFER;
  1147. ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
  1148. WRAP :
  1149. 0x1);
  1150. ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
  1151. last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
  1152. } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
  1153. netif_receive_skb_list(&rx_list);
  1154. /* Update counters */
  1155. priv->r->update_cntr(r, 0);
  1156. spin_unlock_irqrestore(&priv->lock, flags);
  1157. return work_done;
  1158. }
/* NAPI poll callback for a single RX ring.
 * Drains the ring via rtl838x_hw_receive() until either the budget is
 * exhausted or no more packets arrive; re-enables the ring's RX interrupt
 * only when the budget was not fully used (standard NAPI contract).
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* Combined mask register: ring r's done bit lives at r + 8 */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}

	return work_done;
}
/* phylink validate callback: restrict the advertised/supported link modes
 * to what the CPU port can do. Rejects unknown PHY interface modes
 * entirely; otherwise allows 10/100 (half+full) always and 1000 (half+
 * full) on everything except MII/REVMII.
 */
static void rtl838x_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	pr_debug("In %s\n", __func__);

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_1000BASEX &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_SGMII) {
		/* Unsupported interface mode: advertise nothing */
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		pr_err("Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	/* Intersect caller's masks with what we computed above */
	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
/* phylink mac_config callback — intentionally a no-op. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
/* phylink mac_an_restart callback: restart autonegotiation on the CPU
 * port by toggling the force-mode LINK_EN bit (0x6192D -> 0x6192F; same
 * base value as programmed in rtl838x_hw_en_rxtx()). RTL838x only.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);

	/* Restart by disabling and re-enabling link */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
  1244. static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
  1245. struct phylink_link_state *state)
  1246. {
  1247. u32 speed;
  1248. struct net_device *dev = container_of(config->dev, struct net_device, dev);
  1249. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1250. int port = priv->cpu_port;
  1251. pr_info("In %s\n", __func__);
  1252. state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
  1253. state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
  1254. pr_info("%s link status is %d\n", __func__, state->link);
  1255. speed = priv->r->get_mac_link_spd_sts(port);
  1256. switch (speed) {
  1257. case 0:
  1258. state->speed = SPEED_10;
  1259. break;
  1260. case 1:
  1261. state->speed = SPEED_100;
  1262. break;
  1263. case 2:
  1264. state->speed = SPEED_1000;
  1265. break;
  1266. case 5:
  1267. state->speed = SPEED_2500;
  1268. break;
  1269. case 6:
  1270. state->speed = SPEED_5000;
  1271. break;
  1272. case 4:
  1273. state->speed = SPEED_10000;
  1274. break;
  1275. default:
  1276. state->speed = SPEED_UNKNOWN;
  1277. break;
  1278. }
  1279. state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
  1280. if (priv->r->get_mac_rx_pause_sts(port))
  1281. state->pause |= MLO_PAUSE_RX;
  1282. if (priv->r->get_mac_tx_pause_sts(port))
  1283. state->pause |= MLO_PAUSE_TX;
  1284. }
/* phylink mac_link_down callback: stop TX/RX on the CPU port by clearing
 * its two enable bits in the port control register.
 */
static void rtl838x_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);

	/* Stop TX/RX to port */
	sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
}
/* phylink mac_link_up callback: re-enable TX/RX on the CPU port.
 * Speed/duplex/pause arguments are ignored — the force-mode register is
 * configured elsewhere (see *_hw_en_rxtx()).
 */
static void rtl838x_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);

	/* Restart TX/RX to port */
	sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
}
  1306. static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
  1307. {
  1308. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1309. unsigned long flags;
  1310. spin_lock_irqsave(&priv->lock, flags);
  1311. pr_debug("In %s\n", __func__);
  1312. sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
  1313. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
  1314. if (priv->family_id == RTL8380_FAMILY_ID) {
  1315. /* 2 more registers, ALE/MAC block */
  1316. sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
  1317. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
  1318. (RTL838X_MAC_ALE + 4));
  1319. sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
  1320. sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
  1321. RTL838X_MAC2 + 4);
  1322. }
  1323. spin_unlock_irqrestore(&priv->lock, flags);
  1324. }
  1325. static int rtl838x_set_mac_address(struct net_device *dev, void *p)
  1326. {
  1327. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1328. const struct sockaddr *addr = p;
  1329. u8 *mac = (u8 *) (addr->sa_data);
  1330. if (!is_valid_ether_addr(addr->sa_data))
  1331. return -EADDRNOTAVAIL;
  1332. dev_addr_set(dev, addr->sa_data);
  1333. rtl838x_set_mac_hw(dev, mac);
  1334. pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
  1335. return 0;
  1336. }
  1337. static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
  1338. {
  1339. /* We will need to set-up EEE and the egress-rate limitation */
  1340. return 0;
  1341. }
/* Basic MAC initialization.
 *
 * Dispatches RTL8390-family chips to their own (stub) routine and does
 * nothing for families other than RTL8380, since EEE setup is only known
 * for that SoC. For RTL8380/8382 it programs the EEE TX timers and zeroes
 * an undocumented per-port register block.
 */
static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
{
	if (priv->family_id == 0x8390)
		return rtl8390_init_mac(priv);

	/* At present we do not know how to set up EEE on any other SoC than RTL8380 */
	if (priv->family_id != 0x8380)
		return 0;

	pr_info("%s\n", __func__);

	/* fix timer for EEE */
	sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
	sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);

	/* Init VLAN. TODO: Understand what is being done, here.
	 * 0xd57c + i * 0x80 is an undocumented per-port register (stride 0x80);
	 * the 0x8380 variant skips the first 8 ports, the 0x8382 clears all 29.
	 */
	if (priv->id == 0x8382) {
		for (int i = 0; i <= 28; i++)
			sw_w32(0, 0xd57c + i * 0x80);
	}
	if (priv->id == 0x8380) {
		for (int i = 8; i <= 28; i++)
			sw_w32(0, 0xd57c + i * 0x80);
	}

	return 0;
}
  1364. static int rtl838x_get_link_ksettings(struct net_device *ndev,
  1365. struct ethtool_link_ksettings *cmd)
  1366. {
  1367. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1368. pr_debug("%s called\n", __func__);
  1369. return phylink_ethtool_ksettings_get(priv->phylink, cmd);
  1370. }
  1371. static int rtl838x_set_link_ksettings(struct net_device *ndev,
  1372. const struct ethtool_link_ksettings *cmd)
  1373. {
  1374. struct rtl838x_eth_priv *priv = netdev_priv(ndev);
  1375. pr_debug("%s called\n", __func__);
  1376. return phylink_ethtool_ksettings_set(priv->phylink, cmd);
  1377. }
  1378. static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1379. {
  1380. u32 val;
  1381. int err;
  1382. struct rtl838x_eth_priv *priv = bus->priv;
  1383. if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
  1384. return rtl838x_read_sds_phy(mii_id, regnum);
  1385. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1386. err = rtl838x_read_mmd_phy(mii_id,
  1387. mdiobus_c45_devad(regnum),
  1388. regnum, &val);
  1389. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1390. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1391. val, err);
  1392. } else {
  1393. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1394. err = rtl838x_read_phy(mii_id, page, regnum, &val);
  1395. }
  1396. if (err)
  1397. return err;
  1398. return val;
  1399. }
  1400. static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1401. {
  1402. return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
  1403. }
  1404. static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1405. {
  1406. u32 val;
  1407. int err;
  1408. struct rtl838x_eth_priv *priv = bus->priv;
  1409. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1410. return rtl839x_read_sds_phy(mii_id, regnum);
  1411. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1412. err = rtl839x_read_mmd_phy(mii_id,
  1413. mdiobus_c45_devad(regnum),
  1414. regnum, &val);
  1415. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1416. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1417. val, err);
  1418. } else {
  1419. err = rtl839x_read_phy(mii_id, page, regnum, &val);
  1420. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1421. }
  1422. if (err)
  1423. return err;
  1424. return val;
  1425. }
  1426. static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1427. {
  1428. return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
  1429. }
  1430. static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1431. {
  1432. u32 val;
  1433. int err;
  1434. struct rtl838x_eth_priv *priv = bus->priv;
  1435. if (priv->phy_is_internal[mii_id])
  1436. return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1437. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1438. err = rtl930x_read_mmd_phy(mii_id,
  1439. mdiobus_c45_devad(regnum),
  1440. regnum, &val);
  1441. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1442. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1443. val, err);
  1444. } else {
  1445. err = rtl930x_read_phy(mii_id, page, regnum, &val);
  1446. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1447. }
  1448. if (err)
  1449. return err;
  1450. return val;
  1451. }
  1452. static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1453. {
  1454. return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
  1455. }
  1456. static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
  1457. {
  1458. u32 val;
  1459. int err, v;
  1460. struct rtl838x_eth_priv *priv = bus->priv;
  1461. pr_debug("%s: In here, port %d\n", __func__, mii_id);
  1462. if (priv->phy_is_internal[mii_id]) {
  1463. v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
  1464. if (v < 0) {
  1465. err = v;
  1466. } else {
  1467. err = 0;
  1468. val = v;
  1469. }
  1470. } else {
  1471. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1472. err = rtl931x_read_mmd_phy(mii_id,
  1473. mdiobus_c45_devad(regnum),
  1474. regnum, &val);
  1475. pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
  1476. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1477. val, err);
  1478. } else {
  1479. err = rtl931x_read_phy(mii_id, page, regnum, &val);
  1480. pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
  1481. }
  1482. }
  1483. if (err)
  1484. return err;
  1485. return val;
  1486. }
  1487. static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  1488. {
  1489. return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
  1490. }
/* Paged MDIO write for the RTL8380 family.
 *
 * SerDes ports 24-27 of the RTL8380 are written through the memory-mapped
 * fibre register block instead of the SMI bus; port 26 uses a second
 * register bank at offset 0x100, and the register number is scaled by 4
 * (32-bit registers). Clause-45 / C22-MMD accesses go through the MMD
 * helper; everything else is a plain paged clause-22 write.
 *
 * Returns 0 on success or a negative error code.
 */
static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	u32 offset = 0;
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
		if (mii_id == 26)
			offset = 0x100;
		sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
		return 0;
	}

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					    regnum, value);
		pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 value, err);
		return err;
	}

	err = rtl838x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
  1515. static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
  1516. int regnum, u16 value)
  1517. {
  1518. return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1519. }
  1520. static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1521. int regnum, u16 value)
  1522. {
  1523. struct rtl838x_eth_priv *priv = bus->priv;
  1524. int err;
  1525. if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
  1526. return rtl839x_write_sds_phy(mii_id, regnum, value);
  1527. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1528. err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1529. regnum, value);
  1530. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1531. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1532. value, err);
  1533. return err;
  1534. }
  1535. err = rtl839x_write_phy(mii_id, page, regnum, value);
  1536. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1537. return err;
  1538. }
  1539. static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
  1540. int regnum, u16 value)
  1541. {
  1542. return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1543. }
  1544. static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1545. int regnum, u16 value)
  1546. {
  1547. struct rtl838x_eth_priv *priv = bus->priv;
  1548. int err;
  1549. if (priv->phy_is_internal[mii_id])
  1550. return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1551. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
  1552. return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1553. regnum, value);
  1554. err = rtl930x_write_phy(mii_id, page, regnum, value);
  1555. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1556. return err;
  1557. }
  1558. static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
  1559. int regnum, u16 value)
  1560. {
  1561. return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1562. }
  1563. static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
  1564. int regnum, u16 value)
  1565. {
  1566. struct rtl838x_eth_priv *priv = bus->priv;
  1567. int err;
  1568. if (priv->phy_is_internal[mii_id])
  1569. return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
  1570. if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
  1571. err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
  1572. regnum, value);
  1573. pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
  1574. mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
  1575. value, err);
  1576. return err;
  1577. }
  1578. err = rtl931x_write_phy(mii_id, page, regnum, value);
  1579. pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
  1580. return err;
  1581. }
  1582. static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
  1583. int regnum, u16 value)
  1584. {
  1585. return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
  1586. }
/* mii_bus .reset for the RTL8380 family: hand PHY control to the SoC.
 *
 * Polling by the MAC must be off while software configures the PHYs,
 * otherwise the hardware poller races with our SMI accesses.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC (bit 15 of the global SMI control) */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
	return 0;
}
/* mii_bus .reset for the RTL8390 family.
 *
 * Deliberately a no-op: the code after the early return is the intended
 * implementation but is known not to work on this hardware (see the BUG
 * note below), so it is kept for reference and skipped at runtime.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	return 0;

	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
	return 0;
}
/* Bit position of each port's MAC-type field in RTL930X_SMI_MAC_TYPE_CTRL.
 * Ports 0-23 share a field in groups of four (bits 0, 2, 4, ...); the
 * remaining ports have individual fields starting at bits 12/15/18/21.
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
/* mii_bus .reset for the RTL9300 family: set up the SMI controller.
 *
 * Maps every front-panel port to its SMI bus and PHY address, selects
 * clause-45 polling for busses that carry a c45 PHY, programs the per-port
 * MAC type and enables Realtek-private polling where needed. Finally dumps
 * the resulting register state at debug level.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[2];
	u32 poll_ctrl = 0;
	u32 private_poll_mask = 0;
	u32 v;
	bool uses_usxgmii = false; /* For the Aquantia PHYs */
	bool uses_hisgmii = false; /* For the RTL8221/8226 */

	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = 0;
	for (int i = 0; i < RTL930X_CPU_PORT; i++) {
		int pos;

		/* Only SMI busses 0-3 exist; skip ports without one */
		if (priv->smi_bus[i] > 3)
			continue;

		/* 5-bit PHY address per port, 6 ports per 32-bit register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
			    RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);

		/* 2-bit SMI-bus selector per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
	}

	/* Configure which SMI bus is behind which port number */
	sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
	sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);

	/* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
	sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);

	/* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
	for (int i = 0; i < 4; i++)
		if (priv->smi_bus_isc45[i])
			c45_mask |= BIT(i + 16);

	pr_info("c45_mask: %08x\n", c45_mask);
	sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);

	/* Set the MAC type of each port according to the PHY-interface */
	/* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
	v = 0;
	for (int i = 0; i < RTL930X_CPU_PORT; i++) {
		switch (priv->interfaces[i]) {
		case PHY_INTERFACE_MODE_10GBASER:
			break;			/* Serdes: Value = 0 */
		case PHY_INTERFACE_MODE_HSGMII:
			private_poll_mask |= BIT(i);
			fallthrough;
		case PHY_INTERFACE_MODE_USXGMII:
			v |= BIT(mac_type_bit[i]);
			uses_usxgmii = true;
			break;
		case PHY_INTERFACE_MODE_QSGMII:
			private_poll_mask |= BIT(i);
			v |= 3 << mac_type_bit[i];
			break;
		default:
			break;
		}
	}
	sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);

	/* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
	sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);

	/* The following magic values are found in the port configuration, they seem to
	 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
	 * the XGS1250 and the RTL8226 of the XGS1210
	 */
	if (uses_usxgmii) {
		sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}
	/* NOTE(review): uses_hisgmii is never set to true in this function,
	 * so this branch is currently dead code — confirm whether the HSGMII
	 * case above was meant to set it.
	 */
	if (uses_hisgmii) {
		sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}

	pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_GLB_CTRL));
	pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
	pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));

	return 0;
}
/* mii_bus .reset for the RTL9310 family: set up the SMI controller.
 *
 * Temporarily disables port polling, maps each of the 56 ports to its SMI
 * bus and PHY address, enables MDC on busses that are actually used and
 * selects clause-45 polling for busses carrying c45 PHYs.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	/* Disable port polling for configuration purposes */
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (int i = 0; i < 56; i++) {
		u32 pos;

		/* 5-bit PHY address per port, 6 ports per 32-bit register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);

		/* 2-bit SMI-bus selector per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		mdc_on[priv->smi_bus[i]] = true;
	}

	/* Configure which SMI bus is behind which port number */
	for (int i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	/* Configure which SMI busses */
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (int i = 0; i < 4; i++) {
		/* bus is polled in c45 */
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
		/* Enable bus access via MDC */
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	 * sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	 * sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	 * sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	 */
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
/* One-time RTL9310 chip setup: initialize the on-chip table memories.
 *
 * Each memory block is kicked off by writing its init register and then
 * busy-waited until the hardware clears the init bit(s). Note these loops
 * have no timeout — they rely on the hardware always completing.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	/* Initialize Encapsulation memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	/* Initialize Management Information Base memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	/* Initialize ACL (PIE) memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	/* Initialize ALE memory and wait until finished */
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	/* Enable ESD auto recovery */
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	/* Init SPI, is this for thermal control or what? */
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
  1781. static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
  1782. {
  1783. struct device_node *mii_np, *dn;
  1784. u32 pn;
  1785. int ret;
  1786. pr_debug("%s called\n", __func__);
  1787. mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
  1788. if (!mii_np) {
  1789. dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
  1790. return -ENODEV;
  1791. }
  1792. if (!of_device_is_available(mii_np)) {
  1793. ret = -ENODEV;
  1794. goto err_put_node;
  1795. }
  1796. priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
  1797. if (!priv->mii_bus) {
  1798. ret = -ENOMEM;
  1799. goto err_put_node;
  1800. }
  1801. switch(priv->family_id) {
  1802. case RTL8380_FAMILY_ID:
  1803. priv->mii_bus->name = "rtl838x-eth-mdio";
  1804. priv->mii_bus->read = rtl838x_mdio_read;
  1805. priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
  1806. priv->mii_bus->write = rtl838x_mdio_write;
  1807. priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
  1808. priv->mii_bus->reset = rtl838x_mdio_reset;
  1809. break;
  1810. case RTL8390_FAMILY_ID:
  1811. priv->mii_bus->name = "rtl839x-eth-mdio";
  1812. priv->mii_bus->read = rtl839x_mdio_read;
  1813. priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
  1814. priv->mii_bus->write = rtl839x_mdio_write;
  1815. priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
  1816. priv->mii_bus->reset = rtl839x_mdio_reset;
  1817. break;
  1818. case RTL9300_FAMILY_ID:
  1819. priv->mii_bus->name = "rtl930x-eth-mdio";
  1820. priv->mii_bus->read = rtl930x_mdio_read;
  1821. priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
  1822. priv->mii_bus->write = rtl930x_mdio_write;
  1823. priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
  1824. priv->mii_bus->reset = rtl930x_mdio_reset;
  1825. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1826. break;
  1827. case RTL9310_FAMILY_ID:
  1828. priv->mii_bus->name = "rtl931x-eth-mdio";
  1829. priv->mii_bus->read = rtl931x_mdio_read;
  1830. priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
  1831. priv->mii_bus->write = rtl931x_mdio_write;
  1832. priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
  1833. priv->mii_bus->reset = rtl931x_mdio_reset;
  1834. priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
  1835. break;
  1836. }
  1837. priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
  1838. priv->mii_bus->priv = priv;
  1839. priv->mii_bus->parent = &priv->pdev->dev;
  1840. for_each_node_by_name(dn, "ethernet-phy") {
  1841. u32 smi_addr[2];
  1842. if (of_property_read_u32(dn, "reg", &pn))
  1843. continue;
  1844. if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
  1845. smi_addr[0] = 0;
  1846. smi_addr[1] = pn;
  1847. }
  1848. if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
  1849. priv->sds_id[pn] = -1;
  1850. else {
  1851. pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
  1852. }
  1853. if (pn < MAX_PORTS) {
  1854. priv->smi_bus[pn] = smi_addr[0];
  1855. priv->smi_addr[pn] = smi_addr[1];
  1856. } else {
  1857. pr_err("%s: illegal port number %d\n", __func__, pn);
  1858. }
  1859. if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
  1860. priv->smi_bus_isc45[smi_addr[0]] = true;
  1861. if (of_property_read_bool(dn, "phy-is-integrated")) {
  1862. priv->phy_is_internal[pn] = true;
  1863. }
  1864. }
  1865. dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
  1866. if (!dn) {
  1867. dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
  1868. return -ENODEV;
  1869. }
  1870. for_each_node_by_name(dn, "port") {
  1871. if (of_property_read_u32(dn, "reg", &pn))
  1872. continue;
  1873. pr_debug("%s Looking at port %d\n", __func__, pn);
  1874. if (pn > priv->cpu_port)
  1875. continue;
  1876. if (of_get_phy_mode(dn, &priv->interfaces[pn]))
  1877. priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
  1878. pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
  1879. }
  1880. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
  1881. ret = of_mdiobus_register(priv->mii_bus, mii_np);
  1882. err_put_node:
  1883. of_node_put(mii_np);
  1884. return ret;
  1885. }
  1886. static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
  1887. {
  1888. pr_debug("%s called\n", __func__);
  1889. if (!priv->mii_bus)
  1890. return 0;
  1891. mdiobus_unregister(priv->mii_bus);
  1892. mdiobus_free(priv->mii_bus);
  1893. return 0;
  1894. }
  1895. static netdev_features_t rtl838x_fix_features(struct net_device *dev,
  1896. netdev_features_t features)
  1897. {
  1898. return features;
  1899. }
  1900. static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
  1901. {
  1902. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1903. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1904. if (!(features & NETIF_F_RXCSUM))
  1905. sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1906. else
  1907. sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
  1908. }
  1909. return 0;
  1910. }
  1911. static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
  1912. {
  1913. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  1914. if ((features ^ dev->features) & NETIF_F_RXCSUM) {
  1915. if (!(features & NETIF_F_RXCSUM))
  1916. sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
  1917. else
  1918. sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
  1919. }
  1920. return 0;
  1921. }
/* netdev ops for the RTL8380 family (8 TX-queue scheme, 83xx features) */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL8390 family — differs from 838x only in the
 * multicast-list handler
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL9300 family (93xx queue selection and features) */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
/* netdev ops for the RTL9310 family.
 * NOTE(review): unlike the other three variants this one has no
 * .ndo_setup_tc entry — confirm whether tc offload is intentionally
 * unsupported on RTL931x.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
/* phylink MAC callbacks shared by all supported SoC families */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
/* ethtool ops: link settings only, both delegated to phylink */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
  1985. static int __init rtl838x_eth_probe(struct platform_device *pdev)
  1986. {
  1987. struct net_device *dev;
  1988. struct device_node *dn = pdev->dev.of_node;
  1989. struct rtl838x_eth_priv *priv;
  1990. struct resource *res, *mem;
  1991. phy_interface_t phy_mode;
  1992. struct phylink *phylink;
  1993. u8 mac_addr[ETH_ALEN];
  1994. int err = 0, rxrings, rxringlen;
  1995. struct ring_b *ring;
  1996. pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
  1997. (u32)pdev, (u32)(&(pdev->dev)));
  1998. if (!dn) {
  1999. dev_err(&pdev->dev, "No DT found\n");
  2000. return -EINVAL;
  2001. }
  2002. rxrings = (soc_info.family == RTL8380_FAMILY_ID
  2003. || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
  2004. rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
  2005. rxringlen = MAX_ENTRIES / rxrings;
  2006. rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
  2007. dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
  2008. if (!dev) {
  2009. err = -ENOMEM;
  2010. goto err_free;
  2011. }
  2012. SET_NETDEV_DEV(dev, &pdev->dev);
  2013. priv = netdev_priv(dev);
  2014. /* obtain buffer memory space */
  2015. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2016. if (res) {
  2017. mem = devm_request_mem_region(&pdev->dev, res->start,
  2018. resource_size(res), res->name);
  2019. if (!mem) {
  2020. dev_err(&pdev->dev, "cannot request memory space\n");
  2021. err = -ENXIO;
  2022. goto err_free;
  2023. }
  2024. dev->mem_start = mem->start;
  2025. dev->mem_end = mem->end;
  2026. } else {
  2027. dev_err(&pdev->dev, "cannot request IO resource\n");
  2028. err = -ENXIO;
  2029. goto err_free;
  2030. }
  2031. /* Allocate buffer memory */
  2032. priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER +
  2033. sizeof(struct ring_b) + sizeof(struct notify_b),
  2034. (void *)&dev->mem_start, GFP_KERNEL);
  2035. if (!priv->membase) {
  2036. dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
  2037. err = -ENOMEM;
  2038. goto err_free;
  2039. }
  2040. /* Allocate ring-buffer space at the end of the allocated memory */
  2041. ring = priv->membase;
  2042. ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
  2043. spin_lock_init(&priv->lock);
  2044. dev->ethtool_ops = &rtl838x_ethtool_ops;
  2045. dev->min_mtu = ETH_ZLEN;
  2046. dev->max_mtu = 1536;
  2047. dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
  2048. dev->hw_features = NETIF_F_RXCSUM;
  2049. priv->id = soc_info.id;
  2050. priv->family_id = soc_info.family;
  2051. if (priv->id) {
  2052. pr_info("Found SoC ID: %4x: %s, family %x\n",
  2053. priv->id, soc_info.name, priv->family_id);
  2054. } else {
  2055. pr_err("Unknown chip id (%04x)\n", priv->id);
  2056. return -ENODEV;
  2057. }
  2058. switch (priv->family_id) {
  2059. case RTL8380_FAMILY_ID:
  2060. priv->cpu_port = RTL838X_CPU_PORT;
  2061. priv->r = &rtl838x_reg;
  2062. dev->netdev_ops = &rtl838x_eth_netdev_ops;
  2063. break;
  2064. case RTL8390_FAMILY_ID:
  2065. priv->cpu_port = RTL839X_CPU_PORT;
  2066. priv->r = &rtl839x_reg;
  2067. dev->netdev_ops = &rtl839x_eth_netdev_ops;
  2068. break;
  2069. case RTL9300_FAMILY_ID:
  2070. priv->cpu_port = RTL930X_CPU_PORT;
  2071. priv->r = &rtl930x_reg;
  2072. dev->netdev_ops = &rtl930x_eth_netdev_ops;
  2073. break;
  2074. case RTL9310_FAMILY_ID:
  2075. priv->cpu_port = RTL931X_CPU_PORT;
  2076. priv->r = &rtl931x_reg;
  2077. dev->netdev_ops = &rtl931x_eth_netdev_ops;
  2078. rtl931x_chip_init(priv);
  2079. break;
  2080. default:
  2081. pr_err("Unknown SoC family\n");
  2082. return -ENODEV;
  2083. }
  2084. priv->rxringlen = rxringlen;
  2085. priv->rxrings = rxrings;
  2086. /* Obtain device IRQ number */
  2087. dev->irq = platform_get_irq(pdev, 0);
  2088. if (dev->irq < 0) {
  2089. dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
  2090. goto err_free;
  2091. }
  2092. err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
  2093. IRQF_SHARED, dev->name, dev);
  2094. if (err) {
  2095. dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
  2096. __func__, err);
  2097. goto err_free;
  2098. }
  2099. rtl8380_init_mac(priv);
  2100. /* Try to get mac address in the following order:
  2101. * 1) from device tree data
  2102. * 2) from internal registers set by bootloader
  2103. */
  2104. of_get_mac_address(pdev->dev.of_node, mac_addr);
  2105. if (is_valid_ether_addr(mac_addr)) {
  2106. rtl838x_set_mac_hw(dev, mac_addr);
  2107. } else {
  2108. mac_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
  2109. mac_addr[1] = sw_r32(priv->r->mac) & 0xff;
  2110. mac_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
  2111. mac_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
  2112. mac_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
  2113. mac_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
  2114. }
  2115. dev_addr_set(dev, mac_addr);
  2116. /* if the address is invalid, use a random value */
  2117. if (!is_valid_ether_addr(dev->dev_addr)) {
  2118. struct sockaddr sa = { AF_UNSPEC };
  2119. netdev_warn(dev, "Invalid MAC address, using random\n");
  2120. eth_hw_addr_random(dev);
  2121. memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
  2122. if (rtl838x_set_mac_address(dev, &sa))
  2123. netdev_warn(dev, "Failed to set MAC address.\n");
  2124. }
  2125. pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
  2126. sw_r32(priv->r->mac + 4));
  2127. strcpy(dev->name, "eth%d");
  2128. priv->pdev = pdev;
  2129. priv->netdev = dev;
  2130. err = rtl838x_mdio_init(priv);
  2131. if (err)
  2132. goto err_free;
  2133. err = register_netdev(dev);
  2134. if (err)
  2135. goto err_free;
  2136. for (int i = 0; i < priv->rxrings; i++) {
  2137. priv->rx_qs[i].id = i;
  2138. priv->rx_qs[i].priv = priv;
  2139. netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
  2140. }
  2141. platform_set_drvdata(pdev, dev);
  2142. phy_mode = PHY_INTERFACE_MODE_NA;
  2143. err = of_get_phy_mode(dn, &phy_mode);
  2144. if (err < 0) {
  2145. dev_err(&pdev->dev, "incorrect phy-mode\n");
  2146. err = -EINVAL;
  2147. goto err_free;
  2148. }
  2149. priv->phylink_config.dev = &dev->dev;
  2150. priv->phylink_config.type = PHYLINK_NETDEV;
  2151. phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
  2152. phy_mode, &rtl838x_phylink_ops);
  2153. if (IS_ERR(phylink)) {
  2154. err = PTR_ERR(phylink);
  2155. goto err_free;
  2156. }
  2157. priv->phylink = phylink;
  2158. return 0;
  2159. err_free:
  2160. pr_err("Error setting up netdev, freeing it again.\n");
  2161. free_netdev(dev);
  2162. return err;
  2163. }
  2164. static int rtl838x_eth_remove(struct platform_device *pdev)
  2165. {
  2166. struct net_device *dev = platform_get_drvdata(pdev);
  2167. struct rtl838x_eth_priv *priv = netdev_priv(dev);
  2168. if (dev) {
  2169. pr_info("Removing platform driver for rtl838x-eth\n");
  2170. rtl838x_mdio_remove(priv);
  2171. rtl838x_hw_stop(priv);
  2172. netif_tx_stop_all_queues(dev);
  2173. for (int i = 0; i < priv->rxrings; i++)
  2174. netif_napi_del(&priv->rx_qs[i].napi);
  2175. unregister_netdev(dev);
  2176. free_netdev(dev);
  2177. }
  2178. return 0;
  2179. }
  2180. static const struct of_device_id rtl838x_eth_of_ids[] = {
  2181. { .compatible = "realtek,rtl838x-eth"},
  2182. { /* sentinel */ }
  2183. };
  2184. MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
  2185. static struct platform_driver rtl838x_eth_driver = {
  2186. .probe = rtl838x_eth_probe,
  2187. .remove = rtl838x_eth_remove,
  2188. .driver = {
  2189. .name = "rtl838x-eth",
  2190. .pm = NULL,
  2191. .of_match_table = rtl838x_eth_of_ids,
  2192. },
  2193. };
  2194. module_platform_driver(rtl838x_eth_driver);
  2195. MODULE_AUTHOR("B. Koblitz");
  2196. MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
  2197. MODULE_LICENSE("GPL");