0007-NET-MIPS-lantiq-adds-xrx200-net.patch 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413
  1. From 0d37b9ab3cff327b7db083785a89f23944c192f4 Mon Sep 17 00:00:00 2001
  2. From: John Crispin <[email protected]>
  3. Date: Mon, 22 Oct 2012 12:22:23 +0200
  4. Subject: [PATCH 07/22] NET: MIPS: lantiq: adds xrx200-net
  5. ---
  6. drivers/net/ethernet/Kconfig | 8 +-
  7. drivers/net/ethernet/Makefile | 1 +
  8. drivers/net/ethernet/lantiq_pce.h | 163 +++++
  9. drivers/net/ethernet/lantiq_xrx200.c | 1203 ++++++++++++++++++++++++++++++++++
  10. 4 files changed, 1374 insertions(+), 1 deletion(-)
  11. create mode 100644 drivers/net/ethernet/lantiq_pce.h
  12. create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
  13. --- a/drivers/net/ethernet/Kconfig
  14. +++ b/drivers/net/ethernet/Kconfig
  15. @@ -83,7 +83,13 @@ config LANTIQ_ETOP
  16. tristate "Lantiq SoC ETOP driver"
  17. depends on SOC_TYPE_XWAY
  18. ---help---
  19. - Support for the MII0 inside the Lantiq SoC
  20. + Support for the MII0 inside the Lantiq ADSL SoC
  21. +
  22. +config LANTIQ_XRX200
  23. + tristate "Lantiq SoC XRX200 driver"
  24. + depends on SOC_TYPE_XWAY
  25. + ---help---
  26. + Support for the MII0 inside the Lantiq VDSL SoC
  27. source "drivers/net/ethernet/marvell/Kconfig"
  28. source "drivers/net/ethernet/mellanox/Kconfig"
  29. --- a/drivers/net/ethernet/Makefile
  30. +++ b/drivers/net/ethernet/Makefile
  31. @@ -36,6 +36,7 @@ obj-$(CONFIG_IP1000) += icplus/
  32. obj-$(CONFIG_JME) += jme.o
  33. obj-$(CONFIG_KORINA) += korina.o
  34. obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
  35. +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
  36. obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
  37. obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
  38. obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
  39. --- /dev/null
  40. +++ b/drivers/net/ethernet/lantiq_pce.h
  41. @@ -0,0 +1,163 @@
  42. +/*
  43. + * This program is free software; you can redistribute it and/or modify it
  44. + * under the terms of the GNU General Public License version 2 as published
  45. + * by the Free Software Foundation.
  46. + *
  47. + * This program is distributed in the hope that it will be useful,
  48. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  49. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  50. + * GNU General Public License for more details.
  51. + *
  52. + * You should have received a copy of the GNU General Public License
  53. + * along with this program; if not, write to the Free Software
  54. + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  55. + *
  56. + * Copyright (C) 2010 Lantiq Deutschland GmbH
  57. + * Copyright (C) 2012 John Crispin <[email protected]>
  58. + *
  59. + * PCE microcode extracted from UGW5.2 switch api
  60. + */
  61. +
  62. +/* Switch API Micro Code V0.3 */
  63. +enum {
  64. + OUT_MAC0 = 0,
  65. + OUT_MAC1,
  66. + OUT_MAC2,
  67. + OUT_MAC3,
  68. + OUT_MAC4,
  69. + OUT_MAC5,
  70. + OUT_ETHTYP,
  71. + OUT_VTAG0,
  72. + OUT_VTAG1,
  73. + OUT_ITAG0,
  74. + OUT_ITAG1, /*10 */
  75. + OUT_ITAG2,
  76. + OUT_ITAG3,
  77. + OUT_IP0,
  78. + OUT_IP1,
  79. + OUT_IP2,
  80. + OUT_IP3,
  81. + OUT_SIP0,
  82. + OUT_SIP1,
  83. + OUT_SIP2,
  84. + OUT_SIP3, /*20*/
  85. + OUT_SIP4,
  86. + OUT_SIP5,
  87. + OUT_SIP6,
  88. + OUT_SIP7,
  89. + OUT_DIP0,
  90. + OUT_DIP1,
  91. + OUT_DIP2,
  92. + OUT_DIP3,
  93. + OUT_DIP4,
  94. + OUT_DIP5, /*30*/
  95. + OUT_DIP6,
  96. + OUT_DIP7,
  97. + OUT_SESID,
  98. + OUT_PROT,
  99. + OUT_APP0,
  100. + OUT_APP1,
  101. + OUT_IGMP0,
  102. + OUT_IGMP1,
  103. + OUT_IPOFF, /*39*/
  104. + OUT_NONE = 63
  105. +};
  106. +
  107. +/* parser's microcode length type */
  108. +#define INSTR 0
  109. +#define IPV6 1
  110. +#define LENACCU 2
  111. +
  112. +/* parser's microcode flag type */
  113. +enum {
  114. + FLAG_ITAG = 0,
  115. + FLAG_VLAN,
  116. + FLAG_SNAP,
  117. + FLAG_PPPOE,
  118. + FLAG_IPV6,
  119. + FLAG_IPV6FL,
  120. + FLAG_IPV4,
  121. + FLAG_IGMP,
  122. + FLAG_TU,
  123. + FLAG_HOP,
  124. + FLAG_NN1, /*10 */
  125. + FLAG_NN2,
  126. + FLAG_END,
  127. + FLAG_NO, /*13*/
  128. +};
  129. +
  130. +/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
  131. +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
  132. + { {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
  133. +struct pce_microcode {
  134. + unsigned short val[4];
  135. +/* unsigned short val_2;
  136. + unsigned short val_1;
  137. + unsigned short val_0;*/
  138. +} pce_microcode[] = {
  139. + /* value mask ns fields L type flags ipv4_len */
  140. + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
  141. + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
  142. + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
  143. + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
  144. + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  145. + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  146. + MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  147. + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  148. + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
  149. + MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  150. + MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
  151. + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
  152. + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
  153. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
  154. + MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
  155. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
  156. + MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
  157. + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
  158. + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
  159. + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
  160. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
  161. + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
  162. + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
  163. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
  164. + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
  165. + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
  166. + MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
  167. + MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
  168. + MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
  169. + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
  170. + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
  171. + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
  172. + MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
  173. + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
  174. + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
  175. + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
  176. + MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT, 1, IPV6, FLAG_NO, 0),
  177. + MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
  178. + MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
  179. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  180. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  181. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  182. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  183. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  184. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  185. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  186. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  187. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  188. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  189. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  190. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  191. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  192. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  193. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  194. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  195. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  196. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  197. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  198. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  199. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  200. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  201. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  202. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  203. + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
  204. +};
  205. --- /dev/null
  206. +++ b/drivers/net/ethernet/lantiq_xrx200.c
  207. @@ -0,0 +1,1203 @@
  208. +/*
  209. + * This program is free software; you can redistribute it and/or modify it
  210. + * under the terms of the GNU General Public License version 2 as published
  211. + * by the Free Software Foundation.
  212. + *
  213. + * This program is distributed in the hope that it will be useful,
  214. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  215. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  216. + * GNU General Public License for more details.
  217. + *
  218. + * You should have received a copy of the GNU General Public License
  219. + * along with this program; if not, write to the Free Software
  220. + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  221. + *
  222. + * Copyright (C) 2010 Lantiq Deutschland
  223. + * Copyright (C) 2012 John Crispin <[email protected]>
  224. + */
  225. +
  226. +#include <linux/etherdevice.h>
  227. +#include <linux/module.h>
  228. +#include <linux/platform_device.h>
  229. +#include <linux/interrupt.h>
  230. +#include <linux/clk.h>
  231. +#include <asm/delay.h>
  232. +
  233. +#include <linux/of_net.h>
  234. +#include <linux/of_mdio.h>
  235. +#include <linux/of_gpio.h>
  236. +
  237. +#include <xway_dma.h>
  238. +#include <lantiq_soc.h>
  239. +
  240. +#include "lantiq_pce.h"
  241. +
  242. +#define SW_POLLING
  243. +#define SW_ROUTING
  244. +#define SW_PORTMAP
  245. +
  246. +#ifdef SW_ROUTING
  247. + #ifdef SW_PORTMAP
  248. +#define XRX200_MAX_DEV 2
  249. + #else
  250. +#define XRX200_MAX_DEV 2
  251. + #endif
  252. +#else
  253. +#define XRX200_MAX_DEV 1
  254. +#endif
  255. +
  256. +#define XRX200_MAX_PORT 7
  257. +#define XRX200_MAX_DMA 8
  258. +
  259. +#define XRX200_HEADROOM 4
  260. +
  261. +#define XRX200_TX_TIMEOUT (10 * HZ)
  262. +
  263. +/* port type */
  264. +#define XRX200_PORT_TYPE_PHY 1
  265. +#define XRX200_PORT_TYPE_MAC 2
  266. +
  267. +/* DMA */
  268. +#define XRX200_DMA_CRC_LEN 0x4
  269. +#define XRX200_DMA_DATA_LEN 0x600
  270. +#define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
  271. +#define XRX200_DMA_RX 0
  272. +#define XRX200_DMA_TX 1
  273. +#define XRX200_DMA_IS_TX(x) (x%2)
  274. +#define XRX200_DMA_IS_RX(x) (!XRX200_DMA_IS_TX(x))
  275. +
  276. +/* fetch / store dma */
  277. +#define FDMA_PCTRL0 0x2A00
  278. +#define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
  279. +#define SDMA_PCTRL0 0x2F00
  280. +#define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
  281. +
  282. +/* buffer management */
  283. +#define BM_PCFG0 0x200
  284. +#define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
  285. +
  286. +/* MDIO */
  287. +#define MDIO_GLOB 0x0000
  288. +#define MDIO_CTRL 0x0020
  289. +#define MDIO_READ 0x0024
  290. +#define MDIO_WRITE 0x0028
  291. +#define MDIO_PHY0 0x0054
  292. +#define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
  293. +#define MDIO_CLK_CFG0 0x002C
  294. +#define MDIO_CLK_CFG1 0x0030
  295. +
  296. +#define MDIO_GLOB_ENABLE 0x8000
  297. +#define MDIO_BUSY BIT(12)
  298. +#define MDIO_RD BIT(11)
  299. +#define MDIO_WR BIT(10)
  300. +#define MDIO_MASK 0x1f
  301. +#define MDIO_ADDRSHIFT 5
  302. +#define MDIO1_25MHZ 9
  303. +
  304. +#define MDIO_PHY_LINK_DOWN 0x4000
  305. +#define MDIO_PHY_LINK_UP 0x2000
  306. +
  307. +#define MDIO_PHY_SPEED_M10 0x0000
  308. +#define MDIO_PHY_SPEED_M100 0x0800
  309. +#define MDIO_PHY_SPEED_G1 0x1000
  310. +
  311. +#define MDIO_PHY_FDUP_EN 0x0600
  312. +#define MDIO_PHY_FDUP_DIS 0x0200
  313. +
  314. +#define MDIO_PHY_LINK_MASK 0x6000
  315. +#define MDIO_PHY_SPEED_MASK 0x1800
  316. +#define MDIO_PHY_FDUP_MASK 0x0600
  317. +#define MDIO_PHY_ADDR_MASK 0x001f
  318. +#define MDIO_UPDATE_MASK MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
  319. + MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK
  320. +
  321. +/* MII */
  322. +#define MII_CFG(p) (p * 8)
  323. +
  324. +#define MII_CFG_EN BIT(14)
  325. +
  326. +#define MII_CFG_MODE_MIIP 0x0
  327. +#define MII_CFG_MODE_MIIM 0x1
  328. +#define MII_CFG_MODE_RMIIP 0x2
  329. +#define MII_CFG_MODE_RMIIM 0x3
  330. +#define MII_CFG_MODE_RGMII 0x4
  331. +#define MII_CFG_MODE_MASK 0xf
  332. +
  333. +#define MII_CFG_RATE_M2P5 0x00
  334. +#define MII_CFG_RATE_M25 0x10
  335. +#define MII_CFG_RATE_M125 0x20
  336. +#define MII_CFG_RATE_M50 0x30
  337. +#define MII_CFG_RATE_AUTO 0x40
  338. +#define MII_CFG_RATE_MASK 0x70
  339. +
  340. +/* cpu port mac */
  341. +#define PMAC_HD_CTL 0x0000
  342. +#define PMAC_RX_IPG 0x0024
  343. +#define PMAC_EWAN 0x002c
  344. +
  345. +#define PMAC_IPG_MASK 0xf
  346. +#define PMAC_HD_CTL_AS 0x0008
  347. +#define PMAC_HD_CTL_AC 0x0004
  348. +#define PMAC_HD_CTL_RXSH 0x0040
  349. +#define PMAC_HD_CTL_AST 0x0080
  350. +#define PMAC_HD_CTL_RST 0x0100
  351. +
  352. +/* PCE */
  353. +#define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
  354. +#define PCE_TBL_MASK 0x1120
  355. +#define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
  356. +#define PCE_TBL_ADDR 0x1138
  357. +#define PCE_TBL_CTRL 0x113c
  358. +#define PCE_PMAP1 0x114c
  359. +#define PCE_PMAP2 0x1150
  360. +#define PCE_PMAP3 0x1154
  361. +#define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
  362. +#define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
  363. +
  364. +#define PCE_TBL_BUSY BIT(15)
  365. +#define PCE_TBL_CFG_ADDR_MASK 0x1f
  366. +#define PCE_TBL_CFG_ADWR 0x20
  367. +#define PCE_TBL_CFG_ADWR_MASK 0x60
  368. +#define PCE_INGRESS BIT(11)
  369. +
  370. +/* MAC */
  371. +#define MAC_FLEN_REG (0x2314)
  372. +#define MAC_CTRL_REG(p, x) (0x240c + (((p * 0xc) + x) * 4))
  373. +
  374. +/* buffer management */
  375. +#define BM_PCFG(p) (0x200 + (p * 8))
  376. +
  377. +/* special tag in TX path header */
  378. +#define SPID_SHIFT 24
  379. +#define DPID_SHIFT 16
  380. +#define DPID_ENABLE 1
  381. +#define SPID_CPU_PORT 2
  382. +#define PORT_MAP_SEL BIT(15)
  383. +#define PORT_MAP_EN BIT(14)
  384. +#define PORT_MAP_SHIFT 1
  385. +#define PORT_MAP_MASK 0x3f
  386. +
  387. +#define SPPID_MASK 0x7
  388. +#define SPPID_SHIFT 4
  389. +
  390. +/* MII regs not yet in linux */
  391. +#define MDIO_DEVAD_NONE (-1)
  392. +#define ADVERTIZE_MPD (1 << 10)
  393. +
  394. +struct xrx200_port {
  395. + u8 num;
  396. + u8 phy_addr;
  397. + u16 flags;
  398. + phy_interface_t phy_if;
  399. +
  400. + int link;
  401. + int gpio;
  402. + enum of_gpio_flags gpio_flags;
  403. +
  404. + struct phy_device *phydev;
  405. + struct device_node *phy_node;
  406. +};
  407. +
  408. +struct xrx200_chan {
  409. + int idx;
  410. + int refcount;
  411. + int tx_free;
  412. +
  413. + struct net_device dummy_dev;
  414. + struct net_device *devs[XRX200_MAX_DEV];
  415. +
  416. + struct tasklet_struct tasklet;
  417. + struct napi_struct napi;
  418. + struct ltq_dma_channel dma;
  419. + struct sk_buff *skb[LTQ_DESC_NUM];
  420. +};
  421. +
  422. +struct xrx200_hw {
  423. + struct clk *clk;
  424. + struct mii_bus *mii_bus;
  425. +
  426. + struct xrx200_chan chan[XRX200_MAX_DMA];
  427. +
  428. + struct net_device *devs[XRX200_MAX_DEV];
  429. + int num_devs;
  430. +
  431. + int port_map[XRX200_MAX_PORT];
  432. + unsigned short wan_map;
  433. +
  434. + spinlock_t lock;
  435. +};
  436. +
  437. +struct xrx200_priv {
  438. + struct net_device_stats stats;
  439. + int id;
  440. +
  441. + struct xrx200_port port[XRX200_MAX_PORT];
  442. + int num_port;
  443. + int wan;
  444. + unsigned short port_map;
  445. + const void *mac;
  446. +
  447. + struct xrx200_hw *hw;
  448. +};
  449. +
  450. +static __iomem void *xrx200_switch_membase;
  451. +static __iomem void *xrx200_mii_membase;
  452. +static __iomem void *xrx200_mdio_membase;
  453. +static __iomem void *xrx200_pmac_membase;
  454. +
  455. +#define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
  456. +#define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
  457. +#define ltq_switch_w32_mask(x, y, z) \
  458. + ltq_w32_mask(x, y, xrx200_switch_membase + (z))
  459. +
  460. +#define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
  461. +#define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
  462. +#define ltq_mdio_w32_mask(x, y, z) \
  463. + ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
  464. +
  465. +#define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
  466. +#define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
  467. +#define ltq_mii_w32_mask(x, y, z) \
  468. + ltq_w32_mask(x, y, xrx200_mii_membase + (z))
  469. +
  470. +#define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
  471. +#define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
  472. +#define ltq_pmac_w32_mask(x, y, z) \
  473. + ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
  474. +
  475. +static int xrx200_open(struct net_device *dev)
  476. +{
  477. + struct xrx200_priv *priv = netdev_priv(dev);
  478. + unsigned long flags;
  479. + int i;
  480. +
  481. + for (i = 0; i < XRX200_MAX_DMA; i++) {
  482. + if (!priv->hw->chan[i].dma.irq)
  483. + continue;
  484. + spin_lock_irqsave(&priv->hw->lock, flags);
  485. + if (!priv->hw->chan[i].refcount) {
  486. + if (XRX200_DMA_IS_RX(i))
  487. + napi_enable(&priv->hw->chan[i].napi);
  488. + ltq_dma_open(&priv->hw->chan[i].dma);
  489. + }
  490. + priv->hw->chan[i].refcount++;
  491. + spin_unlock_irqrestore(&priv->hw->lock, flags);
  492. + }
  493. + for (i = 0; i < priv->num_port; i++)
  494. + if (priv->port[i].phydev)
  495. + phy_start(priv->port[i].phydev);
  496. + netif_start_queue(dev);
  497. +
  498. + return 0;
  499. +}
  500. +
  501. +static int xrx200_close(struct net_device *dev)
  502. +{
  503. + struct xrx200_priv *priv = netdev_priv(dev);
  504. + unsigned long flags;
  505. + int i;
  506. +
  507. + netif_stop_queue(dev);
  508. +
  509. + for (i = 0; i < priv->num_port; i++)
  510. + if (priv->port[i].phydev)
  511. + phy_stop(priv->port[i].phydev);
  512. +
  513. + for (i = 0; i < XRX200_MAX_DMA; i++) {
  514. + if (!priv->hw->chan[i].dma.irq)
  515. + continue;
  516. + spin_lock_irqsave(&priv->hw->lock, flags);
  517. + priv->hw->chan[i].refcount--;
  518. + if (!priv->hw->chan[i].refcount) {
  519. + if (XRX200_DMA_IS_RX(i))
  520. + napi_disable(&priv->hw->chan[i].napi);
  521. + ltq_dma_close(&priv->hw->chan[XRX200_DMA_RX].dma);
  522. + }
  523. + spin_unlock_irqrestore(&priv->hw->lock, flags);
  524. + }
  525. +
  526. + return 0;
  527. +}
  528. +
  529. +static int xrx200_alloc_skb(struct xrx200_chan *ch)
  530. +{
  531. +#define DMA_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  532. + ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
  533. + if (!ch->skb[ch->dma.desc])
  534. + return -ENOMEM;
  535. +
  536. + skb_reserve(ch->skb[ch->dma.desc], NET_SKB_PAD);
  537. + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
  538. + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
  539. + DMA_FROM_DEVICE);
  540. + ch->dma.desc_base[ch->dma.desc].addr =
  541. + CPHYSADDR(ch->skb[ch->dma.desc]->data);
  542. + ch->dma.desc_base[ch->dma.desc].ctl =
  543. + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
  544. + XRX200_DMA_DATA_LEN;
  545. + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
  546. +
  547. + return 0;
  548. +}
  549. +
  550. +static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
  551. +{
  552. + struct net_device *dev = ch->devs[id];
  553. + struct xrx200_priv *priv = netdev_priv(dev);
  554. + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  555. + struct sk_buff *skb = ch->skb[ch->dma.desc];
  556. + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - XRX200_DMA_CRC_LEN;
  557. + unsigned long flags;
  558. +
  559. + spin_lock_irqsave(&priv->hw->lock, flags);
  560. + if (xrx200_alloc_skb(ch)) {
  561. + netdev_err(dev,
  562. + "failed to allocate new rx buffer, stopping DMA\n");
  563. + ltq_dma_close(&ch->dma);
  564. + }
  565. +
  566. + ch->dma.desc++;
  567. + ch->dma.desc %= LTQ_DESC_NUM;
  568. + spin_unlock_irqrestore(&priv->hw->lock, flags);
  569. +
  570. + skb_put(skb, len);
  571. +#ifdef SW_ROUTING
  572. + skb_pull(skb, 8);
  573. +#endif
  574. + skb->dev = dev;
  575. + skb->protocol = eth_type_trans(skb, dev);
  576. + netif_receive_skb(skb);
  577. + priv->stats.rx_packets++;
  578. + priv->stats.rx_bytes+=len;
  579. +}
  580. +
  581. +static int xrx200_poll_rx(struct napi_struct *napi, int budget)
  582. +{
  583. + struct xrx200_chan *ch = container_of(napi,
  584. + struct xrx200_chan, napi);
  585. + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
  586. + int rx = 0;
  587. + int complete = 0;
  588. + unsigned long flags;
  589. +
  590. + while ((rx < budget) && !complete) {
  591. + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  592. + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  593. +#ifdef SW_ROUTING
  594. + struct sk_buff *skb = ch->skb[ch->dma.desc];
  595. + u32 *special_tag = (u32*)skb->data;
  596. + int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
  597. + xrx200_hw_receive(ch, priv->hw->port_map[port]);
  598. +#else
  599. + xrx200_hw_receive(ch, 0);
  600. +#endif
  601. + rx++;
  602. + } else {
  603. + complete = 1;
  604. + }
  605. + }
  606. + if (complete || !rx) {
  607. + napi_complete(&ch->napi);
  608. + spin_lock_irqsave(&priv->hw->lock, flags);
  609. + ltq_dma_ack_irq(&ch->dma);
  610. + spin_unlock_irqrestore(&priv->hw->lock, flags);
  611. + }
  612. + return rx;
  613. +}
  614. +
  615. +static void xrx200_tx_housekeeping(unsigned long ptr)
  616. +{
  617. + struct xrx200_hw *hw = (struct xrx200_hw *) ptr;
  618. + struct xrx200_chan *ch = &hw->chan[XRX200_DMA_TX];
  619. + unsigned long flags;
  620. + int i;
  621. +
  622. + spin_lock_irqsave(&hw->lock, flags);
  623. + while ((ch->dma.desc_base[ch->tx_free].ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  624. + dev_kfree_skb_any(ch->skb[ch->tx_free]);
  625. + ch->skb[ch->tx_free] = NULL;
  626. + memset(&ch->dma.desc_base[ch->tx_free], 0,
  627. + sizeof(struct ltq_dma_desc));
  628. + ch->tx_free++;
  629. + ch->tx_free %= LTQ_DESC_NUM;
  630. + }
  631. + spin_unlock_irqrestore(&hw->lock, flags);
  632. +
  633. + for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++) {
  634. + struct netdev_queue *txq =
  635. + netdev_get_tx_queue(ch->devs[i], 0);
  636. + if (netif_tx_queue_stopped(txq))
  637. + netif_tx_start_queue(txq);
  638. + }
  639. +
  640. + spin_lock_irqsave(&hw->lock, flags);
  641. + ltq_dma_ack_irq(&ch->dma);
  642. + spin_unlock_irqrestore(&hw->lock, flags);
  643. +}
  644. +
  645. +static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
  646. +{
  647. + struct xrx200_priv *priv = netdev_priv(dev);
  648. +
  649. + return &priv->stats;
  650. +}
  651. +
  652. +static void xrx200_tx_timeout(struct net_device *dev)
  653. +{
  654. + struct xrx200_priv *priv = netdev_priv(dev);
  655. +
  656. + printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
  657. +
  658. + priv->stats.tx_errors++;
  659. + netif_wake_queue(dev);
  660. +}
  661. +
  662. +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
  663. +{
  664. + int queue = skb_get_queue_mapping(skb);
  665. + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
  666. + struct xrx200_priv *priv = netdev_priv(dev);
  667. + struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
  668. + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  669. + unsigned long flags;
  670. + u32 byte_offset;
  671. + int len;
  672. +#ifdef SW_ROUTING
  673. + #ifdef SW_PORTMAP
  674. + u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | PORT_MAP_SEL | PORT_MAP_EN | DPID_ENABLE;
  675. + #else
  676. + u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
  677. + #endif
  678. +#endif
  679. +
  680. + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
  681. +
  682. + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
  683. + netdev_err(dev, "tx ring full\n");
  684. + netif_tx_stop_queue(txq);
  685. + return NETDEV_TX_BUSY;
  686. + }
  687. +#ifdef SW_ROUTING
  688. + #ifdef SW_PORTMAP
  689. + special_tag |= priv->port_map << PORT_MAP_SHIFT;
  690. + #else
  691. + if(priv->id)
  692. + special_tag |= (1 << DPID_SHIFT);
  693. + #endif
  694. + if(skb_headroom(skb) < 4) {
  695. + struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
  696. + dev_kfree_skb_any(skb);
  697. + skb = tmp;
  698. + }
  699. + skb_push(skb, 4);
  700. + memcpy(skb->data, &special_tag, sizeof(u32));
  701. + len += 4;
  702. +#endif
  703. +
  704. + /* dma needs to start on a 16 byte aligned address */
  705. + byte_offset = CPHYSADDR(skb->data) % 16;
  706. + ch->skb[ch->dma.desc] = skb;
  707. +
  708. + dev->trans_start = jiffies;
  709. +
  710. + spin_lock_irqsave(&priv->hw->lock, flags);
  711. + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
  712. + DMA_TO_DEVICE)) - byte_offset;
  713. + wmb();
  714. + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
  715. + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
  716. + ch->dma.desc++;
  717. + ch->dma.desc %= LTQ_DESC_NUM;
  718. + spin_unlock_irqrestore(&priv->hw->lock, flags);
  719. +
  720. + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
  721. + netif_tx_stop_queue(txq);
  722. +
  723. + priv->stats.tx_packets++;
  724. + priv->stats.tx_bytes+=len;
  725. +
  726. + return NETDEV_TX_OK;
  727. +}
  728. +
  729. +static irqreturn_t xrx200_dma_irq(int irq, void *priv)
  730. +{
  731. + struct xrx200_hw *hw = priv;
  732. + int ch = irq - XRX200_DMA_IRQ;
  733. +
  734. + if (ch % 2)
  735. + tasklet_schedule(&hw->chan[ch].tasklet);
  736. + else
  737. + napi_schedule(&hw->chan[ch].napi);
  738. +
  739. + return IRQ_HANDLED;
  740. +}
  741. +
  742. +static int xrx200_dma_init(struct xrx200_hw *hw)
  743. +{
  744. + int i, err = 0;
  745. +
  746. + ltq_dma_init_port(DMA_PORT_ETOP);
  747. +
  748. + for (i = 0; i < 8 && !err; i++) {
  749. + int irq = XRX200_DMA_IRQ + i;
  750. + struct xrx200_chan *ch = &hw->chan[i];
  751. +
  752. + ch->idx = ch->dma.nr = i;
  753. +
  754. + if (i == XRX200_DMA_TX) {
  755. + ltq_dma_alloc_tx(&ch->dma);
  756. + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
  757. + } else if (i == XRX200_DMA_RX) {
  758. + ltq_dma_alloc_rx(&ch->dma);
  759. + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
  760. + ch->dma.desc++)
  761. + if (xrx200_alloc_skb(ch))
  762. + err = -ENOMEM;
  763. + ch->dma.desc = 0;
  764. + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
  765. + } else
  766. + continue;
  767. +
  768. + if (!err)
  769. + ch->dma.irq = irq;
  770. + }
  771. +
  772. + return err;
  773. +}
  774. +
  775. +#ifdef SW_POLLING
  776. +static void xrx200_gmac_update(struct xrx200_port *port)
  777. +{
  778. + u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
  779. + u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
  780. + u16 miirate = 0;
  781. +
  782. + switch (port->phydev->speed) {
  783. + case SPEED_1000:
  784. + phyaddr |= MDIO_PHY_SPEED_G1;
  785. + miirate = MII_CFG_RATE_M125;
  786. + break;
  787. +
  788. + case SPEED_100:
  789. + phyaddr |= MDIO_PHY_SPEED_M100;
  790. + switch (miimode) {
  791. + case MII_CFG_MODE_RMIIM:
  792. + case MII_CFG_MODE_RMIIP:
  793. + miirate = MII_CFG_RATE_M50;
  794. + break;
  795. + default:
  796. + miirate = MII_CFG_RATE_M25;
  797. + break;
  798. + }
  799. + break;
  800. +
  801. + default:
  802. + phyaddr |= MDIO_PHY_SPEED_M10;
  803. + miirate = MII_CFG_RATE_M2P5;
  804. + break;
  805. + }
  806. +
  807. + if (port->phydev->link)
  808. + phyaddr |= MDIO_PHY_LINK_UP;
  809. + else
  810. + phyaddr |= MDIO_PHY_LINK_DOWN;
  811. +
  812. + if (port->phydev->duplex == DUPLEX_FULL)
  813. + phyaddr |= MDIO_PHY_FDUP_EN;
  814. + else
  815. + phyaddr |= MDIO_PHY_FDUP_DIS;
  816. +
  817. + ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
  818. + ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
  819. + udelay(1);
  820. +}
  821. +#else
  822. +static void xrx200_gmac_update(struct xrx200_port *port)
  823. +{
  824. +
  825. +}
  826. +#endif
  827. +
  828. +static void xrx200_mdio_link(struct net_device *dev)
  829. +{
  830. + struct xrx200_priv *priv = netdev_priv(dev);
  831. + int i;
  832. +
  833. + for (i = 0; i < priv->num_port; i++) {
  834. + if (!priv->port[i].phydev)
  835. + continue;
  836. +
  837. + if (priv->port[i].link != priv->port[i].phydev->link) {
  838. + xrx200_gmac_update(&priv->port[i]);
  839. + priv->port[i].link = priv->port[i].phydev->link;
  840. + netdev_info(dev, "port %d %s link\n",
  841. + priv->port[i].num,
  842. + (priv->port[i].link)?("got"):("lost"));
  843. + }
  844. + }
  845. +}
  846. +
  847. +static inline int xrx200_mdio_poll(struct mii_bus *bus)
  848. +{
  849. + unsigned cnt = 10000;
  850. +
  851. + while (likely(cnt--)) {
  852. + unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
  853. + if ((ctrl & MDIO_BUSY) == 0)
  854. + return 0;
  855. + }
  856. +
  857. + return 1;
  858. +}
  859. +
  860. +static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
  861. +{
  862. + if (xrx200_mdio_poll(bus))
  863. + return 1;
  864. +
  865. + ltq_mdio_w32(val, MDIO_WRITE);
  866. + ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
  867. + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
  868. + (reg & MDIO_MASK),
  869. + MDIO_CTRL);
  870. +
  871. + return 0;
  872. +}
  873. +
  874. +static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
  875. +{
  876. + if (xrx200_mdio_poll(bus))
  877. + return -1;
  878. +
  879. + ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
  880. + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
  881. + (reg & MDIO_MASK),
  882. + MDIO_CTRL);
  883. +
  884. + if (xrx200_mdio_poll(bus))
  885. + return -1;
  886. +
  887. + return ltq_mdio_r32(MDIO_READ);
  888. +}
  889. +
  890. +static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
  891. +{
  892. + struct xrx200_priv *priv = netdev_priv(dev);
  893. + struct phy_device *phydev = NULL;
  894. + unsigned val;
  895. +
  896. + phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
  897. +
  898. + if (!phydev) {
  899. + netdev_err(dev, "no PHY found\n");
  900. + return -ENODEV;
  901. + }
  902. +
  903. + phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
  904. + 0, port->phy_if);
  905. +
  906. + if (IS_ERR(phydev)) {
  907. + netdev_err(dev, "Could not attach to PHY\n");
  908. + return PTR_ERR(phydev);
  909. + }
  910. +
  911. + phydev->supported &= (SUPPORTED_10baseT_Half
  912. + | SUPPORTED_10baseT_Full
  913. + | SUPPORTED_100baseT_Half
  914. + | SUPPORTED_100baseT_Full
  915. + | SUPPORTED_1000baseT_Half
  916. + | SUPPORTED_1000baseT_Full
  917. + | SUPPORTED_Autoneg
  918. + | SUPPORTED_MII
  919. + | SUPPORTED_TP);
  920. + phydev->advertising = phydev->supported;
  921. + port->phydev = phydev;
  922. +
  923. + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
  924. + dev->name, phydev->drv->name,
  925. + dev_name(&phydev->dev), phydev->irq);
  926. +
  927. +#ifdef SW_POLLING
  928. + phy_read_status(phydev);
  929. +
  930. + val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
  931. + val |= ADVERTIZE_MPD;
  932. + xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
  933. + xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
  934. +
  935. + phy_start_aneg(phydev);
  936. +#endif
  937. + return 0;
  938. +}
  939. +
  940. +static void xrx200_port_config(struct xrx200_priv *priv,
  941. + const struct xrx200_port *port)
  942. +{
  943. + u16 miimode = 0;
  944. +
  945. + switch (port->num) {
  946. + case 0: /* xMII0 */
  947. + case 1: /* xMII1 */
  948. + switch (port->phy_if) {
  949. + case PHY_INTERFACE_MODE_MII:
  950. + if (port->flags & XRX200_PORT_TYPE_PHY)
  951. + /* MII MAC mode, connected to external PHY */
  952. + miimode = MII_CFG_MODE_MIIM;
  953. + else
  954. + /* MII PHY mode, connected to external MAC */
  955. + miimode = MII_CFG_MODE_MIIP;
  956. + break;
  957. + case PHY_INTERFACE_MODE_RMII:
  958. + if (port->flags & XRX200_PORT_TYPE_PHY)
  959. + /* RMII MAC mode, connected to external PHY */
  960. + miimode = MII_CFG_MODE_RMIIM;
  961. + else
  962. + /* RMII PHY mode, connected to external MAC */
  963. + miimode = MII_CFG_MODE_RMIIP;
  964. + break;
  965. + case PHY_INTERFACE_MODE_RGMII:
  966. + /* RGMII MAC mode, connected to external PHY */
  967. + miimode = MII_CFG_MODE_RGMII;
  968. + break;
  969. + default:
  970. + break;
  971. + }
  972. + break;
  973. + case 2: /* internal GPHY0 */
  974. + case 3: /* internal GPHY0 */
  975. + case 4: /* internal GPHY1 */
  976. + switch (port->phy_if) {
  977. + case PHY_INTERFACE_MODE_MII:
  978. + case PHY_INTERFACE_MODE_GMII:
  979. + /* MII MAC mode, connected to internal GPHY */
  980. + miimode = MII_CFG_MODE_MIIM;
  981. + break;
  982. + default:
  983. + break;
  984. + }
  985. + break;
  986. + case 5: /* internal GPHY1 or xMII2 */
  987. + switch (port->phy_if) {
  988. + case PHY_INTERFACE_MODE_MII:
  989. + /* MII MAC mode, connected to internal GPHY */
  990. + miimode = MII_CFG_MODE_MIIM;
  991. + break;
  992. + case PHY_INTERFACE_MODE_RGMII:
  993. + /* RGMII MAC mode, connected to external PHY */
  994. + miimode = MII_CFG_MODE_RGMII;
  995. + break;
  996. + default:
  997. + break;
  998. + }
  999. + break;
  1000. + default:
  1001. + break;
  1002. + }
  1003. +
  1004. + ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
  1005. + MII_CFG(port->num));
  1006. +}
  1007. +
  1008. +static int xrx200_init(struct net_device *dev)
  1009. +{
  1010. + struct xrx200_priv *priv = netdev_priv(dev);
  1011. + struct sockaddr mac;
  1012. + int err, i;
  1013. +
  1014. +#ifndef SW_POLLING
  1015. + unsigned int reg = 0;
  1016. +
  1017. + /* enable auto polling */
  1018. + for (i = 0; i < priv->num_port; i++)
  1019. + reg |= BIT(priv->port[i].num);
  1020. + ltq_mdio_w32(reg, MDIO_CLK_CFG0);
  1021. + ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
  1022. +#endif
  1023. +
  1024. + /* setup each port */
  1025. + for (i = 0; i < priv->num_port; i++)
  1026. + xrx200_port_config(priv, &priv->port[i]);
  1027. +
  1028. + memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
  1029. + if (!is_valid_ether_addr(mac.sa_data)) {
  1030. + pr_warn("net-xrx200: invalid MAC, using random\n");
  1031. + eth_random_addr(mac.sa_data);
  1032. + dev->addr_assign_type |= NET_ADDR_RANDOM;
  1033. + }
  1034. +
  1035. + err = eth_mac_addr(dev, &mac);
  1036. + if (err)
  1037. + goto err_netdev;
  1038. +
  1039. + for (i = 0; i < priv->num_port; i++)
  1040. + if (xrx200_mdio_probe(dev, &priv->port[i]))
  1041. + pr_warn("xrx200-mdio: probing phy of port %d failed\n",
  1042. + priv->port[i].num);
  1043. +
  1044. + return 0;
  1045. +
  1046. +err_netdev:
  1047. + unregister_netdev(dev);
  1048. + free_netdev(dev);
  1049. + return err;
  1050. +}
  1051. +
  1052. +static void xrx200_pci_microcode(void)
  1053. +{
  1054. + int i;
  1055. +
  1056. + ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
  1057. + PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
  1058. + ltq_switch_w32(0, PCE_TBL_MASK);
  1059. +
  1060. + for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
  1061. + ltq_switch_w32(i, PCE_TBL_ADDR);
  1062. + ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
  1063. + ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
  1064. + ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
  1065. + ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
  1066. +
  1067. + // start the table access:
  1068. + ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
  1069. + while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
  1070. + }
  1071. +
  1072. + /* tell the switch that the microcode is loaded */
  1073. + ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
  1074. +}
  1075. +
  1076. +static void xrx200_hw_init(struct xrx200_hw *hw)
  1077. +{
  1078. + int i;
  1079. +
  1080. + /* enable clock gate */
  1081. + clk_enable(hw->clk);
  1082. +
  1083. + ltq_switch_w32(1, 0);
  1084. + mdelay(100);
  1085. + ltq_switch_w32(0, 0);
  1086. + /*
  1087. + * TODO: we should really disable all phys/miis here and explicitly
  1088. + * enable them in the device specific init function
  1089. + */
  1090. +
  1091. + /* disable port fetch/store dma */
  1092. + for (i = 0; i < 7; i++ ) {
  1093. + ltq_switch_w32(0, FDMA_PCTRLx(i));
  1094. + ltq_switch_w32(0, SDMA_PCTRLx(i));
  1095. + }
  1096. +
  1097. + /* enable Switch */
  1098. + ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
  1099. +
  1100. + /* load the pce microcode */
  1101. + xrx200_pci_microcode();
  1102. +
  1103. + /* Default unknown Broadcast/Multicast/Unicast port maps */
  1104. + ltq_switch_w32(0x7f, PCE_PMAP1);
  1105. + ltq_switch_w32(0x7f, PCE_PMAP2);
  1106. + ltq_switch_w32(0x7f, PCE_PMAP3);
  1107. +
  1108. + /* RMON Counter Enable for all physical ports */
  1109. + for (i = 0; i < 7; i++)
  1110. + ltq_switch_w32(0x1, BM_PCFG(i));
  1111. +
  1112. + /* disable auto polling */
  1113. + ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
  1114. +
  1115. + /* enable port statistic counters */
  1116. + for (i = 0; i < 7; i++)
  1117. + ltq_switch_w32(0x1, BM_PCFGx(i));
  1118. +
  1119. + /* set IPG to 12 */
  1120. + ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
  1121. +
  1122. +#ifdef SW_ROUTING
  1123. + /* enable status header, enable CRC */
  1124. + ltq_pmac_w32_mask(0,
  1125. + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC,
  1126. + PMAC_HD_CTL);
  1127. +#else
  1128. + /* disable status header, enable CRC */
  1129. + ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
  1130. + PMAC_HD_CTL_AC,
  1131. + PMAC_HD_CTL);
  1132. +#endif
  1133. +
  1134. + /* enable port fetch/store dma */
  1135. + for (i = 0; i < 7; i++ ) {
  1136. + ltq_switch_w32_mask(0, 0x01, FDMA_PCTRLx(i));
  1137. + ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
  1138. + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
  1139. + }
  1140. +
  1141. + /* enable special tag insertion on cpu port */
  1142. + ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
  1143. + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
  1144. + ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
  1145. + ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
  1146. +}
  1147. +
  1148. +static void xrx200_hw_cleanup(struct xrx200_hw *hw)
  1149. +{
  1150. + int i;
  1151. +
  1152. + /* disable the switch */
  1153. + ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
  1154. +
  1155. + /* free the channels and IRQs */
  1156. + for (i = 0; i < 2; i++) {
  1157. + ltq_dma_free(&hw->chan[i].dma);
  1158. + if (hw->chan[i].dma.irq)
  1159. + free_irq(hw->chan[i].dma.irq, hw);
  1160. + }
  1161. +
  1162. + /* free the allocated RX ring */
  1163. + for (i = 0; i < LTQ_DESC_NUM; i++)
  1164. + dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
  1165. +
  1166. + /* clear the mdio bus */
  1167. + mdiobus_unregister(hw->mii_bus);
  1168. + mdiobus_free(hw->mii_bus);
  1169. +
  1170. + /* release the clock */
  1171. + clk_disable(hw->clk);
  1172. + clk_put(hw->clk);
  1173. +}
  1174. +
  1175. +static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
  1176. +{
  1177. + hw->mii_bus = mdiobus_alloc();
  1178. + if (!hw->mii_bus)
  1179. + return -ENOMEM;
  1180. +
  1181. + hw->mii_bus->read = xrx200_mdio_rd;
  1182. + hw->mii_bus->write = xrx200_mdio_wr;
  1183. + hw->mii_bus->name = "lantiq,xrx200-mdio";
  1184. + snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
  1185. +
  1186. + if (of_mdiobus_register(hw->mii_bus, np)) {
  1187. + mdiobus_free(hw->mii_bus);
  1188. + return -ENXIO;
  1189. + }
  1190. +
  1191. + return 0;
  1192. +}
  1193. +
  1194. +static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
  1195. +{
  1196. + const __be32 *addr, *id = of_get_property(port, "reg", NULL);
  1197. + struct xrx200_port *p = &priv->port[priv->num_port];
  1198. +
  1199. + if (!id)
  1200. + return;
  1201. +
  1202. + memset(p, 0, sizeof(struct xrx200_port));
  1203. + p->phy_node = of_parse_phandle(port, "phy-handle", 0);
  1204. + addr = of_get_property(p->phy_node, "reg", NULL);
  1205. + if (!addr)
  1206. + return;
  1207. +
  1208. + p->num = *id;
  1209. + p->phy_addr = *addr;
  1210. + p->phy_if = of_get_phy_mode(port);
  1211. + if (p->phy_addr > 0x10)
  1212. + p->flags = XRX200_PORT_TYPE_MAC;
  1213. + else
  1214. + p->flags = XRX200_PORT_TYPE_PHY;
  1215. + priv->num_port++;
  1216. +
  1217. + p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
  1218. + if (gpio_is_valid(p->gpio))
  1219. + if (!gpio_request(p->gpio, "phy-reset")) {
  1220. + gpio_direction_output(p->gpio,
  1221. + (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
  1222. + udelay(100);
  1223. + gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
  1224. + }
  1225. + /* is this port a wan port ? */
  1226. + if (priv->wan)
  1227. + priv->hw->wan_map |= BIT(p->num);
  1228. +
  1229. + priv->port_map |= BIT(p->num);
  1230. +
  1231. + /* store the port id in the hw struct so we can map ports -> devices */
  1232. + priv->hw->port_map[p->num] = priv->hw->num_devs;
  1233. +}
  1234. +
  1235. +static const struct net_device_ops xrx200_netdev_ops = {
  1236. + .ndo_init = xrx200_init,
  1237. + .ndo_open = xrx200_open,
  1238. + .ndo_stop = xrx200_close,
  1239. + .ndo_start_xmit = xrx200_start_xmit,
  1240. + .ndo_set_mac_address = eth_mac_addr,
  1241. + .ndo_validate_addr = eth_validate_addr,
  1242. + .ndo_change_mtu = eth_change_mtu,
  1243. + .ndo_get_stats = xrx200_get_stats,
  1244. + .ndo_tx_timeout = xrx200_tx_timeout,
  1245. +};
  1246. +
  1247. +static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
  1248. +{
  1249. + struct xrx200_priv *priv;
  1250. + struct device_node *port;
  1251. + const __be32 *wan;
  1252. +
  1253. + /* alloc the network device */
  1254. + hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
  1255. + if (!hw->devs[hw->num_devs])
  1256. + return;
  1257. +
  1258. + /* setup the network device */
  1259. + strcpy(hw->devs[hw->num_devs]->name, "eth%d");
  1260. + hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
  1261. + hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
  1262. + hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
  1263. +
  1264. + /* setup our private data */
  1265. + priv = netdev_priv(hw->devs[hw->num_devs]);
  1266. + priv->hw = hw;
  1267. + priv->mac = of_get_mac_address(iface);
  1268. + priv->id = hw->num_devs;
  1269. +
  1270. + /* is this the wan interface ? */
  1271. + wan = of_get_property(iface, "lantiq,wan", NULL);
  1272. + if (wan && (*wan == 1))
  1273. + priv->wan = 1;
  1274. +
  1275. + /* load the ports that are part of the interface */
  1276. + for_each_child_of_node(iface, port)
  1277. + if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
  1278. + xrx200_of_port(priv, port);
  1279. +
  1280. + /* register the actual device */
  1281. + if (!register_netdev(hw->devs[hw->num_devs]))
  1282. + hw->num_devs++;
  1283. +}
  1284. +
  1285. +static struct xrx200_hw xrx200_hw;
  1286. +
  1287. +static int xrx200_probe(struct platform_device *pdev)
  1288. +{
  1289. + struct resource *res[4];
  1290. + struct device_node *mdio_np, *iface_np;
  1291. + int i;
  1292. +
  1293. + /* load the memory ranges */
  1294. + for (i = 0; i < 4; i++) {
  1295. + res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
  1296. + if (!res[i]) {
  1297. + dev_err(&pdev->dev, "failed to get resources\n");
  1298. + return -ENOENT;
  1299. + }
  1300. + }
  1301. + xrx200_switch_membase = devm_request_and_ioremap(&pdev->dev, res[0]);
  1302. + xrx200_mdio_membase = devm_request_and_ioremap(&pdev->dev, res[1]);
  1303. + xrx200_mii_membase = devm_request_and_ioremap(&pdev->dev, res[2]);
  1304. + xrx200_pmac_membase = devm_request_and_ioremap(&pdev->dev, res[3]);
  1305. + if (!xrx200_switch_membase || !xrx200_mdio_membase ||
  1306. + !xrx200_mii_membase || !xrx200_pmac_membase) {
  1307. + dev_err(&pdev->dev, "failed to request and remap io ranges \n");
  1308. + return -ENOMEM;
  1309. + }
  1310. +
  1311. + /* get the clock */
  1312. + xrx200_hw.clk = clk_get(&pdev->dev, NULL);
  1313. + if (IS_ERR(xrx200_hw.clk)) {
  1314. + dev_err(&pdev->dev, "failed to get clock\n");
  1315. + return PTR_ERR(xrx200_hw.clk);
  1316. + }
  1317. +
  1318. + /* bring up the dma engine and IP core */
  1319. + spin_lock_init(&xrx200_hw.lock);
  1320. + xrx200_dma_init(&xrx200_hw);
  1321. + xrx200_hw_init(&xrx200_hw);
  1322. + tasklet_init(&xrx200_hw.chan[XRX200_DMA_TX].tasklet, xrx200_tx_housekeeping, (u32) &xrx200_hw);
  1323. +
  1324. + /* bring up the mdio bus */
  1325. + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
  1326. + "lantiq,xrx200-mdio");
  1327. + if (mdio_np)
  1328. + if (xrx200_of_mdio(&xrx200_hw, mdio_np))
  1329. + dev_err(&pdev->dev, "mdio probe failed\n");
  1330. +
  1331. + /* load the interfaces */
  1332. + for_each_child_of_node(pdev->dev.of_node, iface_np)
  1333. + if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
  1334. + if (xrx200_hw.num_devs < XRX200_MAX_DEV)
  1335. + xrx200_of_iface(&xrx200_hw, iface_np);
  1336. + else
  1337. + dev_err(&pdev->dev,
  1338. + "only %d interfaces allowed\n",
  1339. + XRX200_MAX_DEV);
  1340. + }
  1341. +
  1342. + if (!xrx200_hw.num_devs) {
  1343. + xrx200_hw_cleanup(&xrx200_hw);
  1344. + dev_err(&pdev->dev, "failed to load interfaces\n");
  1345. + return -ENOENT;
  1346. + }
  1347. +
  1348. + /* set wan port mask */
  1349. + ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
  1350. +
  1351. + for (i = 0; i < xrx200_hw.num_devs; i++) {
  1352. + xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
  1353. + xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
  1354. + }
  1355. +
  1356. + /* setup NAPI */
  1357. + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
  1358. + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
  1359. + &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
  1360. +
  1361. + platform_set_drvdata(pdev, &xrx200_hw);
  1362. +
  1363. + return 0;
  1364. +}
  1365. +
  1366. +static int xrx200_remove(struct platform_device *pdev)
  1367. +{
  1368. + struct net_device *dev = platform_get_drvdata(pdev);
  1369. + struct xrx200_priv *priv;
  1370. +
  1371. + if (!dev)
  1372. + return 0;
  1373. +
  1374. + priv = netdev_priv(dev);
  1375. +
  1376. + /* free stack related instances */
  1377. + netif_stop_queue(dev);
  1378. + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
  1379. +
  1380. + /* shut down hardware */
  1381. + xrx200_hw_cleanup(&xrx200_hw);
  1382. +
  1383. + /* remove the actual device */
  1384. + unregister_netdev(dev);
  1385. + free_netdev(dev);
  1386. +
  1387. + return 0;
  1388. +}
  1389. +
  1390. +static const struct of_device_id xrx200_match[] = {
  1391. + { .compatible = "lantiq,xrx200-net" },
  1392. + {},
  1393. +};
  1394. +MODULE_DEVICE_TABLE(of, xrx200_match);
  1395. +
  1396. +static struct platform_driver xrx200_driver = {
  1397. + .probe = xrx200_probe,
  1398. + .remove = xrx200_remove,
  1399. + .driver = {
  1400. + .name = "lantiq,xrx200-net",
  1401. + .of_match_table = xrx200_match,
  1402. + .owner = THIS_MODULE,
  1403. + },
  1404. +};
  1405. +
  1406. +module_platform_driver(xrx200_driver);
  1407. +
  1408. +MODULE_AUTHOR("John Crispin <[email protected]>");
  1409. +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
  1410. +MODULE_LICENSE("GPL");