0026-NET-MIPS-lantiq-update-etop-driver-for-devicetree.patch

From 32010516999c75d8e8ea95779137438f4f6d06ae Mon Sep 17 00:00:00 2001
From: John Crispin <[email protected]>
Date: Wed, 13 Mar 2013 09:32:16 +0100
Subject: [PATCH 26/40] NET: MIPS: lantiq: update etop driver for devicetree

---
 drivers/net/ethernet/lantiq_etop.c | 496 +++++++++++++++++++++++++-----------
 1 file changed, 351 insertions(+), 145 deletions(-)

--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -12,7 +12,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) 2011 John Crispin <[email protected]>
+ * Copyright (C) 2011-12 John Crispin <[email protected]>
  */
 
 #include <linux/kernel.h>
@@ -36,6 +36,10 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 #include <asm/checksum.h>
@@ -71,25 +75,61 @@
 #define ETOP_MII_REVERSE	0xe
 #define ETOP_PLEN_UNDER		0x40
 #define ETOP_CGEN		0x800
+#define ETOP_CFG_MII0		0x01
 
-/* use 2 static channels for TX/RX */
-#define LTQ_ETOP_TX_CHANNEL	1
-#define LTQ_ETOP_RX_CHANNEL	6
-#define IS_TX(x)		(x == LTQ_ETOP_TX_CHANNEL)
-#define IS_RX(x)		(x == LTQ_ETOP_RX_CHANNEL)
+#define LTQ_GBIT_MDIO_CTL	0xCC
+#define LTQ_GBIT_MDIO_DATA	0xd0
+#define LTQ_GBIT_GCTL0		0x68
+#define LTQ_GBIT_PMAC_HD_CTL	0x8c
+#define LTQ_GBIT_P0_CTL		0x4
+#define LTQ_GBIT_PMAC_RX_IPG	0xa8
+#define LTQ_GBIT_RGMII_CTL	0x78
+
+#define PMAC_HD_CTL_AS		(1 << 19)
+#define PMAC_HD_CTL_RXSH	(1 << 22)
+
+/* Switch Enable (0=disable, 1=enable) */
+#define GCTL0_SE		0x80000000
+/* Disable MDIO auto polling (0=disable, 1=enable) */
+#define PX_CTL_DMDIO		0x00400000
+
+/* MDC clock divider, clock = 25MHz/((MDC_CLOCK + 1) * 2) */
+#define MDC_CLOCK_MASK		0xff000000
+#define MDC_CLOCK_OFFSET	24
+
+/* register information for the gbit's MDIO bus */
+#define MDIO_XR9_REQUEST	0x00008000
+#define MDIO_XR9_READ		0x00000800
+#define MDIO_XR9_WRITE		0x00000400
+#define MDIO_XR9_REG_MASK	0x1f
+#define MDIO_XR9_ADDR_MASK	0x1f
+#define MDIO_XR9_RD_MASK	0xffff
+#define MDIO_XR9_REG_OFFSET	0
+#define MDIO_XR9_ADDR_OFFSET	5
+#define MDIO_XR9_WR_OFFSET	16
+#define LTQ_DMA_ETOP	((of_machine_is_compatible("lantiq,ase")) ? \
+			(INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))
+
+/* the newer xway SoCs have an embedded 3/7 port gbit multiplexer */
 #define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
 #define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
 #define ltq_etop_w32_mask(x, y, z)	\
 		ltq_w32_mask(x, y, ltq_etop_membase + (z))
-#define DRV_VERSION	"1.0"
+#define ltq_gbit_r32(x)		ltq_r32(ltq_gbit_membase + (x))
+#define ltq_gbit_w32(x, y)	ltq_w32(x, ltq_gbit_membase + (y))
+#define ltq_gbit_w32_mask(x, y, z)	\
+		ltq_w32_mask(x, y, ltq_gbit_membase + (z))
+
+#define DRV_VERSION	"1.2"
 
 static void __iomem *ltq_etop_membase;
+static void __iomem *ltq_gbit_membase;
 
 struct ltq_etop_chan {
-	int idx;
 	int tx_free;
+	int irq;
 	struct net_device *netdev;
 	struct napi_struct napi;
 	struct ltq_dma_channel dma;
@@ -99,22 +139,35 @@ struct ltq_etop_chan {
 struct ltq_etop_priv {
 	struct net_device *netdev;
 	struct platform_device *pdev;
-	struct ltq_eth_data *pldata;
 	struct resource *res;
 
 	struct mii_bus *mii_bus;
 	struct phy_device *phydev;
 
-	struct ltq_etop_chan ch[MAX_DMA_CHAN];
-	int tx_free[MAX_DMA_CHAN >> 1];
+	struct ltq_etop_chan txch;
+	struct ltq_etop_chan rxch;
+
+	int tx_irq;
+	int rx_irq;
+
+	const void *mac;
+	int mii_mode;
 
 	spinlock_t lock;
+
+	struct clk *clk_ppe;
+	struct clk *clk_switch;
+	struct clk *clk_ephy;
+	struct clk *clk_ephycgu;
 };
 
+static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
+				int phy_reg, u16 phy_data);
+
 static int
 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
 {
-	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
+	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
 	if (!ch->skb[ch->dma.desc])
 		return -ENOMEM;
 	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
@@ -149,8 +202,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	skb_put(skb, len);
+	skb->dev = ch->netdev;
 	skb->protocol = eth_type_trans(skb, ch->netdev);
 	netif_receive_skb(skb);
+	ch->netdev->stats.rx_packets++;
+	ch->netdev->stats.rx_bytes += len;
 }
 
 static int
@@ -158,8 +214,10 @@ ltq_etop_poll_rx(struct napi_struct *nap
 {
 	struct ltq_etop_chan *ch = container_of(napi,
 				struct ltq_etop_chan, napi);
+	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
 	int rx = 0;
 	int complete = 0;
+	unsigned long flags;
 
 	while ((rx < budget) && !complete) {
 		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
@@ -173,7 +231,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
 	}
 	if (complete || !rx) {
 		napi_complete(&ch->napi);
+		spin_lock_irqsave(&priv->lock, flags);
 		ltq_dma_ack_irq(&ch->dma);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 	return rx;
 }
@@ -185,12 +245,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
 		container_of(napi, struct ltq_etop_chan, napi);
 	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
 	struct netdev_queue *txq =
-		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
+		netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	while ((ch->dma.desc_base[ch->tx_free].ctl &
 			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+		ch->netdev->stats.tx_packets++;
+		ch->netdev->stats.tx_bytes += ch->skb[ch->tx_free]->len;
 		dev_kfree_skb_any(ch->skb[ch->tx_free]);
 		ch->skb[ch->tx_free] = NULL;
 		memset(&ch->dma.desc_base[ch->tx_free], 0,
@@ -203,7 +265,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
 	if (netif_tx_queue_stopped(txq))
 		netif_tx_start_queue(txq);
 	napi_complete(&ch->napi);
+	spin_lock_irqsave(&priv->lock, flags);
 	ltq_dma_ack_irq(&ch->dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return 1;
 }
 
@@ -211,9 +275,10 @@ static irqreturn_t
 ltq_etop_dma_irq(int irq, void *_priv)
 {
 	struct ltq_etop_priv *priv = _priv;
-	int ch = irq - LTQ_DMA_CH0_INT;
-
-	napi_schedule(&priv->ch[ch].napi);
+	if (irq == priv->txch.dma.irq)
+		napi_schedule(&priv->txch.napi);
+	else
+		napi_schedule(&priv->rxch.napi);
 	return IRQ_HANDLED;
 }
 
@@ -225,7 +290,7 @@ ltq_etop_free_channel(struct net_device
 	ltq_dma_free(&ch->dma);
 	if (ch->dma.irq)
 		free_irq(ch->dma.irq, priv);
-	if (IS_RX(ch->idx)) {
+	if (ch == &priv->rxch) {
 		int desc;
 		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
@@ -236,23 +301,62 @@ static void
 ltq_etop_hw_exit(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
 
-	ltq_pmu_disable(PMU_PPE);
-	for (i = 0; i < MAX_DMA_CHAN; i++)
-		if (IS_TX(i) || IS_RX(i))
-			ltq_etop_free_channel(dev, &priv->ch[i]);
+	clk_disable(priv->clk_ppe);
+
+	if (of_machine_is_compatible("lantiq,ar9"))
+		clk_disable(priv->clk_switch);
+
+	if (of_machine_is_compatible("lantiq,ase")) {
+		clk_disable(priv->clk_ephy);
+		clk_disable(priv->clk_ephycgu);
+	}
+
+	ltq_etop_free_channel(dev, &priv->txch);
+	ltq_etop_free_channel(dev, &priv->rxch);
+}
+
+static void
+ltq_etop_gbit_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	clk_enable(priv->clk_switch);
+
+	/* enable gbit port0 on the SoC */
+	ltq_gbit_w32_mask((1 << 17), (1 << 18), LTQ_GBIT_P0_CTL);
+
+	ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
+	/* disable MDIO auto polling mode */
+	ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
+	/* set 1522 packet size */
+	ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
+	/* disable pmac & dmac headers */
+	ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
+		LTQ_GBIT_PMAC_HD_CTL);
+	/* Due to traffic halt when burst length 8,
+		replace default IPG value with 0x3B */
+	ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
+	/* set mdc clock to 2.5 MHz */
+	ltq_gbit_w32_mask(MDC_CLOCK_MASK, 4 << MDC_CLOCK_OFFSET,
+		LTQ_GBIT_RGMII_CTL);
 }
 
 static int
 ltq_etop_hw_init(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
+	int mii_mode = priv->mii_mode;
+
+	clk_enable(priv->clk_ppe);
 
-	ltq_pmu_enable(PMU_PPE);
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		ltq_etop_gbit_init(dev);
+		/* force the etops link to the gbit to MII */
+		mii_mode = PHY_INTERFACE_MODE_MII;
+	}
 
-	switch (priv->pldata->mii_mode) {
+	switch (mii_mode) {
 	case PHY_INTERFACE_MODE_RMII:
 		ltq_etop_w32_mask(ETOP_MII_MASK,
 			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
@@ -264,39 +368,68 @@ ltq_etop_hw_init(struct net_device *dev)
 		break;
 
 	default:
+		if (of_machine_is_compatible("lantiq,ase")) {
+			clk_enable(priv->clk_ephy);
+			/* disable external MII */
+			ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
+			/* enable clock for internal PHY */
+			clk_enable(priv->clk_ephycgu);
+			/* we need to write this magic to the internal phy to
+			   make it work */
+			ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
+			pr_info("Selected EPHY mode\n");
+			break;
+		}
 		netdev_err(dev, "unknown mii mode %d\n",
-			priv->pldata->mii_mode);
+			mii_mode);
 		return -ENOTSUPP;
 	}
 
 	/* enable crc generation */
 	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
 
+	return 0;
+}
+
+static int
+ltq_etop_dma_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int tx = priv->tx_irq - LTQ_DMA_ETOP;
+	int rx = priv->rx_irq - LTQ_DMA_ETOP;
+	int err;
+
 	ltq_dma_init_port(DMA_PORT_ETOP);
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		int irq = LTQ_DMA_CH0_INT + i;
-		struct ltq_etop_chan *ch = &priv->ch[i];
-
-		ch->idx = ch->dma.nr = i;
-
-		if (IS_TX(i)) {
-			ltq_dma_alloc_tx(&ch->dma);
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_tx", priv);
-		} else if (IS_RX(i)) {
-			ltq_dma_alloc_rx(&ch->dma);
-			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
-					ch->dma.desc++)
-				if (ltq_etop_alloc_skb(ch))
-					return -ENOMEM;
-			ch->dma.desc = 0;
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_rx", priv);
+	priv->txch.dma.nr = tx;
+	ltq_dma_alloc_tx(&priv->txch.dma);
+	err = request_irq(priv->tx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
+		"eth_tx", priv);
+	if (err) {
+		netdev_err(dev, "failed to allocate tx irq\n");
+		goto err_out;
+	}
+	priv->txch.dma.irq = priv->tx_irq;
+
+	priv->rxch.dma.nr = rx;
+	ltq_dma_alloc_rx(&priv->rxch.dma);
+	for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
+			priv->rxch.dma.desc++) {
+		if (ltq_etop_alloc_skb(&priv->rxch)) {
+			netdev_err(dev, "failed to allocate skbs\n");
+			err = -ENOMEM;
+			goto err_out;
 		}
-		ch->dma.irq = irq;
 	}
-	return 0;
+	priv->rxch.dma.desc = 0;
+	err = request_irq(priv->rx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
+		"eth_rx", priv);
+	if (err)
+		netdev_err(dev, "failed to allocate rx irq\n");
+	else
+		priv->rxch.dma.irq = priv->rx_irq;
+err_out:
+	return err;
 }
 
 static void
@@ -312,7 +445,10 @@ ltq_etop_get_settings(struct net_device
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_ethtool_gset(priv->phydev, cmd);
+	if (priv->phydev)
+		return phy_ethtool_gset(priv->phydev, cmd);
+	else
+		return 0;
 }
 
 static int
@@ -320,7 +456,10 @@ ltq_etop_set_settings(struct net_device
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_ethtool_sset(priv->phydev, cmd);
+	if (priv->phydev)
+		return phy_ethtool_sset(priv->phydev, cmd);
+	else
+		return 0;
 }
 
 static int
@@ -328,7 +467,10 @@ ltq_etop_nway_reset(struct net_device *d
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_start_aneg(priv->phydev);
+	if (priv->phydev)
+		return phy_start_aneg(priv->phydev);
+	else
+		return 0;
 }
 
 static const struct ethtool_ops ltq_etop_ethtool_ops = {
@@ -339,6 +481,39 @@ static const struct ethtool_ops ltq_etop
 };
 
 static int
+ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
+		int phy_reg, u16 phy_data)
+{
+	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
+		(phy_data << MDIO_XR9_WR_OFFSET) |
+		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	return 0;
+}
+
+static int
+ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
+		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
+	return val;
+}
+
+static int
 ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
 {
 	u32 val = MDIO_REQUEST |
@@ -379,14 +554,18 @@ ltq_etop_mdio_probe(struct net_device *d
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
-	int phy_addr;
+	u32 phy_supported = (SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full
+			| SUPPORTED_Autoneg
+			| SUPPORTED_MII
+			| SUPPORTED_TP);
 
-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-		if (priv->mii_bus->phy_map[phy_addr]) {
-			phydev = priv->mii_bus->phy_map[phy_addr];
-			break;
-		}
-	}
+	if (of_machine_is_compatible("lantiq,ase"))
+		phydev = priv->mii_bus->phy_map[8];
+	else
+		phydev = priv->mii_bus->phy_map[0];
 
 	if (!phydev) {
 		netdev_err(dev, "no PHY found\n");
@@ -394,21 +573,18 @@ ltq_etop_mdio_probe(struct net_device *d
 	}
 
 	phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
-			0, priv->pldata->mii_mode);
+			0, priv->mii_mode);
 
 	if (IS_ERR(phydev)) {
 		netdev_err(dev, "Could not attach to PHY\n");
 		return PTR_ERR(phydev);
 	}
 
-	phydev->supported &= (SUPPORTED_10baseT_Half
-			      | SUPPORTED_10baseT_Full
-			      | SUPPORTED_100baseT_Half
-			      | SUPPORTED_100baseT_Full
-			      | SUPPORTED_Autoneg
-			      | SUPPORTED_MII
-			      | SUPPORTED_TP);
+	if (of_machine_is_compatible("lantiq,ar9"))
+		phy_supported |= SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full;
 
+	phydev->supported &= phy_supported;
 	phydev->advertising = phydev->supported;
 	priv->phydev = phydev;
 	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
@@ -433,8 +609,13 @@ ltq_etop_mdio_init(struct net_device *de
 	}
 
 	priv->mii_bus->priv = dev;
-	priv->mii_bus->read = ltq_etop_mdio_rd;
-	priv->mii_bus->write = ltq_etop_mdio_wr;
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
+		priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
+	} else {
+		priv->mii_bus->read = ltq_etop_mdio_rd;
+		priv->mii_bus->write = ltq_etop_mdio_wr;
+	}
 	priv->mii_bus->name = "ltq_mii";
 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		priv->pdev->name, priv->pdev->id);
@@ -483,17 +664,19 @@ static int
 ltq_etop_open(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
+	unsigned long flags;
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		struct ltq_etop_chan *ch = &priv->ch[i];
+	napi_enable(&priv->txch.napi);
+	napi_enable(&priv->rxch.napi);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ltq_dma_open(&priv->txch.dma);
+	ltq_dma_open(&priv->rxch.dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
 
-		if (!IS_TX(i) && (!IS_RX(i)))
-			continue;
-		ltq_dma_open(&ch->dma);
-		napi_enable(&ch->napi);
-	}
-	phy_start(priv->phydev);
 	netif_tx_start_all_queues(dev);
 	return 0;
 }
@@ -502,18 +685,19 @@ static int
 ltq_etop_stop(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
+	unsigned long flags;
 
 	netif_tx_stop_all_queues(dev);
-	phy_stop(priv->phydev);
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		struct ltq_etop_chan *ch = &priv->ch[i];
-
-		if (!IS_RX(i) && !IS_TX(i))
-			continue;
-		napi_disable(&ch->napi);
-		ltq_dma_close(&ch->dma);
-	}
+	if (priv->phydev)
+		phy_stop(priv->phydev);
+	napi_disable(&priv->txch.napi);
+	napi_disable(&priv->rxch.napi);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ltq_dma_close(&priv->txch.dma);
+	ltq_dma_close(&priv->rxch.dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	return 0;
 }
@@ -523,16 +707,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
 	int queue = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
-	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
-	int len;
+	struct ltq_dma_desc *desc =
+		&priv->txch.dma.desc_base[priv->txch.dma.desc];
 	unsigned long flags;
 	u32 byte_offset;
+	int len;
 
 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
 
-	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
-		dev_kfree_skb_any(skb);
+	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
+			priv->txch.skb[priv->txch.dma.desc]) {
 		netdev_err(dev, "tx ring full\n");
 		netif_tx_stop_queue(txq);
 		return NETDEV_TX_BUSY;
@@ -540,7 +724,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
 
 	/* dma needs to start on a 16 byte aligned address */
 	byte_offset = CPHYSADDR(skb->data) % 16;
-	ch->skb[ch->dma.desc] = skb;
+	priv->txch.skb[priv->txch.dma.desc] = skb;
 
 	dev->trans_start = jiffies;
@@ -550,11 +734,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
 	wmb();
 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
 		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
-	ch->dma.desc++;
-	ch->dma.desc %= LTQ_DESC_NUM;
+	priv->txch.dma.desc++;
+	priv->txch.dma.desc %= LTQ_DESC_NUM;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
+	if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
 		netif_tx_stop_queue(txq);
 
 	return NETDEV_TX_OK;
@@ -633,34 +817,32 @@ ltq_etop_init(struct net_device *dev)
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	struct sockaddr mac;
 	int err;
-	bool random_mac = false;
 
 	ether_setup(dev);
 	dev->watchdog_timeo = 10 * HZ;
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
+	err = ltq_etop_dma_init(dev);
+	if (err)
+		goto err_hw;
+
 	ltq_etop_change_mtu(dev, 1500);
 
-	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+	memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
 	if (!is_valid_ether_addr(mac.sa_data)) {
 		pr_warn("etop: invalid MAC, using random\n");
-		eth_random_addr(mac.sa_data);
-		random_mac = true;
+		random_ether_addr(mac.sa_data);
 	}
 
 	err = ltq_etop_set_mac_address(dev, &mac);
 	if (err)
 		goto err_netdev;
-
-	/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
-	if (random_mac)
-		dev->addr_assign_type |= NET_ADDR_RANDOM;
-
 	ltq_etop_set_multicast_list(dev);
-	err = ltq_etop_mdio_init(dev);
-	if (err)
-		goto err_netdev;
+	if (!ltq_etop_mdio_init(dev))
+		dev->ethtool_ops = &ltq_etop_ethtool_ops;
+	else
+		pr_warn("etop: mdio probe failed\n");
 	return 0;
 
 err_netdev:
@@ -680,6 +862,9 @@ ltq_etop_tx_timeout(struct net_device *d
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
+	err = ltq_etop_dma_init(dev);
+	if (err)
+		goto err_hw;
 	dev->trans_start = jiffies;
 	netif_wake_queue(dev);
 	return;
@@ -703,14 +888,19 @@ static const struct net_device_ops ltq_e
 	.ndo_tx_timeout = ltq_etop_tx_timeout,
 };
 
-static int __init
+static int __devinit
 ltq_etop_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct ltq_etop_priv *priv;
-	struct resource *res;
+	struct resource *res, *gbit_res, irqres[2];
 	int err;
-	int i;
+
+	err = of_irq_to_resource_table(pdev->dev.of_node, irqres, 2);
+	if (err != 2) {
+		dev_err(&pdev->dev, "failed to get etop irqs\n");
+		return -EINVAL;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -736,30 +926,58 @@ ltq_etop_probe(struct platform_device *p
 		goto err_out;
 	}
 
-	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
-	if (!dev) {
-		err = -ENOMEM;
-		goto err_out;
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!gbit_res) {
+			dev_err(&pdev->dev, "failed to get gbit resource\n");
+			err = -ENOENT;
+			goto err_out;
+		}
+		ltq_gbit_membase = devm_ioremap_nocache(&pdev->dev,
+			gbit_res->start, resource_size(gbit_res));
+		if (!ltq_gbit_membase) {
+			dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
+				pdev->id);
+			err = -ENOMEM;
+			goto err_out;
+		}
 	}
+
+	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
 	strcpy(dev->name, "eth%d");
 	dev->netdev_ops = &ltq_eth_netdev_ops;
-	dev->ethtool_ops = &ltq_etop_ethtool_ops;
 	priv = netdev_priv(dev);
 	priv->res = res;
 	priv->pdev = pdev;
-	priv->pldata = dev_get_platdata(&pdev->dev);
 	priv->netdev = dev;
+	priv->tx_irq = irqres[0].start;
+	priv->rx_irq = irqres[1].start;
+	priv->mii_mode = of_get_phy_mode(pdev->dev.of_node);
+	priv->mac = of_get_mac_address(pdev->dev.of_node);
+
+	priv->clk_ppe = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk_ppe))
+		return PTR_ERR(priv->clk_ppe);
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		priv->clk_switch = clk_get(&pdev->dev, "switch");
+		if (IS_ERR(priv->clk_switch))
+			return PTR_ERR(priv->clk_switch);
+	}
+	if (of_machine_is_compatible("lantiq,ase")) {
+		priv->clk_ephy = clk_get(&pdev->dev, "ephy");
+		if (IS_ERR(priv->clk_ephy))
+			return PTR_ERR(priv->clk_ephy);
+		priv->clk_ephycgu = clk_get(&pdev->dev, "ephycgu");
+		if (IS_ERR(priv->clk_ephycgu))
+			return PTR_ERR(priv->clk_ephycgu);
+	}
+
 	spin_lock_init(&priv->lock);
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		if (IS_TX(i))
-			netif_napi_add(dev, &priv->ch[i].napi,
-				ltq_etop_poll_tx, 8);
-		else if (IS_RX(i))
-			netif_napi_add(dev, &priv->ch[i].napi,
-				ltq_etop_poll_rx, 32);
-		priv->ch[i].netdev = dev;
-	}
+	netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
+	netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
+	priv->txch.netdev = dev;
+	priv->rxch.netdev = dev;
 
 	err = register_netdev(dev);
 	if (err)
@@ -788,32 +1006,23 @@ ltq_etop_remove(struct platform_device *
 	return 0;
 }
 
+static const struct of_device_id ltq_etop_match[] = {
+	{ .compatible = "lantiq,etop-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_etop_match);
+
 static struct platform_driver ltq_mii_driver = {
+	.probe = ltq_etop_probe,
 	.remove = ltq_etop_remove,
 	.driver = {
 		.name = "ltq_etop",
 		.owner = THIS_MODULE,
+		.of_match_table = ltq_etop_match,
 	},
 };
 
-int __init
-init_ltq_etop(void)
-{
-	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
-
-	if (ret)
-		pr_err("ltq_etop: Error registering platform driver!");
-	return ret;
-}
-
-static void __exit
-exit_ltq_etop(void)
-{
-	platform_driver_unregister(&ltq_mii_driver);
-}
-
-module_init(init_ltq_etop);
-module_exit(exit_ltq_etop);
+module_platform_driver(ltq_mii_driver);
 
 MODULE_AUTHOR("John Crispin <[email protected]>");
 MODULE_DESCRIPTION("Lantiq SoC ETOP");
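
Usage note: the probe path above takes everything it previously got from platform data out of the devicetree node instead: two interrupts via of_irq_to_resource_table() (tx first, then rx), the PHY mode via of_get_phy_mode() ("phy-mode" property) and the MAC via of_get_mac_address(), matched on the "lantiq,etop-xway" compatible string, with a second memory resource and "switch"/"ephy"/"ephycgu" clocks on AR9/ASE machines. A minimal sketch of a node this driver could bind to is shown below; the unit address, register range, interrupt numbers and MAC value are illustrative placeholders, not taken from the patch:

	ethernet@e180000 {
		compatible = "lantiq,etop-xway";
		reg = <0xe180000 0x40000>;		/* placeholder ETOP register window */
		interrupt-parent = <&icu0>;
		interrupts = <73 78>;			/* tx irq first, rx irq second; board specific */
		phy-mode = "rmii";			/* consumed by of_get_phy_mode() */
		mac-address = [ 00 11 22 33 44 55 ];	/* placeholder, consumed by of_get_mac_address() */
	};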