0028-NET-lantiq-various-etop-fixes.patch
  1. From 870ed9cae083ff8a60a739ef7e74c5a1800533be Mon Sep 17 00:00:00 2001
  2. From: John Crispin <[email protected]>
  3. Date: Tue, 9 Sep 2014 22:45:34 +0200
  4. Subject: [PATCH 28/36] NET: lantiq: various etop fixes
  5. Signed-off-by: John Crispin <[email protected]>
  6. ---
  7. drivers/net/ethernet/lantiq_etop.c | 555 +++++++++++++++++++++++++-----------
  8. 1 file changed, 389 insertions(+), 166 deletions(-)
  9. --- a/drivers/net/ethernet/lantiq_etop.c
  10. +++ b/drivers/net/ethernet/lantiq_etop.c
  11. @@ -1,7 +1,7 @@
  12. // SPDX-License-Identifier: GPL-2.0-only
  13. /*
  14. *
  15. - * Copyright (C) 2011 John Crispin <[email protected]>
  16. + * Copyright (C) 2011-12 John Crispin <[email protected]>
  17. */
  18. #include <linux/kernel.h>
  19. @@ -20,11 +20,16 @@
  20. #include <linux/mm.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/ethtool.h>
  23. +#include <linux/if_vlan.h>
  24. #include <linux/init.h>
  25. #include <linux/delay.h>
  26. #include <linux/io.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/module.h>
  29. +#include <linux/clk.h>
  30. +#include <linux/of_net.h>
  31. +#include <linux/of_irq.h>
  32. +#include <linux/of_platform.h>
  33. #include <asm/checksum.h>
  34. @@ -32,7 +37,7 @@
  35. #include <xway_dma.h>
  36. #include <lantiq_platform.h>
  37. -#define LTQ_ETOP_MDIO 0x11804
  38. +#define LTQ_ETOP_MDIO_ACC 0x11804
  39. #define MDIO_REQUEST 0x80000000
  40. #define MDIO_READ 0x40000000
  41. #define MDIO_ADDR_MASK 0x1f
  42. @@ -41,44 +46,91 @@
  43. #define MDIO_REG_OFFSET 0x10
  44. #define MDIO_VAL_MASK 0xffff
  45. -#define PPE32_CGEN 0x800
  46. -#define LQ_PPE32_ENET_MAC_CFG 0x1840
  47. +#define LTQ_ETOP_MDIO_CFG 0x11800
  48. +#define MDIO_CFG_MASK 0x6
  49. +
  50. +#define LTQ_ETOP_CFG 0x11808
  51. +#define LTQ_ETOP_IGPLEN 0x11820
  52. +#define LTQ_ETOP_MAC_CFG 0x11840
  53. #define LTQ_ETOP_ENETS0 0x11850
  54. #define LTQ_ETOP_MAC_DA0 0x1186C
  55. #define LTQ_ETOP_MAC_DA1 0x11870
  56. -#define LTQ_ETOP_CFG 0x16020
  57. -#define LTQ_ETOP_IGPLEN 0x16080
  58. +
  59. +#define MAC_CFG_MASK 0xfff
  60. +#define MAC_CFG_CGEN (1 << 11)
  61. +#define MAC_CFG_DUPLEX (1 << 2)
  62. +#define MAC_CFG_SPEED (1 << 1)
  63. +#define MAC_CFG_LINK (1 << 0)
  64. #define MAX_DMA_CHAN 0x8
  65. #define MAX_DMA_CRC_LEN 0x4
  66. #define MAX_DMA_DATA_LEN 0x600
  67. #define ETOP_FTCU BIT(28)
  68. -#define ETOP_MII_MASK 0xf
  69. -#define ETOP_MII_NORMAL 0xd
  70. -#define ETOP_MII_REVERSE 0xe
  71. #define ETOP_PLEN_UNDER 0x40
  72. -#define ETOP_CGEN 0x800
  73. +#define ETOP_CFG_MII0 0x01
  74. -/* use 2 static channels for TX/RX */
  75. -#define LTQ_ETOP_TX_CHANNEL 1
  76. -#define LTQ_ETOP_RX_CHANNEL 6
  77. -#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
  78. -#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
  79. +#define ETOP_CFG_MASK 0xfff
  80. +#define ETOP_CFG_FEN0 (1 << 8)
  81. +#define ETOP_CFG_SEN0 (1 << 6)
  82. +#define ETOP_CFG_OFF1 (1 << 3)
  83. +#define ETOP_CFG_REMII0 (1 << 1)
  84. +#define ETOP_CFG_OFF0 (1 << 0)
  85. +
  86. +#define LTQ_GBIT_MDIO_CTL 0xCC
  87. +#define LTQ_GBIT_MDIO_DATA 0xd0
  88. +#define LTQ_GBIT_GCTL0 0x68
  89. +#define LTQ_GBIT_PMAC_HD_CTL 0x8c
  90. +#define LTQ_GBIT_P0_CTL 0x4
  91. +#define LTQ_GBIT_PMAC_RX_IPG 0xa8
  92. +#define LTQ_GBIT_RGMII_CTL 0x78
  93. +
  94. +#define PMAC_HD_CTL_AS (1 << 19)
  95. +#define PMAC_HD_CTL_RXSH (1 << 22)
  96. +
  97. +/* Switch Enable (0=disable, 1=enable) */
  98. +#define GCTL0_SE 0x80000000
  99. +/* Disable MDIO auto polling (0=disable, 1=enable) */
  100. +#define PX_CTL_DMDIO 0x00400000
  101. +
  102. +/* MDC clock divider, clock = 25MHz/((MDC_CLOCK + 1) * 2) */
  103. +#define MDC_CLOCK_MASK 0xff000000
  104. +#define MDC_CLOCK_OFFSET 24
  105. +
  106. +/* register information for the gbit's MDIO bus */
  107. +#define MDIO_XR9_REQUEST 0x00008000
  108. +#define MDIO_XR9_READ 0x00000800
  109. +#define MDIO_XR9_WRITE 0x00000400
  110. +#define MDIO_XR9_REG_MASK 0x1f
  111. +#define MDIO_XR9_ADDR_MASK 0x1f
  112. +#define MDIO_XR9_RD_MASK 0xffff
  113. +#define MDIO_XR9_REG_OFFSET 0
  114. +#define MDIO_XR9_ADDR_OFFSET 5
  115. +#define MDIO_XR9_WR_OFFSET 16
  116. +#define LTQ_DMA_ETOP ((of_machine_is_compatible("lantiq,ase")) ? \
  117. + (INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))
  118. +
  119. +/* the newer xway socks have a embedded 3/7 port gbit multiplexer */
  120. #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
  121. #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
  122. #define ltq_etop_w32_mask(x, y, z) \
  123. ltq_w32_mask(x, y, ltq_etop_membase + (z))
  124. -#define DRV_VERSION "1.0"
  125. +#define ltq_gbit_r32(x) ltq_r32(ltq_gbit_membase + (x))
  126. +#define ltq_gbit_w32(x, y) ltq_w32(x, ltq_gbit_membase + (y))
  127. +#define ltq_gbit_w32_mask(x, y, z) \
  128. + ltq_w32_mask(x, y, ltq_gbit_membase + (z))
  129. +
  130. +#define DRV_VERSION "1.2"
  131. static void __iomem *ltq_etop_membase;
  132. +static void __iomem *ltq_gbit_membase;
  133. struct ltq_etop_chan {
  134. - int idx;
  135. int tx_free;
  136. + int irq;
  137. struct net_device *netdev;
  138. struct napi_struct napi;
  139. struct ltq_dma_channel dma;
  140. @@ -88,23 +140,36 @@ struct ltq_etop_chan {
  141. struct ltq_etop_priv {
  142. struct net_device *netdev;
  143. struct platform_device *pdev;
  144. - struct ltq_eth_data *pldata;
  145. struct resource *res;
  146. struct mii_bus *mii_bus;
  147. - struct ltq_etop_chan ch[MAX_DMA_CHAN];
  148. - int tx_free[MAX_DMA_CHAN >> 1];
  149. + struct ltq_etop_chan txch;
  150. + struct ltq_etop_chan rxch;
  151. - spinlock_t lock;
  152. + int tx_irq;
  153. + int rx_irq;
  154. +
  155. + unsigned char mac[6];
  156. + phy_interface_t mii_mode;
  157. +
  158. + spinlock_t lock;
  159. +
  160. + struct clk *clk_ppe;
  161. + struct clk *clk_switch;
  162. + struct clk *clk_ephy;
  163. + struct clk *clk_ephycgu;
  164. };
  165. +static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
  166. + int phy_reg, u16 phy_data);
  167. +
  168. static int
  169. ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
  170. {
  171. struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
  172. - ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
  173. + ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
  174. if (!ch->skb[ch->dma.desc])
  175. return -ENOMEM;
  176. ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
  177. @@ -139,8 +204,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
  178. spin_unlock_irqrestore(&priv->lock, flags);
  179. skb_put(skb, len);
  180. + skb->dev = ch->netdev;
  181. skb->protocol = eth_type_trans(skb, ch->netdev);
  182. netif_receive_skb(skb);
  183. + ch->netdev->stats.rx_packets++;
  184. + ch->netdev->stats.rx_bytes += len;
  185. }
  186. static int
  187. @@ -148,7 +216,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
  188. {
  189. struct ltq_etop_chan *ch = container_of(napi,
  190. struct ltq_etop_chan, napi);
  191. + struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
  192. int work_done = 0;
  193. + unsigned long flags;
  194. while (work_done < budget) {
  195. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  196. @@ -160,7 +230,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
  197. }
  198. if (work_done < budget) {
  199. napi_complete_done(&ch->napi, work_done);
  200. + spin_lock_irqsave(&priv->lock, flags);
  201. ltq_dma_ack_irq(&ch->dma);
  202. + spin_unlock_irqrestore(&priv->lock, flags);
  203. }
  204. return work_done;
  205. }
  206. @@ -172,12 +244,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
  207. container_of(napi, struct ltq_etop_chan, napi);
  208. struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
  209. struct netdev_queue *txq =
  210. - netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
  211. + netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
  212. unsigned long flags;
  213. spin_lock_irqsave(&priv->lock, flags);
  214. while ((ch->dma.desc_base[ch->tx_free].ctl &
  215. (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  216. + ch->netdev->stats.tx_packets++;
  217. + ch->netdev->stats.tx_bytes += ch->skb[ch->tx_free]->len;
  218. dev_kfree_skb_any(ch->skb[ch->tx_free]);
  219. ch->skb[ch->tx_free] = NULL;
  220. memset(&ch->dma.desc_base[ch->tx_free], 0,
  221. @@ -190,7 +264,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
  222. if (netif_tx_queue_stopped(txq))
  223. netif_tx_start_queue(txq);
  224. napi_complete(&ch->napi);
  225. + spin_lock_irqsave(&priv->lock, flags);
  226. ltq_dma_ack_irq(&ch->dma);
  227. + spin_unlock_irqrestore(&priv->lock, flags);
  228. return 1;
  229. }
  230. @@ -198,9 +274,10 @@ static irqreturn_t
  231. ltq_etop_dma_irq(int irq, void *_priv)
  232. {
  233. struct ltq_etop_priv *priv = _priv;
  234. - int ch = irq - LTQ_DMA_CH0_INT;
  235. -
  236. - napi_schedule(&priv->ch[ch].napi);
  237. + if (irq == priv->txch.dma.irq)
  238. + napi_schedule(&priv->txch.napi);
  239. + else
  240. + napi_schedule(&priv->rxch.napi);
  241. return IRQ_HANDLED;
  242. }
  243. @@ -212,7 +289,7 @@ ltq_etop_free_channel(struct net_device
  244. ltq_dma_free(&ch->dma);
  245. if (ch->dma.irq)
  246. free_irq(ch->dma.irq, priv);
  247. - if (IS_RX(ch->idx)) {
  248. + if (ch == &priv->txch) {
  249. int desc;
  250. for (desc = 0; desc < LTQ_DESC_NUM; desc++)
  251. dev_kfree_skb_any(ch->skb[ch->dma.desc]);
  252. @@ -223,66 +300,135 @@ static void
  253. ltq_etop_hw_exit(struct net_device *dev)
  254. {
  255. struct ltq_etop_priv *priv = netdev_priv(dev);
  256. - int i;
  257. - ltq_pmu_disable(PMU_PPE);
  258. - for (i = 0; i < MAX_DMA_CHAN; i++)
  259. - if (IS_TX(i) || IS_RX(i))
  260. - ltq_etop_free_channel(dev, &priv->ch[i]);
  261. + clk_disable(priv->clk_ppe);
  262. +
  263. + if (of_machine_is_compatible("lantiq,ar9"))
  264. + clk_disable(priv->clk_switch);
  265. +
  266. + if (of_machine_is_compatible("lantiq,ase")) {
  267. + clk_disable(priv->clk_ephy);
  268. + clk_disable(priv->clk_ephycgu);
  269. + }
  270. +
  271. + ltq_etop_free_channel(dev, &priv->txch);
  272. + ltq_etop_free_channel(dev, &priv->rxch);
  273. +}
  274. +
  275. +static void
  276. +ltq_etop_gbit_init(struct net_device *dev)
  277. +{
  278. + struct ltq_etop_priv *priv = netdev_priv(dev);
  279. +
  280. + clk_enable(priv->clk_switch);
  281. +
  282. + /* enable gbit port0 on the SoC */
  283. + ltq_gbit_w32_mask((1 << 17), (1 << 18), LTQ_GBIT_P0_CTL);
  284. +
  285. + ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
  286. + /* disable MDIO auto polling mode */
  287. + ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
  288. + /* set 1522 packet size */
  289. + ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
  290. + /* disable pmac & dmac headers */
  291. + ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
  292. + LTQ_GBIT_PMAC_HD_CTL);
  293. + /* Due to traffic halt when burst length 8,
  294. + replace default IPG value with 0x3B */
  295. + ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
  296. + /* set mdc clock to 2.5 MHz */
  297. + ltq_gbit_w32_mask(MDC_CLOCK_MASK, 4 << MDC_CLOCK_OFFSET,
  298. + LTQ_GBIT_RGMII_CTL);
  299. }
  300. static int
  301. ltq_etop_hw_init(struct net_device *dev)
  302. {
  303. struct ltq_etop_priv *priv = netdev_priv(dev);
  304. - int i;
  305. + phy_interface_t mii_mode = priv->mii_mode;
  306. - ltq_pmu_enable(PMU_PPE);
  307. + clk_enable(priv->clk_ppe);
  308. - switch (priv->pldata->mii_mode) {
  309. + if (of_machine_is_compatible("lantiq,ar9")) {
  310. + ltq_etop_gbit_init(dev);
  311. + /* force the etops link to the gbit to MII */
  312. + mii_mode = PHY_INTERFACE_MODE_MII;
  313. + }
  314. + ltq_etop_w32_mask(MDIO_CFG_MASK, 0, LTQ_ETOP_MDIO_CFG);
  315. + ltq_etop_w32_mask(MAC_CFG_MASK, MAC_CFG_CGEN | MAC_CFG_DUPLEX |
  316. + MAC_CFG_SPEED | MAC_CFG_LINK, LTQ_ETOP_MAC_CFG);
  317. +
  318. + switch (mii_mode) {
  319. case PHY_INTERFACE_MODE_RMII:
  320. - ltq_etop_w32_mask(ETOP_MII_MASK,
  321. - ETOP_MII_REVERSE, LTQ_ETOP_CFG);
  322. + ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_REMII0 | ETOP_CFG_OFF1 |
  323. + ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
  324. break;
  325. case PHY_INTERFACE_MODE_MII:
  326. - ltq_etop_w32_mask(ETOP_MII_MASK,
  327. - ETOP_MII_NORMAL, LTQ_ETOP_CFG);
  328. + ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_OFF1 |
  329. + ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
  330. break;
  331. default:
  332. + if (of_machine_is_compatible("lantiq,ase")) {
  333. + clk_enable(priv->clk_ephy);
  334. + /* disable external MII */
  335. + ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
  336. + /* enable clock for internal PHY */
  337. + clk_enable(priv->clk_ephycgu);
  338. + /* we need to write this magic to the internal phy to
  339. + make it work */
  340. + ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
  341. + pr_info("Selected EPHY mode\n");
  342. + break;
  343. + }
  344. netdev_err(dev, "unknown mii mode %d\n",
  345. - priv->pldata->mii_mode);
  346. + mii_mode);
  347. return -ENOTSUPP;
  348. }
  349. - /* enable crc generation */
  350. - ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
  351. + return 0;
  352. +}
  353. +
  354. +static int
  355. +ltq_etop_dma_init(struct net_device *dev)
  356. +{
  357. + struct ltq_etop_priv *priv = netdev_priv(dev);
  358. + int tx = priv->tx_irq - LTQ_DMA_ETOP;
  359. + int rx = priv->rx_irq - LTQ_DMA_ETOP;
  360. + int err;
  361. ltq_dma_init_port(DMA_PORT_ETOP);
  362. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  363. - int irq = LTQ_DMA_CH0_INT + i;
  364. - struct ltq_etop_chan *ch = &priv->ch[i];
  365. -
  366. - ch->idx = ch->dma.nr = i;
  367. - ch->dma.dev = &priv->pdev->dev;
  368. -
  369. - if (IS_TX(i)) {
  370. - ltq_dma_alloc_tx(&ch->dma);
  371. - request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
  372. - } else if (IS_RX(i)) {
  373. - ltq_dma_alloc_rx(&ch->dma);
  374. - for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
  375. - ch->dma.desc++)
  376. - if (ltq_etop_alloc_skb(ch))
  377. - return -ENOMEM;
  378. - ch->dma.desc = 0;
  379. - request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
  380. + priv->txch.dma.nr = tx;
  381. + priv->txch.dma.dev = &priv->pdev->dev;
  382. + ltq_dma_alloc_tx(&priv->txch.dma);
  383. + err = request_irq(priv->tx_irq, ltq_etop_dma_irq, 0, "eth_tx", priv);
  384. + if (err) {
  385. + netdev_err(dev, "failed to allocate tx irq\n");
  386. + goto err_out;
  387. + }
  388. + priv->txch.dma.irq = priv->tx_irq;
  389. +
  390. + priv->rxch.dma.nr = rx;
  391. + priv->rxch.dma.dev = &priv->pdev->dev;
  392. + ltq_dma_alloc_rx(&priv->rxch.dma);
  393. + for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
  394. + priv->rxch.dma.desc++) {
  395. + if (ltq_etop_alloc_skb(&priv->rxch)) {
  396. + netdev_err(dev, "failed to allocate skbs\n");
  397. + err = -ENOMEM;
  398. + goto err_out;
  399. }
  400. - ch->dma.irq = irq;
  401. }
  402. - return 0;
  403. + priv->rxch.dma.desc = 0;
  404. + err = request_irq(priv->rx_irq, ltq_etop_dma_irq, 0, "eth_rx", priv);
  405. + if (err)
  406. + netdev_err(dev, "failed to allocate rx irq\n");
  407. + else
  408. + priv->rxch.dma.irq = priv->rx_irq;
  409. +err_out:
  410. + return err;
  411. }
  412. static void
  413. @@ -301,6 +447,39 @@ static const struct ethtool_ops ltq_etop
  414. };
  415. static int
  416. +ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
  417. + int phy_reg, u16 phy_data)
  418. +{
  419. + u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
  420. + (phy_data << MDIO_XR9_WR_OFFSET) |
  421. + ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
  422. + ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
  423. +
  424. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  425. + ;
  426. + ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
  427. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  428. + ;
  429. + return 0;
  430. +}
  431. +
  432. +static int
  433. +ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
  434. +{
  435. + u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
  436. + ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
  437. + ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
  438. +
  439. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  440. + ;
  441. + ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
  442. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  443. + ;
  444. + val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
  445. + return val;
  446. +}
  447. +
  448. +static int
  449. ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
  450. {
  451. u32 val = MDIO_REQUEST |
  452. @@ -308,9 +487,9 @@ ltq_etop_mdio_wr(struct mii_bus *bus, in
  453. ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
  454. phy_data;
  455. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  456. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  457. ;
  458. - ltq_etop_w32(val, LTQ_ETOP_MDIO);
  459. + ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
  460. return 0;
  461. }
  462. @@ -321,12 +500,12 @@ ltq_etop_mdio_rd(struct mii_bus *bus, in
  463. ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
  464. ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
  465. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  466. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  467. ;
  468. - ltq_etop_w32(val, LTQ_ETOP_MDIO);
  469. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  470. + ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
  471. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  472. ;
  473. - val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
  474. + val = ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_VAL_MASK;
  475. return val;
  476. }
  477. @@ -342,7 +521,10 @@ ltq_etop_mdio_probe(struct net_device *d
  478. struct ltq_etop_priv *priv = netdev_priv(dev);
  479. struct phy_device *phydev;
  480. - phydev = phy_find_first(priv->mii_bus);
  481. + if (of_machine_is_compatible("lantiq,ase"))
  482. + phydev = mdiobus_get_phy(priv->mii_bus, 8);
  483. + else
  484. + phydev = mdiobus_get_phy(priv->mii_bus, 0);
  485. if (!phydev) {
  486. netdev_err(dev, "no PHY found\n");
  487. @@ -350,14 +532,17 @@ ltq_etop_mdio_probe(struct net_device *d
  488. }
  489. phydev = phy_connect(dev, phydev_name(phydev),
  490. - &ltq_etop_mdio_link, priv->pldata->mii_mode);
  491. + &ltq_etop_mdio_link, priv->mii_mode);
  492. if (IS_ERR(phydev)) {
  493. netdev_err(dev, "Could not attach to PHY\n");
  494. return PTR_ERR(phydev);
  495. }
  496. - phy_set_max_speed(phydev, SPEED_100);
  497. + if (of_machine_is_compatible("lantiq,ar9"))
  498. + phy_set_max_speed(phydev, SPEED_1000);
  499. + else
  500. + phy_set_max_speed(phydev, SPEED_100);
  501. phy_attached_info(phydev);
  502. @@ -378,8 +563,13 @@ ltq_etop_mdio_init(struct net_device *de
  503. }
  504. priv->mii_bus->priv = dev;
  505. - priv->mii_bus->read = ltq_etop_mdio_rd;
  506. - priv->mii_bus->write = ltq_etop_mdio_wr;
  507. + if (of_machine_is_compatible("lantiq,ar9")) {
  508. + priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
  509. + priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
  510. + } else {
  511. + priv->mii_bus->read = ltq_etop_mdio_rd;
  512. + priv->mii_bus->write = ltq_etop_mdio_wr;
  513. + }
  514. priv->mii_bus->name = "ltq_mii";
  515. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  516. priv->pdev->name, priv->pdev->id);
  517. @@ -416,18 +606,21 @@ static int
  518. ltq_etop_open(struct net_device *dev)
  519. {
  520. struct ltq_etop_priv *priv = netdev_priv(dev);
  521. - int i;
  522. + unsigned long flags;
  523. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  524. - struct ltq_etop_chan *ch = &priv->ch[i];
  525. + napi_enable(&priv->txch.napi);
  526. + napi_enable(&priv->rxch.napi);
  527. +
  528. + spin_lock_irqsave(&priv->lock, flags);
  529. + ltq_dma_open(&priv->txch.dma);
  530. + ltq_dma_enable_irq(&priv->txch.dma);
  531. + ltq_dma_open(&priv->rxch.dma);
  532. + ltq_dma_enable_irq(&priv->rxch.dma);
  533. + spin_unlock_irqrestore(&priv->lock, flags);
  534. +
  535. + if (dev->phydev)
  536. + phy_start(dev->phydev);
  537. - if (!IS_TX(i) && (!IS_RX(i)))
  538. - continue;
  539. - ltq_dma_open(&ch->dma);
  540. - ltq_dma_enable_irq(&ch->dma);
  541. - napi_enable(&ch->napi);
  542. - }
  543. - phy_start(dev->phydev);
  544. netif_tx_start_all_queues(dev);
  545. return 0;
  546. }
  547. @@ -436,18 +629,19 @@ static int
  548. ltq_etop_stop(struct net_device *dev)
  549. {
  550. struct ltq_etop_priv *priv = netdev_priv(dev);
  551. - int i;
  552. + unsigned long flags;
  553. netif_tx_stop_all_queues(dev);
  554. - phy_stop(dev->phydev);
  555. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  556. - struct ltq_etop_chan *ch = &priv->ch[i];
  557. -
  558. - if (!IS_RX(i) && !IS_TX(i))
  559. - continue;
  560. - napi_disable(&ch->napi);
  561. - ltq_dma_close(&ch->dma);
  562. - }
  563. + if (dev->phydev)
  564. + phy_stop(dev->phydev);
  565. + napi_disable(&priv->txch.napi);
  566. + napi_disable(&priv->rxch.napi);
  567. +
  568. + spin_lock_irqsave(&priv->lock, flags);
  569. + ltq_dma_close(&priv->txch.dma);
  570. + ltq_dma_close(&priv->rxch.dma);
  571. + spin_unlock_irqrestore(&priv->lock, flags);
  572. +
  573. return 0;
  574. }
  575. @@ -457,16 +651,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
  576. int queue = skb_get_queue_mapping(skb);
  577. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
  578. struct ltq_etop_priv *priv = netdev_priv(dev);
  579. - struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
  580. - struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  581. - int len;
  582. + struct ltq_dma_desc *desc =
  583. + &priv->txch.dma.desc_base[priv->txch.dma.desc];
  584. unsigned long flags;
  585. u32 byte_offset;
  586. + int len;
  587. len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
  588. - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
  589. - dev_kfree_skb_any(skb);
  590. + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
  591. + priv->txch.skb[priv->txch.dma.desc]) {
  592. netdev_err(dev, "tx ring full\n");
  593. netif_tx_stop_queue(txq);
  594. return NETDEV_TX_BUSY;
  595. @@ -474,7 +668,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
  596. /* dma needs to start on a 16 byte aligned address */
  597. byte_offset = CPHYSADDR(skb->data) % 16;
  598. - ch->skb[ch->dma.desc] = skb;
  599. + priv->txch.skb[priv->txch.dma.desc] = skb;
  600. netif_trans_update(dev);
  601. @@ -484,11 +678,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
  602. wmb();
  603. desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
  604. LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
  605. - ch->dma.desc++;
  606. - ch->dma.desc %= LTQ_DESC_NUM;
  607. + priv->txch.dma.desc++;
  608. + priv->txch.dma.desc %= LTQ_DESC_NUM;
  609. spin_unlock_irqrestore(&priv->lock, flags);
  610. - if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
  611. + if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
  612. netif_tx_stop_queue(txq);
  613. return NETDEV_TX_OK;
  614. @@ -499,11 +693,14 @@ ltq_etop_change_mtu(struct net_device *d
  615. {
  616. struct ltq_etop_priv *priv = netdev_priv(dev);
  617. unsigned long flags;
  618. + int max;
  619. dev->mtu = new_mtu;
  620. + max = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
  621. +
  622. spin_lock_irqsave(&priv->lock, flags);
  623. - ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
  624. + ltq_etop_w32((ETOP_PLEN_UNDER << 16) | max, LTQ_ETOP_IGPLEN);
  625. spin_unlock_irqrestore(&priv->lock, flags);
  626. return 0;
  627. @@ -556,6 +753,9 @@ ltq_etop_init(struct net_device *dev)
  628. if (err)
  629. goto err_hw;
  630. ltq_etop_change_mtu(dev, 1500);
  631. + err = ltq_etop_dma_init(dev);
  632. + if (err)
  633. + goto err_hw;
  634. memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
  635. if (!is_valid_ether_addr(mac.sa_data)) {
  636. @@ -573,9 +773,10 @@ ltq_etop_init(struct net_device *dev)
  637. dev->addr_assign_type = NET_ADDR_RANDOM;
  638. ltq_etop_set_multicast_list(dev);
  639. - err = ltq_etop_mdio_init(dev);
  640. - if (err)
  641. - goto err_netdev;
  642. + if (!ltq_etop_mdio_init(dev))
  643. + dev->ethtool_ops = &ltq_etop_ethtool_ops;
  644. + else
  645. + pr_warn("etop: mdio probe failed\n");;
  646. return 0;
  647. err_netdev:
  648. @@ -595,6 +796,9 @@ ltq_etop_tx_timeout(struct net_device *d
  649. err = ltq_etop_hw_init(dev);
  650. if (err)
  651. goto err_hw;
  652. + err = ltq_etop_dma_init(dev);
  653. + if (err)
  654. + goto err_hw;
  655. netif_trans_update(dev);
  656. netif_wake_queue(dev);
  657. return;
  658. @@ -618,14 +822,18 @@ static const struct net_device_ops ltq_e
  659. .ndo_tx_timeout = ltq_etop_tx_timeout,
  660. };
  661. -static int __init
  662. -ltq_etop_probe(struct platform_device *pdev)
  663. +static int ltq_etop_probe(struct platform_device *pdev)
  664. {
  665. struct net_device *dev;
  666. struct ltq_etop_priv *priv;
  667. - struct resource *res;
  668. + struct resource *res, *gbit_res, irqres[2];
  669. int err;
  670. - int i;
  671. +
  672. + err = of_irq_to_resource_table(pdev->dev.of_node, irqres, 2);
  673. + if (err != 2) {
  674. + dev_err(&pdev->dev, "failed to get etop irqs\n");
  675. + return -EINVAL;
  676. + }
  677. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  678. if (!res) {
  679. @@ -651,31 +859,62 @@ ltq_etop_probe(struct platform_device *p
  680. goto err_out;
  681. }
  682. - dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
  683. - if (!dev) {
  684. - err = -ENOMEM;
  685. - goto err_out;
  686. + if (of_machine_is_compatible("lantiq,ar9")) {
  687. + gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  688. + if (!gbit_res) {
  689. + dev_err(&pdev->dev, "failed to get gbit resource\n");
  690. + err = -ENOENT;
  691. + goto err_out;
  692. + }
  693. + ltq_gbit_membase = devm_ioremap(&pdev->dev,
  694. + gbit_res->start, resource_size(gbit_res));
  695. + if (!ltq_gbit_membase) {
  696. + dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
  697. + pdev->id);
  698. + err = -ENOMEM;
  699. + goto err_out;
  700. + }
  701. }
  702. +
  703. + dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
  704. strcpy(dev->name, "eth%d");
  705. dev->netdev_ops = &ltq_eth_netdev_ops;
  706. - dev->ethtool_ops = &ltq_etop_ethtool_ops;
  707. priv = netdev_priv(dev);
  708. priv->res = res;
  709. priv->pdev = pdev;
  710. - priv->pldata = dev_get_platdata(&pdev->dev);
  711. priv->netdev = dev;
  712. + priv->tx_irq = irqres[0].start;
  713. + priv->rx_irq = irqres[1].start;
  714. + err = of_get_phy_mode(pdev->dev.of_node, &priv->mii_mode);
  715. + if (err)
  716. + pr_err("Can't find phy-mode for port\n");
  717. +
  718. + of_get_mac_address(pdev->dev.of_node, priv->mac);
  719. +
  720. + priv->clk_ppe = clk_get(&pdev->dev, NULL);
  721. + if (IS_ERR(priv->clk_ppe))
  722. + return PTR_ERR(priv->clk_ppe);
  723. + if (of_machine_is_compatible("lantiq,ar9")) {
  724. + priv->clk_switch = clk_get(&pdev->dev, "switch");
  725. + if (IS_ERR(priv->clk_switch))
  726. + return PTR_ERR(priv->clk_switch);
  727. + }
  728. + if (of_machine_is_compatible("lantiq,ase")) {
  729. + priv->clk_ephy = clk_get(&pdev->dev, "ephy");
  730. + if (IS_ERR(priv->clk_ephy))
  731. + return PTR_ERR(priv->clk_ephy);
  732. + priv->clk_ephycgu = clk_get(&pdev->dev, "ephycgu");
  733. + if (IS_ERR(priv->clk_ephycgu))
  734. + return PTR_ERR(priv->clk_ephycgu);
  735. + }
  736. +
  737. spin_lock_init(&priv->lock);
  738. SET_NETDEV_DEV(dev, &pdev->dev);
  739. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  740. - if (IS_TX(i))
  741. - netif_napi_add(dev, &priv->ch[i].napi,
  742. - ltq_etop_poll_tx, 8);
  743. - else if (IS_RX(i))
  744. - netif_napi_add(dev, &priv->ch[i].napi,
  745. - ltq_etop_poll_rx, 32);
  746. - priv->ch[i].netdev = dev;
  747. - }
  748. + netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
  749. + netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
  750. + priv->txch.netdev = dev;
  751. + priv->rxch.netdev = dev;
  752. err = register_netdev(dev);
  753. if (err)
  754. @@ -704,31 +943,22 @@ ltq_etop_remove(struct platform_device *
  755. return 0;
  756. }
  757. +static const struct of_device_id ltq_etop_match[] = {
  758. + { .compatible = "lantiq,etop-xway" },
  759. + {},
  760. +};
  761. +MODULE_DEVICE_TABLE(of, ltq_etop_match);
  762. +
  763. static struct platform_driver ltq_mii_driver = {
  764. + .probe = ltq_etop_probe,
  765. .remove = ltq_etop_remove,
  766. .driver = {
  767. .name = "ltq_etop",
  768. + .of_match_table = ltq_etop_match,
  769. },
  770. };
  771. -int __init
  772. -init_ltq_etop(void)
  773. -{
  774. - int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
  775. -
  776. - if (ret)
  777. - pr_err("ltq_etop: Error registering platform driver!");
  778. - return ret;
  779. -}
  780. -
  781. -static void __exit
  782. -exit_ltq_etop(void)
  783. -{
  784. - platform_driver_unregister(&ltq_mii_driver);
  785. -}
  786. -
  787. -module_init(init_ltq_etop);
  788. -module_exit(exit_ltq_etop);
  789. +module_platform_driver(ltq_mii_driver);
  790. MODULE_AUTHOR("John Crispin <[email protected]>");
  791. MODULE_DESCRIPTION("Lantiq SoC ETOP");