ramips_main.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <ramips_eth_platform.h>
#include "ramips_eth.h"

#define TX_TIMEOUT	(20 * HZ / 100)
#define MAX_RX_LENGTH	1600

#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
#else
static inline int rt305x_esw_init(void) { return 0; }
static inline void rt305x_esw_exit(void) { }
#endif

#define phys_to_bus(a)	(a & 0x1FFFFFFF)

#ifdef CONFIG_RAMIPS_ETH_DEBUG
#define RADEBUG(fmt, args...)	printk(KERN_DEBUG fmt, ## args)
#else
#define RADEBUG(fmt, args...)	do {} while (0)
#endif

static struct net_device *ramips_dev;
static void __iomem *ramips_fe_base = 0;

static inline void
ramips_fe_wr(u32 val, unsigned reg)
{
	__raw_writel(val, ramips_fe_base + reg);
}

static inline u32
ramips_fe_rr(unsigned reg)
{
	return __raw_readl(ramips_fe_base + reg);
}

static inline void
ramips_fe_int_disable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static inline void
ramips_fe_int_enable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static inline void
ramips_hw_set_macaddr(unsigned char *mac)
{
	ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
	ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		     RAMIPS_GDMA1_MAC_ADRL);
}

static struct sk_buff *
ramips_alloc_skb(struct raeth_priv *re)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(re->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
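
/*
 * Reset both descriptor rings: every TX descriptor is marked as done and
 * therefore free for software to use, and every RX descriptor gets its
 * preallocated skb mapped for DMA so the hardware can start receiving.
 */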
static void
ramips_ring_setup(struct raeth_priv *re)
{
	int len;
	int i;

	memset(re->tx_info, 0, NUM_TX_DESC * sizeof(struct raeth_tx_info));

	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	memset(re->tx, 0, len);
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct raeth_tx_info *txi;
		struct ramips_tx_dma *txd;

		txd = &re->tx[i];
		txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
		txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;

		txi = &re->tx_info[i];
		txi->tx_desc = txd;
		if (txi->tx_skb != NULL) {
			netdev_warn(re->netdev,
				    "dirty skb for TX desc %d\n", i);
			txi->tx_skb = NULL;
		}
	}

	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	memset(re->rx, 0, len);
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct raeth_rx_info *rxi;
		struct ramips_rx_dma *rxd;
		dma_addr_t dma_addr;

		rxd = &re->rx[i];
		rxi = &re->rx_info[i];
		BUG_ON(rxi->rx_skb == NULL);

		dma_addr = dma_map_single(&re->netdev->dev, rxi->rx_skb->data,
					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
		rxi->rx_dma = dma_addr;
		rxi->rx_desc = rxd;

		rxd->rxd1 = (unsigned int) dma_addr;
		rxd->rxd2 = RX_DMA_LSO;
	}

	/* flush descriptors */
	wmb();
}

static void
ramips_ring_cleanup(struct raeth_priv *re)
{
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		struct raeth_rx_info *rxi;

		rxi = &re->rx_info[i];
		if (rxi->rx_skb)
			dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct raeth_tx_info *txi;

		txi = &re->tx_info[i];
		if (txi->tx_skb) {
			dev_kfree_skb_any(txi->tx_skb);
			txi->tx_skb = NULL;
		}
	}
}

#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)

#define RAMIPS_MDIO_RETRY	1000

static unsigned char *ramips_speed_str(struct raeth_priv *re)
{
	switch (re->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ramips_link_adjust(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;
	u32 mdio_cfg;

	pdata = re->parent->platform_data;
	if (!re->link) {
		netif_carrier_off(re->netdev);
		netdev_info(re->netdev, "link down\n");
		return;
	}

	mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_GP1_FRC_EN;

	if (re->duplex == DUPLEX_FULL)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;

	if (re->tx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;

	if (re->rx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;

	switch (re->speed) {
	case SPEED_10:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
		break;
	case SPEED_100:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
		break;
	case SPEED_1000:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
		break;
	default:
		BUG();
	}

	ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);

	netif_carrier_on(re->netdev);
	netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
		    ramips_speed_str(re),
		    (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
}

static int
ramips_mdio_wait_ready(struct raeth_priv *re)
{
	int retries;

	retries = RAMIPS_MDIO_RETRY;
	while (1) {
		u32 t;

		t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
		if ((t & (0x1 << 31)) == 0)
			return 0;

		if (retries-- == 0)
			break;

		udelay(1);
	}

	dev_err(re->parent, "MDIO operation timed out\n");
	return -ETIMEDOUT;
}
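
/*
 * MDIO access helpers: the access register packs the PHY address at bit 24,
 * the register number at bit 16 and the data in the low 16 bits; bit 30
 * selects a write and bit 31 starts the transaction (and reads back as the
 * busy flag polled by ramips_mdio_wait_ready above).
 */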
static int
ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct raeth_priv *re = bus->priv;
	int err;
	u32 t;

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	t = (phy_addr << 24) | (phy_reg << 16);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
	t |= (1 << 31);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
		phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

	return ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
}

static int
ramips_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
{
	struct raeth_priv *re = bus->priv;
	int err;
	u32 t;

	RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
		phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

	err = ramips_mdio_wait_ready(re);
	if (err)
		return err;

	t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val;
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
	t |= (1 << 31);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

	return ramips_mdio_wait_ready(re);
}

static int
ramips_mdio_reset(struct mii_bus *bus)
{
	/* TODO */
	return 0;
}

static int
ramips_mdio_init(struct raeth_priv *re)
{
	int err;
	int i;

	re->mii_bus = mdiobus_alloc();
	if (re->mii_bus == NULL)
		return -ENOMEM;

	re->mii_bus->name = "ramips_mdio";
	re->mii_bus->read = ramips_mdio_read;
	re->mii_bus->write = ramips_mdio_write;
	re->mii_bus->reset = ramips_mdio_reset;
	re->mii_bus->irq = re->mii_irq;
	re->mii_bus->priv = re;
	re->mii_bus->parent = re->parent;

	snprintf(re->mii_bus->id, MII_BUS_ID_SIZE, "%s", "ramips_mdio");
	re->mii_bus->phy_mask = 0;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		re->mii_irq[i] = PHY_POLL;

	err = mdiobus_register(re->mii_bus);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	kfree(re->mii_bus);
	return err;
}

static void
ramips_mdio_cleanup(struct raeth_priv *re)
{
	mdiobus_unregister(re->mii_bus);
	kfree(re->mii_bus);
}

static void
ramips_phy_link_adjust(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct phy_device *phydev = re->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&re->phy_lock, flags);

	if (phydev->link)
		if (re->duplex != phydev->duplex ||
		    re->speed != phydev->speed)
			status_change = 1;

	if (phydev->link != re->link)
		status_change = 1;

	re->link = phydev->link;
	re->duplex = phydev->duplex;
	re->speed = phydev->speed;

	if (status_change)
		ramips_link_adjust(re);

	spin_unlock_irqrestore(&re->phy_lock, flags);
}

static int
ramips_phy_connect_multi(struct raeth_priv *re)
{
	struct net_device *netdev = re->netdev;
	struct ramips_eth_platform_data *pdata;
	struct phy_device *phydev = NULL;
	int phy_addr;
	int ret = 0;

	pdata = re->parent->platform_data;
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (!(pdata->phy_mask & (1 << phy_addr)))
			continue;

		if (re->mii_bus->phy_map[phy_addr] == NULL)
			continue;

		RADEBUG("%s: PHY found at %s, uid=%08x\n",
			netdev->name,
			dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
			re->mii_bus->phy_map[phy_addr]->phy_id);

		if (phydev == NULL)
			phydev = re->mii_bus->phy_map[phy_addr];
	}

	if (!phydev) {
		netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
			   pdata->phy_mask);
		return -ENODEV;
	}

	re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
				  ramips_phy_link_adjust, 0,
				  pdata->phy_if_mode);

	if (IS_ERR(re->phy_dev)) {
		netdev_err(netdev, "could not connect to PHY at %s\n",
			   dev_name(&phydev->dev));
		return PTR_ERR(re->phy_dev);
	}

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
		netdev->name, dev_name(&phydev->dev),
		phydev->phy_id, phydev->drv->name);

	re->link = 0;
	re->speed = 0;
	re->duplex = -1;
	re->rx_fc = 0;
	re->tx_fc = 0;

	return ret;
}

static int
ramips_phy_connect_fixed(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;

	pdata = re->parent->platform_data;
	switch (pdata->speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		break;
	default:
		netdev_err(re->netdev, "invalid speed specified\n");
		return -EINVAL;
	}

	RADEBUG("%s: using fixed link parameters\n", re->netdev->name);

	re->speed = pdata->speed;
	re->duplex = pdata->duplex;
	re->tx_fc = pdata->tx_fc;
	re->rx_fc = pdata->rx_fc;

	return 0;
}

static int
ramips_phy_connect(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;

	pdata = re->parent->platform_data;
	if (pdata->phy_mask)
		return ramips_phy_connect_multi(re);

	return ramips_phy_connect_fixed(re);
}

static void
ramips_phy_disconnect(struct raeth_priv *re)
{
	if (re->phy_dev)
		phy_disconnect(re->phy_dev);
}

static void
ramips_phy_start(struct raeth_priv *re)
{
	unsigned long flags;

	if (re->phy_dev) {
		phy_start(re->phy_dev);
	} else {
		spin_lock_irqsave(&re->phy_lock, flags);
		re->link = 1;
		ramips_link_adjust(re);
		spin_unlock_irqrestore(&re->phy_lock, flags);
	}
}

static void
ramips_phy_stop(struct raeth_priv *re)
{
	unsigned long flags;

	if (re->phy_dev)
		phy_stop(re->phy_dev);

	spin_lock_irqsave(&re->phy_lock, flags);
	re->link = 0;
	ramips_link_adjust(re);
	spin_unlock_irqrestore(&re->phy_lock, flags);
}

#else

static inline int
ramips_mdio_init(struct raeth_priv *re)
{
	return 0;
}

static inline void
ramips_mdio_cleanup(struct raeth_priv *re)
{
}

static inline int
ramips_phy_connect(struct raeth_priv *re)
{
	return 0;
}

static inline void
ramips_phy_disconnect(struct raeth_priv *re)
{
}

static inline void
ramips_phy_start(struct raeth_priv *re)
{
}

static inline void
ramips_phy_stop(struct raeth_priv *re)
{
}
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */

static void
ramips_ring_free(struct raeth_priv *re)
{
	int len;
	int i;

	if (re->rx_info) {
		for (i = 0; i < NUM_RX_DESC; i++) {
			struct raeth_rx_info *rxi;

			rxi = &re->rx_info[i];
			if (rxi->rx_skb)
				dev_kfree_skb_any(rxi->rx_skb);
		}
		kfree(re->rx_info);
	}

	if (re->rx) {
		len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
		dma_free_coherent(&re->netdev->dev, len, re->rx,
				  re->rx_desc_dma);
	}

	if (re->tx) {
		len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
		dma_free_coherent(&re->netdev->dev, len, re->tx,
				  re->tx_desc_dma);
	}

	kfree(re->tx_info);
}

static int
ramips_ring_alloc(struct raeth_priv *re)
{
	int len;
	int err = -ENOMEM;
	int i;

	re->tx_info = kzalloc(NUM_TX_DESC * sizeof(struct raeth_tx_info),
			      GFP_ATOMIC);
	if (!re->tx_info)
		goto err_cleanup;

	re->rx_info = kzalloc(NUM_RX_DESC * sizeof(struct raeth_rx_info),
			      GFP_ATOMIC);
	if (!re->rx_info)
		goto err_cleanup;

	/* allocate tx ring */
	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	re->tx = dma_alloc_coherent(&re->netdev->dev, len,
				    &re->tx_desc_dma, GFP_ATOMIC);
	if (!re->tx)
		goto err_cleanup;

	/* allocate rx ring */
	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	re->rx = dma_alloc_coherent(&re->netdev->dev, len,
				    &re->rx_desc_dma, GFP_ATOMIC);
	if (!re->rx)
		goto err_cleanup;

	for (i = 0; i < NUM_RX_DESC; i++) {
		struct sk_buff *skb;

		skb = ramips_alloc_skb(re);
		if (!skb)
			goto err_cleanup;

		re->rx_info[i].rx_skb = skb;
	}

	return 0;

err_cleanup:
	ramips_ring_free(re);
	return err;
}

static void
ramips_setup_dma(struct raeth_priv *re)
{
	ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}
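
/*
 * TX path: read the hardware's current TX index, make sure both the current
 * and the next descriptor are free, map the skb, fill in the descriptor and
 * advance the TX index register to hand the packet to the DMA engine.
 */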
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct raeth_tx_info *txi, *txi_next;
	struct ramips_tx_dma *txd, *txd_next;
	unsigned long tx;
	unsigned int tx_next;
	dma_addr_t mapped_addr;

	if (re->plat->min_pkt_len) {
		if (skb->len < re->plat->min_pkt_len) {
			if (skb_padto(skb, re->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				kfree_skb(skb);
				return 0;
			}
			skb_put(skb, re->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	spin_lock(&re->page_lock);

	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	txi = &re->tx_info[tx];
	txd = txi->tx_desc;
	txi_next = &re->tx_info[tx_next];
	txd_next = txi_next->tx_desc;

	if ((txi->tx_skb) || (txi_next->tx_skb) ||
	    !(txd->txd2 & TX_DMA_DONE) ||
	    !(txd_next->txd2 & TX_DMA_DONE))
		goto out;

	txi->tx_skb = skb;

	txd->txd1 = (unsigned int) mapped_addr;
	wmb();
	txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);

	spin_unlock(&re->page_lock);
	return NETDEV_TX_OK;

out:
	spin_unlock(&re->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
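
/*
 * RX tasklet: walk the ring from the last index the hardware reported and
 * hand up to 16 completed packets to the stack (reusing the old buffer when
 * a replacement skb cannot be allocated), then either reschedule itself or
 * re-enable the delayed RX interrupt.
 */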
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *re = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	rx = ramips_fe_rr(RAMIPS_RX_CALC_IDX0);
	while (max_rx) {
		struct raeth_rx_info *rxi;
		struct ramips_rx_dma *rxd;
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (rx + 1) % NUM_RX_DESC;

		rxi = &re->rx_info[rx];
		rxd = rxi->rx_desc;
		if (!(rxd->rxd2 & RX_DMA_DONE))
			break;

		rx_skb = rxi->rx_skb;
		pktlen = RX_DMA_PLEN0(rxd->rxd2);

		new_skb = ramips_alloc_skb(re);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			rxi->rx_skb = new_skb;

			dma_addr = dma_map_single(&re->netdev->dev,
						  new_skb->data,
						  MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			rxi->rx_dma = dma_addr;
			rxd->rxd1 = (unsigned int) dma_addr;
			wmb();
		} else {
			dev->stats.rx_dropped++;
		}

		rxd->rxd2 = RX_DMA_LSO;
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
		max_rx--;
	}

	if (max_rx == 0)
		tasklet_schedule(&re->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}

static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *re = netdev_priv(dev);

	spin_lock(&re->page_lock);
	while (1) {
		struct raeth_tx_info *txi;
		struct ramips_tx_dma *txd;

		txi = &re->tx_info[re->skb_free_idx];
		txd = txi->tx_desc;

		if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
			break;

		dev_kfree_skb_irq(txi->tx_skb);
		txi->tx_skb = NULL;

		re->skb_free_idx++;
		if (re->skb_free_idx >= NUM_TX_DESC)
			re->skb_free_idx = 0;
	}
	spin_unlock(&re->page_lock);

	ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}

static void
ramips_eth_timeout(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	tasklet_schedule(&re->tx_housekeeping_tasklet);
}
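
/*
 * Interrupt handler: acknowledge the pending status bits, then mask the
 * delayed TX/RX interrupts and defer the actual work to the tasklets.
 */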
static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	unsigned int status;

	status = ramips_fe_rr(RAMIPS_FE_INT_STATUS);
	status &= ramips_fe_rr(RAMIPS_FE_INT_ENABLE);

	if (!status)
		return IRQ_NONE;

	ramips_fe_wr(status, RAMIPS_FE_INT_STATUS);

	if (status & RAMIPS_RX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
		tasklet_schedule(&re->rx_tasklet);
	}

	if (status & RAMIPS_TX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
		tasklet_schedule(&re->tx_housekeeping_tasklet);
	}

	raeth_debugfs_update_int_stats(re, status);

	return IRQ_HANDLED;
}

static int
ramips_eth_open(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_ring_alloc(re);
	if (err)
		goto err_free_irq;

	ramips_ring_setup(re);
	ramips_hw_set_macaddr(dev->dev_addr);

	ramips_setup_dma(re);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
		     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
		      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
		     RAMIPS_PDMA_GLO_CFG);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long)dev);
	tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

	ramips_phy_start(re);

	ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
	ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
		     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
		     RAMIPS_GDMA1_FWD_CFG);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
		     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
		     RAMIPS_CDMA_CSG_CFG);
	ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	netif_start_queue(dev);
	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
		     RAMIPS_PDMA_GLO_CFG);

	/* disable all interrupts in the hw */
	ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);

	ramips_phy_stop(re);
	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	tasklet_kill(&re->tx_housekeeping_tasklet);
	tasklet_kill(&re->rx_tasklet);
	ramips_ring_cleanup(re);
	ramips_ring_free(re);
	RADEBUG("ramips_eth: stopped\n");
	return 0;
}

static int __init
ramips_eth_probe(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	int err;

	BUG_ON(!re->plat->reset_fe);
	re->plat->reset_fe();
	net_srandom(jiffies);
	memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);

	ether_setup(dev);
	dev->mtu = 1500;
	dev->watchdog_timeo = TX_TIMEOUT;
	spin_lock_init(&re->page_lock);
	spin_lock_init(&re->phy_lock);

	err = ramips_mdio_init(re);
	if (err)
		return err;

	err = ramips_phy_connect(re);
	if (err)
		goto err_mdio_cleanup;

	err = raeth_debugfs_init(re);
	if (err)
		goto err_phy_disconnect;

	return 0;

err_phy_disconnect:
	ramips_phy_disconnect(re);
err_mdio_cleanup:
	ramips_mdio_cleanup(re);
	return err;
}

static void
ramips_eth_uninit(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	raeth_debugfs_exit(re);
	ramips_phy_disconnect(re);
	ramips_mdio_cleanup(re);
}

static const struct net_device_ops ramips_eth_netdev_ops = {
	.ndo_init		= ramips_eth_probe,
	.ndo_uninit		= ramips_eth_uninit,
	.ndo_open		= ramips_eth_open,
	.ndo_stop		= ramips_eth_stop,
	.ndo_start_xmit		= ramips_eth_hard_start_xmit,
	.ndo_tx_timeout		= ramips_eth_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int
ramips_eth_plat_probe(struct platform_device *plat)
{
	struct raeth_priv *re;
	struct ramips_eth_platform_data *data = plat->dev.platform_data;
	struct resource *res;
	int err;

	if (!data) {
		dev_err(&plat->dev, "no platform data specified\n");
		return -EINVAL;
	}

	res = platform_get_resource(plat, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&plat->dev, "no memory resource found\n");
		return -ENXIO;
	}

	ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ramips_fe_base)
		return -ENOMEM;

	ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
	if (!ramips_dev) {
		dev_err(&plat->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_unmap;
	}

	strcpy(ramips_dev->name, "eth%d");
	ramips_dev->irq = platform_get_irq(plat, 0);
	if (ramips_dev->irq < 0) {
		dev_err(&plat->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}
	ramips_dev->addr_len = ETH_ALEN;
	ramips_dev->base_addr = (unsigned long)ramips_fe_base;
	ramips_dev->netdev_ops = &ramips_eth_netdev_ops;

	re = netdev_priv(ramips_dev);
	re->netdev = ramips_dev;
	re->parent = &plat->dev;
	re->speed = data->speed;
	re->duplex = data->duplex;
	re->rx_fc = data->rx_fc;
	re->tx_fc = data->tx_fc;
	re->plat = data;

	err = register_netdev(ramips_dev);
	if (err) {
		dev_err(&plat->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	RADEBUG("ramips_eth: loaded\n");
	return 0;

err_free_dev:
	free_netdev(ramips_dev);
err_unmap:
	iounmap(ramips_fe_base);
	return err;
}

static int
ramips_eth_plat_remove(struct platform_device *plat)
{
	unregister_netdev(ramips_dev);
	free_netdev(ramips_dev);
	RADEBUG("ramips_eth: unloaded\n");
	return 0;
}

static struct platform_driver ramips_eth_driver = {
	.probe = ramips_eth_plat_probe,
	.remove = ramips_eth_plat_remove,
	.driver = {
		.name = "ramips_eth",
		.owner = THIS_MODULE,
	},
};

static int __init
ramips_eth_init(void)
{
	int ret;

	ret = raeth_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = rt305x_esw_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ramips_eth_driver);
	if (ret) {
		printk(KERN_ERR
		       "ramips_eth: Error registering platform driver!\n");
		goto esw_cleanup;
	}

	return 0;

esw_cleanup:
	rt305x_esw_exit();
err_debugfs_exit:
	raeth_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit
ramips_eth_cleanup(void)
{
	platform_driver_unregister(&ramips_eth_driver);
	rt305x_esw_exit();
	raeth_debugfs_root_exit();
}

module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("ethernet driver for ramips boards");