  1. /*
  2. * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all copies.
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  10. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  12. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
  13. * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/bitfield.h>
  16. #include <linux/module.h>
  17. #include <linux/list.h>
  18. #include <linux/bitops.h>
  19. #include <linux/switch.h>
  20. #include <linux/delay.h>
  21. #include <linux/phy.h>
  22. #include <linux/clk.h>
  23. #include <linux/reset.h>
  24. #include <linux/lockdep.h>
  25. #include <linux/workqueue.h>
  26. #include <linux/of_device.h>
  27. #include <linux/of_address.h>
  28. #include <linux/of_mdio.h>
  29. #include <linux/mdio.h>
  30. #include <linux/gpio.h>
  31. #include "ar40xx.h"
/* Singleton driver state, assigned at probe time. */
static struct ar40xx_priv *ar40xx_priv;
/* Initializer shorthand for an ar40xx_mib_desc entry:
 * _s = counter width in 32-bit words, _o = register offset within the
 * per-port stats window, _n = counter name shown to userspace.
 */
#define MIB_DESC(_s , _o, _n) \
{ \
	.size = (_s), \
	.offset = (_o), \
	.name = (_n), \
}
/* Hardware MIB counter layout. Entries with size 2 are 64-bit counters
 * split across two consecutive 32-bit registers (low word first).
 */
static const struct ar40xx_mib_desc ar40xx_mibs[] = {
	MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
	MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
	MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
	MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
	MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
	MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
	MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
	MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
	MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
	MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
	MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
	MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
	MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
	MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
	MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
	MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
	MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
	MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
	MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
	MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
	MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
	MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
	MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
	MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
	MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
	MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
	MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
	MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
	MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
	MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
	MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
};
  80. static u32
  81. ar40xx_read(struct ar40xx_priv *priv, int reg)
  82. {
  83. return readl(priv->hw_addr + reg);
  84. }
  85. static u32
  86. ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
  87. {
  88. return readl(priv->psgmii_hw_addr + reg);
  89. }
  90. static void
  91. ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
  92. {
  93. writel(val, priv->hw_addr + reg);
  94. }
  95. static u32
  96. ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
  97. {
  98. u32 ret;
  99. ret = ar40xx_read(priv, reg);
  100. ret &= ~mask;
  101. ret |= val;
  102. ar40xx_write(priv, reg, ret);
  103. return ret;
  104. }
  105. static void
  106. ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
  107. {
  108. writel(val, priv->psgmii_hw_addr + reg);
  109. }
/* Write @dbg_data to PHY debug register @dbg_addr of @phy_addr.
 * The debug space is reached indirectly: the target address is latched
 * through AR40XX_MII_ATH_DBG_ADDR, then the data goes through
 * AR40XX_MII_ATH_DBG_DATA. The two MDIO writes must stay paired, so
 * both happen under the bus lock.
 */
static void
ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
		     u16 dbg_addr, u16 dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
	mutex_unlock(&bus->mdio_lock);
}
/* Read PHY debug register @dbg_addr of @phy_addr into *@dbg_data.
 * Same indirect address-then-data sequence as ar40xx_phy_dbg_write();
 * the pair is kept atomic with respect to other MDIO traffic via the
 * bus lock.
 */
static void
ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
		    u16 dbg_addr, u16 *dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	*dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
	mutex_unlock(&bus->mdio_lock);
}
/* Write @reg_val to MMD register @reg_id in MMD device @mmd_num of PHY
 * @phy_id, using the Clause-22 indirect MMD access sequence: latch the
 * device number, latch the register, switch the address register to
 * data mode (0x4000 | mmd_num), then write the value. The four MDIO
 * writes must not be interleaved with other bus traffic, hence the
 * bus lock around the whole sequence.
 */
static void
ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
		     u16 mmd_num, u16 reg_id, u16 reg_val)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_val);
	mutex_unlock(&bus->mdio_lock);
}
/* Read MMD register @reg_id in MMD device @mmd_num of PHY @phy_id.
 * Mirror image of ar40xx_phy_mmd_write(): same indirect latch sequence,
 * finishing with a data-register read instead of a write. Held under
 * the bus lock so the sequence cannot be interleaved.
 */
static u16
ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
		    u16 mmd_num, u16 reg_id)
{
	u16 value;
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
	mutex_unlock(&bus->mdio_lock);

	return value;
}
  165. /* Start of swconfig support */
  166. static void
  167. ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
  168. {
  169. u32 i, in_reset, retries = 500;
  170. struct mii_bus *bus = priv->mii_bus;
  171. /* Assume RESET was recently issued to some or all of the phys */
  172. in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
  173. while (retries--) {
  174. /* 1ms should be plenty of time.
  175. * 802.3 spec allows for a max wait time of 500ms
  176. */
  177. usleep_range(1000, 2000);
  178. for (i = 0; i < AR40XX_NUM_PHYS; i++) {
  179. int val;
  180. /* skip devices which have completed reset */
  181. if (!(in_reset & BIT(i)))
  182. continue;
  183. val = mdiobus_read(bus, i, MII_BMCR);
  184. if (val < 0)
  185. continue;
  186. /* mark when phy is no longer in reset state */
  187. if (!(val & BMCR_RESET))
  188. in_reset &= ~BIT(i);
  189. }
  190. if (!in_reset)
  191. return;
  192. }
  193. dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
  194. in_reset);
  195. }
/* Bring all LAN PHYs back up: drop manual control, advertise full
 * 10/100/1000 with pause, kick a soft reset with autoneg enabled, and
 * wait for the resets to complete. Iterates AR40XX_NUM_PORTS - 1 PHYs
 * (the CPU port has no PHY).
 */
static void
ar40xx_phy_init(struct ar40xx_priv *priv)
{
	int i;
	struct mii_bus *bus;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		/* clear manual-control override so the PHY runs normally */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val &= ~AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		mdiobus_write(bus, i,
			      MII_ADVERTISE, ADVERTISE_ALL |
			      ADVERTISE_PAUSE_CAP |
			      ADVERTISE_PAUSE_ASYM);
		mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
	}
	ar40xx_phy_poll_reset(priv);
}
/* Force all LAN PHYs into a link-down state: clear all advertisement,
 * reset, enable the manual-control override and mask off the transmit
 * bits in debug register 2. Undone by ar40xx_phy_init().
 */
static void
ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
{
	struct mii_bus *bus;
	int i;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		/* advertise nothing so autoneg cannot bring the link up */
		mdiobus_write(bus, i, MII_CTRL1000, 0);
		mdiobus_write(bus, i, MII_ADVERTISE, 0);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val |= AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		/* disable transmit */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
		val &= 0xf00f;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
	}
}
/* Program port mirroring from priv->{source,monitor}_port and
 * priv->mirror_{rx,tx}: clear all mirror state first, then re-enable it
 * only when both ports are in range and distinct. Caller holds
 * reg_mutex (all callers in this file take it first).
 */
static void
ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
{
	int port;

	/* reset all mirror registers */
	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
	for (port = 0; port < AR40XX_NUM_PORTS; port++) {
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
			   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
	}
	/* now enable mirroring if necessary */
	if (priv->source_port >= AR40XX_NUM_PORTS ||
	    priv->monitor_port >= AR40XX_NUM_PORTS ||
	    priv->source_port == priv->monitor_port) {
		return;
	}
	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
	if (priv->mirror_rx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
	if (priv->mirror_tx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
			   0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
}
  266. static int
  267. ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
  268. {
  269. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  270. u8 ports = priv->vlan_table[val->port_vlan];
  271. int i;
  272. val->len = 0;
  273. for (i = 0; i < dev->ports; i++) {
  274. struct switch_port *p;
  275. if (!(ports & BIT(i)))
  276. continue;
  277. p = &val->value.ports[val->len++];
  278. p->id = i;
  279. if ((priv->vlan_tagged & BIT(i)) ||
  280. (priv->pvid[i] != val->port_vlan))
  281. p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
  282. else
  283. p->flags = 0;
  284. }
  285. return 0;
  286. }
  287. static int
  288. ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
  289. {
  290. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  291. u8 *vt = &priv->vlan_table[val->port_vlan];
  292. int i;
  293. *vt = 0;
  294. for (i = 0; i < val->len; i++) {
  295. struct switch_port *p = &val->value.ports[i];
  296. if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
  297. if (val->port_vlan == priv->pvid[p->id])
  298. priv->vlan_tagged |= BIT(p->id);
  299. } else {
  300. priv->vlan_tagged &= ~BIT(p->id);
  301. priv->pvid[p->id] = val->port_vlan;
  302. }
  303. *vt |= BIT(p->id);
  304. }
  305. return 0;
  306. }
  307. static int
  308. ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
  309. unsigned timeout)
  310. {
  311. int i;
  312. for (i = 0; i < timeout; i++) {
  313. u32 t;
  314. t = ar40xx_read(priv, reg);
  315. if ((t & mask) == val)
  316. return 0;
  317. usleep_range(1000, 2000);
  318. }
  319. return -ETIMEDOUT;
  320. }
/* Trigger MIB function @op (capture or flush) for all ports and wait
 * for the hardware busy flag to clear. Returns 0 on success or
 * -ETIMEDOUT. Caller must hold mib_lock.
 */
static int
ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
{
	int ret;

	lockdep_assert_held(&priv->mib_lock);

	/* Capture the hardware statistics for all ports */
	ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
		   AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));

	/* Wait for the capturing to complete. */
	ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
			      AR40XX_MIB_BUSY, 0, 10);

	return ret;
}
/* Accumulate @port's hardware MIB counters into priv->mib_stats, or
 * zero that port's slice when @flush is set. Caller must hold mib_lock
 * and should have issued a capture beforehand so the snapshot
 * registers are current.
 */
static void
ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
{
	unsigned int base;
	u64 *mib_stats;
	int i;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	WARN_ON(port >= priv->dev.ports);
	lockdep_assert_held(&priv->mib_lock);

	/* each port owns a fixed-size window of counter registers */
	base = AR40XX_REG_PORT_STATS_START +
	       AR40XX_REG_PORT_STATS_LEN * port;

	mib_stats = &priv->mib_stats[port * num_mibs];
	if (flush) {
		u32 len;

		len = num_mibs * sizeof(*mib_stats);
		memset(mib_stats, 0, len);
		return;
	}
	for (i = 0; i < num_mibs; i++) {
		const struct ar40xx_mib_desc *mib;
		u64 t;

		mib = &ar40xx_mibs[i];
		t = ar40xx_read(priv, base + mib->offset);
		if (mib->size == 2) {
			u64 hi;

			/* 64-bit counter: high word sits 4 bytes above */
			hi = ar40xx_read(priv, base + mib->offset + 4);
			t |= hi << 32;
		}
		/* software counters accumulate across captures */
		mib_stats[i] += t;
	}
}
  365. static int
  366. ar40xx_mib_capture(struct ar40xx_priv *priv)
  367. {
  368. return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
  369. }
  370. static int
  371. ar40xx_mib_flush(struct ar40xx_priv *priv)
  372. {
  373. return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
  374. }
  375. static int
  376. ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
  377. const struct switch_attr *attr,
  378. struct switch_val *val)
  379. {
  380. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  381. unsigned int len;
  382. int ret;
  383. u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
  384. mutex_lock(&priv->mib_lock);
  385. len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
  386. memset(priv->mib_stats, 0, len);
  387. ret = ar40xx_mib_flush(priv);
  388. mutex_unlock(&priv->mib_lock);
  389. return ret;
  390. }
  391. static int
  392. ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
  393. struct switch_val *val)
  394. {
  395. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  396. priv->vlan = !!val->value.i;
  397. return 0;
  398. }
  399. static int
  400. ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
  401. struct switch_val *val)
  402. {
  403. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  404. val->value.i = priv->vlan;
  405. return 0;
  406. }
  407. static int
  408. ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
  409. const struct switch_attr *attr,
  410. struct switch_val *val)
  411. {
  412. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  413. mutex_lock(&priv->reg_mutex);
  414. priv->mirror_rx = !!val->value.i;
  415. ar40xx_set_mirror_regs(priv);
  416. mutex_unlock(&priv->reg_mutex);
  417. return 0;
  418. }
  419. static int
  420. ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
  421. const struct switch_attr *attr,
  422. struct switch_val *val)
  423. {
  424. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  425. mutex_lock(&priv->reg_mutex);
  426. val->value.i = priv->mirror_rx;
  427. mutex_unlock(&priv->reg_mutex);
  428. return 0;
  429. }
  430. static int
  431. ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
  432. const struct switch_attr *attr,
  433. struct switch_val *val)
  434. {
  435. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  436. mutex_lock(&priv->reg_mutex);
  437. priv->mirror_tx = !!val->value.i;
  438. ar40xx_set_mirror_regs(priv);
  439. mutex_unlock(&priv->reg_mutex);
  440. return 0;
  441. }
  442. static int
  443. ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
  444. const struct switch_attr *attr,
  445. struct switch_val *val)
  446. {
  447. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  448. mutex_lock(&priv->reg_mutex);
  449. val->value.i = priv->mirror_tx;
  450. mutex_unlock(&priv->reg_mutex);
  451. return 0;
  452. }
  453. static int
  454. ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
  455. const struct switch_attr *attr,
  456. struct switch_val *val)
  457. {
  458. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  459. mutex_lock(&priv->reg_mutex);
  460. priv->monitor_port = val->value.i;
  461. ar40xx_set_mirror_regs(priv);
  462. mutex_unlock(&priv->reg_mutex);
  463. return 0;
  464. }
  465. static int
  466. ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
  467. const struct switch_attr *attr,
  468. struct switch_val *val)
  469. {
  470. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  471. mutex_lock(&priv->reg_mutex);
  472. val->value.i = priv->monitor_port;
  473. mutex_unlock(&priv->reg_mutex);
  474. return 0;
  475. }
  476. static int
  477. ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
  478. const struct switch_attr *attr,
  479. struct switch_val *val)
  480. {
  481. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  482. mutex_lock(&priv->reg_mutex);
  483. priv->source_port = val->value.i;
  484. ar40xx_set_mirror_regs(priv);
  485. mutex_unlock(&priv->reg_mutex);
  486. return 0;
  487. }
  488. static int
  489. ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
  490. const struct switch_attr *attr,
  491. struct switch_val *val)
  492. {
  493. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  494. mutex_lock(&priv->reg_mutex);
  495. val->value.i = priv->source_port;
  496. mutex_unlock(&priv->reg_mutex);
  497. return 0;
  498. }
  499. static int
  500. ar40xx_sw_set_linkdown(struct switch_dev *dev,
  501. const struct switch_attr *attr,
  502. struct switch_val *val)
  503. {
  504. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  505. if (val->value.i == 1)
  506. ar40xx_port_phy_linkdown(priv);
  507. else
  508. ar40xx_phy_init(priv);
  509. return 0;
  510. }
  511. static int
  512. ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
  513. const struct switch_attr *attr,
  514. struct switch_val *val)
  515. {
  516. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  517. int port;
  518. int ret;
  519. port = val->port_vlan;
  520. if (port >= dev->ports)
  521. return -EINVAL;
  522. mutex_lock(&priv->mib_lock);
  523. ret = ar40xx_mib_capture(priv);
  524. if (ret)
  525. goto unlock;
  526. ar40xx_mib_fetch_port_stat(priv, port, true);
  527. unlock:
  528. mutex_unlock(&priv->mib_lock);
  529. return ret;
  530. }
  531. static int
  532. ar40xx_sw_get_port_mib(struct switch_dev *dev,
  533. const struct switch_attr *attr,
  534. struct switch_val *val)
  535. {
  536. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  537. u64 *mib_stats;
  538. int port;
  539. int ret;
  540. char *buf = priv->buf;
  541. int i, len = 0;
  542. u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
  543. port = val->port_vlan;
  544. if (port >= dev->ports)
  545. return -EINVAL;
  546. mutex_lock(&priv->mib_lock);
  547. ret = ar40xx_mib_capture(priv);
  548. if (ret)
  549. goto unlock;
  550. ar40xx_mib_fetch_port_stat(priv, port, false);
  551. len += snprintf(buf + len, sizeof(priv->buf) - len,
  552. "Port %d MIB counters\n",
  553. port);
  554. mib_stats = &priv->mib_stats[port * num_mibs];
  555. for (i = 0; i < num_mibs; i++)
  556. len += snprintf(buf + len, sizeof(priv->buf) - len,
  557. "%-12s: %llu\n",
  558. ar40xx_mibs[i].name,
  559. mib_stats[i]);
  560. val->value.s = buf;
  561. val->len = len;
  562. unlock:
  563. mutex_unlock(&priv->mib_lock);
  564. return ret;
  565. }
  566. static int
  567. ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
  568. struct switch_val *val)
  569. {
  570. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  571. priv->vlan_id[val->port_vlan] = val->value.i;
  572. return 0;
  573. }
  574. static int
  575. ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
  576. struct switch_val *val)
  577. {
  578. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  579. val->value.i = priv->vlan_id[val->port_vlan];
  580. return 0;
  581. }
  582. static int
  583. ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
  584. {
  585. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  586. *vlan = priv->pvid[port];
  587. return 0;
  588. }
  589. static int
  590. ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
  591. {
  592. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  593. /* make sure no invalid PVIDs get set */
  594. if (vlan >= dev->vlans)
  595. return -EINVAL;
  596. priv->pvid[port] = vlan;
  597. return 0;
  598. }
/* Decode AR40XX_REG_PORT_STATUS(@port) into @link. The CPU port with
 * autonegotiation disabled is a fixed MAC-to-MAC connection and is
 * always reported as up. Speed/duplex/flow-control fields are only
 * filled in while the link is up.
 */
static void
ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
		      struct switch_port_link *link)
{
	u32 status;
	u32 speed;

	memset(link, 0, sizeof(*link));

	status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));

	link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
	if (link->aneg || (port != AR40XX_PORT_CPU))
		link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
	else
		link->link = true;

	if (!link->link)
		return;

	link->duplex = !!(status & AR40XX_PORT_DUPLEX);
	link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
	link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);

	speed = (status & AR40XX_PORT_SPEED) >>
		AR40XX_PORT_STATUS_SPEED_S;

	switch (speed) {
	case AR40XX_PORT_SPEED_10M:
		link->speed = SWITCH_PORT_SPEED_10;
		break;
	case AR40XX_PORT_SPEED_100M:
		link->speed = SWITCH_PORT_SPEED_100;
		break;
	case AR40XX_PORT_SPEED_1000M:
		link->speed = SWITCH_PORT_SPEED_1000;
		break;
	default:
		link->speed = SWITCH_PORT_SPEED_UNKNOWN;
		break;
	}
}
  634. static int
  635. ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
  636. struct switch_port_link *link)
  637. {
  638. struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
  639. ar40xx_read_port_link(priv, port, link);
  640. return 0;
  641. }
/* Global switch attributes exposed through the swconfig interface. */
static const struct switch_attr ar40xx_sw_attr_globals[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable VLAN mode",
		.set = ar40xx_sw_set_vlan,
		.get = ar40xx_sw_get_vlan,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mibs",
		.description = "Reset all MIB counters",
		.set = ar40xx_sw_set_reset_mibs,
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_rx",
		.description = "Enable mirroring of RX packets",
		.set = ar40xx_sw_set_mirror_rx_enable,
		.get = ar40xx_sw_get_mirror_rx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_tx",
		.description = "Enable mirroring of TX packets",
		.set = ar40xx_sw_set_mirror_tx_enable,
		.get = ar40xx_sw_get_mirror_tx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_monitor_port",
		.description = "Mirror monitor port",
		.set = ar40xx_sw_set_mirror_monitor_port,
		.get = ar40xx_sw_get_mirror_monitor_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_source_port",
		.description = "Mirror source port",
		.set = ar40xx_sw_set_mirror_source_port,
		.get = ar40xx_sw_get_mirror_source_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "linkdown",
		.description = "Link down all the PHYs",
		.set = ar40xx_sw_set_linkdown,
		.max = 1
	},
};
/* Per-port attributes exposed through the swconfig interface. */
static const struct switch_attr ar40xx_sw_attr_port[] = {
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mib",
		.description = "Reset single port MIB counters",
		.set = ar40xx_sw_set_port_reset_mib,
	},
	{
		.type = SWITCH_TYPE_STRING,
		.name = "mib",
		.description = "Get port's MIB counters",
		.set = NULL,
		.get = ar40xx_sw_get_port_mib,
	},
};
  712. const struct switch_attr ar40xx_sw_attr_vlan[] = {
  713. {
  714. .type = SWITCH_TYPE_INT,
  715. .name = "vid",
  716. .description = "VLAN ID (0-4094)",
  717. .set = ar40xx_sw_set_vid,
  718. .get = ar40xx_sw_get_vid,
  719. .max = 4094,
  720. },
  721. };
  722. /* End of swconfig support */
  723. static int
  724. ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
  725. {
  726. int timeout = 20;
  727. u32 t;
  728. while (1) {
  729. t = ar40xx_read(priv, reg);
  730. if ((t & mask) == val)
  731. return 0;
  732. if (timeout-- <= 0)
  733. break;
  734. usleep_range(10, 20);
  735. }
  736. pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
  737. (unsigned int)reg, t, mask, val);
  738. return -ETIMEDOUT;
  739. }
  740. static int
  741. ar40xx_atu_flush(struct ar40xx_priv *priv)
  742. {
  743. int ret;
  744. ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
  745. AR40XX_ATU_FUNC_BUSY, 0);
  746. if (!ret)
  747. ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
  748. AR40XX_ATU_FUNC_OP_FLUSH |
  749. AR40XX_ATU_FUNC_BUSY);
  750. return ret;
  751. }
/* Hard-reset the ESS switch core through its reset controller line,
 * holding reset for 10ms and then waiting for the internal tables to
 * finish initializing.
 */
static void
ar40xx_ess_reset(struct ar40xx_priv *priv)
{
	reset_control_assert(priv->ess_rst);
	mdelay(10);
	reset_control_deassert(priv->ess_rst);
	/* Waiting for all inner tables init done.
	 * It cost 5~10ms.
	 */
	mdelay(10);
	pr_info("ESS reset ok!\n");
}
  764. /* Start of psgmii self test */
/*
 * Reset the Malibu PHY's PSGMII block and the ESS switch core, then wait
 * for PSGMII PLL calibration to complete on both sides.
 *
 * MDIO address 5 is used for the PSGMII block writes and register 0x1a
 * for the RX CDR freeze/release; the exact register values are magic
 * numbers from the vendor sequence — do not reorder them.
 */
static void
ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
{
	u32 n;
	struct mii_bus *bus = priv->mii_bus;
	/* reset phy psgmii */
	/* fix phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005b);
	/* reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x001b);
	/* release reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x005b);
	/* Poll MMD1 reg 0x28 bit 0 for PHY-side calibration done */
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u16 status;

		status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in malibu is ready
		 * the worst time is 8.67ms
		 * for 25MHz reference clock
		 * [512+(128+2048)*49]*80ns+100us
		 */
		mdelay(2);
	}
	/*check malibu psgmii calibration done end..*/
	/*freeze phy psgmii RX CDR*/
	mdiobus_write(bus, 5, 0x1a, 0x2230);
	ar40xx_ess_reset(priv);
	/*check psgmii calibration done start*/
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u32 status;

		status = ar40xx_psgmii_read(priv, 0xa0);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in ESS is ready */
		mdelay(2);
	}
	/* check dakota psgmii calibration done end..*/
	/* relesae phy psgmii RX CDR */
	mdiobus_write(bus, 5, 0x1a, 0x3230);
	/* release phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005f);
}
/*
 * Run the built-in PSGMII traffic test on a single PHY.
 *
 * The PHY is reset, forced into 1000M full duplex (0x4140), and its
 * MMD7 packet generator sends 0x1000 packets through the loopback path.
 * TX/RX counters (MMD7 regs 0x802a-0x802f, split into low/high 16-bit
 * halves) are then compared against the expected packet count; the
 * per-PHY result bit in priv->phy_t_status is cleared on success or set
 * on failure.  Finally the PHY is powered down again (0x1840).
 */
static void
ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
{
	int j;
	u32 tx_ok, tx_error;
	u32 rx_ok, rx_error;
	u32 tx_ok_high16;
	u32 rx_ok_high16;
	u32 tx_all_ok, rx_all_ok;
	struct mii_bus *bus = priv->mii_bus;

	mdiobus_write(bus, phy, 0x0, 0x9000);	/* soft reset */
	mdiobus_write(bus, phy, 0x0, 0x4140);	/* force 1000M full duplex */
	/* wait for the (loopback) link to come up */
	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		u16 status;

		status = mdiobus_read(bus, phy, 0x11);
		if (status & AR40XX_PHY_SPEC_STATUS_LINK)
			break;
		/* the polling interval to check if the PHY link up or not
		 * maxwait_timer: 750 ms +/-10 ms
		 * minwait_timer : 1 us +/- 0.1us
		 * time resides in minwait_timer ~ maxwait_timer
		 * see IEEE 802.3 section 40.4.5.2
		 */
		mdelay(8);
	}
	/* enable check */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
	/* start traffic */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);
	/* check counter */
	tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
	tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
	tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
	rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
	rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
	rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
	tx_all_ok = tx_ok + (tx_ok_high16 << 16);
	rx_all_ok = rx_ok + (rx_ok_high16 << 16);
	if (tx_all_ok == 0x1000 && tx_error == 0) {
		/* success */
		priv->phy_t_status &= (~BIT(phy));
	} else {
		pr_info("PHY %d single test PSGMII issue happen!\n", phy);
		priv->phy_t_status |= BIT(phy);
	}
	/* power the PHY back down */
	mdiobus_write(bus, phy, 0x0, 0x1840);
}
/*
 * Run the PSGMII traffic test on all PHYs simultaneously.
 *
 * Uses MDIO broadcast address 0x1f to reset and configure every PHY at
 * once, waits for all links, generates traffic via the MMD7 packet
 * generator, then checks each PHY's counters individually.  Results are
 * recorded in the upper byte (bits 8+) of priv->phy_t_status, keeping
 * them separate from the single-PHY test bits.
 */
static void
ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
{
	int phy, j;
	struct mii_bus *bus = priv->mii_bus;

	mdiobus_write(bus, 0x1f, 0x0, 0x9000);	/* broadcast soft reset */
	mdiobus_write(bus, 0x1f, 0x0, 0x4140);	/* broadcast 1000M full */
	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			u16 status;

			status = mdiobus_read(bus, phy, 0x11);
			if (!(status & BIT(10)))
				break;
		}
		/* inner loop only completes when every PHY reports link */
		if (phy >= (AR40XX_NUM_PORTS - 1))
			break;
		/* The polling interval to check if the PHY link up or not */
		mdelay(8);
	}
	/* enable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
	/* start traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		u32 tx_ok, tx_error;
		u32 rx_ok, rx_error;
		u32 tx_ok_high16;
		u32 rx_ok_high16;
		u32 tx_all_ok, rx_all_ok;

		/* check counter */
		tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
		tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
		tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
		rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
		rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
		rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
		tx_all_ok = tx_ok + (tx_ok_high16<<16);
		rx_all_ok = rx_ok + (rx_ok_high16<<16);
		if (tx_all_ok == 0x1000 && tx_error == 0) {
			/* success */
			priv->phy_t_status &= ~BIT(phy + 8);
		} else {
			pr_info("PHY%d test see issue!\n", phy);
			priv->phy_t_status |= BIT(phy + 8);
		}
	}
	pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
}
/*
 * PSGMII self-test and calibration recovery loop.
 *
 * Repeatedly (up to AR40XX_PSGMII_CALB_NUM times) enables MAC loopback
 * on all switch ports, runs the single-PHY and all-PHY traffic tests,
 * and re-runs the PSGMII/ESS reset sequence whenever any test failed
 * (priv->phy_t_status non-zero).  On exit the traffic generator and
 * checker are disabled again; ar40xx_psgmii_self_test_clean() must be
 * called afterwards to undo the loopback configuration.
 */
void
ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
{
	u32 i, phy;
	struct mii_bus *bus = priv->mii_bus;

	ar40xx_malibu_psgmii_ess_reset(priv);
	/* switch to access MII reg for copper */
	mdiobus_write(bus, 4, 0x1f, 0x8500);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/*enable phy mdio broadcast write*/
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
	}
	/* force no link by power down */
	mdiobus_write(bus, 0x1f, 0x0, 0x1840);
	/*packet number*/
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
	/*fix mdi status */
	mdiobus_write(bus, 0x1f, 0x10, 0x6800);
	for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
		priv->phy_t_status = 0;
		/* enable MAC loopback on the ports behind each PHY */
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
				   AR40XX_PORT_LOOKUP_LOOPBACK,
				   AR40XX_PORT_LOOKUP_LOOPBACK);
		}
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
			ar40xx_psgmii_single_phy_testing(priv, phy);
		ar40xx_psgmii_all_phy_testing(priv);
		if (priv->phy_t_status)
			ar40xx_malibu_psgmii_ess_reset(priv);
		else
			break;
	}
	if (i >= AR40XX_PSGMII_CALB_NUM)
		pr_info("PSGMII cannot recover\n");
	else
		pr_debug("PSGMII recovered after %d times reset\n", i);
	/* configuration recover */
	/* packet number */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
	/* disable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
	/* disable traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
}
/*
 * Undo the configuration applied by ar40xx_psgmii_self_test():
 * disable PHY and MAC loopback, turn off MDIO broadcast, and flush the
 * FDB entries learned during the test.
 */
void
ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
{
	int phy;
	struct mii_bus *bus = priv->mii_bus;

	/* disable phy internal loopback */
	mdiobus_write(bus, 0x1f, 0x10, 0x6860);
	mdiobus_write(bus, 0x1f, 0x0, 0x9040);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/* disable mac loop back */
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
			   AR40XX_PORT_LOOKUP_LOOPBACK, 0);
		/* disable phy mdio broadcast write */
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
	}
	/* clear fdb entry */
	ar40xx_atu_flush(priv);
}
  977. /* End of psgmii self test */
  978. static void
  979. ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
  980. {
  981. if (mode == PORT_WRAPPER_PSGMII) {
  982. ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
  983. ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
  984. }
  985. }
  986. static
  987. int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
  988. {
  989. u32 t;
  990. t = AR40XX_PORT_STATUS_TXFLOW |
  991. AR40XX_PORT_STATUS_RXFLOW |
  992. AR40XX_PORT_TXHALF_FLOW |
  993. AR40XX_PORT_DUPLEX |
  994. AR40XX_PORT_SPEED_1000M;
  995. ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
  996. usleep_range(10, 20);
  997. t |= AR40XX_PORT_TX_EN |
  998. AR40XX_PORT_RX_EN;
  999. ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
  1000. return 0;
  1001. }
  1002. static void
  1003. ar40xx_init_port(struct ar40xx_priv *priv, int port)
  1004. {
  1005. u32 t;
  1006. ar40xx_write(priv, AR40XX_REG_PORT_STATUS(port), 0);
  1007. ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
  1008. ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
  1009. t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
  1010. ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
  1011. t = AR40XX_PORT_LOOKUP_LEARN;
  1012. t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
  1013. ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
  1014. }
/*
 * Program global (non per-port) switch defaults: forwarding control,
 * flood masks, jumbo frame MTU, MIB counters, EEE and CPU-port flow
 * control thresholds.
 */
void
ar40xx_init_globals(struct ar40xx_priv *priv)
{
	u32 t;

	/* enable CPU port and disable mirror port */
	t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
	    AR40XX_FWD_CTRL0_MIRROR_PORT;
	ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
	/* forward multicast and broadcast frames to CPU */
	t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
	ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
	/* enable jumbo frames: 9018 payload + 8 (header) + 2 margin */
	ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
		   AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
	/* Enable MIB counters */
	ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
		   AR40XX_MODULE_EN_MIB);
	/* Disable AZ (energy-efficient ethernet) */
	ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
	/* set flowctrl threshold for cpu port */
	t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
	    AR40XX_PORT0_FC_THRESH_OFF_DFLT;
	ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
}
  1041. static int
  1042. ar40xx_hw_init(struct ar40xx_priv *priv)
  1043. {
  1044. u32 i;
  1045. ar40xx_ess_reset(priv);
  1046. if (!priv->mii_bus)
  1047. return -1;
  1048. ar40xx_psgmii_self_test(priv);
  1049. ar40xx_psgmii_self_test_clean(priv);
  1050. ar40xx_mac_mode_init(priv, priv->mac_mode);
  1051. for (i = 0; i < priv->dev.ports; i++)
  1052. ar40xx_init_port(priv, i);
  1053. ar40xx_init_globals(priv);
  1054. return 0;
  1055. }
  1056. /* Start of qm error WAR */
  1057. static
  1058. int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
  1059. {
  1060. u32 reg;
  1061. if (port_id < 0 || port_id > 6)
  1062. return -1;
  1063. reg = AR40XX_REG_PORT_STATUS(port_id);
  1064. return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
  1065. (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
  1066. }
  1067. static
  1068. int ar40xx_get_qm_status(struct ar40xx_priv *priv,
  1069. u32 port_id, u32 *qm_buffer_err)
  1070. {
  1071. u32 reg;
  1072. u32 qm_val;
  1073. if (port_id < 1 || port_id > 5) {
  1074. *qm_buffer_err = 0;
  1075. return -1;
  1076. }
  1077. if (port_id < 4) {
  1078. reg = AR40XX_REG_QM_PORT0_3_QNUM;
  1079. ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
  1080. qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
  1081. /* every 8 bits for each port */
  1082. *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
  1083. } else {
  1084. reg = AR40XX_REG_QM_PORT4_6_QNUM;
  1085. ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
  1086. qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
  1087. /* every 8 bits for each port */
  1088. *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
  1089. }
  1090. return 0;
  1091. }
/*
 * Periodic MAC/QM polling task (QM error workaround).
 *
 * For every front port (1..AR40XX_NUM_PORTS-1) the corresponding PHY's
 * specific-status register is read and compared with the cached link
 * state:
 *
 *  - Up -> Down: disable MAC auto-link, and if the port's QM queues are
 *    empty force the MAC to 1G full and clear MANU_CTRL_EN (per the
 *    QCA8337 datasheet note below); otherwise mark the QM as not empty
 *    so it keeps being re-checked.
 *  - Down -> Up: debounce one polling cycle (port_link_up), then copy
 *    speed/duplex into the port status register with settle delays, and
 *    re-enable auto-link.
 *
 * Ports whose QM was marked not-empty are re-polled every cycle until
 * the queues drain.  Called with priv->qm_lock held by the work task.
 * NOTE(review): task_count and link_cnt are only ever incremented, never
 * read — presumably debug leftovers.
 */
static void
ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
{
	static int task_count;
	u32 i;
	u32 reg, value;
	u32 link, speed, duplex;
	u32 qm_buffer_err;
	u16 port_phy_status[AR40XX_NUM_PORTS];
	static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	struct mii_bus *bus = NULL;

	if (!priv || !priv->mii_bus)
		return;

	bus = priv->mii_bus;

	++task_count;

	for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
		/* PHY address is port number minus one */
		port_phy_status[i] =
			mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
		speed = FIELD_GET(AR40XX_PHY_SPEC_STATUS_SPEED,
				  port_phy_status[i]);
		link = FIELD_GET(AR40XX_PHY_SPEC_STATUS_LINK,
				 port_phy_status[i]);
		duplex = FIELD_GET(AR40XX_PHY_SPEC_STATUS_DUPLEX,
				   port_phy_status[i]);
		if (link != priv->ar40xx_port_old_link[i]) {
			++link_cnt[i];
			/* Up --> Down */
			if ((priv->ar40xx_port_old_link[i] ==
					AR40XX_PORT_LINK_UP) &&
			    (link == AR40XX_PORT_LINK_DOWN)) {
				/* LINK_EN disable(MAC force mode)*/
				reg = AR40XX_REG_PORT_STATUS(i);
				ar40xx_rmw(priv, reg,
					   AR40XX_PORT_AUTO_LINK_EN, 0);
				/* Check queue buffer */
				qm_err_cnt[i] = 0;
				ar40xx_get_qm_status(priv, i, &qm_buffer_err);
				if (qm_buffer_err) {
					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_NOT_EMPTY;
				} else {
					u16 phy_val = 0;

					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_EMPTY;
					ar40xx_force_1g_full(priv, i);
					/* Ref:QCA8337 Datasheet,Clearing
					 * MENU_CTRL_EN prevents phy to
					 * stuck in 100BT mode when
					 * bringing up the link
					 */
					ar40xx_phy_dbg_read(priv, i-1,
							    AR40XX_PHY_DEBUG_0,
							    &phy_val);
					phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
					ar40xx_phy_dbg_write(priv, i-1,
							     AR40XX_PHY_DEBUG_0,
							     phy_val);
				}
				priv->ar40xx_port_old_link[i] = link;
			} else if ((priv->ar40xx_port_old_link[i] ==
						AR40XX_PORT_LINK_DOWN) &&
				   (link == AR40XX_PORT_LINK_UP)) {
				/* Down --> Up */
				if (priv->port_link_up[i] < 1) {
					/* debounce: skip one polling cycle */
					++priv->port_link_up[i];
				} else {
					/* Change port status */
					reg = AR40XX_REG_PORT_STATUS(i);
					value = ar40xx_read(priv, reg);
					priv->port_link_up[i] = 0;

					value &= ~(AR40XX_PORT_DUPLEX |
						   AR40XX_PORT_SPEED);
					value |= speed | (duplex ? BIT(6) : 0);
					ar40xx_write(priv, reg, value);
					/* clock switch need such time
					 * to avoid glitch
					 */
					usleep_range(100, 200);

					value |= AR40XX_PORT_AUTO_LINK_EN;
					ar40xx_write(priv, reg, value);
					/* HW need such time to make sure link
					 * stable before enable MAC
					 */
					usleep_range(100, 200);

					if (speed == AR40XX_PORT_SPEED_100M) {
						u16 phy_val = 0;
						/* Enable @100M, if down to 10M
						 * clock will change smoothly
						 */
						ar40xx_phy_dbg_read(priv, i-1,
								    0,
								    &phy_val);
						phy_val |=
							AR40XX_PHY_MANU_CTRL_EN;
						ar40xx_phy_dbg_write(priv, i-1,
								     0,
								     phy_val);
					}
					priv->ar40xx_port_old_link[i] = link;
				}
			}
		}

		if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
			/* Check QM */
			ar40xx_get_qm_status(priv, i, &qm_buffer_err);
			if (qm_buffer_err) {
				++qm_err_cnt[i];
			} else {
				priv->ar40xx_port_qm_buf[i] =
					AR40XX_QM_EMPTY;
				qm_err_cnt[i] = 0;
				ar40xx_force_1g_full(priv, i);
			}
		}
	}
}
  1209. static void
  1210. ar40xx_qm_err_check_work_task(struct work_struct *work)
  1211. {
  1212. struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
  1213. qm_dwork.work);
  1214. mutex_lock(&priv->qm_lock);
  1215. ar40xx_sw_mac_polling_task(priv);
  1216. mutex_unlock(&priv->qm_lock);
  1217. schedule_delayed_work(&priv->qm_dwork,
  1218. msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
  1219. }
/*
 * Initialize and kick off the periodic QM error-check work.
 * Always returns 0.
 */
static int
ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
{
	mutex_init(&priv->qm_lock);
	INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
	schedule_delayed_work(&priv->qm_dwork,
			      msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
	return 0;
}
  1229. /* End of qm error WAR */
  1230. static int
  1231. ar40xx_vlan_init(struct ar40xx_priv *priv)
  1232. {
  1233. int port;
  1234. unsigned long bmp;
  1235. /* By default Enable VLAN */
  1236. priv->vlan = 1;
  1237. priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
  1238. priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
  1239. priv->vlan_tagged = priv->cpu_bmp;
  1240. bmp = priv->lan_bmp;
  1241. for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
  1242. priv->pvid[port] = AR40XX_LAN_VLAN;
  1243. bmp = priv->wan_bmp;
  1244. for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
  1245. priv->pvid[port] = AR40XX_WAN_VLAN;
  1246. return 0;
  1247. }
  1248. static void
  1249. ar40xx_mib_work_func(struct work_struct *work)
  1250. {
  1251. struct ar40xx_priv *priv;
  1252. int err;
  1253. priv = container_of(work, struct ar40xx_priv, mib_work.work);
  1254. mutex_lock(&priv->mib_lock);
  1255. err = ar40xx_mib_capture(priv);
  1256. if (err)
  1257. goto next_port;
  1258. ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
  1259. next_port:
  1260. priv->mib_next_port++;
  1261. if (priv->mib_next_port >= priv->dev.ports)
  1262. priv->mib_next_port = 0;
  1263. mutex_unlock(&priv->mib_lock);
  1264. schedule_delayed_work(&priv->mib_work,
  1265. msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
  1266. }
  1267. static void
  1268. ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
  1269. {
  1270. u32 t;
  1271. u32 egress, ingress;
  1272. u32 pvid = priv->vlan_id[priv->pvid[port]];
  1273. if (priv->vlan) {
  1274. egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
  1275. ingress = AR40XX_IN_SECURE;
  1276. } else {
  1277. egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
  1278. ingress = AR40XX_IN_PORT_ONLY;
  1279. }
  1280. t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
  1281. t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
  1282. ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
  1283. t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
  1284. t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
  1285. ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
  1286. t = members;
  1287. t |= AR40XX_PORT_LOOKUP_LEARN;
  1288. t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
  1289. t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
  1290. ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
  1291. }
  1292. static void
  1293. ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
  1294. {
  1295. if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
  1296. AR40XX_VTU_FUNC1_BUSY, 0))
  1297. return;
  1298. if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
  1299. ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
  1300. op |= AR40XX_VTU_FUNC1_BUSY;
  1301. ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
  1302. }
  1303. static void
  1304. ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
  1305. {
  1306. u32 op;
  1307. u32 val;
  1308. int i;
  1309. op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
  1310. val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
  1311. for (i = 0; i < AR40XX_NUM_PORTS; i++) {
  1312. u32 mode;
  1313. if ((port_mask & BIT(i)) == 0)
  1314. mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
  1315. else if (priv->vlan == 0)
  1316. mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
  1317. else if ((priv->vlan_tagged & BIT(i)) ||
  1318. (priv->vlan_id[priv->pvid[i]] != vid))
  1319. mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
  1320. else
  1321. mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
  1322. val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
  1323. }
  1324. ar40xx_vtu_op(priv, op, val);
  1325. }
/* Flush all entries from the VLAN table. */
static void
ar40xx_vtu_flush(struct ar40xx_priv *priv)
{
	ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
}
/*
 * Push the cached software VLAN configuration into the hardware.
 *
 * Flushes the VTU, then either loads every configured VLAN (building
 * each port's destination mask from its co-members), or — with VLANs
 * disabled — isolates all front ports so they can only talk to the CPU
 * port.  Finally reprograms each port and the mirror registers.
 * Always returns 0.
 */
static int
ar40xx_sw_hw_apply(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u8 portmask[AR40XX_NUM_PORTS];
	int i, j;

	mutex_lock(&priv->reg_mutex);
	/* flush all vlan entries */
	ar40xx_vtu_flush(priv);

	memset(portmask, 0, sizeof(portmask));
	if (priv->vlan) {
		for (j = 0; j < AR40XX_MAX_VLANS; j++) {
			u8 vp = priv->vlan_table[j];

			if (!vp)
				continue;

			/* each member may forward to its co-members,
			 * but not to itself
			 */
			for (i = 0; i < dev->ports; i++) {
				u8 mask = BIT(i);

				if (vp & mask)
					portmask[i] |= vp & ~mask;
			}

			ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
					     priv->vlan_table[j]);
		}
	} else {
		/* 8021q vlan disabled */
		for (i = 0; i < dev->ports; i++) {
			if (i == AR40XX_PORT_CPU)
				continue;

			portmask[i] = BIT(AR40XX_PORT_CPU);
			portmask[AR40XX_PORT_CPU] |= BIT(i);
		}
	}

	/* update the port destination mask registers and tag settings */
	for (i = 0; i < dev->ports; i++)
		ar40xx_setup_port(priv, i, portmask[i]);

	ar40xx_set_mirror_regs(priv);

	mutex_unlock(&priv->reg_mutex);
	return 0;
}
/*
 * Reset the software switch configuration to defaults and apply it.
 *
 * The memset wipes everything in struct ar40xx_priv from the "vlan"
 * member to the end of the structure — fragile: it relies on the field
 * layout in the header, so keep state that must survive a reset above
 * "vlan".
 */
static int
ar40xx_sw_reset_switch(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	int i, rv;

	mutex_lock(&priv->reg_mutex);
	memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
	       offsetof(struct ar40xx_priv, vlan));

	for (i = 0; i < AR40XX_MAX_VLANS; i++)
		priv->vlan_id[i] = i;

	ar40xx_vlan_init(priv);

	priv->mirror_rx = false;
	priv->mirror_tx = false;
	priv->source_port = 0;
	priv->monitor_port = 0;

	mutex_unlock(&priv->reg_mutex);

	/* applied without reg_mutex held: hw_apply takes it itself */
	rv = ar40xx_sw_hw_apply(dev);
	return rv;
}
/*
 * Bring the switch fully online: hardware init, default configuration,
 * CPU port setup, then start the MIB and QM-workaround polling works.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int
ar40xx_start(struct ar40xx_priv *priv)
{
	int ret;

	ret = ar40xx_hw_init(priv);
	if (ret)
		return ret;

	ret = ar40xx_sw_reset_switch(&priv->dev);
	if (ret)
		return ret;

	/* at last, setup cpu port */
	ret = ar40xx_cpuport_setup(priv);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->mib_work,
			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));

	ar40xx_qm_err_check_work_start(priv);

	return 0;
}
/* swconfig operations table tying the attribute arrays and callbacks
 * together for register_switch().
 */
static const struct switch_dev_ops ar40xx_sw_ops = {
	.attr_global = {
		.attr = ar40xx_sw_attr_globals,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
	},
	.attr_port = {
		.attr = ar40xx_sw_attr_port,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
	},
	.attr_vlan = {
		.attr = ar40xx_sw_attr_vlan,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
	},
	.get_port_pvid = ar40xx_sw_get_pvid,
	.set_port_pvid = ar40xx_sw_set_pvid,
	.get_vlan_ports = ar40xx_sw_get_ports,
	.set_vlan_ports = ar40xx_sw_set_ports,
	.apply_config = ar40xx_sw_hw_apply,
	.reset_switch = ar40xx_sw_reset_switch,
	.get_port_link = ar40xx_sw_get_port_link,
};
  1429. /* Platform driver probe function */
  1430. static int ar40xx_probe(struct platform_device *pdev)
  1431. {
  1432. struct device_node *switch_node;
  1433. struct device_node *psgmii_node;
  1434. struct device_node *mdio_node;
  1435. const __be32 *mac_mode;
  1436. struct clk *ess_clk;
  1437. struct switch_dev *swdev;
  1438. struct ar40xx_priv *priv;
  1439. u32 len;
  1440. u32 num_mibs;
  1441. struct resource psgmii_base = {0};
  1442. struct resource switch_base = {0};
  1443. int ret;
  1444. priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
  1445. if (!priv)
  1446. return -ENOMEM;
  1447. platform_set_drvdata(pdev, priv);
  1448. ar40xx_priv = priv;
  1449. switch_node = of_node_get(pdev->dev.of_node);
  1450. if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
  1451. return -EIO;
  1452. priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
  1453. if (IS_ERR(priv->hw_addr)) {
  1454. dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
  1455. return PTR_ERR(priv->hw_addr);
  1456. }
  1457. /*psgmii dts get*/
  1458. psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
  1459. if (!psgmii_node) {
  1460. dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
  1461. return -EINVAL;
  1462. }
  1463. if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
  1464. return -EIO;
  1465. priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
  1466. if (IS_ERR(priv->psgmii_hw_addr)) {
  1467. dev_err(&pdev->dev, "psgmii ioremap fail!\n");
  1468. return PTR_ERR(priv->psgmii_hw_addr);
  1469. }
  1470. mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
  1471. if (!mac_mode) {
  1472. dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
  1473. return -EINVAL;
  1474. }
  1475. priv->mac_mode = be32_to_cpup(mac_mode);
  1476. ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
  1477. if (ess_clk)
  1478. clk_prepare_enable(ess_clk);
  1479. priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
  1480. if (IS_ERR(priv->ess_rst)) {
  1481. dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
  1482. return PTR_ERR(priv->ess_rst);
  1483. }
  1484. if (of_property_read_u32(switch_node, "switch_cpu_bmp",
  1485. &priv->cpu_bmp) ||
  1486. of_property_read_u32(switch_node, "switch_lan_bmp",
  1487. &priv->lan_bmp) ||
  1488. of_property_read_u32(switch_node, "switch_wan_bmp",
  1489. &priv->wan_bmp)) {
  1490. dev_err(&pdev->dev, "Failed to read port properties\n");
  1491. return -EIO;
  1492. }
  1493. mutex_init(&priv->reg_mutex);
  1494. mutex_init(&priv->mib_lock);
  1495. INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
  1496. /* register switch */
  1497. swdev = &priv->dev;
  1498. mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq4019-mdio");
  1499. if (!mdio_node) {
  1500. dev_err(&pdev->dev, "Probe failed - Cannot find mdio node by phandle!\n");
  1501. ret = -ENODEV;
  1502. goto err_missing_phy;
  1503. }
  1504. priv->mii_bus = of_mdio_find_bus(mdio_node);
  1505. if (priv->mii_bus == NULL) {
  1506. dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
  1507. ret = -ENODEV;
  1508. goto err_missing_phy;
  1509. }
  1510. swdev->alias = dev_name(&priv->mii_bus->dev);
  1511. swdev->cpu_port = AR40XX_PORT_CPU;
  1512. swdev->name = "QCA AR40xx";
  1513. swdev->vlans = AR40XX_MAX_VLANS;
  1514. swdev->ports = AR40XX_NUM_PORTS;
  1515. swdev->ops = &ar40xx_sw_ops;
  1516. ret = register_switch(swdev, NULL);
  1517. if (ret < 0) {
  1518. dev_err(&pdev->dev, "Switch registration failed!\n");
  1519. return ret;
  1520. }
  1521. num_mibs = ARRAY_SIZE(ar40xx_mibs);
  1522. len = priv->dev.ports * num_mibs *
  1523. sizeof(*priv->mib_stats);
  1524. priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
  1525. if (!priv->mib_stats) {
  1526. ret = -ENOMEM;
  1527. goto err_unregister_switch;
  1528. }
  1529. ar40xx_start(priv);
  1530. return 0;
  1531. err_unregister_switch:
  1532. unregister_switch(&priv->dev);
  1533. err_missing_phy:
  1534. platform_set_drvdata(pdev, NULL);
  1535. return ret;
  1536. }
/*
 * Platform driver remove: stop both polling works before unregistering
 * the switch so no work runs against a torn-down device.
 */
static int ar40xx_remove(struct platform_device *pdev)
{
	struct ar40xx_priv *priv = platform_get_drvdata(pdev);

	cancel_delayed_work_sync(&priv->qm_dwork);
	cancel_delayed_work_sync(&priv->mib_work);

	unregister_switch(&priv->dev);

	return 0;
}
/* Device-tree match table: binds this driver to "qcom,ess-switch". */
static const struct of_device_id ar40xx_of_mtable[] = {
	{.compatible = "qcom,ess-switch" },
	{}
};
/* Platform driver definition and module registration. */
struct platform_driver ar40xx_drv = {
	.probe = ar40xx_probe,
	.remove = ar40xx_remove,
	.driver = {
		.name = "ar40xx",
		.of_match_table = ar40xx_of_mtable,
	},
};

module_platform_driver(ar40xx_drv);

MODULE_DESCRIPTION("IPQ40XX ESS driver");
MODULE_LICENSE("Dual BSD/GPL");