  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/of_mdio.h>
  3. #include <linux/of_platform.h>
  4. #include <net/arp.h>
  5. #include <net/nexthop.h>
  6. #include <net/neighbour.h>
  7. #include <net/netevent.h>
  8. #include <linux/inetdevice.h>
  9. #include <linux/rhashtable.h>
  10. #include <linux/of_net.h>
  11. #include <asm/mach-rtl838x/mach-rtl83xx.h>
  12. #include "rtl83xx.h"
  13. extern struct rtl83xx_soc_info soc_info;
  14. extern const struct rtl838x_reg rtl838x_reg;
  15. extern const struct rtl838x_reg rtl839x_reg;
  16. extern const struct rtl838x_reg rtl930x_reg;
  17. extern const struct rtl838x_reg rtl931x_reg;
  18. extern const struct dsa_switch_ops rtl83xx_switch_ops;
  19. extern const struct dsa_switch_ops rtl930x_switch_ops;
  20. DEFINE_MUTEX(smi_lock);
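/* Read the STP state of a port from the MSTI 0 port-state table.
 * Returns the 2-bit port state, or -1 for ports at or above the CPU port,
 * which cannot be configured.
 */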
  21. int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
  22. {
  23. u32 msti = 0;
  24. u32 port_state[4];
  25. int index, bit;
  26. int pos = port;
  27. int n = priv->port_width << 1;
  28. /* Ports above or equal CPU port can never be configured */
  29. if (port >= priv->cpu_port)
  30. return -1;
  31. mutex_lock(&priv->reg_mutex);
  32. /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
  33. if (priv->family_id == RTL8390_FAMILY_ID)
  34. pos += 12;
  35. if (priv->family_id == RTL9300_FAMILY_ID)
  36. pos += 3;
  37. if (priv->family_id == RTL9310_FAMILY_ID)
  38. pos += 8;
  39. index = n - (pos >> 4) - 1;
  40. bit = (pos << 1) % 32;
  41. priv->r->stp_get(priv, msti, port_state);
  42. mutex_unlock(&priv->reg_mutex);
  43. return (port_state[index] >> bit) & 3;
  44. }
  45. static struct table_reg rtl838x_tbl_regs[] = {
  46. TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2
  47. TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0
  48. TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1
  49. TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2
  50. TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0
  51. TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1
  52. TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2
  53. TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2
  54. TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0
  55. TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1
  56. TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2
  57. TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB
  58. TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA
  59. TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0
  60. TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1
  61. TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2
  62. TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3
  63. TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4
  64. TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5
  65. };
  66. void rtl_table_init(void)
  67. {
  68. int i;
  69. for (i = 0; i < RTL_TBL_END; i++)
  70. mutex_init(&rtl838x_tbl_regs[i].lock);
  71. }
  72. /* Request access to table t in table access register r
  73. * Returns a handle to a lock for that table
  74. */
  75. struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
  76. {
  77. if (r >= RTL_TBL_END)
  78. return NULL;
  79. if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit))
  80. return NULL;
  81. mutex_lock(&rtl838x_tbl_regs[r].lock);
  82. rtl838x_tbl_regs[r].tbl = t;
  83. return &rtl838x_tbl_regs[r];
  84. }
  85. /* Release a table r, unlock the corresponding lock */
  86. void rtl_table_release(struct table_reg *r)
  87. {
  88. if (!r)
  89. return;
  90. // pr_info("Unlocking %08x\n", (u32)r);
  91. mutex_unlock(&r->lock);
  92. // pr_info("Unlock done\n");
  93. }
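/* Execute a read or write transaction on table register r for entry idx and
 * poll (for up to 10 ms) until the hardware clears the execute bit.
 */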
  94. static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
  95. {
  96. int ret = 0;
  97. u32 cmd, val;
  98. /* Read/write bit has inverted meaning on RTL838x */
  99. if (r->rmode)
  100. cmd = is_write ? 0 : BIT(r->c_bit);
  101. else
  102. cmd = is_write ? BIT(r->c_bit) : 0;
  103. cmd |= BIT(r->c_bit + 1); /* Execute bit */
  104. cmd |= r->tbl << r->t_bit; /* Table type */
  105. cmd |= idx & (BIT(r->t_bit) - 1); /* Index */
  106. sw_w32(cmd, r->addr);
  107. ret = readx_poll_timeout(sw_r32, r->addr, val,
  108. !(val & BIT(r->c_bit + 1)), 20, 10000);
  109. if (ret)
  110. pr_err("%s: timeout\n", __func__);
  111. return ret;
  112. }
  113. /* Reads table index idx into the data registers of the table */
  114. int rtl_table_read(struct table_reg *r, int idx)
  115. {
  116. return rtl_table_exec(r, false, idx);
  117. }
  118. /* Writes the content of the table data registers into the table at index idx */
  119. int rtl_table_write(struct table_reg *r, int idx)
  120. {
  121. return rtl_table_exec(r, true, idx);
  122. }
/* Returns the address of the i-th data register of table register r.
 * The address is relative to the beginning of the Switch-IO block at 0xbb000000.
 */
  126. inline u16 rtl_table_data(struct table_reg *r, int i)
  127. {
  128. if (i >= r->max_data)
  129. i = r->max_data - 1;
  130. return r->data + i * 4;
  131. }
  132. inline u32 rtl_table_data_r(struct table_reg *r, int i)
  133. {
  134. return sw_r32(rtl_table_data(r, i));
  135. }
  136. inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
  137. {
  138. sw_w32(v, rtl_table_data(r, i));
  139. }
  140. /* Port register accessor functions for the RTL838x and RTL930X SoCs */
  141. void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
  142. {
  143. sw_w32_mask((u32)clear, (u32)set, reg);
  144. }
  145. void rtl838x_set_port_reg(u64 set, int reg)
  146. {
  147. sw_w32((u32)set, reg);
  148. }
  149. u64 rtl838x_get_port_reg(int reg)
  150. {
  151. return ((u64)sw_r32(reg));
  152. }
  153. /* Port register accessor functions for the RTL839x and RTL931X SoCs */
  154. void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
  155. {
  156. sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
  157. sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
  158. }
  159. u64 rtl839x_get_port_reg_be(int reg)
  160. {
  161. u64 v = sw_r32(reg);
  162. v <<= 32;
  163. v |= sw_r32(reg + 4);
  164. return v;
  165. }
  166. void rtl839x_set_port_reg_be(u64 set, int reg)
  167. {
  168. sw_w32(set >> 32, reg);
  169. sw_w32(set & 0xffffffff, reg + 4);
  170. }
  171. void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
  172. {
  173. sw_w32_mask((u32)clear, (u32)set, reg);
  174. sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
  175. }
  176. void rtl839x_set_port_reg_le(u64 set, int reg)
  177. {
  178. sw_w32(set, reg);
  179. sw_w32(set >> 32, reg + 4);
  180. }
  181. u64 rtl839x_get_port_reg_le(int reg)
  182. {
  183. u64 v = sw_r32(reg + 4);
  184. v <<= 32;
  185. v |= sw_r32(reg);
  186. return v;
  187. }
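/* Dispatch PHY register accesses to the SoC-specific implementation */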
  188. int read_phy(u32 port, u32 page, u32 reg, u32 *val)
  189. {
  190. switch (soc_info.family) {
  191. case RTL8380_FAMILY_ID:
  192. return rtl838x_read_phy(port, page, reg, val);
  193. case RTL8390_FAMILY_ID:
  194. return rtl839x_read_phy(port, page, reg, val);
  195. case RTL9300_FAMILY_ID:
  196. return rtl930x_read_phy(port, page, reg, val);
  197. case RTL9310_FAMILY_ID:
  198. return rtl931x_read_phy(port, page, reg, val);
  199. }
  200. return -1;
  201. }
  202. int write_phy(u32 port, u32 page, u32 reg, u32 val)
  203. {
  204. switch (soc_info.family) {
  205. case RTL8380_FAMILY_ID:
  206. return rtl838x_write_phy(port, page, reg, val);
  207. case RTL8390_FAMILY_ID:
  208. return rtl839x_write_phy(port, page, reg, val);
  209. case RTL9300_FAMILY_ID:
  210. return rtl930x_write_phy(port, page, reg, val);
  211. case RTL9310_FAMILY_ID:
  212. return rtl931x_write_phy(port, page, reg, val);
  213. }
  214. return -1;
  215. }
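/* Set up the DSA slave MDIO bus on top of the bus registered by the NIC driver,
 * parse the per-port properties (phy-handle, SerDes, LED set, PHY mode) from the
 * device tree and disable MAC polling of the PHYs so that they can be configured.
 */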
  216. static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
  217. {
  218. struct device *dev = priv->dev;
  219. struct device_node *dn, *phy_node, *mii_np = dev->of_node;
  220. struct mii_bus *bus;
  221. int ret;
  222. u32 pn;
  223. pr_debug("In %s\n", __func__);
  224. mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
  225. if (mii_np) {
  226. pr_debug("Found compatible MDIO node!\n");
  227. } else {
		dev_err(priv->dev, "no mdio-bus child node found\n");
  229. return -ENODEV;
  230. }
  231. priv->mii_bus = of_mdio_find_bus(mii_np);
  232. if (!priv->mii_bus) {
  233. pr_debug("Deferring probe of mdio bus\n");
  234. return -EPROBE_DEFER;
  235. }
	if (!of_device_is_available(mii_np))
		return -ENODEV;
  238. bus = devm_mdiobus_alloc(priv->ds->dev);
  239. if (!bus)
  240. return -ENOMEM;
  241. bus->name = "rtl838x slave mii";
	/* Since the NIC driver is loaded first, we can use the MDIO read/write
	 * functions assigned there.
	 */
  245. bus->read = priv->mii_bus->read;
  246. bus->write = priv->mii_bus->write;
  247. bus->read_paged = priv->mii_bus->read_paged;
  248. bus->write_paged = priv->mii_bus->write_paged;
  249. snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
  250. bus->parent = dev;
  251. priv->ds->slave_mii_bus = bus;
  252. priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
  253. priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
  254. ret = mdiobus_register(priv->ds->slave_mii_bus);
	if (ret) {
		of_node_put(mii_np);
  257. return ret;
  258. }
  259. dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
  260. if (!dn) {
  261. dev_err(priv->dev, "No RTL switch node in DTS\n");
  262. return -ENODEV;
  263. }
  264. for_each_node_by_name(dn, "port") {
  265. phy_interface_t interface;
  266. u32 led_set;
  267. if (!of_device_is_available(dn))
  268. continue;
  269. if (of_property_read_u32(dn, "reg", &pn))
  270. continue;
  271. phy_node = of_parse_phandle(dn, "phy-handle", 0);
  272. if (!phy_node) {
  273. if (pn != priv->cpu_port)
				dev_err(priv->dev, "Port node %d is missing a phy-handle\n", pn);
  275. continue;
  276. }
  277. if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
  278. priv->ports[pn].sds_num = -1;
  279. pr_debug("%s port %d has SDS %d\n", __func__, pn, priv->ports[pn].sds_num);
  280. if (of_get_phy_mode(dn, &interface))
  281. interface = PHY_INTERFACE_MODE_NA;
  282. if (interface == PHY_INTERFACE_MODE_HSGMII)
  283. priv->ports[pn].is2G5 = true;
  284. if (interface == PHY_INTERFACE_MODE_USXGMII)
  285. priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
  286. if (interface == PHY_INTERFACE_MODE_10GBASER)
  287. priv->ports[pn].is10G = true;
  288. if (of_property_read_u32(dn, "led-set", &led_set))
  289. led_set = 0;
  290. priv->ports[pn].led_set = led_set;
  291. // Check for the integrated SerDes of the RTL8380M first
  292. if (of_property_read_bool(phy_node, "phy-is-integrated")
  293. && priv->id == 0x8380 && pn >= 24) {
			pr_debug("----> Found a SerDes\n");
  295. priv->ports[pn].phy = PHY_RTL838X_SDS;
  296. continue;
  297. }
  298. if (priv->id >= 0x9300) {
  299. priv->ports[pn].phy_is_integrated = false;
  300. if (of_property_read_bool(phy_node, "phy-is-integrated")) {
  301. priv->ports[pn].phy_is_integrated = true;
  302. priv->ports[pn].phy = PHY_RTL930X_SDS;
  303. }
  304. } else {
  305. if (of_property_read_bool(phy_node, "phy-is-integrated") &&
  306. !of_property_read_bool(phy_node, "sfp")) {
  307. priv->ports[pn].phy = PHY_RTL8218B_INT;
  308. continue;
  309. }
  310. }
  311. if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
  312. of_property_read_bool(phy_node, "sfp")) {
  313. priv->ports[pn].phy = PHY_RTL8214FC;
  314. continue;
  315. }
  316. if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
  317. !of_property_read_bool(phy_node, "sfp")) {
  318. priv->ports[pn].phy = PHY_RTL8218B_EXT;
  319. continue;
  320. }
  321. }
  322. /* Disable MAC polling the PHY so that we can start configuration */
  323. priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
  324. /* Enable PHY control via SoC */
  325. if (priv->family_id == RTL8380_FAMILY_ID) {
  326. /* Enable SerDes NWAY and PHY control via SoC */
  327. sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
  328. } else if (priv->family_id == RTL8390_FAMILY_ID) {
  329. /* Disable PHY polling via SoC */
  330. sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
  331. }
  332. /* Power on fibre ports and reset them if necessary */
  333. if (priv->ports[24].phy == PHY_RTL838X_SDS) {
  334. pr_debug("Powering on fibre ports & reset\n");
  335. rtl8380_sds_power(24, 1);
  336. rtl8380_sds_power(26, 1);
  337. }
  338. pr_debug("%s done\n", __func__);
  339. return 0;
  340. }
  341. static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
  342. {
  343. int t = sw_r32(priv->r->l2_ctrl_1);
  344. t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
  345. if (priv->family_id == RTL8380_FAMILY_ID)
  346. t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
  347. else
  348. t = (t * 3) / 5;
  349. pr_debug("L2 AGING time: %d sec\n", t);
  350. pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
  351. return t;
  352. }
  353. /* Caller must hold priv->reg_mutex */
  354. int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
  355. {
  356. struct rtl838x_switch_priv *priv = ds->priv;
  357. int i;
  358. u32 algomsk = 0;
  359. u32 algoidx = 0;
  360. if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
  361. pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
  362. return -EINVAL;
  363. }
  364. if (group >= priv->n_lags) {
  365. pr_err("%s: LAG %d invalid.\n", __func__, group);
  366. return -EINVAL;
  367. }
  368. if (port >= priv->cpu_port) {
  369. pr_err("%s: Port %d invalid.\n", __func__, port);
  370. return -EINVAL;
  371. }
  372. for (i = 0; i < priv->n_lags; i++) {
  373. if (priv->lags_port_members[i] & BIT_ULL(port))
  374. break;
  375. }
  376. if (i != priv->n_lags) {
  377. pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
  378. return -ENOSPC;
  379. }
  380. switch(info->hash_type) {
  381. case NETDEV_LAG_HASH_L2:
  382. algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
  383. algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
  384. break;
  385. case NETDEV_LAG_HASH_L23:
  386. algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
  387. algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
  388. algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
  389. algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
  390. algoidx = 1;
  391. break;
  392. case NETDEV_LAG_HASH_L34:
  393. algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
  394. algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
  395. algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
  396. algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
  397. algoidx = 2;
  398. break;
  399. default:
  400. algomsk |= 0x7f;
  401. }
  402. priv->r->set_distribution_algorithm(group, algoidx, algomsk);
  403. priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
  404. priv->lags_port_members[group] |= BIT_ULL(port);
  405. pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
  406. __func__, port, group, priv->lags_port_members[group]);
  407. return 0;
  408. }
  409. /* Caller must hold priv->reg_mutex */
  410. int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
  411. {
  412. struct rtl838x_switch_priv *priv = ds->priv;
  413. if (group >= priv->n_lags) {
  414. pr_err("%s: LAG %d invalid.\n", __func__, group);
  415. return -EINVAL;
  416. }
  417. if (port >= priv->cpu_port) {
  418. pr_err("%s: Port %d invalid.\n", __func__, port);
  419. return -EINVAL;
  420. }
  421. if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
  422. pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
  423. return -ENOSPC;
  424. }
  425. // 0x7f algo mask all
  426. priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
  427. priv->lags_port_members[group] &= ~BIT_ULL(port);
  428. pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
  429. __func__, port, group, priv->lags_port_members[group]);
  430. return 0;
  431. }
  432. /* Allocate a 64 bit octet counter located in the LOG HW table */
  433. static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
  434. {
  435. int idx;
  436. mutex_lock(&priv->reg_mutex);
  437. idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
  438. if (idx >= priv->n_counters) {
  439. mutex_unlock(&priv->reg_mutex);
  440. return -1;
  441. }
  442. set_bit(idx, priv->octet_cntr_use_bm);
  443. mutex_unlock(&priv->reg_mutex);
  444. return idx;
  445. }
/* Allocate a 32-bit packet counter.
 * Two 32-bit packet counters share the location of one 64-bit octet counter.
 * Initially no packet counters are free; a pair of them becomes available
 * by allocating the corresponding octet counter.
 */
  451. int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
  452. {
  453. int idx, j;
  454. mutex_lock(&priv->reg_mutex);
	/* Because initially no packet counters are free, the logic is reversed:
	 * a 0-bit means the counter is already allocated (for octets)
	 */
  458. idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
  459. if (idx >= priv->n_counters * 2) {
  460. j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
  461. if (j >= priv->n_counters) {
  462. mutex_unlock(&priv->reg_mutex);
  463. return -1;
  464. }
  465. set_bit(j, priv->octet_cntr_use_bm);
  466. idx = j * 2;
  467. set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
  468. } else {
  469. clear_bit(idx, priv->packet_cntr_use_bm);
  470. }
  471. mutex_unlock(&priv->reg_mutex);
  472. return idx;
  473. }
/* Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC.
 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash
 * table or mark an existing entry as a nexthop by setting its nexthop bit.
 * Called from the L3 layer.
 * The index in the L2 hash table is filled into nh->l2_id.
 */
  480. int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
  481. {
  482. struct rtl838x_l2_entry e;
  483. u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
  484. u32 key = priv->r->l2_hash_key(priv, seed);
  485. int i, idx = -1;
  486. u64 entry;
  487. pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
  488. __func__, nh->mac, nh->rvid, key, seed);
  489. e.type = L2_UNICAST;
  490. u64_to_ether_addr(nh->mac, &e.mac[0]);
  491. e.port = nh->port;
  492. // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
  493. for (i = 0; i < priv->l2_bucket_size; i++) {
  494. entry = priv->r->read_l2_entry_using_hash(key, i, &e);
  495. if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
  496. idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
  497. : ((key << 2) | i) & 0xffff;
  498. break;
  499. }
  500. }
  501. if (idx < 0) {
  502. pr_err("%s: No more L2 forwarding entries available\n", __func__);
  503. return -1;
  504. }
  505. // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
  506. nh->l2_id = idx;
  507. if (e.valid) {
  508. nh->port = e.port;
  509. nh->vid = e.vid; // Save VID
  510. nh->rvid = e.rvid;
  511. nh->dev_id = e.stack_dev;
  512. // If the entry is already a valid next hop entry, don't change it
  513. if (e.next_hop)
  514. return 0;
  515. } else {
  516. e.valid = true;
  517. e.is_static = true;
  518. e.rvid = nh->rvid;
  519. e.is_ip_mc = false;
  520. e.is_ipv6_mc = false;
  521. e.block_da = false;
  522. e.block_sa = false;
  523. e.suspended = false;
  524. e.age = 0; // With port-ignore
  525. e.port = priv->port_ignore;
  526. u64_to_ether_addr(nh->mac, &e.mac[0]);
  527. }
  528. e.next_hop = true;
  529. e.nh_route_id = nh->id; // NH route ID takes place of VID
  530. e.nh_vlan_target = false;
  531. priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
  532. return 0;
  533. }
/* Removes a Layer 2 next hop entry in the forwarding database.
 * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
 * and we wait until the entry ages out.
 */
  538. int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
  539. {
  540. struct rtl838x_l2_entry e;
  541. u32 key = nh->l2_id >> 2;
  542. int i = nh->l2_id & 0x3;
	priv->r->read_l2_entry_using_hash(key, i, &e);
  544. pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
  545. if (!e.valid) {
  546. dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
  547. return -1;
  548. }
  549. if (e.is_static)
  550. e.valid = false;
  551. e.next_hop = false;
  552. e.vid = nh->vid; // Restore VID
  553. e.rvid = nh->rvid;
  554. priv->r->write_l2_entry_using_hash(key, i, &e);
  555. return 0;
  556. }
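/* Handle NETDEV_CHANGEUPPER events: when one of our DSA slave ports is linked to
 * or unlinked from a LAG master device, update the corresponding trunk group.
 */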
  557. static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
  558. struct net_device *ndev,
  559. struct netdev_notifier_changeupper_info *info)
  560. {
  561. struct net_device *upper = info->upper_dev;
  562. struct netdev_lag_upper_info *lag_upper_info = NULL;
  563. int i, j, err;
  564. if (!netif_is_lag_master(upper))
  565. return 0;
  566. mutex_lock(&priv->reg_mutex);
  567. for (i = 0; i < priv->n_lags; i++) {
  568. if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
  569. break;
  570. }
  571. for (j = 0; j < priv->cpu_port; j++) {
  572. if (priv->ports[j].dp->slave == ndev)
  573. break;
  574. }
  575. if (j >= priv->cpu_port) {
  576. err = -EINVAL;
  577. goto out;
  578. }
  579. if (info->linking) {
  580. lag_upper_info = info->upper_info;
  581. if (!priv->lag_devs[i])
  582. priv->lag_devs[i] = upper;
  583. err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
  584. if (err) {
  585. err = -EINVAL;
  586. goto out;
  587. }
	} else {
		if (!priv->lag_devs[i]) {
			err = -EINVAL;
			goto out;
		}
		err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
  592. if (err) {
  593. err = -EINVAL;
  594. goto out;
  595. }
  596. if (!priv->lags_port_members[i])
  597. priv->lag_devs[i] = NULL;
  598. }
  599. out:
  600. mutex_unlock(&priv->reg_mutex);
  601. return 0;
  602. }
/* Is the lower network device a DSA slave network device of our RTL83xx switch?
 * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
 * DSA master device.
 */
  607. int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
  608. {
  609. int i;
  610. // TODO: On 5.12:
  611. // if(!dsa_slave_dev_check(dev)) {
  612. // netdev_info(dev, "%s: not a DSA device.\n", __func__);
  613. // return -EINVAL;
  614. // }
  615. for (i = 0; i < priv->cpu_port; i++) {
  616. if (!priv->ports[i].dp)
  617. continue;
  618. if (priv->ports[i].dp->slave == dev)
  619. return i;
  620. }
  621. return -EINVAL;
  622. }
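/* Netdevice notifier callback, currently only used to track LAG membership
 * changes via NETDEV_CHANGEUPPER.
 */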
  623. static int rtl83xx_netdevice_event(struct notifier_block *this,
  624. unsigned long event, void *ptr)
  625. {
  626. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  627. struct rtl838x_switch_priv *priv;
	int err = 0;
  629. pr_debug("In: %s, event: %lu\n", __func__, event);
  630. if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
  631. return NOTIFY_DONE;
  632. priv = container_of(this, struct rtl838x_switch_priv, nb);
  633. switch (event) {
  634. case NETDEV_CHANGEUPPER:
  635. err = rtl83xx_handle_changeupper(priv, ndev, ptr);
  636. break;
  637. }
  638. if (err)
  639. return err;
  640. return NOTIFY_DONE;
  641. }
static const struct rhashtable_params route_ht_params = {
  643. .key_len = sizeof(u32),
  644. .key_offset = offsetof(struct rtl83xx_route, gw_ip),
  645. .head_offset = offsetof(struct rtl83xx_route, linkage),
  646. };
  647. /* Updates an L3 next hop entry in the ROUTING table */
  648. static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
  649. {
  650. struct rtl83xx_route *r;
  651. struct rhlist_head *tmp, *list;
  652. rcu_read_lock();
  653. list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
  654. if (!list) {
  655. rcu_read_unlock();
  656. return -ENOENT;
  657. }
  658. rhl_for_each_entry_rcu(r, tmp, list, linkage) {
  659. pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
  660. __func__, &ip_addr, mac);
  661. // Reads the ROUTING table entry associated with the route
  662. priv->r->route_read(r->id, r);
  663. pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
  664. r->nh.mac = r->nh.gw = mac;
  665. r->nh.port = priv->port_ignore;
  666. r->nh.id = r->id;
  667. // Do we need to explicitly add a DMAC entry with the route's nh index?
  668. if (priv->r->set_l3_egress_mac)
  669. priv->r->set_l3_egress_mac(r->id, mac);
  670. // Update ROUTING table: map gateway-mac and switch-mac id to route id
  671. rtl83xx_l2_nexthop_add(priv, &r->nh);
  672. r->attr.valid = true;
  673. r->attr.action = ROUTE_ACT_FORWARD;
  674. r->attr.type = 0;
  675. r->attr.hit = false; // Reset route-used indicator
  676. // Add PIE entry with dst_ip and prefix_len
  677. r->pr.dip = r->dst_ip;
  678. r->pr.dip_m = inet_make_mask(r->prefix_len);
  679. if (r->is_host_route) {
  680. int slot = priv->r->find_l3_slot(r, false);
  681. pr_info("%s: Got slot for route: %d\n", __func__, slot);
  682. priv->r->host_route_write(slot, r);
  683. } else {
  684. priv->r->route_write(r->id, r);
  685. r->pr.fwd_sel = true;
  686. r->pr.fwd_data = r->nh.l2_id;
  687. r->pr.fwd_act = PIE_ACT_ROUTE_UC;
  688. }
  689. if (priv->r->set_l3_nexthop)
  690. priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
  691. if (r->pr.id < 0) {
  692. r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
  693. if (r->pr.packet_cntr >= 0) {
  694. pr_info("Using packet counter %d\n", r->pr.packet_cntr);
  695. r->pr.log_sel = true;
  696. r->pr.log_data = r->pr.packet_cntr;
  697. }
  698. priv->r->pie_rule_add(priv, &r->pr);
  699. } else {
  700. int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
  701. pr_info("%s: total packets: %d\n", __func__, pkts);
  702. priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
  703. }
  704. }
  705. rcu_read_unlock();
  706. return 0;
  707. }
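/* Resolve the MAC address of an IPv4 gateway via the neighbour table and, once
 * it is known, update the corresponding L3 nexthop. Otherwise trigger ARP
 * resolution and wait for the netevent notifier.
 */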
  708. static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
  709. struct net_device *dev, __be32 ip_addr)
  710. {
  711. struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
  712. int err = 0;
  713. u64 mac;
  714. if (!n) {
  715. n = neigh_create(&arp_tbl, &ip_addr, dev);
  716. if (IS_ERR(n))
  717. return PTR_ERR(n);
  718. }
	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */
  723. if (n->nud_state & NUD_VALID) {
  724. mac = ether_addr_to_u64(n->ha);
  725. pr_info("%s: resolved mac: %016llx\n", __func__, mac);
  726. rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
  727. } else {
  728. pr_info("%s: need to wait\n", __func__);
  729. neigh_event_send(n, NULL);
  730. }
  731. neigh_release(n);
  732. return err;
  733. }
  734. struct rtl83xx_walk_data {
  735. struct rtl838x_switch_priv *priv;
  736. int port;
  737. };
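/* Walk callback: stop the lower-device walk once a port of our switch is found */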
  738. static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
  739. {
  740. struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
  741. struct rtl838x_switch_priv *priv = data->priv;
  742. int ret = 0;
  743. int index;
  744. index = rtl83xx_port_is_under(lower, priv);
  745. data->port = index;
  746. if (index >= 0) {
  747. pr_debug("Found DSA-port, index %d\n", index);
  748. ret = 1;
  749. }
  750. return ret;
  751. }
  752. int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
  753. {
  754. struct rtl83xx_walk_data data;
  755. struct netdev_nested_priv _priv;
  756. data.priv = priv;
  757. data.port = 0;
  758. _priv.data = (void *)&data;
  759. netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
  760. return data.port;
  761. }
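/* Allocate a prefix route entry and insert it into the route hash table */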
  762. static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
  763. {
  764. struct rtl83xx_route *r;
  765. int idx = 0, err;
  766. mutex_lock(&priv->reg_mutex);
  767. idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
  768. pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
  769. r = kzalloc(sizeof(*r), GFP_KERNEL);
  770. if (!r) {
  771. mutex_unlock(&priv->reg_mutex);
  772. return r;
  773. }
  774. r->id = idx;
  775. r->gw_ip = ip;
  776. r->pr.id = -1; // We still need to allocate a rule in HW
  777. r->is_host_route = false;
  778. err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
  779. if (err) {
  780. pr_err("Could not insert new rule\n");
  781. mutex_unlock(&priv->reg_mutex);
  782. goto out_free;
  783. }
  784. set_bit(idx, priv->route_use_bm);
  785. mutex_unlock(&priv->reg_mutex);
  786. return r;
  787. out_free:
  788. kfree(r);
  789. return NULL;
  790. }
  791. static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
  792. {
  793. struct rtl83xx_route *r;
  794. int idx = 0, err;
  795. mutex_lock(&priv->reg_mutex);
  796. idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
  797. pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
  798. r = kzalloc(sizeof(*r), GFP_KERNEL);
  799. if (!r) {
  800. mutex_unlock(&priv->reg_mutex);
  801. return r;
  802. }
	/* We require a unique route ID irrespective of whether it is a prefix or host
	 * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry
	 */
  806. r->id = idx + MAX_ROUTES;
  807. r->gw_ip = ip;
  808. r->pr.id = -1; // We still need to allocate a rule in HW
  809. r->is_host_route = true;
  810. err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
  811. if (err) {
  812. pr_err("Could not insert new rule\n");
  813. mutex_unlock(&priv->reg_mutex);
  814. goto out_free;
  815. }
  816. set_bit(idx, priv->host_route_use_bm);
  817. mutex_unlock(&priv->reg_mutex);
  818. return r;
  819. out_free:
  820. kfree(r);
  821. return NULL;
  822. }
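/* Remove a route from the hash table and invalidate its HW representation */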
  823. static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
  824. {
  825. int id;
  826. if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
  827. dev_warn(priv->dev, "Could not remove route\n");
  828. if (r->is_host_route) {
  829. id = priv->r->find_l3_slot(r, false);
  830. pr_debug("%s: Got id for host route: %d\n", __func__, id);
  831. r->attr.valid = false;
  832. priv->r->host_route_write(id, r);
  833. clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
  834. } else {
  835. // If there is a HW representation of the route, delete it
  836. if (priv->r->route_lookup_hw) {
  837. id = priv->r->route_lookup_hw(r);
  838. pr_info("%s: Got id for prefix route: %d\n", __func__, id);
  839. r->attr.valid = false;
  840. priv->r->route_write(id, r);
  841. }
  842. clear_bit(r->id, priv->route_use_bm);
  843. }
  844. kfree(r);
  845. }
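/* Delete an offloaded IPv4 FIB entry: remove the L2 nexthop, release the packet
 * counter, delete the PIE rule and free the route entry.
 */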
  846. static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
  847. struct fib_entry_notifier_info *info)
  848. {
  849. struct fib_nh *nh = fib_info_nh(info->fi, 0);
  850. struct rtl83xx_route *r;
  851. struct rhlist_head *tmp, *list;
  852. pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
  853. rcu_read_lock();
  854. list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
  855. if (!list) {
  856. rcu_read_unlock();
  857. pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
  858. return -ENOENT;
  859. }
  860. rhl_for_each_entry_rcu(r, tmp, list, linkage) {
  861. if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
  862. pr_info("%s: found a route with id %d, nh-id %d\n",
  863. __func__, r->id, r->nh.id);
  864. break;
  865. }
  866. }
  867. rcu_read_unlock();
  868. rtl83xx_l2_nexthop_rm(priv, &r->nh);
  869. pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
  870. set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
  871. priv->r->pie_rule_rm(priv, &r->pr);
  872. rtl83xx_route_rm(priv, r);
  873. nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
  874. return 0;
  875. }
/* On the RTL93xx, an L3 termination endpoint MAC address, on which the router
 * listens for packets to be routed, needs to be allocated.
 */
  879. static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
  880. {
  881. int i, free_mac = -1;
  882. struct rtl93xx_rt_mac m;
  883. mutex_lock(&priv->reg_mutex);
  884. for (i = 0; i < MAX_ROUTER_MACS; i++) {
  885. priv->r->get_l3_router_mac(i, &m);
  886. if (free_mac < 0 && !m.valid) {
  887. free_mac = i;
  888. continue;
  889. }
  890. if (m.valid && m.mac == mac) {
  891. free_mac = i;
  892. break;
  893. }
  894. }
  895. if (free_mac < 0) {
  896. pr_err("No free router MACs, cannot offload\n");
  897. mutex_unlock(&priv->reg_mutex);
  898. return -1;
  899. }
  900. m.valid = true;
  901. m.mac = mac;
  902. m.p_type = 0; // An individual port, not a trunk port
  903. m.p_id = 0x3f; // Listen on any port
  904. m.p_id_mask = 0;
  905. m.vid = 0; // Listen on any VLAN...
  906. m.vid_mask = 0; // ... so mask needs to be 0
  907. m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
  908. m.action = L3_FORWARD; // Route the packet
  909. priv->r->set_l3_router_mac(free_mac, &m);
  910. mutex_unlock(&priv->reg_mutex);
  911. return 0;
  912. }
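/* Find or allocate an L3 egress interface (source MAC + VLAN) and return its
 * index, or -1 if no free entry is available.
 */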
  913. static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
  914. {
  915. int i, free_mac = -1;
  916. struct rtl838x_l3_intf intf;
  917. u64 m;
  918. mutex_lock(&priv->reg_mutex);
  919. for (i = 0; i < MAX_SMACS; i++) {
  920. m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
  921. if (free_mac < 0 && !m) {
  922. free_mac = i;
  923. continue;
  924. }
  925. if (m == mac) {
  926. mutex_unlock(&priv->reg_mutex);
  927. return i;
  928. }
  929. }
	if (free_mac < 0) {
		pr_err("No free egress interface, cannot offload\n");
		mutex_unlock(&priv->reg_mutex);
		return -1;
	}
  934. // Set up default egress interface 1
  935. intf.vid = vlan;
  936. intf.smac_idx = free_mac;
  937. intf.ip4_mtu_id = 1;
  938. intf.ip6_mtu_id = 1;
  939. intf.ttl_scope = 1; // TTL
  940. intf.hl_scope = 1; // Hop Limit
  941. intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
  942. intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
  943. priv->r->set_l3_egress_intf(free_mac, &intf);
  944. priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
  945. mutex_unlock(&priv->reg_mutex);
  946. return free_mac;
  947. }
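/* Offload an IPv4 FIB entry: allocate a (host) route, router MAC and egress
 * interface, and resolve the gateway so that the nexthop can be programmed.
 */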
  948. static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
  949. struct fib_entry_notifier_info *info)
  950. {
  951. struct fib_nh *nh = fib_info_nh(info->fi, 0);
  952. struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
  953. int port;
  954. struct rtl83xx_route *r;
  955. bool to_localhost;
  956. int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
  957. pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
  958. if (!info->dst) {
  959. pr_info("Not offloading default route for now\n");
  960. return 0;
  961. }
  962. pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
  963. ether_addr_to_u64(dev->dev_addr), vlan
  964. );
  965. port = rtl83xx_port_dev_lower_find(dev, priv);
  966. if (port < 0)
  967. return -1;
  968. // For now we only work with routes that have a gateway and are not ourself
  969. // if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
  970. // return 0;
  971. if ((info->dst & 0xff) == 0xff)
  972. return 0;
  973. // Do not offload routes to 192.168.100.x
  974. if ((info->dst & 0xffffff00) == 0xc0a86400)
  975. return 0;
  976. // Do not offload routes to 127.x.x.x
  977. if ((info->dst & 0xff000000) == 0x7f000000)
  978. return 0;
  979. // Allocate route or host-route (entry if hardware supports this)
  980. if (info->dst_len == 32 && priv->r->host_route_write)
  981. r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
  982. else
  983. r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
  984. if (!r) {
  985. pr_err("%s: No more free route entries\n", __func__);
  986. return -1;
  987. }
  988. r->dst_ip = info->dst;
  989. r->prefix_len = info->dst_len;
  990. r->nh.rvid = vlan;
  991. to_localhost = !nh->fib_nh_gw4;
  992. if (priv->r->set_l3_router_mac) {
  993. u64 mac = ether_addr_to_u64(dev->dev_addr);
  994. pr_debug("Local route and router mac %016llx\n", mac);
  995. if (rtl83xx_alloc_router_mac(priv, mac))
  996. goto out_free_rt;
  997. // vid = 0: Do not care about VID
  998. r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
  999. if (r->nh.if_id < 0)
  1000. goto out_free_rmac;
  1001. if (to_localhost) {
  1002. int slot;
  1003. r->nh.mac = mac;
  1004. r->nh.port = priv->port_ignore;
  1005. r->attr.valid = true;
  1006. r->attr.action = ROUTE_ACT_TRAP2CPU;
  1007. r->attr.type = 0;
  1008. slot = priv->r->find_l3_slot(r, false);
  1009. pr_debug("%s: Got slot for route: %d\n", __func__, slot);
  1010. priv->r->host_route_write(slot, r);
  1011. }
  1012. }
  1013. // We need to resolve the mac address of the GW
  1014. if (!to_localhost)
  1015. rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
  1016. nh->fib_nh_flags |= RTNH_F_OFFLOAD;
  1017. return 0;
/* TODO: the error labels below do not yet undo the router MAC / route allocation */
out_free_rmac:
out_free_rt:
	return 0;
  1021. }
  1022. static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
  1023. struct fib6_entry_notifier_info *info)
  1024. {
  1025. pr_debug("In %s\n", __func__);
  1026. // nh->fib_nh_flags |= RTNH_F_OFFLOAD;
  1027. return 0;
  1028. }
  1029. struct net_event_work {
  1030. struct work_struct work;
  1031. struct rtl838x_switch_priv *priv;
  1032. u64 mac;
  1033. u32 gw_addr;
  1034. };
  1035. static void rtl83xx_net_event_work_do(struct work_struct *work)
  1036. {
  1037. struct net_event_work *net_work =
  1038. container_of(work, struct net_event_work, work);
  1039. struct rtl838x_switch_priv *priv = net_work->priv;
  1040. rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
  1041. }
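/* Netevent notifier: on neighbour updates, schedule a work item that updates
 * the L3 nexthop with the newly resolved MAC address.
 */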
  1042. static int rtl83xx_netevent_event(struct notifier_block *this,
  1043. unsigned long event, void *ptr)
  1044. {
  1045. struct rtl838x_switch_priv *priv;
  1046. struct net_device *dev;
  1047. struct neighbour *n = ptr;
	int port;
  1049. struct net_event_work *net_work;
  1050. priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
  1051. net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
  1052. if (!net_work)
  1053. return NOTIFY_BAD;
  1054. INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
  1055. net_work->priv = priv;
  1056. switch (event) {
  1057. case NETEVENT_NEIGH_UPDATE:
  1058. if (n->tbl != &arp_tbl)
  1059. return NOTIFY_DONE;
  1060. dev = n->dev;
  1061. port = rtl83xx_port_dev_lower_find(dev, priv);
  1062. if (port < 0 || !(n->nud_state & NUD_VALID)) {
			pr_debug("%s: Neighbour invalid, not updating\n", __func__);
  1064. kfree(net_work);
  1065. return NOTIFY_DONE;
  1066. }
  1067. net_work->mac = ether_addr_to_u64(n->ha);
  1068. net_work->gw_addr = *(__be32 *) n->primary_key;
  1069. pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
  1070. __func__, port, net_work->mac);
		schedule_work(&net_work->work);
  1074. break;
  1075. }
  1076. return NOTIFY_DONE;
  1077. }
  1078. struct rtl83xx_fib_event_work {
  1079. struct work_struct work;
  1080. union {
  1081. struct fib_entry_notifier_info fen_info;
  1082. struct fib6_entry_notifier_info fen6_info;
  1083. struct fib_rule_notifier_info fr_info;
  1084. };
  1085. struct rtl838x_switch_priv *priv;
  1086. bool is_fib6;
  1087. unsigned long event;
  1088. };
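/* Deferred FIB event handler, runs in process context under the RTNL lock */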
  1089. static void rtl83xx_fib_event_work_do(struct work_struct *work)
  1090. {
  1091. struct rtl83xx_fib_event_work *fib_work =
  1092. container_of(work, struct rtl83xx_fib_event_work, work);
  1093. struct rtl838x_switch_priv *priv = fib_work->priv;
  1094. struct fib_rule *rule;
  1095. int err;
  1096. /* Protect internal structures from changes */
  1097. rtnl_lock();
  1098. pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
  1099. switch (fib_work->event) {
  1100. case FIB_EVENT_ENTRY_ADD:
  1101. case FIB_EVENT_ENTRY_REPLACE:
  1102. case FIB_EVENT_ENTRY_APPEND:
  1103. if (fib_work->is_fib6) {
  1104. err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
  1105. } else {
  1106. err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
  1107. fib_info_put(fib_work->fen_info.fi);
  1108. }
  1109. if (err)
  1110. pr_err("%s: FIB4 failed\n", __func__);
  1111. break;
  1112. case FIB_EVENT_ENTRY_DEL:
  1113. rtl83xx_fib4_del(priv, &fib_work->fen_info);
  1114. fib_info_put(fib_work->fen_info.fi);
  1115. break;
  1116. case FIB_EVENT_RULE_ADD:
  1117. case FIB_EVENT_RULE_DEL:
  1118. rule = fib_work->fr_info.rule;
  1119. if (!fib4_rule_default(rule))
  1120. pr_err("%s: FIB4 default rule failed\n", __func__);
  1121. fib_rule_put(rule);
  1122. break;
  1123. }
  1124. rtnl_unlock();
  1125. kfree(fib_work);
  1126. }
  1127. /* Called with rcu_read_lock() */
  1128. static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
  1129. {
  1130. struct fib_notifier_info *info = ptr;
  1131. struct rtl838x_switch_priv *priv;
  1132. struct rtl83xx_fib_event_work *fib_work;
  1133. if ((info->family != AF_INET && info->family != AF_INET6 &&
  1134. info->family != RTNL_FAMILY_IPMR &&
  1135. info->family != RTNL_FAMILY_IP6MR))
  1136. return NOTIFY_DONE;
  1137. priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
  1138. fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
  1139. if (!fib_work)
  1140. return NOTIFY_BAD;
  1141. INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
  1142. fib_work->priv = priv;
  1143. fib_work->event = event;
  1144. fib_work->is_fib6 = false;
  1145. switch (event) {
  1146. case FIB_EVENT_ENTRY_ADD:
  1147. case FIB_EVENT_ENTRY_REPLACE:
  1148. case FIB_EVENT_ENTRY_APPEND:
  1149. case FIB_EVENT_ENTRY_DEL:
  1150. pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
  1151. if (info->family == AF_INET) {
  1152. struct fib_entry_notifier_info *fen_info = ptr;
  1153. if (fen_info->fi->fib_nh_is_v6) {
  1154. NL_SET_ERR_MSG_MOD(info->extack,
  1155. "IPv6 gateway with IPv4 route is not supported");
  1156. kfree(fib_work);
  1157. return notifier_from_errno(-EINVAL);
  1158. }
  1159. memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
			/* Take a reference on fib_info to prevent it from being
			 * freed while work is queued. Release it afterwards.
			 */
  1163. fib_info_hold(fib_work->fen_info.fi);
  1164. } else if (info->family == AF_INET6) {
  1165. struct fib6_entry_notifier_info *fen6_info = ptr;
  1166. pr_warn("%s: FIB_RULE ADD/DEL for IPv6 not supported\n", __func__);
  1167. kfree(fib_work);
  1168. return NOTIFY_DONE;
  1169. }
  1170. break;
  1171. case FIB_EVENT_RULE_ADD:
  1172. case FIB_EVENT_RULE_DEL:
  1173. pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
  1174. memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
  1175. fib_rule_get(fib_work->fr_info.rule);
  1176. break;
  1177. }
  1178. schedule_work(&fib_work->work);
  1179. return NOTIFY_DONE;
  1180. }
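/* Probe the switch: set up SoC-specific parameters, the slave MDIO bus, the DSA
 * switch itself, interrupts, QoS/L3 and the notifiers used for offloading.
 */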
  1181. static int __init rtl83xx_sw_probe(struct platform_device *pdev)
  1182. {
  1183. int err = 0, i;
  1184. struct rtl838x_switch_priv *priv;
  1185. struct device *dev = &pdev->dev;
  1186. u64 bpdu_mask;
  1187. pr_debug("Probing RTL838X switch device\n");
  1188. if (!pdev->dev.of_node) {
  1189. dev_err(dev, "No DT found\n");
  1190. return -EINVAL;
  1191. }
  1192. // Initialize access to RTL switch tables
  1193. rtl_table_init();
  1194. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  1195. if (!priv)
  1196. return -ENOMEM;
  1197. priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
  1198. if (!priv->ds)
  1199. return -ENOMEM;
  1200. priv->ds->dev = dev;
  1201. priv->ds->priv = priv;
  1202. priv->ds->ops = &rtl83xx_switch_ops;
  1203. priv->ds->needs_standalone_vlan_filtering = true;
  1204. priv->dev = dev;
  1205. mutex_init(&priv->reg_mutex);
  1206. priv->family_id = soc_info.family;
  1207. priv->id = soc_info.id;
  1208. switch(soc_info.family) {
  1209. case RTL8380_FAMILY_ID:
  1210. priv->ds->ops = &rtl83xx_switch_ops;
  1211. priv->cpu_port = RTL838X_CPU_PORT;
  1212. priv->port_mask = 0x1f;
  1213. priv->port_width = 1;
  1214. priv->irq_mask = 0x0FFFFFFF;
  1215. priv->r = &rtl838x_reg;
  1216. priv->ds->num_ports = 29;
  1217. priv->fib_entries = 8192;
  1218. rtl8380_get_version(priv);
  1219. priv->n_lags = 8;
  1220. priv->l2_bucket_size = 4;
  1221. priv->n_pie_blocks = 12;
  1222. priv->port_ignore = 0x1f;
  1223. priv->n_counters = 128;
  1224. break;
  1225. case RTL8390_FAMILY_ID:
  1226. priv->ds->ops = &rtl83xx_switch_ops;
  1227. priv->cpu_port = RTL839X_CPU_PORT;
  1228. priv->port_mask = 0x3f;
  1229. priv->port_width = 2;
  1230. priv->irq_mask = 0xFFFFFFFFFFFFFULL;
  1231. priv->r = &rtl839x_reg;
  1232. priv->ds->num_ports = 53;
  1233. priv->fib_entries = 16384;
  1234. rtl8390_get_version(priv);
  1235. priv->n_lags = 16;
  1236. priv->l2_bucket_size = 4;
  1237. priv->n_pie_blocks = 18;
  1238. priv->port_ignore = 0x3f;
  1239. priv->n_counters = 1024;
  1240. break;
  1241. case RTL9300_FAMILY_ID:
  1242. priv->ds->ops = &rtl930x_switch_ops;
  1243. priv->cpu_port = RTL930X_CPU_PORT;
  1244. priv->port_mask = 0x1f;
  1245. priv->port_width = 1;
  1246. priv->irq_mask = 0x0FFFFFFF;
  1247. priv->r = &rtl930x_reg;
  1248. priv->ds->num_ports = 29;
  1249. priv->fib_entries = 16384;
  1250. priv->version = RTL8390_VERSION_A;
  1251. priv->n_lags = 16;
  1252. sw_w32(1, RTL930X_ST_CTRL);
  1253. priv->l2_bucket_size = 8;
  1254. priv->n_pie_blocks = 16;
  1255. priv->port_ignore = 0x3f;
  1256. priv->n_counters = 2048;
  1257. break;
  1258. case RTL9310_FAMILY_ID:
  1259. priv->ds->ops = &rtl930x_switch_ops;
  1260. priv->cpu_port = RTL931X_CPU_PORT;
  1261. priv->port_mask = 0x3f;
  1262. priv->port_width = 2;
  1263. priv->irq_mask = 0xFFFFFFFFFFFFFULL;
  1264. priv->r = &rtl931x_reg;
  1265. priv->ds->num_ports = 57;
  1266. priv->fib_entries = 16384;
  1267. priv->version = RTL8390_VERSION_A;
  1268. priv->n_lags = 16;
  1269. priv->l2_bucket_size = 8;
  1270. break;
  1271. }
  1272. pr_debug("Chip version %c\n", priv->version);
  1273. err = rtl83xx_mdio_probe(priv);
  1274. if (err) {
		/* Probing fails the first time because of missing ethernet driver
		 * initialization. Use this to disable traffic in case the bootloader left it on.
		 */
  1278. return err;
  1279. }
  1280. err = dsa_register_switch(priv->ds);
  1281. if (err) {
  1282. dev_err(dev, "Error registering switch: %d\n", err);
  1283. return err;
  1284. }
	/* dsa_to_port returns the dsa_port from the port list in
	 * dsa_switch_tree; the tree is built when the switch
	 * is registered by dsa_register_switch
	 */
  1289. for (i = 0; i <= priv->cpu_port; i++)
  1290. priv->ports[i].dp = dsa_to_port(priv->ds, i);
  1291. /* Enable link and media change interrupts. Are the SERDES masks needed? */
  1292. sw_w32_mask(0, 3, priv->r->isr_glb_src);
  1293. priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
  1294. priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
  1295. priv->link_state_irq = platform_get_irq(pdev, 0);
  1296. pr_info("LINK state irq: %d\n", priv->link_state_irq);
  1297. switch (priv->family_id) {
  1298. case RTL8380_FAMILY_ID:
  1299. err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
  1300. IRQF_SHARED, "rtl838x-link-state", priv->ds);
  1301. break;
  1302. case RTL8390_FAMILY_ID:
  1303. err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
  1304. IRQF_SHARED, "rtl839x-link-state", priv->ds);
  1305. break;
  1306. case RTL9300_FAMILY_ID:
  1307. err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
  1308. IRQF_SHARED, "rtl930x-link-state", priv->ds);
  1309. break;
  1310. case RTL9310_FAMILY_ID:
  1311. err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
  1312. IRQF_SHARED, "rtl931x-link-state", priv->ds);
  1313. break;
  1314. }
  1315. if (err) {
  1316. dev_err(dev, "Error setting up switch interrupt.\n");
  1317. /* Need to free allocated switch here */
  1318. }
  1319. /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */
  1320. if (soc_info.family != RTL9310_FAMILY_ID)
  1321. sw_w32(0x1, priv->r->imr_glb);
  1322. rtl83xx_get_l2aging(priv);
  1323. rtl83xx_setup_qos(priv);
  1324. priv->r->l3_setup(priv);
  1325. /* Clear all destination ports for mirror groups */
  1326. for (i = 0; i < 4; i++)
  1327. priv->mirror_group_ports[i] = -1;
  1328. /* Register netdevice event callback to catch changes in link aggregation groups */
  1329. priv->nb.notifier_call = rtl83xx_netdevice_event;
  1330. if (register_netdevice_notifier(&priv->nb)) {
  1331. priv->nb.notifier_call = NULL;
  1332. dev_err(dev, "Failed to register LAG netdev notifier\n");
  1333. goto err_register_nb;
  1334. }
  1335. // Initialize hash table for L3 routing
  1336. rhltable_init(&priv->routes, &route_ht_params);
	/* Register netevent notifier callback to catch notifications about neighbour
	 * changes, to update nexthop entries for L3 routing.
	 */
  1340. priv->ne_nb.notifier_call = rtl83xx_netevent_event;
  1341. if (register_netevent_notifier(&priv->ne_nb)) {
  1342. priv->ne_nb.notifier_call = NULL;
  1343. dev_err(dev, "Failed to register netevent notifier\n");
  1344. goto err_register_ne_nb;
  1345. }
  1346. priv->fib_nb.notifier_call = rtl83xx_fib_event;
	/* Register Forwarding Information Base notifier to offload routes where possible.
	 * Only FIBs pointing to our own netdevs are programmed into
	 * the device, so no need to pass a callback.
	 */
  1352. err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
  1353. if (err)
  1354. goto err_register_fib_nb;
  1355. // TODO: put this into l2_setup()
  1356. // Flood BPDUs to all ports including cpu-port
  1357. if (soc_info.family != RTL9300_FAMILY_ID) {
  1358. bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
  1359. priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
  1360. // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs
  1361. sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
  1362. rtl838x_dbgfs_init(priv);
  1363. } else {
  1364. rtl930x_dbgfs_init(priv);
  1365. }
  1366. return 0;
  1367. err_register_fib_nb:
  1368. unregister_netevent_notifier(&priv->ne_nb);
  1369. err_register_ne_nb:
  1370. unregister_netdevice_notifier(&priv->nb);
  1371. err_register_nb:
  1372. return err;
  1373. }
  1374. static int rtl83xx_sw_remove(struct platform_device *pdev)
  1375. {
  1376. // TODO:
  1377. pr_debug("Removing platform driver for rtl83xx-sw\n");
  1378. return 0;
  1379. }
  1380. static const struct of_device_id rtl83xx_switch_of_ids[] = {
  1381. { .compatible = "realtek,rtl83xx-switch"},
  1382. { /* sentinel */ }
  1383. };
  1384. MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
  1385. static struct platform_driver rtl83xx_switch_driver = {
  1386. .probe = rtl83xx_sw_probe,
  1387. .remove = rtl83xx_sw_remove,
  1388. .driver = {
  1389. .name = "rtl83xx-switch",
  1390. .pm = NULL,
  1391. .of_match_table = rtl83xx_switch_of_ids,
  1392. },
  1393. };
  1394. module_platform_driver(rtl83xx_switch_driver);
  1395. MODULE_AUTHOR("B. Koblitz");
  1396. MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
  1397. MODULE_LICENSE("GPL");