common.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/of_mdio.h>
  3. #include <linux/of_platform.h>
  4. #include <net/arp.h>
  5. #include <net/nexthop.h>
  6. #include <net/neighbour.h>
  7. #include <net/netevent.h>
  8. #include <linux/etherdevice.h>
  9. #include <linux/if_vlan.h>
  10. #include <linux/inetdevice.h>
  11. #include <linux/rhashtable.h>
  12. #include <linux/of_net.h>
  13. #include <asm/mach-rtl838x/mach-rtl83xx.h>
  14. #include "rtl83xx.h"
  15. extern struct rtl83xx_soc_info soc_info;
  16. extern const struct rtl838x_reg rtl838x_reg;
  17. extern const struct rtl838x_reg rtl839x_reg;
  18. extern const struct rtl838x_reg rtl930x_reg;
  19. extern const struct rtl838x_reg rtl931x_reg;
  20. extern const struct dsa_switch_ops rtl83xx_switch_ops;
  21. extern const struct dsa_switch_ops rtl930x_switch_ops;
  22. DEFINE_MUTEX(smi_lock);
/* Read the current spanning-tree state of a port from MSTI 0.
 *
 * Returns the 2-bit STP state (0-3) taken from the hardware port-state
 * field, or -1 for the CPU port and above, which cannot be configured.
 * Takes and releases priv->reg_mutex around the hardware access.
 */
int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
{
	u32 msti = 0;		/* Only MSTI 0 is queried */
	u32 port_state[4];
	int index, bit;
	int pos = port;
	int n = priv->port_width << 1;

	/* Ports above or equal CPU port can never be configured */
	if (port >= priv->cpu_port)
		return -1;

	mutex_lock(&priv->reg_mutex);

	/* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
	if (priv->family_id == RTL8390_FAMILY_ID)
		pos += 12;
	if (priv->family_id == RTL9300_FAMILY_ID)
		pos += 3;
	if (priv->family_id == RTL9310_FAMILY_ID)
		pos += 8;

	/* 2 bits per port, 16 ports per 32-bit word; words are stored
	 * most-significant first, hence the reversed index.
	 */
	index = n - (pos >> 4) - 1;
	bit = (pos << 1) % 32;

	priv->r->stp_get(priv, msti, port_state);

	mutex_unlock(&priv->reg_mutex);

	return (port_state[index] >> bit) & 3;
}
/* Descriptors for the indirect table-access register sets of all supported
 * SoC families. Each entry pairs a command register with its data registers:
 * TBL_DESC(addr, data, max_data, c_bit, t_bit, rmode) — command register
 * address, first data register address, number of data registers, position
 * of the command (read/write) bit, position of the table-type field, and
 * whether the read/write bit has inverted meaning (RTL838x).
 */
static struct table_reg rtl838x_tbl_regs[] = {
	TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1),		/* RTL8380_TBL_L2 */
	TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1),	/* RTL8380_TBL_0 */
	TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1),		/* RTL8380_TBL_1 */

	TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0),		/* RTL8390_TBL_L2 */
	TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0),	/* RTL8390_TBL_0 */
	TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0),		/* RTL8390_TBL_1 */
	TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0),		/* RTL8390_TBL_2 */

	TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0),		/* RTL9300_TBL_L2 */
	TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0),	/* RTL9300_TBL_0 */
	TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0),	/* RTL9300_TBL_1 */
	TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0),		/* RTL9300_TBL_2 */
	TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0),		/* RTL9300_TBL_HSB */
	TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0),		/* RTL9300_TBL_HSA */

	TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0),		/* RTL9310_TBL_0 */
	TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0),	/* RTL9310_TBL_1 */
	TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0),		/* RTL9310_TBL_2 */
	TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0),		/* RTL9310_TBL_3 */
	TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0),		/* RTL9310_TBL_4 */
	TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0),		/* RTL9310_TBL_5 */
};
  68. void rtl_table_init(void)
  69. {
  70. for (int i = 0; i < RTL_TBL_END; i++)
  71. mutex_init(&rtl838x_tbl_regs[i].lock);
  72. }
  73. /* Request access to table t in table access register r
  74. * Returns a handle to a lock for that table
  75. */
  76. struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
  77. {
  78. if (r >= RTL_TBL_END)
  79. return NULL;
  80. if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit))
  81. return NULL;
  82. mutex_lock(&rtl838x_tbl_regs[r].lock);
  83. rtl838x_tbl_regs[r].tbl = t;
  84. return &rtl838x_tbl_regs[r];
  85. }
  86. /* Release a table r, unlock the corresponding lock */
  87. void rtl_table_release(struct table_reg *r)
  88. {
  89. if (!r)
  90. return;
  91. /* pr_info("Unlocking %08x\n", (u32)r); */
  92. mutex_unlock(&r->lock);
  93. /* pr_info("Unlock done\n"); */
  94. }
/* Issue a read or write command on table register r for table index idx and
 * busy-wait until the hardware clears the execute bit.
 * Returns 0 on success or the negative errno from the poll timeout.
 */
static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
{
	int ret = 0;
	u32 cmd, val;

	/* Read/write bit has inverted meaning on RTL838x */
	if (r->rmode)
		cmd = is_write ? 0 : BIT(r->c_bit);
	else
		cmd = is_write ? BIT(r->c_bit) : 0;

	cmd |= BIT(r->c_bit + 1); /* Execute bit */
	cmd |= r->tbl << r->t_bit; /* Table type */
	cmd |= idx & (BIT(r->t_bit) - 1); /* Index */

	sw_w32(cmd, r->addr);

	/* The switch clears the execute bit once the access has completed */
	ret = readx_poll_timeout(sw_r32, r->addr, val,
				 !(val & BIT(r->c_bit + 1)), 20, 10000);
	if (ret)
		pr_err("%s: timeout\n", __func__);

	return ret;
}
  114. /* Reads table index idx into the data registers of the table */
  115. int rtl_table_read(struct table_reg *r, int idx)
  116. {
  117. return rtl_table_exec(r, false, idx);
  118. }
  119. /* Writes the content of the table data registers into the table at index idx */
  120. int rtl_table_write(struct table_reg *r, int idx)
  121. {
  122. return rtl_table_exec(r, true, idx);
  123. }
  124. /* Returns the address of the ith data register of table register r
  125. * the address is relative to the beginning of the Switch-IO block at 0xbb000000
  126. */
  127. inline u16 rtl_table_data(struct table_reg *r, int i)
  128. {
  129. if (i >= r->max_data)
  130. i = r->max_data - 1;
  131. return r->data + i * 4;
  132. }
  133. inline u32 rtl_table_data_r(struct table_reg *r, int i)
  134. {
  135. return sw_r32(rtl_table_data(r, i));
  136. }
  137. inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
  138. {
  139. sw_w32(v, rtl_table_data(r, i));
  140. }
  141. /* Port register accessor functions for the RTL838x and RTL930X SoCs */
  142. void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
  143. {
  144. sw_w32_mask((u32)clear, (u32)set, reg);
  145. }
  146. void rtl838x_set_port_reg(u64 set, int reg)
  147. {
  148. sw_w32((u32)set, reg);
  149. }
  150. u64 rtl838x_get_port_reg(int reg)
  151. {
  152. return ((u64)sw_r32(reg));
  153. }
  154. /* Port register accessor functions for the RTL839x and RTL931X SoCs */
  155. void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
  156. {
  157. sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
  158. sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
  159. }
  160. u64 rtl839x_get_port_reg_be(int reg)
  161. {
  162. u64 v = sw_r32(reg);
  163. v <<= 32;
  164. v |= sw_r32(reg + 4);
  165. return v;
  166. }
  167. void rtl839x_set_port_reg_be(u64 set, int reg)
  168. {
  169. sw_w32(set >> 32, reg);
  170. sw_w32(set & 0xffffffff, reg + 4);
  171. }
  172. void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
  173. {
  174. sw_w32_mask((u32)clear, (u32)set, reg);
  175. sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
  176. }
  177. void rtl839x_set_port_reg_le(u64 set, int reg)
  178. {
  179. sw_w32(set, reg);
  180. sw_w32(set >> 32, reg + 4);
  181. }
  182. u64 rtl839x_get_port_reg_le(int reg)
  183. {
  184. u64 v = sw_r32(reg + 4);
  185. v <<= 32;
  186. v |= sw_r32(reg);
  187. return v;
  188. }
  189. int read_phy(u32 port, u32 page, u32 reg, u32 *val)
  190. {
  191. switch (soc_info.family) {
  192. case RTL8380_FAMILY_ID:
  193. return rtl838x_read_phy(port, page, reg, val);
  194. case RTL8390_FAMILY_ID:
  195. return rtl839x_read_phy(port, page, reg, val);
  196. case RTL9300_FAMILY_ID:
  197. return rtl930x_read_phy(port, page, reg, val);
  198. case RTL9310_FAMILY_ID:
  199. return rtl931x_read_phy(port, page, reg, val);
  200. }
  201. return -1;
  202. }
  203. int write_phy(u32 port, u32 page, u32 reg, u32 val)
  204. {
  205. switch (soc_info.family) {
  206. case RTL8380_FAMILY_ID:
  207. return rtl838x_write_phy(port, page, reg, val);
  208. case RTL8390_FAMILY_ID:
  209. return rtl839x_write_phy(port, page, reg, val);
  210. case RTL9300_FAMILY_ID:
  211. return rtl930x_write_phy(port, page, reg, val);
  212. case RTL9310_FAMILY_ID:
  213. return rtl931x_write_phy(port, page, reg, val);
  214. }
  215. return -1;
  216. }
  217. static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
  218. {
  219. struct device *dev = priv->dev;
  220. struct device_node *dn, *phy_node, *led_node, *mii_np = dev->of_node;
  221. struct mii_bus *bus;
  222. int ret;
  223. u32 pn;
  224. pr_debug("In %s\n", __func__);
  225. mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
  226. if (mii_np) {
  227. pr_debug("Found compatible MDIO node!\n");
  228. } else {
  229. dev_err(priv->dev, "no %s child node found", "mdio-bus");
  230. return -ENODEV;
  231. }
  232. priv->mii_bus = of_mdio_find_bus(mii_np);
  233. if (!priv->mii_bus) {
  234. pr_debug("Deferring probe of mdio bus\n");
  235. return -EPROBE_DEFER;
  236. }
  237. if (!of_device_is_available(mii_np))
  238. ret = -ENODEV;
  239. bus = devm_mdiobus_alloc(priv->ds->dev);
  240. if (!bus)
  241. return -ENOMEM;
  242. bus->name = "rtl838x slave mii";
  243. /* Since the NIC driver is loaded first, we can use the mdio rw functions
  244. * assigned there.
  245. */
  246. bus->read = priv->mii_bus->read;
  247. bus->write = priv->mii_bus->write;
  248. bus->read_paged = priv->mii_bus->read_paged;
  249. bus->write_paged = priv->mii_bus->write_paged;
  250. snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
  251. bus->parent = dev;
  252. priv->ds->slave_mii_bus = bus;
  253. priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
  254. priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
  255. ret = mdiobus_register(priv->ds->slave_mii_bus);
  256. if (ret && mii_np) {
  257. of_node_put(dn);
  258. return ret;
  259. }
  260. dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
  261. if (!dn) {
  262. dev_err(priv->dev, "No RTL switch node in DTS\n");
  263. return -ENODEV;
  264. }
  265. led_node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds");
  266. for_each_node_by_name(dn, "port") {
  267. phy_interface_t interface;
  268. u32 led_set;
  269. char led_set_str[16] = {0};
  270. if (!of_device_is_available(dn))
  271. continue;
  272. if (of_property_read_u32(dn, "reg", &pn))
  273. continue;
  274. phy_node = of_parse_phandle(dn, "phy-handle", 0);
  275. if (!phy_node) {
  276. if (pn != priv->cpu_port)
  277. dev_err(priv->dev, "Port node %d misses phy-handle\n", pn);
  278. continue;
  279. }
  280. if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
  281. priv->ports[pn].sds_num = -1;
  282. pr_debug("%s port %d has SDS %d\n", __func__, pn, priv->ports[pn].sds_num);
  283. if (of_get_phy_mode(dn, &interface))
  284. interface = PHY_INTERFACE_MODE_NA;
  285. if (interface == PHY_INTERFACE_MODE_HSGMII)
  286. priv->ports[pn].is2G5 = true;
  287. if (interface == PHY_INTERFACE_MODE_USXGMII)
  288. priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
  289. if (interface == PHY_INTERFACE_MODE_10GBASER)
  290. priv->ports[pn].is10G = true;
  291. priv->ports[pn].leds_on_this_port = 0;
  292. if (led_node) {
  293. if (of_property_read_u32(dn, "led-set", &led_set))
  294. led_set = 0;
  295. priv->ports[pn].led_set = led_set;
  296. sprintf(led_set_str, "led_set%d", led_set);
  297. priv->ports[pn].leds_on_this_port = of_property_count_u32_elems(led_node, led_set_str);
  298. if (priv->ports[pn].leds_on_this_port > 4) {
  299. dev_err(priv->dev, "led_set %d for port %d configuration is invalid\n", led_set, pn);
  300. return -ENODEV;
  301. }
  302. }
  303. /* Check for the integrated SerDes of the RTL8380M first */
  304. if (of_property_read_bool(phy_node, "phy-is-integrated")
  305. && priv->id == 0x8380 && pn >= 24) {
  306. pr_debug("----> FÓUND A SERDES\n");
  307. priv->ports[pn].phy = PHY_RTL838X_SDS;
  308. continue;
  309. }
  310. if (priv->id >= 0x9300) {
  311. priv->ports[pn].phy_is_integrated = false;
  312. if (of_property_read_bool(phy_node, "phy-is-integrated")) {
  313. priv->ports[pn].phy_is_integrated = true;
  314. priv->ports[pn].phy = PHY_RTL930X_SDS;
  315. }
  316. } else {
  317. if (of_property_read_bool(phy_node, "phy-is-integrated") &&
  318. !of_property_read_bool(phy_node, "sfp")) {
  319. priv->ports[pn].phy = PHY_RTL8218B_INT;
  320. continue;
  321. }
  322. }
  323. if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
  324. of_property_read_bool(phy_node, "sfp")) {
  325. priv->ports[pn].phy = PHY_RTL8214FC;
  326. continue;
  327. }
  328. if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
  329. !of_property_read_bool(phy_node, "sfp")) {
  330. priv->ports[pn].phy = PHY_RTL8218B_EXT;
  331. continue;
  332. }
  333. }
  334. /* Disable MAC polling the PHY so that we can start configuration */
  335. priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
  336. /* Enable PHY control via SoC */
  337. if (priv->family_id == RTL8380_FAMILY_ID) {
  338. /* Enable SerDes NWAY and PHY control via SoC */
  339. sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
  340. } else if (priv->family_id == RTL8390_FAMILY_ID) {
  341. /* Disable PHY polling via SoC */
  342. sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
  343. }
  344. /* Power on fibre ports and reset them if necessary */
  345. if (priv->ports[24].phy == PHY_RTL838X_SDS) {
  346. pr_debug("Powering on fibre ports & reset\n");
  347. rtl8380_sds_power(24, 1);
  348. rtl8380_sds_power(26, 1);
  349. }
  350. pr_debug("%s done\n", __func__);
  351. return 0;
  352. }
  353. static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
  354. {
  355. int t = sw_r32(priv->r->l2_ctrl_1);
  356. t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
  357. if (priv->family_id == RTL8380_FAMILY_ID)
  358. t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
  359. else
  360. t = (t * 3) / 5;
  361. pr_debug("L2 AGING time: %d sec\n", t);
  362. pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
  363. return t;
  364. }
  365. /* Caller must hold priv->reg_mutex */
  366. int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
  367. {
  368. struct rtl838x_switch_priv *priv = ds->priv;
  369. int i;
  370. u32 algomsk = 0;
  371. u32 algoidx = 0;
  372. if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
  373. pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
  374. return -EINVAL;
  375. }
  376. if (group >= priv->n_lags) {
  377. pr_err("%s: LAG %d invalid.\n", __func__, group);
  378. return -EINVAL;
  379. }
  380. if (port >= priv->cpu_port) {
  381. pr_err("%s: Port %d invalid.\n", __func__, port);
  382. return -EINVAL;
  383. }
  384. for (i = 0; i < priv->n_lags; i++) {
  385. if (priv->lags_port_members[i] & BIT_ULL(port))
  386. break;
  387. }
  388. if (i != priv->n_lags) {
  389. pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
  390. return -ENOSPC;
  391. }
  392. switch(info->hash_type) {
  393. case NETDEV_LAG_HASH_L2:
  394. algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
  395. algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
  396. break;
  397. case NETDEV_LAG_HASH_L23:
  398. algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
  399. algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
  400. algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
  401. algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
  402. algoidx = 1;
  403. break;
  404. case NETDEV_LAG_HASH_L34:
  405. algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; /* sport */
  406. algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; /* dport */
  407. algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
  408. algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
  409. algoidx = 2;
  410. break;
  411. default:
  412. algomsk |= 0x7f;
  413. }
  414. priv->r->set_distribution_algorithm(group, algoidx, algomsk);
  415. priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
  416. priv->lags_port_members[group] |= BIT_ULL(port);
  417. pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
  418. __func__, port, group, priv->lags_port_members[group]);
  419. return 0;
  420. }
  421. /* Caller must hold priv->reg_mutex */
  422. int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
  423. {
  424. struct rtl838x_switch_priv *priv = ds->priv;
  425. if (group >= priv->n_lags) {
  426. pr_err("%s: LAG %d invalid.\n", __func__, group);
  427. return -EINVAL;
  428. }
  429. if (port >= priv->cpu_port) {
  430. pr_err("%s: Port %d invalid.\n", __func__, port);
  431. return -EINVAL;
  432. }
  433. if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
  434. pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
  435. return -ENOSPC;
  436. }
  437. /* 0x7f algo mask all */
  438. priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
  439. priv->lags_port_members[group] &= ~BIT_ULL(port);
  440. pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
  441. __func__, port, group, priv->lags_port_members[group]);
  442. return 0;
  443. }
  444. // Currently Unused
  445. // /* Allocate a 64 bit octet counter located in the LOG HW table */
  446. // static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
  447. // {
  448. // int idx;
  449. // mutex_lock(&priv->reg_mutex);
  450. // idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
  451. // if (idx >= priv->n_counters) {
  452. // mutex_unlock(&priv->reg_mutex);
  453. // return -1;
  454. // }
  455. // set_bit(idx, priv->octet_cntr_use_bm);
  456. // mutex_unlock(&priv->reg_mutex);
  457. // return idx;
  458. // }
/* Allocate a 32-bit packet counter
 * 2 32-bit packet counters share the location of a 64-bit octet counter
 * Initially there are no free packet counters and 2 new ones need to be freed
 * by allocating the corresponding octet counter
 *
 * Returns the index of the allocated packet counter, or -1 when every
 * counter location is in use. Serialized via priv->reg_mutex.
 */
int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
{
	int idx, j;

	mutex_lock(&priv->reg_mutex);

	/* Because initially no packet counters are free, the logic is reversed:
	 * a 0-bit means the counter is already allocated (for octets)
	 */
	idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
	if (idx >= priv->n_counters * 2) {
		/* No half-slot is free: claim a whole octet-counter slot, hand
		 * out its even half and mark the odd half as available.
		 */
		j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
		if (j >= priv->n_counters) {
			mutex_unlock(&priv->reg_mutex);
			return -1;
		}
		set_bit(j, priv->octet_cntr_use_bm);
		idx = j * 2;
		set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
	} else {
		/* Clearing the bit marks the half-slot as used (reversed logic) */
		clear_bit(idx, priv->packet_cntr_use_bm);
	}

	mutex_unlock(&priv->reg_mutex);

	return idx;
}
/* Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
 * or mark an existing entry as a nexthop by setting it's nexthop bit
 * Called from the L3 layer
 * The index in the L2 hash table is filled into nh->l2_id;
 *
 * Returns 0 on success, -1 when no free or matching slot exists in the bucket.
 */
int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
{
	struct rtl838x_l2_entry e;
	u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
	u32 key = priv->r->l2_hash_key(priv, seed);
	int idx = -1;
	u64 entry;

	pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
		 __func__, nh->mac, nh->rvid, key, seed);

	e.type = L2_UNICAST;
	u64_to_ether_addr(nh->mac, &e.mac[0]);
	e.port = nh->port;

	/* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
	for (int i = 0; i < priv->l2_bucket_size; i++) {
		entry = priv->r->read_l2_entry_using_hash(key, i, &e);
		/* Accept the first slot that is free or already holds this
		 * MAC/VID combination (stored seed matches ours).
		 */
		if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
			/* Derive the global L2 table index from bucket key and
			 * slot number; slots > 3 live in the second block.
			 */
			idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
				: ((key << 2) | i) & 0xffff;
			break;
		}
	}

	if (idx < 0) {
		pr_err("%s: No more L2 forwarding entries available\n", __func__);
		return -1;
	}

	/* Found an existing (e->valid is true) or empty entry, make it a nexthop entry */
	nh->l2_id = idx;
	if (e.valid) {
		/* Preserve the entry's current forwarding data in the nexthop */
		nh->port = e.port;
		nh->vid = e.vid; /* Save VID */
		nh->rvid = e.rvid;
		nh->dev_id = e.stack_dev;
		/* If the entry is already a valid next hop entry, don't change it */
		if (e.next_hop)
			return 0;
	} else {
		/* Populate a fresh static entry for this nexthop */
		e.valid = true;
		e.is_static = true;
		e.rvid = nh->rvid;
		e.is_ip_mc = false;
		e.is_ipv6_mc = false;
		e.block_da = false;
		e.block_sa = false;
		e.suspended = false;
		e.age = 0; /* With port-ignore */
		e.port = priv->port_ignore;
		u64_to_ether_addr(nh->mac, &e.mac[0]);
	}
	e.next_hop = true;
	e.nh_route_id = nh->id; /* NH route ID takes place of VID */
	e.nh_vlan_target = false;

	priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);

	return 0;
}
  547. /* Removes a Layer 2 next hop entry in the forwarding database
  548. * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
  549. * and we wait until the entry ages out
  550. */
  551. int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
  552. {
  553. struct rtl838x_l2_entry e;
  554. u32 key = nh->l2_id >> 2;
  555. int i = nh->l2_id & 0x3;
  556. u64 entry = entry = priv->r->read_l2_entry_using_hash(key, i, &e);
  557. pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
  558. if (!e.valid) {
  559. dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
  560. return -1;
  561. }
  562. if (e.is_static)
  563. e.valid = false;
  564. e.next_hop = false;
  565. e.vid = nh->vid; /* Restore VID */
  566. e.rvid = nh->rvid;
  567. priv->r->write_l2_entry_using_hash(key, i, &e);
  568. return 0;
  569. }
  570. static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
  571. struct net_device *ndev,
  572. struct netdev_notifier_changeupper_info *info)
  573. {
  574. struct net_device *upper = info->upper_dev;
  575. struct netdev_lag_upper_info *lag_upper_info = NULL;
  576. int i, j, err;
  577. if (!netif_is_lag_master(upper))
  578. return 0;
  579. mutex_lock(&priv->reg_mutex);
  580. for (i = 0; i < priv->n_lags; i++) {
  581. if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
  582. break;
  583. }
  584. for (j = 0; j < priv->cpu_port; j++) {
  585. if (priv->ports[j].dp->slave == ndev)
  586. break;
  587. }
  588. if (j >= priv->cpu_port) {
  589. err = -EINVAL;
  590. goto out;
  591. }
  592. if (info->linking) {
  593. lag_upper_info = info->upper_info;
  594. if (!priv->lag_devs[i])
  595. priv->lag_devs[i] = upper;
  596. err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
  597. if (err) {
  598. err = -EINVAL;
  599. goto out;
  600. }
  601. } else {
  602. if (!priv->lag_devs[i])
  603. err = -EINVAL;
  604. err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
  605. if (err) {
  606. err = -EINVAL;
  607. goto out;
  608. }
  609. if (!priv->lags_port_members[i])
  610. priv->lag_devs[i] = NULL;
  611. }
  612. out:
  613. mutex_unlock(&priv->reg_mutex);
  614. return 0;
  615. }
  616. /* Is the lower network device a DSA slave network device of our RTL930X-switch?
  617. * Unfortunately we cannot just follow dev->dsa_prt as this is only set for the
  618. * DSA master device.
  619. */
  620. int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
  621. {
  622. /* TODO: On 5.12:
  623. * if(!dsa_slave_dev_check(dev)) {
  624. * netdev_info(dev, "%s: not a DSA device.\n", __func__);
  625. * return -EINVAL;
  626. * }
  627. */
  628. for (int i = 0; i < priv->cpu_port; i++) {
  629. if (!priv->ports[i].dp)
  630. continue;
  631. if (priv->ports[i].dp->slave == dev)
  632. return i;
  633. }
  634. return -EINVAL;
  635. }
  636. static int rtl83xx_netdevice_event(struct notifier_block *this,
  637. unsigned long event, void *ptr)
  638. {
  639. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  640. struct rtl838x_switch_priv *priv;
  641. int err;
  642. pr_debug("In: %s, event: %lu\n", __func__, event);
  643. if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
  644. return NOTIFY_DONE;
  645. priv = container_of(this, struct rtl838x_switch_priv, nb);
  646. switch (event) {
  647. case NETDEV_CHANGEUPPER:
  648. err = rtl83xx_handle_changeupper(priv, ndev, ptr);
  649. break;
  650. }
  651. if (err)
  652. return err;
  653. return NOTIFY_DONE;
  654. }
  655. const static struct rhashtable_params route_ht_params = {
  656. .key_len = sizeof(u32),
  657. .key_offset = offsetof(struct rtl83xx_route, gw_ip),
  658. .head_offset = offsetof(struct rtl83xx_route, linkage),
  659. };
/* Updates an L3 next hop entry in the ROUTING table
 *
 * Walks all routes whose gateway is ip_addr (under RCU) and points them at
 * the newly resolved MAC address: refreshes the L2 nexthop, the ROUTING/
 * host-route table entry and the PIE rule forwarding matching packets.
 * Returns 0 on success, -ENOENT when no route uses ip_addr.
 */
static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
{
	struct rtl83xx_route *r;
	struct rhlist_head *tmp, *list;

	rcu_read_lock();
	list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
	if (!list) {
		rcu_read_unlock();
		return -ENOENT;
	}

	rhl_for_each_entry_rcu(r, tmp, list, linkage) {
		pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
			__func__, &ip_addr, mac);

		/* Reads the ROUTING table entry associated with the route */
		priv->r->route_read(r->id, r);
		pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);

		r->nh.mac = r->nh.gw = mac;
		r->nh.port = priv->port_ignore;
		r->nh.id = r->id;

		/* Do we need to explicitly add a DMAC entry with the route's nh index? */
		if (priv->r->set_l3_egress_mac)
			priv->r->set_l3_egress_mac(r->id, mac);

		/* Update ROUTING table: map gateway-mac and switch-mac id to route id */
		rtl83xx_l2_nexthop_add(priv, &r->nh);

		r->attr.valid = true;
		r->attr.action = ROUTE_ACT_FORWARD;
		r->attr.type = 0;
		r->attr.hit = false; /* Reset route-used indicator */

		/* Add PIE entry with dst_ip and prefix_len */
		r->pr.dip = r->dst_ip;
		r->pr.dip_m = inet_make_mask(r->prefix_len);

		if (r->is_host_route) {
			int slot = priv->r->find_l3_slot(r, false);

			pr_info("%s: Got slot for route: %d\n", __func__, slot);
			priv->r->host_route_write(slot, r);
		} else {
			priv->r->route_write(r->id, r);
			r->pr.fwd_sel = true;
			r->pr.fwd_data = r->nh.l2_id;
			r->pr.fwd_act = PIE_ACT_ROUTE_UC;
		}

		if (priv->r->set_l3_nexthop)
			priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);

		/* A negative pr.id means no PIE rule exists yet for this route */
		if (r->pr.id < 0) {
			/* Attach a packet counter to the new rule if one is free */
			r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
			if (r->pr.packet_cntr >= 0) {
				pr_info("Using packet counter %d\n", r->pr.packet_cntr);
				r->pr.log_sel = true;
				r->pr.log_data = r->pr.packet_cntr;
			}
			priv->r->pie_rule_add(priv, &r->pr);
		} else {
			int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);

			pr_info("%s: total packets: %d\n", __func__, pkts);
			priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
		}
	}
	rcu_read_unlock();

	return 0;
}
  721. static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
  722. struct net_device *dev, __be32 ip_addr)
  723. {
  724. struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
  725. int err = 0;
  726. u64 mac;
  727. if (!n) {
  728. n = neigh_create(&arp_tbl, &ip_addr, dev);
  729. if (IS_ERR(n))
  730. return PTR_ERR(n);
  731. }
  732. /* If the neigh is already resolved, then go ahead and
  733. * install the entry, otherwise start the ARP process to
  734. * resolve the neigh.
  735. */
  736. if (n->nud_state & NUD_VALID) {
  737. mac = ether_addr_to_u64(n->ha);
  738. pr_info("%s: resolved mac: %016llx\n", __func__, mac);
  739. rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
  740. } else {
  741. pr_info("%s: need to wait\n", __func__);
  742. neigh_event_send(n, NULL);
  743. }
  744. neigh_release(n);
  745. return err;
  746. }
/* Context passed to rtl83xx_port_lower_walk() while scanning the lower
 * devices of a netdev for a port belonging to this switch.
 */
struct rtl83xx_walk_data {
	struct rtl838x_switch_priv *priv;
	int port; /* result: port index found, negative when none */
};
  751. static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
  752. {
  753. struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
  754. struct rtl838x_switch_priv *priv = data->priv;
  755. int ret = 0;
  756. int index;
  757. index = rtl83xx_port_is_under(lower, priv);
  758. data->port = index;
  759. if (index >= 0) {
  760. pr_debug("Found DSA-port, index %d\n", index);
  761. ret = 1;
  762. }
  763. return ret;
  764. }
  765. int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
  766. {
  767. struct rtl83xx_walk_data data;
  768. struct netdev_nested_priv _priv;
  769. data.priv = priv;
  770. data.port = 0;
  771. _priv.data = (void *)&data;
  772. netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
  773. return data.port;
  774. }
  775. static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
  776. {
  777. struct rtl83xx_route *r;
  778. int idx = 0, err;
  779. mutex_lock(&priv->reg_mutex);
  780. idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
  781. pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
  782. r = kzalloc(sizeof(*r), GFP_KERNEL);
  783. if (!r) {
  784. mutex_unlock(&priv->reg_mutex);
  785. return r;
  786. }
  787. r->id = idx;
  788. r->gw_ip = ip;
  789. r->pr.id = -1; /* We still need to allocate a rule in HW */
  790. r->is_host_route = false;
  791. err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
  792. if (err) {
  793. pr_err("Could not insert new rule\n");
  794. mutex_unlock(&priv->reg_mutex);
  795. goto out_free;
  796. }
  797. set_bit(idx, priv->route_use_bm);
  798. mutex_unlock(&priv->reg_mutex);
  799. return r;
  800. out_free:
  801. kfree(r);
  802. return NULL;
  803. }
  804. static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
  805. {
  806. struct rtl83xx_route *r;
  807. int idx = 0, err;
  808. mutex_lock(&priv->reg_mutex);
  809. idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
  810. pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
  811. r = kzalloc(sizeof(*r), GFP_KERNEL);
  812. if (!r) {
  813. mutex_unlock(&priv->reg_mutex);
  814. return r;
  815. }
  816. /* We require a unique route ID irrespective of whether it is a prefix or host
  817. * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry
  818. */
  819. r->id = idx + MAX_ROUTES;
  820. r->gw_ip = ip;
  821. r->pr.id = -1; /* We still need to allocate a rule in HW */
  822. r->is_host_route = true;
  823. err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
  824. if (err) {
  825. pr_err("Could not insert new rule\n");
  826. mutex_unlock(&priv->reg_mutex);
  827. goto out_free;
  828. }
  829. set_bit(idx, priv->host_route_use_bm);
  830. mutex_unlock(&priv->reg_mutex);
  831. return r;
  832. out_free:
  833. kfree(r);
  834. return NULL;
  835. }
/* Removes a route entry: unlinks it from the hash table, invalidates its
 * hardware representation (host-route slot or prefix-route table entry),
 * releases its ID bit and frees the memory. The entry must not be used
 * by anyone else when this is called.
 */
static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
{
	int id;

	if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
		dev_warn(priv->dev, "Could not remove route\n");

	if (r->is_host_route) {
		id = priv->r->find_l3_slot(r, false);
		pr_debug("%s: Got id for host route: %d\n", __func__, id);
		r->attr.valid = false;
		priv->r->host_route_write(id, r);
		/* Host-route IDs are offset by MAX_ROUTES, see rtl83xx_host_route_alloc() */
		clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
	} else {
		/* If there is a HW representation of the route, delete it */
		if (priv->r->route_lookup_hw) {
			id = priv->r->route_lookup_hw(r);
			pr_info("%s: Got id for prefix route: %d\n", __func__, id);
			r->attr.valid = false;
			priv->r->route_write(id, r);
		}
		clear_bit(r->id, priv->route_use_bm);
	}

	kfree(r);
}
  859. static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
  860. struct fib_entry_notifier_info *info)
  861. {
  862. struct fib_nh *nh = fib_info_nh(info->fi, 0);
  863. struct rtl83xx_route *r;
  864. struct rhlist_head *tmp, *list;
  865. pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
  866. rcu_read_lock();
  867. list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
  868. if (!list) {
  869. rcu_read_unlock();
  870. pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
  871. return -ENOENT;
  872. }
  873. rhl_for_each_entry_rcu(r, tmp, list, linkage) {
  874. if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
  875. pr_info("%s: found a route with id %d, nh-id %d\n",
  876. __func__, r->id, r->nh.id);
  877. break;
  878. }
  879. }
  880. rcu_read_unlock();
  881. rtl83xx_l2_nexthop_rm(priv, &r->nh);
  882. pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
  883. set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
  884. priv->r->pie_rule_rm(priv, &r->pr);
  885. rtl83xx_route_rm(priv, r);
  886. nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
  887. return 0;
  888. }
/* On the RTL93xx, an L3 termination endpoint MAC address on which the router waits
 * for packets to be routed needs to be allocated.
 *
 * Scans the router-MAC table for a slot already holding @mac, falling back
 * to the first invalid (free) slot, then (re-)programs that slot to match
 * @mac exactly on any port and any VLAN and to route matching packets.
 * Returns 0 on success, -1 when the table is full.
 */
static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
{
	int free_mac = -1;
	struct rtl93xx_rt_mac m;

	mutex_lock(&priv->reg_mutex);
	for (int i = 0; i < MAX_ROUTER_MACS; i++) {
		priv->r->get_l3_router_mac(i, &m);

		/* Remember the first free slot but keep scanning for an exact match */
		if (free_mac < 0 && !m.valid) {
			free_mac = i;
			continue;
		}
		if (m.valid && m.mac == mac) {
			free_mac = i;
			break;
		}
	}

	if (free_mac < 0) {
		pr_err("No free router MACs, cannot offload\n");
		mutex_unlock(&priv->reg_mutex);
		return -1;
	}

	/* NOTE(review): when the first-free path is taken, 'm' still carries
	 * whatever fields were read from the LAST scanned slot; this assumes
	 * every field of struct rtl93xx_rt_mac is overwritten below -- confirm.
	 */
	m.valid = true;
	m.mac = mac;
	m.p_type = 0; /* An individual port, not a trunk port */
	m.p_id = 0x3f; /* Listen on any port */
	m.p_id_mask = 0;
	m.vid = 0; /* Listen on any VLAN... */
	m.vid_mask = 0; /* ... so mask needs to be 0 */
	m.mac_mask = 0xffffffffffffULL; /* We want an exact match of the interface MAC */
	m.action = L3_FORWARD; /* Route the packet */
	priv->r->set_l3_router_mac(free_mac, &m);

	mutex_unlock(&priv->reg_mutex);

	return 0;
}
  926. static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
  927. {
  928. int free_mac = -1;
  929. struct rtl838x_l3_intf intf;
  930. u64 m;
  931. mutex_lock(&priv->reg_mutex);
  932. for (int i = 0; i < MAX_SMACS; i++) {
  933. m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
  934. if (free_mac < 0 && !m) {
  935. free_mac = i;
  936. continue;
  937. }
  938. if (m == mac) {
  939. mutex_unlock(&priv->reg_mutex);
  940. return i;
  941. }
  942. }
  943. if (free_mac < 0) {
  944. pr_err("No free egress interface, cannot offload\n");
  945. return -1;
  946. }
  947. /* Set up default egress interface 1 */
  948. intf.vid = vlan;
  949. intf.smac_idx = free_mac;
  950. intf.ip4_mtu_id = 1;
  951. intf.ip6_mtu_id = 1;
  952. intf.ttl_scope = 1; /* TTL */
  953. intf.hl_scope = 1; /* Hop Limit */
  954. intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; /* FORWARD */
  955. intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; /* FORWARD; */
  956. priv->r->set_l3_egress_intf(free_mac, &intf);
  957. priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
  958. mutex_unlock(&priv->reg_mutex);
  959. return free_mac;
  960. }
  961. static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
  962. struct fib_entry_notifier_info *info)
  963. {
  964. struct fib_nh *nh = fib_info_nh(info->fi, 0);
  965. struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
  966. int port;
  967. struct rtl83xx_route *r;
  968. bool to_localhost;
  969. int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
  970. pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
  971. if (!info->dst) {
  972. pr_info("Not offloading default route for now\n");
  973. return 0;
  974. }
  975. pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
  976. ether_addr_to_u64(dev->dev_addr), vlan
  977. );
  978. port = rtl83xx_port_dev_lower_find(dev, priv);
  979. if (port < 0)
  980. return -1;
  981. /* For now we only work with routes that have a gateway and are not ourself */
  982. /* if ((!nh->fib_nh_gw4) && (info->dst_len != 32)) */
  983. /* return 0; */
  984. if ((info->dst & 0xff) == 0xff)
  985. return 0;
  986. /* Do not offload routes to 192.168.100.x */
  987. if ((info->dst & 0xffffff00) == 0xc0a86400)
  988. return 0;
  989. /* Do not offload routes to 127.x.x.x */
  990. if ((info->dst & 0xff000000) == 0x7f000000)
  991. return 0;
  992. /* Allocate route or host-route (entry if hardware supports this) */
  993. if (info->dst_len == 32 && priv->r->host_route_write)
  994. r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
  995. else
  996. r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
  997. if (!r) {
  998. pr_err("%s: No more free route entries\n", __func__);
  999. return -1;
  1000. }
  1001. r->dst_ip = info->dst;
  1002. r->prefix_len = info->dst_len;
  1003. r->nh.rvid = vlan;
  1004. to_localhost = !nh->fib_nh_gw4;
  1005. if (priv->r->set_l3_router_mac) {
  1006. u64 mac = ether_addr_to_u64(dev->dev_addr);
  1007. pr_debug("Local route and router mac %016llx\n", mac);
  1008. if (rtl83xx_alloc_router_mac(priv, mac))
  1009. goto out_free_rt;
  1010. /* vid = 0: Do not care about VID */
  1011. r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
  1012. if (r->nh.if_id < 0)
  1013. goto out_free_rmac;
  1014. if (to_localhost) {
  1015. int slot;
  1016. r->nh.mac = mac;
  1017. r->nh.port = priv->port_ignore;
  1018. r->attr.valid = true;
  1019. r->attr.action = ROUTE_ACT_TRAP2CPU;
  1020. r->attr.type = 0;
  1021. slot = priv->r->find_l3_slot(r, false);
  1022. pr_debug("%s: Got slot for route: %d\n", __func__, slot);
  1023. priv->r->host_route_write(slot, r);
  1024. }
  1025. }
  1026. /* We need to resolve the mac address of the GW */
  1027. if (!to_localhost)
  1028. rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
  1029. nh->fib_nh_flags |= RTNH_F_OFFLOAD;
  1030. return 0;
  1031. out_free_rmac:
  1032. out_free_rt:
  1033. return 0;
  1034. }
/* Stub for offloading IPv6 routes: only logs the call and reports success
 * so the notifier chain continues; no hardware state is programmed yet.
 */
static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
			    struct fib6_entry_notifier_info *info)
{
	pr_debug("In %s\n", __func__);
	/* nh->fib_nh_flags |= RTNH_F_OFFLOAD; */
	return 0;
}
/* Deferred-work container for neighbour updates: carries the resolved MAC
 * and gateway address from atomic notifier context into
 * rtl83xx_net_event_work_do(). Freed by the work handler.
 */
struct net_event_work {
	struct work_struct work;
	struct rtl838x_switch_priv *priv;
	u64 mac;     /* resolved MAC of the gateway neighbour */
	u32 gw_addr; /* IPv4 address of the gateway */
};
  1048. static void rtl83xx_net_event_work_do(struct work_struct *work)
  1049. {
  1050. struct net_event_work *net_work =
  1051. container_of(work, struct net_event_work, work);
  1052. struct rtl838x_switch_priv *priv = net_work->priv;
  1053. rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
  1054. kfree(net_work);
  1055. }
  1056. static int rtl83xx_netevent_event(struct notifier_block *this,
  1057. unsigned long event, void *ptr)
  1058. {
  1059. struct rtl838x_switch_priv *priv;
  1060. struct net_device *dev;
  1061. struct neighbour *n = ptr;
  1062. int err, port;
  1063. struct net_event_work *net_work;
  1064. priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
  1065. switch (event) {
  1066. case NETEVENT_NEIGH_UPDATE:
  1067. if (n->tbl != &arp_tbl)
  1068. return NOTIFY_DONE;
  1069. dev = n->dev;
  1070. port = rtl83xx_port_dev_lower_find(dev, priv);
  1071. if (port < 0 || !(n->nud_state & NUD_VALID)) {
  1072. pr_debug("%s: Neigbour invalid, not updating\n", __func__);
  1073. return NOTIFY_DONE;
  1074. }
  1075. net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
  1076. if (!net_work)
  1077. return NOTIFY_BAD;
  1078. INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
  1079. net_work->priv = priv;
  1080. net_work->mac = ether_addr_to_u64(n->ha);
  1081. net_work->gw_addr = *(__be32 *) n->primary_key;
  1082. pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
  1083. __func__, port, net_work->mac);
  1084. schedule_work(&net_work->work);
  1085. if (err)
  1086. netdev_warn(dev, "failed to handle neigh update (err %d)\n", err);
  1087. break;
  1088. }
  1089. return NOTIFY_DONE;
  1090. }
/* Deferred-work container for FIB events: rtl83xx_fib_event() runs under
 * RCU and copies the notifier info here so rtl83xx_fib_event_work_do()
 * can process it in sleepable context. Freed by the work handler.
 */
struct rtl83xx_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;   /* IPv4 entry events */
		struct fib6_entry_notifier_info fen6_info; /* IPv6 entry events */
		struct fib_rule_notifier_info fr_info;     /* rule add/del events */
	};
	struct rtl838x_switch_priv *priv;
	bool is_fib6;        /* selects fen6_info over fen_info */
	unsigned long event; /* FIB_EVENT_* that was queued */
};
/* Work handler for FIB events queued by rtl83xx_fib_event().
 *
 * Runs in process context under RTNL so internal route structures cannot
 * change concurrently. Drops the fib_info/fib_rule references taken when
 * the event was queued and frees the work item.
 */
static void rtl83xx_fib_event_work_do(struct work_struct *work)
{
	struct rtl83xx_fib_event_work *fib_work =
		container_of(work, struct rtl83xx_fib_event_work, work);
	struct rtl838x_switch_priv *priv = fib_work->priv;
	struct fib_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (fib_work->is_fib6) {
			err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
		} else {
			err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
			/* Release the reference taken in rtl83xx_fib_event() */
			fib_info_put(fib_work->fen_info.fi);
		}
		if (err)
			pr_err("%s: FIB4 failed\n", __func__);
		break;
	case FIB_EVENT_ENTRY_DEL:
		rtl83xx_fib4_del(priv, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		/* Only the default FIB4 rule set is supported in hardware */
		if (!fib4_rule_default(rule))
			pr_err("%s: FIB4 default rule failed\n", __func__);
		fib_rule_put(rule);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
/* FIB notifier callback: copies the event info and defers the actual
 * route programming to a workqueue, taking references on fib_info /
 * fib_rule so they survive until the work runs.
 * Called with rcu_read_lock()
 */
static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct fib_notifier_info *info = ptr;
	struct rtl838x_switch_priv *priv;
	struct rtl83xx_fib_event_work *fib_work;

	if ((info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	priv = container_of(this, struct rtl838x_switch_priv, fib_nb);

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (!fib_work)
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
	fib_work->priv = priv;
	fib_work->event = event;
	fib_work->is_fib6 = false;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack,
						   "IPv6 gateway with IPv4 route is not supported");
				kfree(fib_work);
				return notifier_from_errno(-EINVAL);
			}
			memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
			/* Take reference on fib_info to prevent it from being
			 * freed while work is queued. Release it afterwards.
			 */
			fib_info_hold(fib_work->fen_info.fi);
		} else if (info->family == AF_INET6) {
			//struct fib6_entry_notifier_info *fen6_info = ptr;
			pr_warn("%s: FIB_RULE ADD/DEL for IPv6 not supported\n", __func__);
			kfree(fib_work);
			return NOTIFY_DONE;
		}
		/* NOTE(review): RTNL_FAMILY_IPMR/IP6MR entry events fall
		 * through with a zeroed fen_info; the worker would then
		 * dereference a NULL fib_info for ENTRY_DEL -- confirm these
		 * families never deliver entry events here.
		 */
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	}

	schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
  1194. static int __init rtl83xx_sw_probe(struct platform_device *pdev)
  1195. {
  1196. int err = 0;
  1197. struct rtl838x_switch_priv *priv;
  1198. struct device *dev = &pdev->dev;
  1199. u64 bpdu_mask;
  1200. pr_debug("Probing RTL838X switch device\n");
  1201. if (!pdev->dev.of_node) {
  1202. dev_err(dev, "No DT found\n");
  1203. return -EINVAL;
  1204. }
  1205. /* Initialize access to RTL switch tables */
  1206. rtl_table_init();
  1207. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  1208. if (!priv)
  1209. return -ENOMEM;
  1210. priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
  1211. if (!priv->ds)
  1212. return -ENOMEM;
  1213. priv->ds->dev = dev;
  1214. priv->ds->priv = priv;
  1215. priv->ds->ops = &rtl83xx_switch_ops;
  1216. priv->ds->needs_standalone_vlan_filtering = true;
  1217. priv->dev = dev;
  1218. mutex_init(&priv->reg_mutex);
  1219. priv->family_id = soc_info.family;
  1220. priv->id = soc_info.id;
  1221. switch(soc_info.family) {
  1222. case RTL8380_FAMILY_ID:
  1223. priv->ds->ops = &rtl83xx_switch_ops;
  1224. priv->cpu_port = RTL838X_CPU_PORT;
  1225. priv->port_mask = 0x1f;
  1226. priv->port_width = 1;
  1227. priv->irq_mask = 0x0FFFFFFF;
  1228. priv->r = &rtl838x_reg;
  1229. priv->ds->num_ports = 29;
  1230. priv->fib_entries = 8192;
  1231. rtl8380_get_version(priv);
  1232. priv->n_lags = 8;
  1233. priv->l2_bucket_size = 4;
  1234. priv->n_pie_blocks = 12;
  1235. priv->port_ignore = 0x1f;
  1236. priv->n_counters = 128;
  1237. break;
  1238. case RTL8390_FAMILY_ID:
  1239. priv->ds->ops = &rtl83xx_switch_ops;
  1240. priv->cpu_port = RTL839X_CPU_PORT;
  1241. priv->port_mask = 0x3f;
  1242. priv->port_width = 2;
  1243. priv->irq_mask = 0xFFFFFFFFFFFFFULL;
  1244. priv->r = &rtl839x_reg;
  1245. priv->ds->num_ports = 53;
  1246. priv->fib_entries = 16384;
  1247. rtl8390_get_version(priv);
  1248. priv->n_lags = 16;
  1249. priv->l2_bucket_size = 4;
  1250. priv->n_pie_blocks = 18;
  1251. priv->port_ignore = 0x3f;
  1252. priv->n_counters = 1024;
  1253. break;
  1254. case RTL9300_FAMILY_ID:
  1255. priv->ds->ops = &rtl930x_switch_ops;
  1256. priv->cpu_port = RTL930X_CPU_PORT;
  1257. priv->port_mask = 0x1f;
  1258. priv->port_width = 1;
  1259. priv->irq_mask = 0x0FFFFFFF;
  1260. priv->r = &rtl930x_reg;
  1261. priv->ds->num_ports = 29;
  1262. priv->fib_entries = 16384;
  1263. priv->version = RTL8390_VERSION_A;
  1264. priv->n_lags = 16;
  1265. sw_w32(1, RTL930X_ST_CTRL);
  1266. priv->l2_bucket_size = 8;
  1267. priv->n_pie_blocks = 16;
  1268. priv->port_ignore = 0x3f;
  1269. priv->n_counters = 2048;
  1270. break;
  1271. case RTL9310_FAMILY_ID:
  1272. priv->ds->ops = &rtl930x_switch_ops;
  1273. priv->cpu_port = RTL931X_CPU_PORT;
  1274. priv->port_mask = 0x3f;
  1275. priv->port_width = 2;
  1276. priv->irq_mask = 0xFFFFFFFFFFFFFULL;
  1277. priv->r = &rtl931x_reg;
  1278. priv->ds->num_ports = 57;
  1279. priv->fib_entries = 16384;
  1280. priv->version = RTL8390_VERSION_A;
  1281. priv->n_lags = 16;
  1282. priv->l2_bucket_size = 8;
  1283. break;
  1284. }
  1285. pr_debug("Chip version %c\n", priv->version);
  1286. err = rtl83xx_mdio_probe(priv);
  1287. if (err) {
  1288. /* Probing fails the 1st time because of missing ethernet driver
  1289. * initialization. Use this to disable traffic in case the bootloader left if on
  1290. */
  1291. return err;
  1292. }
  1293. err = dsa_register_switch(priv->ds);
  1294. if (err) {
  1295. dev_err(dev, "Error registering switch: %d\n", err);
  1296. return err;
  1297. }
  1298. /* dsa_to_port returns dsa_port from the port list in
  1299. * dsa_switch_tree, the tree is built when the switch
  1300. * is registered by dsa_register_switch
  1301. */
  1302. for (int i = 0; i <= priv->cpu_port; i++)
  1303. priv->ports[i].dp = dsa_to_port(priv->ds, i);
  1304. /* Enable link and media change interrupts. Are the SERDES masks needed? */
  1305. sw_w32_mask(0, 3, priv->r->isr_glb_src);
  1306. priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
  1307. priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
  1308. priv->link_state_irq = platform_get_irq(pdev, 0);
  1309. pr_info("LINK state irq: %d\n", priv->link_state_irq);
  1310. switch (priv->family_id) {
  1311. case RTL8380_FAMILY_ID:
  1312. err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
  1313. IRQF_SHARED, "rtl838x-link-state", priv->ds);
  1314. break;
  1315. case RTL8390_FAMILY_ID:
  1316. err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
  1317. IRQF_SHARED, "rtl839x-link-state", priv->ds);
  1318. break;
  1319. case RTL9300_FAMILY_ID:
  1320. err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
  1321. IRQF_SHARED, "rtl930x-link-state", priv->ds);
  1322. break;
  1323. case RTL9310_FAMILY_ID:
  1324. err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
  1325. IRQF_SHARED, "rtl931x-link-state", priv->ds);
  1326. break;
  1327. }
  1328. if (err) {
  1329. dev_err(dev, "Error setting up switch interrupt.\n");
  1330. /* Need to free allocated switch here */
  1331. }
  1332. /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */
  1333. if (soc_info.family != RTL9310_FAMILY_ID)
  1334. sw_w32(0x1, priv->r->imr_glb);
  1335. rtl83xx_get_l2aging(priv);
  1336. rtl83xx_setup_qos(priv);
  1337. priv->r->l3_setup(priv);
  1338. /* Clear all destination ports for mirror groups */
  1339. for (int i = 0; i < 4; i++)
  1340. priv->mirror_group_ports[i] = -1;
  1341. /* Register netdevice event callback to catch changes in link aggregation groups */
  1342. priv->nb.notifier_call = rtl83xx_netdevice_event;
  1343. if (register_netdevice_notifier(&priv->nb)) {
  1344. priv->nb.notifier_call = NULL;
  1345. dev_err(dev, "Failed to register LAG netdev notifier\n");
  1346. goto err_register_nb;
  1347. }
  1348. /* Initialize hash table for L3 routing */
  1349. rhltable_init(&priv->routes, &route_ht_params);
  1350. /* Register netevent notifier callback to catch notifications about neighboring
  1351. * changes to update nexthop entries for L3 routing.
  1352. */
  1353. priv->ne_nb.notifier_call = rtl83xx_netevent_event;
  1354. if (register_netevent_notifier(&priv->ne_nb)) {
  1355. priv->ne_nb.notifier_call = NULL;
  1356. dev_err(dev, "Failed to register netevent notifier\n");
  1357. goto err_register_ne_nb;
  1358. }
  1359. priv->fib_nb.notifier_call = rtl83xx_fib_event;
  1360. /* Register Forwarding Information Base notifier to offload routes where
  1361. * where possible
  1362. * Only FIBs pointing to our own netdevs are programmed into
  1363. * the device, so no need to pass a callback.
  1364. */
  1365. err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
  1366. if (err)
  1367. goto err_register_fib_nb;
  1368. /* TODO: put this into l2_setup() */
  1369. /* Flood BPDUs to all ports including cpu-port */
  1370. if (soc_info.family != RTL9300_FAMILY_ID) {
  1371. bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
  1372. priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
  1373. /* TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs */
  1374. sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
  1375. rtl838x_dbgfs_init(priv);
  1376. } else {
  1377. rtl930x_dbgfs_init(priv);
  1378. }
  1379. return 0;
  1380. err_register_fib_nb:
  1381. unregister_netevent_notifier(&priv->ne_nb);
  1382. err_register_ne_nb:
  1383. unregister_netdevice_notifier(&priv->nb);
  1384. err_register_nb:
  1385. return err;
  1386. }
/* Platform driver removal hook.
 * TODO: nothing is torn down yet -- notifiers, the link-state IRQ and the
 * DSA registration done in probe are left in place.
 */
static int rtl83xx_sw_remove(struct platform_device *pdev)
{
	/* TODO: */
	pr_debug("Removing platform driver for rtl83xx-sw\n");
	return 0;
}
/* Devicetree match table; exported via MODULE_DEVICE_TABLE for module autoloading */
static const struct of_device_id rtl83xx_switch_of_ids[] = {
	{ .compatible = "realtek,rtl83xx-switch"},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
/* Platform driver glue: binds to the "realtek,rtl83xx-switch" DT node */
static struct platform_driver rtl83xx_switch_driver = {
	.probe = rtl83xx_sw_probe,
	.remove = rtl83xx_sw_remove,
	.driver = {
		.name = "rtl83xx-switch",
		.pm = NULL, /* no power-management support */
		.of_match_table = rtl83xx_switch_of_ids,
	},
};

module_platform_driver(rtl83xx_switch_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
MODULE_LICENSE("GPL");