610-v5.13-32-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch (34 KB)
  1. From: Felix Fietkau <[email protected]>
  2. Date: Wed, 24 Mar 2021 02:30:53 +0100
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for
  4. initializing the PPE
  5. The PPE (packet processing engine) is used to offload NAT/routed or even
  6. bridged flows. This patch brings up the PPE and uses it to get a packet
  7. hash. It also contains some functionality that will be used to bring up
  8. flow offloading.
  9. Signed-off-by: Felix Fietkau <[email protected]>
  10. Signed-off-by: Pablo Neira Ayuso <[email protected]>
  11. ---
  12. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
  13. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h
  14. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  15. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h
  16. --- a/drivers/net/ethernet/mediatek/Makefile
  17. +++ b/drivers/net/ethernet/mediatek/Makefile
  18. @@ -4,5 +4,5 @@
  19. #
  20. obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
  21. -mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
  22. +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
  23. obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
  24. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  25. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  26. @@ -2299,7 +2299,10 @@ static int mtk_open(struct net_device *d
  27. /* we run 2 netdevs on the same dma ring so we only bring it up once */
  28. if (!refcount_read(&eth->dma_refcnt)) {
  29. - int err = mtk_start_dma(eth);
  30. + u32 gdm_config = MTK_GDMA_TO_PDMA;
  31. + int err;
  32. +
  33. + err = mtk_start_dma(eth);
  34. if (err)
  35. if (err) {
  36. @@ -2307,7 +2310,10 @@ static int mtk_open(struct net_device *d
  37. return err;
  38. }
  39. - mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
  40. + if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
  41. + gdm_config = MTK_GDMA_TO_PPE;
  42. +
  43. + mtk_gdm_config(eth, gdm_config);
  44. napi_enable(&eth->tx_napi);
  45. napi_enable(&eth->rx_napi);
  46. @@ -2374,6 +2380,9 @@ static int mtk_stop(struct net_device *d
  47. mtk_dma_free(eth);
  48. + if (eth->soc->offload_version)
  49. + mtk_ppe_stop(&eth->ppe);
  50. +
  51. return 0;
  52. }
  53. @@ -3102,6 +3111,13 @@ static int mtk_probe(struct platform_dev
  54. goto err_free_dev;
  55. }
  56. + if (eth->soc->offload_version) {
  57. + err = mtk_ppe_init(&eth->ppe, eth->dev,
  58. + eth->base + MTK_ETH_PPE_BASE, 2);
  59. + if (err)
  60. + goto err_free_dev;
  61. + }
  62. +
  63. for (i = 0; i < MTK_MAX_DEVS; i++) {
  64. if (!eth->netdev[i])
  65. continue;
  66. @@ -3176,6 +3192,7 @@ static const struct mtk_soc_data mt7621_
  67. .hw_features = MTK_HW_FEATURES,
  68. .required_clks = MT7621_CLKS_BITMAP,
  69. .required_pctl = false,
  70. + .offload_version = 2,
  71. };
  72. static const struct mtk_soc_data mt7622_data = {
  73. @@ -3184,6 +3201,7 @@ static const struct mtk_soc_data mt7622_
  74. .hw_features = MTK_HW_FEATURES,
  75. .required_clks = MT7622_CLKS_BITMAP,
  76. .required_pctl = false,
  77. + .offload_version = 2,
  78. };
  79. static const struct mtk_soc_data mt7623_data = {
  80. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  81. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  82. @@ -15,6 +15,7 @@
  83. #include <linux/u64_stats_sync.h>
  84. #include <linux/refcount.h>
  85. #include <linux/phylink.h>
  86. +#include "mtk_ppe.h"
  87. #define MTK_QDMA_PAGE_SIZE 2048
  88. #define MTK_MAX_RX_LENGTH 1536
  89. @@ -86,6 +87,7 @@
  90. #define MTK_GDMA_TCS_EN BIT(21)
  91. #define MTK_GDMA_UCS_EN BIT(20)
  92. #define MTK_GDMA_TO_PDMA 0x0
  93. +#define MTK_GDMA_TO_PPE 0x4444
  94. #define MTK_GDMA_DROP_ALL 0x7777
  95. /* Unicast Filter MAC Address Register - Low */
  96. @@ -315,6 +317,12 @@
  97. #define RX_DMA_VID(_x) ((_x) & 0xfff)
  98. /* QDMA descriptor rxd4 */
  99. +#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
  100. +#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
  101. +#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
  102. +#define MTK_RXD4_ALG GENMASK(31, 22)
  103. +
  104. +/* QDMA descriptor rxd4 */
  105. #define RX_DMA_L4_VALID BIT(24)
  106. #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
  107. #define RX_DMA_FPORT_SHIFT 19
  108. @@ -819,6 +827,7 @@ struct mtk_soc_data {
  109. u32 caps;
  110. u32 required_clks;
  111. bool required_pctl;
  112. + u8 offload_version;
  113. netdev_features_t hw_features;
  114. };
  115. @@ -918,6 +927,8 @@ struct mtk_eth {
  116. u32 tx_int_status_reg;
  117. u32 rx_dma_l4_valid;
  118. int ip_align;
  119. +
  120. + struct mtk_ppe ppe;
  121. };
  122. /* struct mtk_mac - the structure that holds the info about the MACs of the
  123. --- /dev/null
  124. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
  125. @@ -0,0 +1,511 @@
  126. +// SPDX-License-Identifier: GPL-2.0-only
  127. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  128. +
  129. +#include <linux/kernel.h>
  130. +#include <linux/jiffies.h>
  131. +#include <linux/delay.h>
  132. +#include <linux/io.h>
  133. +#include <linux/etherdevice.h>
  134. +#include <linux/platform_device.h>
  135. +#include "mtk_ppe.h"
  136. +#include "mtk_ppe_regs.h"
  137. +
  138. +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
  139. +{
  140. + writel(val, ppe->base + reg);
  141. +}
  142. +
  143. +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
  144. +{
  145. + return readl(ppe->base + reg);
  146. +}
  147. +
  148. +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
  149. +{
  150. + u32 val;
  151. +
  152. + val = ppe_r32(ppe, reg);
  153. + val &= ~mask;
  154. + val |= set;
  155. + ppe_w32(ppe, reg, val);
  156. +
  157. + return val;
  158. +}
  159. +
  160. +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
  161. +{
  162. + return ppe_m32(ppe, reg, 0, val);
  163. +}
  164. +
  165. +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
  166. +{
  167. + return ppe_m32(ppe, reg, val, 0);
  168. +}
  169. +
  170. +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
  171. +{
  172. + unsigned long timeout = jiffies + HZ;
  173. +
  174. + while (time_is_after_jiffies(timeout)) {
  175. + if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
  176. + return 0;
  177. +
  178. + usleep_range(10, 20);
  179. + }
  180. +
  181. + dev_err(ppe->dev, "PPE table busy");
  182. +
  183. + return -ETIMEDOUT;
  184. +}
  185. +
  186. +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
  187. +{
  188. + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
  189. + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
  190. +}
  191. +
  192. +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
  193. +{
  194. + mtk_ppe_cache_clear(ppe);
  195. +
  196. + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
  197. + enable * MTK_PPE_CACHE_CTL_EN);
  198. +}
  199. +
  200. +static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
  201. +{
  202. + u32 hv1, hv2, hv3;
  203. + u32 hash;
  204. +
  205. + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
  206. + case MTK_PPE_PKT_TYPE_BRIDGE:
  207. + hv1 = e->bridge.src_mac_lo;
  208. + hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
  209. + hv2 = e->bridge.src_mac_hi >> 16;
  210. + hv2 ^= e->bridge.dest_mac_lo;
  211. + hv3 = e->bridge.dest_mac_hi;
  212. + break;
  213. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  214. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  215. + hv1 = e->ipv4.orig.ports;
  216. + hv2 = e->ipv4.orig.dest_ip;
  217. + hv3 = e->ipv4.orig.src_ip;
  218. + break;
  219. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  220. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  221. + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
  222. + hv1 ^= e->ipv6.ports;
  223. +
  224. + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
  225. + hv2 ^= e->ipv6.dest_ip[0];
  226. +
  227. + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
  228. + hv3 ^= e->ipv6.src_ip[0];
  229. + break;
  230. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  231. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  232. + default:
  233. + WARN_ON_ONCE(1);
  234. + return MTK_PPE_HASH_MASK;
  235. + }
  236. +
  237. + hash = (hv1 & hv2) | ((~hv1) & hv3);
  238. + hash = (hash >> 24) | ((hash & 0xffffff) << 8);
  239. + hash ^= hv1 ^ hv2 ^ hv3;
  240. + hash ^= hash >> 16;
  241. + hash <<= 1;
  242. + hash &= MTK_PPE_ENTRIES - 1;
  243. +
  244. + return hash;
  245. +}
  246. +
  247. +static inline struct mtk_foe_mac_info *
  248. +mtk_foe_entry_l2(struct mtk_foe_entry *entry)
  249. +{
  250. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  251. +
  252. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
  253. + return &entry->ipv6.l2;
  254. +
  255. + return &entry->ipv4.l2;
  256. +}
  257. +
  258. +static inline u32 *
  259. +mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
  260. +{
  261. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  262. +
  263. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
  264. + return &entry->ipv6.ib2;
  265. +
  266. + return &entry->ipv4.ib2;
  267. +}
  268. +
  269. +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
  270. + u8 pse_port, u8 *src_mac, u8 *dest_mac)
  271. +{
  272. + struct mtk_foe_mac_info *l2;
  273. + u32 ports_pad, val;
  274. +
  275. + memset(entry, 0, sizeof(*entry));
  276. +
  277. + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
  278. + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
  279. + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
  280. + MTK_FOE_IB1_BIND_TTL |
  281. + MTK_FOE_IB1_BIND_CACHE;
  282. + entry->ib1 = val;
  283. +
  284. + val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
  285. + FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
  286. + FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
  287. +
  288. + if (is_multicast_ether_addr(dest_mac))
  289. + val |= MTK_FOE_IB2_MULTICAST;
  290. +
  291. + ports_pad = 0xa5a5a500 | (l4proto & 0xff);
  292. + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
  293. + entry->ipv4.orig.ports = ports_pad;
  294. + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
  295. + entry->ipv6.ports = ports_pad;
  296. +
  297. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
  298. + entry->ipv6.ib2 = val;
  299. + l2 = &entry->ipv6.l2;
  300. + } else {
  301. + entry->ipv4.ib2 = val;
  302. + l2 = &entry->ipv4.l2;
  303. + }
  304. +
  305. + l2->dest_mac_hi = get_unaligned_be32(dest_mac);
  306. + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
  307. + l2->src_mac_hi = get_unaligned_be32(src_mac);
  308. + l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
  309. +
  310. + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
  311. + l2->etype = ETH_P_IPV6;
  312. + else
  313. + l2->etype = ETH_P_IP;
  314. +
  315. + return 0;
  316. +}
  317. +
  318. +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
  319. +{
  320. + u32 *ib2 = mtk_foe_entry_ib2(entry);
  321. + u32 val;
  322. +
  323. + val = *ib2;
  324. + val &= ~MTK_FOE_IB2_DEST_PORT;
  325. + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
  326. + *ib2 = val;
  327. +
  328. + return 0;
  329. +}
  330. +
  331. +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
  332. + __be32 src_addr, __be16 src_port,
  333. + __be32 dest_addr, __be16 dest_port)
  334. +{
  335. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  336. + struct mtk_ipv4_tuple *t;
  337. +
  338. + switch (type) {
  339. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  340. + if (egress) {
  341. + t = &entry->ipv4.new;
  342. + break;
  343. + }
  344. + fallthrough;
  345. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  346. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  347. + t = &entry->ipv4.orig;
  348. + break;
  349. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  350. + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
  351. + entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
  352. + return 0;
  353. + default:
  354. + WARN_ON_ONCE(1);
  355. + return -EINVAL;
  356. + }
  357. +
  358. + t->src_ip = be32_to_cpu(src_addr);
  359. + t->dest_ip = be32_to_cpu(dest_addr);
  360. +
  361. + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
  362. + return 0;
  363. +
  364. + t->src_port = be16_to_cpu(src_port);
  365. + t->dest_port = be16_to_cpu(dest_port);
  366. +
  367. + return 0;
  368. +}
  369. +
  370. +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
  371. + __be32 *src_addr, __be16 src_port,
  372. + __be32 *dest_addr, __be16 dest_port)
  373. +{
  374. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  375. + u32 *src, *dest;
  376. + int i;
  377. +
  378. + switch (type) {
  379. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  380. + src = entry->dslite.tunnel_src_ip;
  381. + dest = entry->dslite.tunnel_dest_ip;
  382. + break;
  383. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  384. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  385. + entry->ipv6.src_port = be16_to_cpu(src_port);
  386. + entry->ipv6.dest_port = be16_to_cpu(dest_port);
  387. + fallthrough;
  388. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  389. + src = entry->ipv6.src_ip;
  390. + dest = entry->ipv6.dest_ip;
  391. + break;
  392. + default:
  393. + WARN_ON_ONCE(1);
  394. + return -EINVAL;
  395. + };
  396. +
  397. + for (i = 0; i < 4; i++)
  398. + src[i] = be32_to_cpu(src_addr[i]);
  399. + for (i = 0; i < 4; i++)
  400. + dest[i] = be32_to_cpu(dest_addr[i]);
  401. +
  402. + return 0;
  403. +}
  404. +
  405. +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
  406. +{
  407. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  408. +
  409. + l2->etype = BIT(port);
  410. +
  411. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
  412. + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  413. + else
  414. + l2->etype |= BIT(8);
  415. +
  416. + entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
  417. +
  418. + return 0;
  419. +}
  420. +
  421. +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
  422. +{
  423. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  424. +
  425. + switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
  426. + case 0:
  427. + entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
  428. + FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  429. + l2->vlan1 = vid;
  430. + return 0;
  431. + case 1:
  432. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
  433. + l2->vlan1 = vid;
  434. + l2->etype |= BIT(8);
  435. + } else {
  436. + l2->vlan2 = vid;
  437. + entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  438. + }
  439. + return 0;
  440. + default:
  441. + return -ENOSPC;
  442. + }
  443. +}
  444. +
  445. +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
  446. +{
  447. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  448. +
  449. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
  450. + (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
  451. + l2->etype = ETH_P_PPP_SES;
  452. +
  453. + entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
  454. + l2->pppoe_id = sid;
  455. +
  456. + return 0;
  457. +}
  458. +
  459. +static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
  460. +{
  461. + return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
  462. + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
  463. +}
  464. +
  465. +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
  466. + u16 timestamp)
  467. +{
  468. + struct mtk_foe_entry *hwe;
  469. + u32 hash;
  470. +
  471. + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
  472. + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
  473. + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
  474. +
  475. + hash = mtk_ppe_hash_entry(entry);
  476. + hwe = &ppe->foe_table[hash];
  477. + if (!mtk_foe_entry_usable(hwe)) {
  478. + hwe++;
  479. + hash++;
  480. +
  481. + if (!mtk_foe_entry_usable(hwe))
  482. + return -ENOSPC;
  483. + }
  484. +
  485. + memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
  486. + wmb();
  487. + hwe->ib1 = entry->ib1;
  488. +
  489. + dma_wmb();
  490. +
  491. + mtk_ppe_cache_clear(ppe);
  492. +
  493. + return hash;
  494. +}
  495. +
  496. +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
  497. + int version)
  498. +{
  499. + struct mtk_foe_entry *foe;
  500. +
  501. + /* need to allocate a separate device, since the PPE DMA access is
  502. + * not coherent.
  503. + */
  504. + ppe->base = base;
  505. + ppe->dev = dev;
  506. + ppe->version = version;
  507. +
  508. + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
  509. + &ppe->foe_phys, GFP_KERNEL);
  510. + if (!foe)
  511. + return -ENOMEM;
  512. +
  513. + ppe->foe_table = foe;
  514. +
  515. + mtk_ppe_debugfs_init(ppe);
  516. +
  517. + return 0;
  518. +}
  519. +
  520. +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
  521. +{
  522. + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
  523. + int i, k;
  524. +
  525. + memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
  526. +
  527. + if (!IS_ENABLED(CONFIG_SOC_MT7621))
  528. + return;
  529. +
  530. + /* skip all entries that cross the 1024 byte boundary */
  531. + for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
  532. + for (k = 0; k < ARRAY_SIZE(skip); k++)
  533. + ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
  534. +}
  535. +
  536. +int mtk_ppe_start(struct mtk_ppe *ppe)
  537. +{
  538. + u32 val;
  539. +
  540. + mtk_ppe_init_foe_table(ppe);
  541. + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
  542. +
  543. + val = MTK_PPE_TB_CFG_ENTRY_80B |
  544. + MTK_PPE_TB_CFG_AGE_NON_L4 |
  545. + MTK_PPE_TB_CFG_AGE_UNBIND |
  546. + MTK_PPE_TB_CFG_AGE_TCP |
  547. + MTK_PPE_TB_CFG_AGE_UDP |
  548. + MTK_PPE_TB_CFG_AGE_TCP_FIN |
  549. + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
  550. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
  551. + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
  552. + MTK_PPE_KEEPALIVE_DISABLE) |
  553. + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
  554. + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
  555. + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
  556. + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
  557. + MTK_PPE_ENTRIES_SHIFT);
  558. + ppe_w32(ppe, MTK_PPE_TB_CFG, val);
  559. +
  560. + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
  561. + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
  562. +
  563. + mtk_ppe_cache_enable(ppe, true);
  564. +
  565. + val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
  566. + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
  567. + MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
  568. + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
  569. + MTK_PPE_FLOW_CFG_IP6_6RD |
  570. + MTK_PPE_FLOW_CFG_IP4_NAT |
  571. + MTK_PPE_FLOW_CFG_IP4_NAPT |
  572. + MTK_PPE_FLOW_CFG_IP4_DSLITE |
  573. + MTK_PPE_FLOW_CFG_L2_BRIDGE |
  574. + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
  575. + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
  576. +
  577. + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
  578. + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
  579. + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
  580. +
  581. + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
  582. + FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
  583. + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
  584. +
  585. + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
  586. + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
  587. + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
  588. +
  589. + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
  590. + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
  591. +
  592. + val = MTK_PPE_BIND_LIMIT1_FULL |
  593. + FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
  594. + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
  595. +
  596. + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
  597. + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
  598. + ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
  599. +
  600. + /* enable PPE */
  601. + val = MTK_PPE_GLO_CFG_EN |
  602. + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
  603. + MTK_PPE_GLO_CFG_IP4_CS_DROP |
  604. + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
  605. + ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
  606. +
  607. + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
  608. +
  609. + return 0;
  610. +}
  611. +
  612. +int mtk_ppe_stop(struct mtk_ppe *ppe)
  613. +{
  614. + u32 val;
  615. + int i;
  616. +
  617. + for (i = 0; i < MTK_PPE_ENTRIES; i++)
  618. + ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
  619. + MTK_FOE_STATE_INVALID);
  620. +
  621. + mtk_ppe_cache_enable(ppe, false);
  622. +
  623. + /* disable offload engine */
  624. + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
  625. + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
  626. +
  627. + /* disable aging */
  628. + val = MTK_PPE_TB_CFG_AGE_NON_L4 |
  629. + MTK_PPE_TB_CFG_AGE_UNBIND |
  630. + MTK_PPE_TB_CFG_AGE_TCP |
  631. + MTK_PPE_TB_CFG_AGE_UDP |
  632. + MTK_PPE_TB_CFG_AGE_TCP_FIN;
  633. + ppe_clear(ppe, MTK_PPE_TB_CFG, val);
  634. +
  635. + return mtk_ppe_wait_busy(ppe);
  636. +}
  637. --- /dev/null
  638. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
  639. @@ -0,0 +1,287 @@
  640. +// SPDX-License-Identifier: GPL-2.0-only
  641. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  642. +
  643. +#ifndef __MTK_PPE_H
  644. +#define __MTK_PPE_H
  645. +
  646. +#include <linux/kernel.h>
  647. +#include <linux/bitfield.h>
  648. +
  649. +#define MTK_ETH_PPE_BASE 0xc00
  650. +
  651. +#define MTK_PPE_ENTRIES_SHIFT 3
  652. +#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
  653. +#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
  654. +
  655. +#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
  656. +#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
  657. +#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
  658. +
  659. +#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
  660. +#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
  661. +#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
  662. +#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
  663. +#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
  664. +#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
  665. +#define MTK_FOE_IB1_BIND_CACHE BIT(22)
  666. +#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
  667. +#define MTK_FOE_IB1_BIND_TTL BIT(24)
  668. +
  669. +#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
  670. +#define MTK_FOE_IB1_STATE GENMASK(29, 28)
  671. +#define MTK_FOE_IB1_UDP BIT(30)
  672. +#define MTK_FOE_IB1_STATIC BIT(31)
  673. +
  674. +enum {
  675. + MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
  676. + MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
  677. + MTK_PPE_PKT_TYPE_BRIDGE = 2,
  678. + MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
  679. + MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
  680. + MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
  681. + MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
  682. +};
  683. +
  684. +#define MTK_FOE_IB2_QID GENMASK(3, 0)
  685. +#define MTK_FOE_IB2_PSE_QOS BIT(4)
  686. +#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
  687. +#define MTK_FOE_IB2_MULTICAST BIT(8)
  688. +
  689. +#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
  690. +#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
  691. +#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
  692. +
  693. +#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
  694. +
  695. +#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
  696. +
  697. +#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
  698. +
  699. +#define MTK_FOE_VLAN2_WHNAT_BSS GENMASK(5, 0)
  700. +#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
  701. +#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
  702. +
  703. +enum {
  704. + MTK_FOE_STATE_INVALID,
  705. + MTK_FOE_STATE_UNBIND,
  706. + MTK_FOE_STATE_BIND,
  707. + MTK_FOE_STATE_FIN
  708. +};
  709. +
  710. +struct mtk_foe_mac_info {
  711. + u16 vlan1;
  712. + u16 etype;
  713. +
  714. + u32 dest_mac_hi;
  715. +
  716. + u16 vlan2;
  717. + u16 dest_mac_lo;
  718. +
  719. + u32 src_mac_hi;
  720. +
  721. + u16 pppoe_id;
  722. + u16 src_mac_lo;
  723. +};
  724. +
  725. +struct mtk_foe_bridge {
  726. + u32 dest_mac_hi;
  727. +
  728. + u16 src_mac_lo;
  729. + u16 dest_mac_lo;
  730. +
  731. + u32 src_mac_hi;
  732. +
  733. + u32 ib2;
  734. +
  735. + u32 _rsv[5];
  736. +
  737. + u32 udf_tsid;
  738. + struct mtk_foe_mac_info l2;
  739. +};
  740. +
  741. +struct mtk_ipv4_tuple {
  742. + u32 src_ip;
  743. + u32 dest_ip;
  744. + union {
  745. + struct {
  746. + u16 dest_port;
  747. + u16 src_port;
  748. + };
  749. + struct {
  750. + u8 protocol;
  751. + u8 _pad[3]; /* fill with 0xa5a5a5 */
  752. + };
  753. + u32 ports;
  754. + };
  755. +};
  756. +
  757. +struct mtk_foe_ipv4 {
  758. + struct mtk_ipv4_tuple orig;
  759. +
  760. + u32 ib2;
  761. +
  762. + struct mtk_ipv4_tuple new;
  763. +
  764. + u16 timestamp;
  765. + u16 _rsv0[3];
  766. +
  767. + u32 udf_tsid;
  768. +
  769. + struct mtk_foe_mac_info l2;
  770. +};
  771. +
  772. +struct mtk_foe_ipv4_dslite {
  773. + struct mtk_ipv4_tuple ip4;
  774. +
  775. + u32 tunnel_src_ip[4];
  776. + u32 tunnel_dest_ip[4];
  777. +
  778. + u8 flow_label[3];
  779. + u8 priority;
  780. +
  781. + u32 udf_tsid;
  782. +
  783. + u32 ib2;
  784. +
  785. + struct mtk_foe_mac_info l2;
  786. +};
  787. +
  788. +struct mtk_foe_ipv6 {
  789. + u32 src_ip[4];
  790. + u32 dest_ip[4];
  791. +
  792. + union {
  793. + struct {
  794. + u8 protocol;
  795. + u8 _pad[3]; /* fill with 0xa5a5a5 */
  796. + }; /* 3-tuple */
  797. + struct {
  798. + u16 dest_port;
  799. + u16 src_port;
  800. + }; /* 5-tuple */
  801. + u32 ports;
  802. + };
  803. +
  804. + u32 _rsv[3];
  805. +
  806. + u32 udf;
  807. +
  808. + u32 ib2;
  809. + struct mtk_foe_mac_info l2;
  810. +};
  811. +
  812. +struct mtk_foe_ipv6_6rd {
  813. + u32 src_ip[4];
  814. + u32 dest_ip[4];
  815. + u16 dest_port;
  816. + u16 src_port;
  817. +
  818. + u32 tunnel_src_ip;
  819. + u32 tunnel_dest_ip;
  820. +
  821. + u16 hdr_csum;
  822. + u8 dscp;
  823. + u8 ttl;
  824. +
  825. + u8 flag;
  826. + u8 pad;
  827. + u8 per_flow_6rd_id;
  828. + u8 pad2;
  829. +
  830. + u32 ib2;
  831. + struct mtk_foe_mac_info l2;
  832. +};
  833. +
  834. +struct mtk_foe_entry {
  835. + u32 ib1;
  836. +
  837. + union {
  838. + struct mtk_foe_bridge bridge;
  839. + struct mtk_foe_ipv4 ipv4;
  840. + struct mtk_foe_ipv4_dslite dslite;
  841. + struct mtk_foe_ipv6 ipv6;
  842. + struct mtk_foe_ipv6_6rd ipv6_6rd;
  843. + u32 data[19];
  844. + };
  845. +};
  846. +
  847. +enum {
  848. + MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
  849. + MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
  850. + MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
  851. + MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
  852. + MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
  853. + MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
  854. + MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
  855. + MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
  856. + MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
  857. + MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
  858. + MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
  859. + MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
  860. + MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
  861. + MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
  862. + MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
  863. + MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
  864. + MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
  865. + MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
  866. + MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
  867. + MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
  868. + MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
  869. + MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
  870. + MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
  871. + MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
  872. + MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
  873. + MTK_PPE_CPU_REASON_INVALID = 0x1f,
  874. +};
  875. +
  876. +struct mtk_ppe {
  877. + struct device *dev;
  878. + void __iomem *base;
  879. + int version;
  880. +
  881. + struct mtk_foe_entry *foe_table;
  882. + dma_addr_t foe_phys;
  883. +
  884. + void *acct_table;
  885. +};
  886. +
  887. +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
  888. + int version);
  889. +int mtk_ppe_start(struct mtk_ppe *ppe);
  890. +int mtk_ppe_stop(struct mtk_ppe *ppe);
  891. +
  892. +static inline void
  893. +mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
  894. +{
  895. + ppe->foe_table[hash].ib1 = 0;
  896. + dma_wmb();
  897. +}
  898. +
  899. +static inline int
  900. +mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
  901. +{
  902. + u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
  903. +
  904. + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
  905. + return -1;
  906. +
  907. + return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
  908. +}
  909. +
  910. +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
  911. + u8 pse_port, u8 *src_mac, u8 *dest_mac);
  912. +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
  913. +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
  914. + __be32 src_addr, __be16 src_port,
  915. + __be32 dest_addr, __be16 dest_port);
  916. +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
  917. + __be32 *src_addr, __be16 src_port,
  918. + __be32 *dest_addr, __be16 dest_port);
  919. +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
  920. +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
  921. +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
  922. +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
  923. + u16 timestamp);
  924. +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
  925. +
  926. +#endif
  927. --- /dev/null
  928. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  929. @@ -0,0 +1,217 @@
  930. +// SPDX-License-Identifier: GPL-2.0-only
  931. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  932. +
  933. +#include <linux/kernel.h>
  934. +#include <linux/debugfs.h>
  935. +#include "mtk_eth_soc.h"
  936. +
  937. +struct mtk_flow_addr_info
  938. +{
  939. + void *src, *dest;
  940. + u16 *src_port, *dest_port;
  941. + bool ipv6;
  942. +};
  943. +
  944. +static const char *mtk_foe_entry_state_str(int state)
  945. +{
  946. + static const char * const state_str[] = {
  947. + [MTK_FOE_STATE_INVALID] = "INV",
  948. + [MTK_FOE_STATE_UNBIND] = "UNB",
  949. + [MTK_FOE_STATE_BIND] = "BND",
  950. + [MTK_FOE_STATE_FIN] = "FIN",
  951. + };
  952. +
  953. + if (state >= ARRAY_SIZE(state_str) || !state_str[state])
  954. + return "UNK";
  955. +
  956. + return state_str[state];
  957. +}
  958. +
  959. +static const char *mtk_foe_pkt_type_str(int type)
  960. +{
  961. + static const char * const type_str[] = {
  962. + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
  963. + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
  964. + [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
  965. + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
  966. + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
  967. + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
  968. + [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
  969. + };
  970. +
  971. + if (type >= ARRAY_SIZE(type_str) || !type_str[type])
  972. + return "UNKNOWN";
  973. +
  974. + return type_str[type];
  975. +}
  976. +
  977. +static void
  978. +mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
  979. +{
  980. + u32 n_addr[4];
  981. + int i;
  982. +
  983. + if (!ipv6) {
  984. + seq_printf(m, "%pI4h", addr);
  985. + return;
  986. + }
  987. +
  988. + for (i = 0; i < ARRAY_SIZE(n_addr); i++)
  989. + n_addr[i] = htonl(addr[i]);
  990. + seq_printf(m, "%pI6", n_addr);
  991. +}
  992. +
  993. +static void
  994. +mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
  995. +{
  996. + mtk_print_addr(m, ai->src, ai->ipv6);
  997. + if (ai->src_port)
  998. + seq_printf(m, ":%d", *ai->src_port);
  999. + seq_printf(m, "->");
  1000. + mtk_print_addr(m, ai->dest, ai->ipv6);
  1001. + if (ai->dest_port)
  1002. + seq_printf(m, ":%d", *ai->dest_port);
  1003. +}
  1004. +
  1005. +static int
  1006. +mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
  1007. +{
  1008. + struct mtk_ppe *ppe = m->private;
  1009. + int i, count;
  1010. +
  1011. + for (i = 0, count = 0; i < MTK_PPE_ENTRIES; i++) {
  1012. + struct mtk_foe_entry *entry = &ppe->foe_table[i];
  1013. + struct mtk_foe_mac_info *l2;
  1014. + struct mtk_flow_addr_info ai = {};
  1015. + unsigned char h_source[ETH_ALEN];
  1016. + unsigned char h_dest[ETH_ALEN];
  1017. + int type, state;
  1018. + u32 ib2;
  1019. +
  1020. +
  1021. + state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
  1022. + if (!state)
  1023. + continue;
  1024. +
  1025. + if (bind && state != MTK_FOE_STATE_BIND)
  1026. + continue;
  1027. +
  1028. + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  1029. + seq_printf(m, "%05x %s %7s", i,
  1030. + mtk_foe_entry_state_str(state),
  1031. + mtk_foe_pkt_type_str(type));
  1032. +
  1033. + switch (type) {
  1034. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  1035. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  1036. + ai.src_port = &entry->ipv4.orig.src_port;
  1037. + ai.dest_port = &entry->ipv4.orig.dest_port;
  1038. + fallthrough;
  1039. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  1040. + ai.src = &entry->ipv4.orig.src_ip;
  1041. + ai.dest = &entry->ipv4.orig.dest_ip;
  1042. + break;
  1043. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  1044. + ai.src_port = &entry->ipv6.src_port;
  1045. + ai.dest_port = &entry->ipv6.dest_port;
  1046. + fallthrough;
  1047. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  1048. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  1049. + ai.src = &entry->ipv6.src_ip;
  1050. + ai.dest = &entry->ipv6.dest_ip;
  1051. + ai.ipv6 = true;
  1052. + break;
  1053. + }
  1054. +
  1055. + seq_printf(m, " orig=");
  1056. + mtk_print_addr_info(m, &ai);
  1057. +
  1058. + switch (type) {
  1059. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  1060. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  1061. + ai.src_port = &entry->ipv4.new.src_port;
  1062. + ai.dest_port = &entry->ipv4.new.dest_port;
  1063. + fallthrough;
  1064. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  1065. + ai.src = &entry->ipv4.new.src_ip;
  1066. + ai.dest = &entry->ipv4.new.dest_ip;
  1067. + seq_printf(m, " new=");
  1068. + mtk_print_addr_info(m, &ai);
  1069. + break;
  1070. + }
  1071. +
  1072. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
  1073. + l2 = &entry->ipv6.l2;
  1074. + ib2 = entry->ipv6.ib2;
  1075. + } else {
  1076. + l2 = &entry->ipv4.l2;
  1077. + ib2 = entry->ipv4.ib2;
  1078. + }
  1079. +
  1080. + *((__be32 *)h_source) = htonl(l2->src_mac_hi);
  1081. + *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
  1082. + *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
  1083. + *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
  1084. +
  1085. + seq_printf(m, " eth=%pM->%pM etype=%04x"
  1086. + " vlan=%d,%d ib1=%08x ib2=%08x\n",
  1087. + h_source, h_dest, ntohs(l2->etype),
  1088. + l2->vlan1, l2->vlan2, entry->ib1, ib2);
  1089. + }
  1090. +
  1091. + return 0;
  1092. +}
  1093. +
  1094. +static int
  1095. +mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
  1096. +{
  1097. + return mtk_ppe_debugfs_foe_show(m, private, false);
  1098. +}
  1099. +
  1100. +static int
  1101. +mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
  1102. +{
  1103. + return mtk_ppe_debugfs_foe_show(m, private, true);
  1104. +}
  1105. +
  1106. +static int
  1107. +mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
  1108. +{
  1109. + return single_open(file, mtk_ppe_debugfs_foe_show_all,
  1110. + inode->i_private);
  1111. +}
  1112. +
  1113. +static int
  1114. +mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
  1115. +{
  1116. + return single_open(file, mtk_ppe_debugfs_foe_show_bind,
  1117. + inode->i_private);
  1118. +}
  1119. +
  1120. +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
  1121. +{
  1122. + static const struct file_operations fops_all = {
  1123. + .open = mtk_ppe_debugfs_foe_open_all,
  1124. + .read = seq_read,
  1125. + .llseek = seq_lseek,
  1126. + .release = single_release,
  1127. + };
  1128. +
  1129. + static const struct file_operations fops_bind = {
  1130. + .open = mtk_ppe_debugfs_foe_open_bind,
  1131. + .read = seq_read,
  1132. + .llseek = seq_lseek,
  1133. + .release = single_release,
  1134. + };
  1135. +
  1136. + struct dentry *root;
  1137. +
  1138. + root = debugfs_create_dir("mtk_ppe", NULL);
  1139. + if (!root)
  1140. + return -ENOMEM;
  1141. +
  1142. + debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
  1143. + debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
  1144. +
  1145. + return 0;
  1146. +}
  1147. --- /dev/null
  1148. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
  1149. @@ -0,0 +1,144 @@
  1150. +// SPDX-License-Identifier: GPL-2.0-only
  1151. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  1152. +
  1153. +#ifndef __MTK_PPE_REGS_H
  1154. +#define __MTK_PPE_REGS_H
  1155. +
  1156. +#define MTK_PPE_GLO_CFG 0x200
  1157. +#define MTK_PPE_GLO_CFG_EN BIT(0)
  1158. +#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
  1159. +#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
  1160. +#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
  1161. +#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
  1162. +#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
  1163. +#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
  1164. +#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
  1165. +#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
  1166. +#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
  1167. +#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
  1168. +#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
   1169. +#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12)
  1170. +#define MTK_PPE_GLO_CFG_BUSY BIT(31)
  1171. +
  1172. +#define MTK_PPE_FLOW_CFG 0x204
  1173. +#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
  1174. +#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
  1175. +#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
  1176. +#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
  1177. +#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
  1178. +#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
  1179. +#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
  1180. +#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
  1181. +#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
  1182. +#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
  1183. +#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
  1184. +#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
  1185. +#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
  1186. +#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
  1187. +
  1188. +#define MTK_PPE_IP_PROTO_CHK 0x208
  1189. +#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
  1190. +#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
  1191. +
  1192. +#define MTK_PPE_TB_CFG 0x21c
  1193. +#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
  1194. +#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
  1195. +#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
  1196. +#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
  1197. +#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
  1198. +#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
  1199. +#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
  1200. +#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
  1201. +#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
  1202. +#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
  1203. +#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
  1204. +#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
  1205. +#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
  1206. +
  1207. +enum {
  1208. + MTK_PPE_SCAN_MODE_DISABLED,
  1209. + MTK_PPE_SCAN_MODE_CHECK_AGE,
  1210. + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
  1211. +};
  1212. +
  1213. +enum {
  1214. + MTK_PPE_KEEPALIVE_DISABLE,
  1215. + MTK_PPE_KEEPALIVE_UNICAST_CPU,
  1216. + MTK_PPE_KEEPALIVE_DUP_CPU = 3,
  1217. +};
  1218. +
  1219. +enum {
  1220. + MTK_PPE_SEARCH_MISS_ACTION_DROP,
  1221. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
  1222. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
  1223. +};
  1224. +
  1225. +#define MTK_PPE_TB_BASE 0x220
  1226. +
  1227. +#define MTK_PPE_TB_USED 0x224
  1228. +#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
  1229. +
  1230. +#define MTK_PPE_BIND_RATE 0x228
  1231. +#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
  1232. +#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
  1233. +
  1234. +#define MTK_PPE_BIND_LIMIT0 0x22c
  1235. +#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
  1236. +#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
  1237. +
  1238. +#define MTK_PPE_BIND_LIMIT1 0x230
  1239. +#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
  1240. +#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
  1241. +
  1242. +#define MTK_PPE_KEEPALIVE 0x234
  1243. +#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
  1244. +#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
  1245. +#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
  1246. +
  1247. +#define MTK_PPE_UNBIND_AGE 0x238
  1248. +#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
  1249. +#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
  1250. +
  1251. +#define MTK_PPE_BIND_AGE0 0x23c
  1252. +#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
  1253. +#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
  1254. +
  1255. +#define MTK_PPE_BIND_AGE1 0x240
  1256. +#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
  1257. +#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
  1258. +
  1259. +#define MTK_PPE_HASH_SEED 0x244
  1260. +
  1261. +#define MTK_PPE_DEFAULT_CPU_PORT 0x248
  1262. +#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
  1263. +
  1264. +#define MTK_PPE_MTU_DROP 0x308
  1265. +
  1266. +#define MTK_PPE_VLAN_MTU0 0x30c
  1267. +#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
  1268. +#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
  1269. +
  1270. +#define MTK_PPE_VLAN_MTU1 0x310
  1271. +#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
  1272. +#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
  1273. +
  1274. +#define MTK_PPE_VPM_TPID 0x318
  1275. +
  1276. +#define MTK_PPE_CACHE_CTL 0x320
  1277. +#define MTK_PPE_CACHE_CTL_EN BIT(0)
  1278. +#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
  1279. +#define MTK_PPE_CACHE_CTL_REQ BIT(8)
  1280. +#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
  1281. +#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
  1282. +
  1283. +#define MTK_PPE_MIB_CFG 0x334
  1284. +#define MTK_PPE_MIB_CFG_EN BIT(0)
  1285. +#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
  1286. +
  1287. +#define MTK_PPE_MIB_TB_BASE 0x338
  1288. +
  1289. +#define MTK_PPE_MIB_CACHE_CTL 0x350
  1290. +#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
  1291. +#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
  1292. +
  1293. +#endif