610-v5.13-32-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308
  1. From: Felix Fietkau <[email protected]>
  2. Date: Wed, 24 Mar 2021 02:30:53 +0100
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for
  4. initializing the PPE
  5. The PPE (packet processing engine) is used to offload NAT/routed or even
  6. bridged flows. This patch brings up the PPE and uses it to get a packet
  7. hash. It also contains some functionality that will be used to bring up
  8. flow offloading.
  9. Signed-off-by: Felix Fietkau <[email protected]>
  10. Signed-off-by: Pablo Neira Ayuso <[email protected]>
  11. ---
  12. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
  13. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h
  14. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  15. create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h
  16. --- a/drivers/net/ethernet/mediatek/Makefile
  17. +++ b/drivers/net/ethernet/mediatek/Makefile
  18. @@ -4,5 +4,5 @@
  19. #
  20. obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
  21. -mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
  22. +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
  23. obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
  24. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  25. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  26. @@ -2258,12 +2258,17 @@ static int mtk_open(struct net_device *d
  27. /* we run 2 netdevs on the same dma ring so we only bring it up once */
  28. if (!refcount_read(&eth->dma_refcnt)) {
  29. - int err = mtk_start_dma(eth);
  30. + u32 gdm_config = MTK_GDMA_TO_PDMA;
  31. + int err;
  32. + err = mtk_start_dma(eth);
  33. if (err)
  34. return err;
  35. - mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
  36. + if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
  37. + gdm_config = MTK_GDMA_TO_PPE;
  38. +
  39. + mtk_gdm_config(eth, gdm_config);
  40. napi_enable(&eth->tx_napi);
  41. napi_enable(&eth->rx_napi);
  42. @@ -2330,6 +2335,9 @@ static int mtk_stop(struct net_device *d
  43. mtk_dma_free(eth);
  44. + if (eth->soc->offload_version)
  45. + mtk_ppe_stop(&eth->ppe);
  46. +
  47. return 0;
  48. }
  49. @@ -3058,6 +3066,13 @@ static int mtk_probe(struct platform_dev
  50. goto err_free_dev;
  51. }
  52. + if (eth->soc->offload_version) {
  53. + err = mtk_ppe_init(&eth->ppe, eth->dev,
  54. + eth->base + MTK_ETH_PPE_BASE, 2);
  55. + if (err)
  56. + goto err_free_dev;
  57. + }
  58. +
  59. for (i = 0; i < MTK_MAX_DEVS; i++) {
  60. if (!eth->netdev[i])
  61. continue;
  62. @@ -3132,6 +3147,7 @@ static const struct mtk_soc_data mt7621_
  63. .hw_features = MTK_HW_FEATURES,
  64. .required_clks = MT7621_CLKS_BITMAP,
  65. .required_pctl = false,
  66. + .offload_version = 2,
  67. };
  68. static const struct mtk_soc_data mt7622_data = {
  69. @@ -3140,6 +3156,7 @@ static const struct mtk_soc_data mt7622_
  70. .hw_features = MTK_HW_FEATURES,
  71. .required_clks = MT7622_CLKS_BITMAP,
  72. .required_pctl = false,
  73. + .offload_version = 2,
  74. };
  75. static const struct mtk_soc_data mt7623_data = {
  76. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  77. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  78. @@ -15,6 +15,7 @@
  79. #include <linux/u64_stats_sync.h>
  80. #include <linux/refcount.h>
  81. #include <linux/phylink.h>
  82. +#include "mtk_ppe.h"
  83. #define MTK_QDMA_PAGE_SIZE 2048
  84. #define MTK_MAX_RX_LENGTH 1536
  85. @@ -86,6 +87,7 @@
  86. #define MTK_GDMA_TCS_EN BIT(21)
  87. #define MTK_GDMA_UCS_EN BIT(20)
  88. #define MTK_GDMA_TO_PDMA 0x0
  89. +#define MTK_GDMA_TO_PPE 0x4444
  90. #define MTK_GDMA_DROP_ALL 0x7777
  91. /* Unicast Filter MAC Address Register - Low */
  92. @@ -302,6 +304,12 @@
  93. #define RX_DMA_VID(_x) ((_x) & 0xfff)
  94. /* QDMA descriptor rxd4 */
  95. +#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
  96. +#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
  97. +#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
  98. +#define MTK_RXD4_ALG GENMASK(31, 22)
  99. +
  100. +/* QDMA descriptor rxd4 */
  101. #define RX_DMA_L4_VALID BIT(24)
  102. #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
  103. #define RX_DMA_FPORT_SHIFT 19
  104. @@ -799,6 +807,7 @@ struct mtk_soc_data {
  105. u32 caps;
  106. u32 required_clks;
  107. bool required_pctl;
  108. + u8 offload_version;
  109. netdev_features_t hw_features;
  110. };
  111. @@ -898,6 +907,8 @@ struct mtk_eth {
  112. u32 tx_int_status_reg;
  113. u32 rx_dma_l4_valid;
  114. int ip_align;
  115. +
  116. + struct mtk_ppe ppe;
  117. };
  118. /* struct mtk_mac - the structure that holds the info about the MACs of the
  119. --- /dev/null
  120. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
  121. @@ -0,0 +1,511 @@
  122. +// SPDX-License-Identifier: GPL-2.0-only
  123. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  124. +
  125. +#include <linux/kernel.h>
  126. +#include <linux/jiffies.h>
  127. +#include <linux/delay.h>
  128. +#include <linux/io.h>
  129. +#include <linux/etherdevice.h>
  130. +#include <linux/platform_device.h>
  131. +#include "mtk_ppe.h"
  132. +#include "mtk_ppe_regs.h"
  133. +
  134. +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
  135. +{
  136. + writel(val, ppe->base + reg);
  137. +}
  138. +
  139. +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
  140. +{
  141. + return readl(ppe->base + reg);
  142. +}
  143. +
  144. +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
  145. +{
  146. + u32 val;
  147. +
  148. + val = ppe_r32(ppe, reg);
  149. + val &= ~mask;
  150. + val |= set;
  151. + ppe_w32(ppe, reg, val);
  152. +
  153. + return val;
  154. +}
  155. +
  156. +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
  157. +{
  158. + return ppe_m32(ppe, reg, 0, val);
  159. +}
  160. +
  161. +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
  162. +{
  163. + return ppe_m32(ppe, reg, val, 0);
  164. +}
  165. +
  166. +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
  167. +{
  168. + unsigned long timeout = jiffies + HZ;
  169. +
  170. + while (time_is_after_jiffies(timeout)) {
  171. + if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
  172. + return 0;
  173. +
  174. + usleep_range(10, 20);
  175. + }
  176. +
  177. + dev_err(ppe->dev, "PPE table busy\n");
  178. +
  179. + return -ETIMEDOUT;
  180. +}
  181. +
  182. +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
  183. +{
  184. + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
  185. + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
  186. +}
  187. +
  188. +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
  189. +{
  190. + mtk_ppe_cache_clear(ppe);
  191. +
  192. + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
  193. + enable * MTK_PPE_CACHE_CTL_EN);
  194. +}
  195. +
  196. +static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
  197. +{
  198. + u32 hv1, hv2, hv3;
  199. + u32 hash;
  200. +
  201. + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
  202. + case MTK_PPE_PKT_TYPE_BRIDGE:
  203. + hv1 = e->bridge.src_mac_lo;
  204. + hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
  205. + hv2 = e->bridge.src_mac_hi >> 16;
  206. + hv2 ^= e->bridge.dest_mac_lo;
  207. + hv3 = e->bridge.dest_mac_hi;
  208. + break;
  209. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  210. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  211. + hv1 = e->ipv4.orig.ports;
  212. + hv2 = e->ipv4.orig.dest_ip;
  213. + hv3 = e->ipv4.orig.src_ip;
  214. + break;
  215. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  216. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  217. + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
  218. + hv1 ^= e->ipv6.ports;
  219. +
  220. + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
  221. + hv2 ^= e->ipv6.dest_ip[0];
  222. +
  223. + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
  224. + hv3 ^= e->ipv6.src_ip[0];
  225. + break;
  226. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  227. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  228. + default:
  229. + WARN_ON_ONCE(1);
  230. + return MTK_PPE_HASH_MASK;
  231. + }
  232. +
  233. + hash = (hv1 & hv2) | ((~hv1) & hv3);
  234. + hash = (hash >> 24) | ((hash & 0xffffff) << 8);
  235. + hash ^= hv1 ^ hv2 ^ hv3;
  236. + hash ^= hash >> 16;
  237. + hash <<= 1;
  238. + hash &= MTK_PPE_ENTRIES - 1;
  239. +
  240. + return hash;
  241. +}
  242. +
  243. +static inline struct mtk_foe_mac_info *
  244. +mtk_foe_entry_l2(struct mtk_foe_entry *entry)
  245. +{
  246. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  247. +
  248. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
  249. + return &entry->ipv6.l2;
  250. +
  251. + return &entry->ipv4.l2;
  252. +}
  253. +
  254. +static inline u32 *
  255. +mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
  256. +{
  257. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  258. +
  259. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
  260. + return &entry->ipv6.ib2;
  261. +
  262. + return &entry->ipv4.ib2;
  263. +}
  264. +
  265. +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
  266. + u8 pse_port, u8 *src_mac, u8 *dest_mac)
  267. +{
  268. + struct mtk_foe_mac_info *l2;
  269. + u32 ports_pad, val;
  270. +
  271. + memset(entry, 0, sizeof(*entry));
  272. +
  273. + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
  274. + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
  275. + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
  276. + MTK_FOE_IB1_BIND_TTL |
  277. + MTK_FOE_IB1_BIND_CACHE;
  278. + entry->ib1 = val;
  279. +
  280. + val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
  281. + FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
  282. + FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
  283. +
  284. + if (is_multicast_ether_addr(dest_mac))
  285. + val |= MTK_FOE_IB2_MULTICAST;
  286. +
  287. + ports_pad = 0xa5a5a500 | (l4proto & 0xff);
  288. + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
  289. + entry->ipv4.orig.ports = ports_pad;
  290. + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
  291. + entry->ipv6.ports = ports_pad;
  292. +
  293. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
  294. + entry->ipv6.ib2 = val;
  295. + l2 = &entry->ipv6.l2;
  296. + } else {
  297. + entry->ipv4.ib2 = val;
  298. + l2 = &entry->ipv4.l2;
  299. + }
  300. +
  301. + l2->dest_mac_hi = get_unaligned_be32(dest_mac);
  302. + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
  303. + l2->src_mac_hi = get_unaligned_be32(src_mac);
  304. + l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
  305. +
  306. + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
  307. + l2->etype = ETH_P_IPV6;
  308. + else
  309. + l2->etype = ETH_P_IP;
  310. +
  311. + return 0;
  312. +}
  313. +
  314. +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
  315. +{
  316. + u32 *ib2 = mtk_foe_entry_ib2(entry);
  317. + u32 val;
  318. +
  319. + val = *ib2;
  320. + val &= ~MTK_FOE_IB2_DEST_PORT;
  321. + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
  322. + *ib2 = val;
  323. +
  324. + return 0;
  325. +}
  326. +
  327. +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
  328. + __be32 src_addr, __be16 src_port,
  329. + __be32 dest_addr, __be16 dest_port)
  330. +{
  331. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  332. + struct mtk_ipv4_tuple *t;
  333. +
  334. + switch (type) {
  335. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  336. + if (egress) {
  337. + t = &entry->ipv4.new;
  338. + break;
  339. + }
  340. + fallthrough;
  341. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  342. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  343. + t = &entry->ipv4.orig;
  344. + break;
  345. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  346. + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
  347. + entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
  348. + return 0;
  349. + default:
  350. + WARN_ON_ONCE(1);
  351. + return -EINVAL;
  352. + }
  353. +
  354. + t->src_ip = be32_to_cpu(src_addr);
  355. + t->dest_ip = be32_to_cpu(dest_addr);
  356. +
  357. + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
  358. + return 0;
  359. +
  360. + t->src_port = be16_to_cpu(src_port);
  361. + t->dest_port = be16_to_cpu(dest_port);
  362. +
  363. + return 0;
  364. +}
  365. +
  366. +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
  367. + __be32 *src_addr, __be16 src_port,
  368. + __be32 *dest_addr, __be16 dest_port)
  369. +{
  370. + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  371. + u32 *src, *dest;
  372. + int i;
  373. +
  374. + switch (type) {
  375. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  376. + src = entry->dslite.tunnel_src_ip;
  377. + dest = entry->dslite.tunnel_dest_ip;
  378. + break;
  379. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  380. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  381. + entry->ipv6.src_port = be16_to_cpu(src_port);
  382. + entry->ipv6.dest_port = be16_to_cpu(dest_port);
  383. + fallthrough;
  384. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  385. + src = entry->ipv6.src_ip;
  386. + dest = entry->ipv6.dest_ip;
  387. + break;
  388. + default:
  389. + WARN_ON_ONCE(1);
  390. + return -EINVAL;
  391. + }
  392. +
  393. + for (i = 0; i < 4; i++)
  394. + src[i] = be32_to_cpu(src_addr[i]);
  395. + for (i = 0; i < 4; i++)
  396. + dest[i] = be32_to_cpu(dest_addr[i]);
  397. +
  398. + return 0;
  399. +}
  400. +
  401. +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
  402. +{
  403. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  404. +
  405. + l2->etype = BIT(port);
  406. +
  407. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
  408. + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  409. + else
  410. + l2->etype |= BIT(8);
  411. +
  412. + entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
  413. +
  414. + return 0;
  415. +}
  416. +
  417. +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
  418. +{
  419. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  420. +
  421. + switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
  422. + case 0:
  423. + entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
  424. + FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  425. + l2->vlan1 = vid;
  426. + return 0;
  427. + case 1:
  428. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
  429. + l2->vlan1 = vid;
  430. + l2->etype |= BIT(8);
  431. + } else {
  432. + l2->vlan2 = vid;
  433. + entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
  434. + }
  435. + return 0;
  436. + default:
  437. + return -ENOSPC;
  438. + }
  439. +}
  440. +
  441. +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
  442. +{
  443. + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
  444. +
  445. + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
  446. + (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
  447. + l2->etype = ETH_P_PPP_SES;
  448. +
  449. + entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
  450. + l2->pppoe_id = sid;
  451. +
  452. + return 0;
  453. +}
  454. +
  455. +static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
  456. +{
  457. + return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
  458. + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
  459. +}
  460. +
  461. +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
  462. + u16 timestamp)
  463. +{
  464. + struct mtk_foe_entry *hwe;
  465. + u32 hash;
  466. +
  467. + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
  468. + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
  469. + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
  470. +
  471. + hash = mtk_ppe_hash_entry(entry);
  472. + hwe = &ppe->foe_table[hash];
  473. + if (!mtk_foe_entry_usable(hwe)) {
  474. + hwe++;
  475. + hash++;
  476. +
  477. + if (!mtk_foe_entry_usable(hwe))
  478. + return -ENOSPC;
  479. + }
  480. +
  481. + memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
  482. + wmb();
  483. + hwe->ib1 = entry->ib1;
  484. +
  485. + dma_wmb();
  486. +
  487. + mtk_ppe_cache_clear(ppe);
  488. +
  489. + return hash;
  490. +}
  491. +
  492. +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
  493. + int version)
  494. +{
  495. + struct mtk_foe_entry *foe;
  496. +
  497. + /* need to allocate a separate device, since the PPE DMA access is
  498. + * not coherent.
  499. + */
  500. + ppe->base = base;
  501. + ppe->dev = dev;
  502. + ppe->version = version;
  503. +
  504. + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
  505. + &ppe->foe_phys, GFP_KERNEL);
  506. + if (!foe)
  507. + return -ENOMEM;
  508. +
  509. + ppe->foe_table = foe;
  510. +
  511. + mtk_ppe_debugfs_init(ppe);
  512. +
  513. + return 0;
  514. +}
  515. +
  516. +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
  517. +{
  518. + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
  519. + int i, k;
  520. +
  521. + memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
  522. +
  523. + if (!IS_ENABLED(CONFIG_SOC_MT7621))
  524. + return;
  525. +
  526. + /* skip all entries that cross the 1024 byte boundary */
  527. + for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
  528. + for (k = 0; k < ARRAY_SIZE(skip); k++)
  529. + ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
  530. +}
  531. +
  532. +int mtk_ppe_start(struct mtk_ppe *ppe)
  533. +{
  534. + u32 val;
  535. +
  536. + mtk_ppe_init_foe_table(ppe);
  537. + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
  538. +
  539. + val = MTK_PPE_TB_CFG_ENTRY_80B |
  540. + MTK_PPE_TB_CFG_AGE_NON_L4 |
  541. + MTK_PPE_TB_CFG_AGE_UNBIND |
  542. + MTK_PPE_TB_CFG_AGE_TCP |
  543. + MTK_PPE_TB_CFG_AGE_UDP |
  544. + MTK_PPE_TB_CFG_AGE_TCP_FIN |
  545. + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
  546. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
  547. + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
  548. + MTK_PPE_KEEPALIVE_DISABLE) |
  549. + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
  550. + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
  551. + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
  552. + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
  553. + MTK_PPE_ENTRIES_SHIFT);
  554. + ppe_w32(ppe, MTK_PPE_TB_CFG, val);
  555. +
  556. + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
  557. + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
  558. +
  559. + mtk_ppe_cache_enable(ppe, true);
  560. +
  561. + val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
  562. + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
  563. + MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
  564. + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
  565. + MTK_PPE_FLOW_CFG_IP6_6RD |
  566. + MTK_PPE_FLOW_CFG_IP4_NAT |
  567. + MTK_PPE_FLOW_CFG_IP4_NAPT |
  568. + MTK_PPE_FLOW_CFG_IP4_DSLITE |
  569. + MTK_PPE_FLOW_CFG_L2_BRIDGE |
  570. + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
  571. + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
  572. +
  573. + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
  574. + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
  575. + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
  576. +
  577. + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
  578. + FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
  579. + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
  580. +
  581. + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
  582. + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
  583. + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
  584. +
  585. + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
  586. + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
  587. +
  588. + val = MTK_PPE_BIND_LIMIT1_FULL |
  589. + FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
  590. + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
  591. +
  592. + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
  593. + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
  594. + ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
  595. +
  596. + /* enable PPE */
  597. + val = MTK_PPE_GLO_CFG_EN |
  598. + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
  599. + MTK_PPE_GLO_CFG_IP4_CS_DROP |
  600. + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
  601. + ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
  602. +
  603. + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
  604. +
  605. + return 0;
  606. +}
  607. +
  608. +int mtk_ppe_stop(struct mtk_ppe *ppe)
  609. +{
  610. + u32 val;
  611. + int i;
  612. +
  613. + for (i = 0; i < MTK_PPE_ENTRIES; i++)
  614. + ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
  615. + MTK_FOE_STATE_INVALID);
  616. +
  617. + mtk_ppe_cache_enable(ppe, false);
  618. +
  619. + /* disable offload engine */
  620. + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
  621. + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
  622. +
  623. + /* disable aging */
  624. + val = MTK_PPE_TB_CFG_AGE_NON_L4 |
  625. + MTK_PPE_TB_CFG_AGE_UNBIND |
  626. + MTK_PPE_TB_CFG_AGE_TCP |
  627. + MTK_PPE_TB_CFG_AGE_UDP |
  628. + MTK_PPE_TB_CFG_AGE_TCP_FIN;
  629. + ppe_clear(ppe, MTK_PPE_TB_CFG, val);
  630. +
  631. + return mtk_ppe_wait_busy(ppe);
  632. +}
  633. --- /dev/null
  634. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
  635. @@ -0,0 +1,287 @@
  636. +// SPDX-License-Identifier: GPL-2.0-only
  637. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  638. +
  639. +#ifndef __MTK_PPE_H
  640. +#define __MTK_PPE_H
  641. +
  642. +#include <linux/kernel.h>
  643. +#include <linux/bitfield.h>
  644. +
  645. +#define MTK_ETH_PPE_BASE 0xc00
  646. +
  647. +#define MTK_PPE_ENTRIES_SHIFT 3
  648. +#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
  649. +#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
  650. +
  651. +#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
  652. +#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
  653. +#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
  654. +
  655. +#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
  656. +#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
  657. +#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
  658. +#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
  659. +#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
  660. +#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
  661. +#define MTK_FOE_IB1_BIND_CACHE BIT(22)
  662. +#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
  663. +#define MTK_FOE_IB1_BIND_TTL BIT(24)
  664. +
  665. +#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
  666. +#define MTK_FOE_IB1_STATE GENMASK(29, 28)
  667. +#define MTK_FOE_IB1_UDP BIT(30)
  668. +#define MTK_FOE_IB1_STATIC BIT(31)
  669. +
  670. +enum {
  671. + MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
  672. + MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
  673. + MTK_PPE_PKT_TYPE_BRIDGE = 2,
  674. + MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
  675. + MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
  676. + MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
  677. + MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
  678. +};
  679. +
  680. +#define MTK_FOE_IB2_QID GENMASK(3, 0)
  681. +#define MTK_FOE_IB2_PSE_QOS BIT(4)
  682. +#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
  683. +#define MTK_FOE_IB2_MULTICAST BIT(8)
  684. +
  685. +#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
  686. +#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
  687. +#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
  688. +
  689. +#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
  690. +
  691. +#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
  692. +
  693. +#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
  694. +
  695. +#define MTK_FOE_VLAN2_WHNAT_BSS GENMASK(5, 0)
  696. +#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
  697. +#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
  698. +
  699. +enum {
  700. + MTK_FOE_STATE_INVALID,
  701. + MTK_FOE_STATE_UNBIND,
  702. + MTK_FOE_STATE_BIND,
  703. + MTK_FOE_STATE_FIN
  704. +};
  705. +
  706. +struct mtk_foe_mac_info {
  707. + u16 vlan1;
  708. + u16 etype;
  709. +
  710. + u32 dest_mac_hi;
  711. +
  712. + u16 vlan2;
  713. + u16 dest_mac_lo;
  714. +
  715. + u32 src_mac_hi;
  716. +
  717. + u16 pppoe_id;
  718. + u16 src_mac_lo;
  719. +};
  720. +
  721. +struct mtk_foe_bridge {
  722. + u32 dest_mac_hi;
  723. +
  724. + u16 src_mac_lo;
  725. + u16 dest_mac_lo;
  726. +
  727. + u32 src_mac_hi;
  728. +
  729. + u32 ib2;
  730. +
  731. + u32 _rsv[5];
  732. +
  733. + u32 udf_tsid;
  734. + struct mtk_foe_mac_info l2;
  735. +};
  736. +
  737. +struct mtk_ipv4_tuple {
  738. + u32 src_ip;
  739. + u32 dest_ip;
  740. + union {
  741. + struct {
  742. + u16 dest_port;
  743. + u16 src_port;
  744. + };
  745. + struct {
  746. + u8 protocol;
  747. + u8 _pad[3]; /* fill with 0xa5a5a5 */
  748. + };
  749. + u32 ports;
  750. + };
  751. +};
  752. +
  753. +struct mtk_foe_ipv4 {
  754. + struct mtk_ipv4_tuple orig;
  755. +
  756. + u32 ib2;
  757. +
  758. + struct mtk_ipv4_tuple new;
  759. +
  760. + u16 timestamp;
  761. + u16 _rsv0[3];
  762. +
  763. + u32 udf_tsid;
  764. +
  765. + struct mtk_foe_mac_info l2;
  766. +};
  767. +
  768. +struct mtk_foe_ipv4_dslite {
  769. + struct mtk_ipv4_tuple ip4;
  770. +
  771. + u32 tunnel_src_ip[4];
  772. + u32 tunnel_dest_ip[4];
  773. +
  774. + u8 flow_label[3];
  775. + u8 priority;
  776. +
  777. + u32 udf_tsid;
  778. +
  779. + u32 ib2;
  780. +
  781. + struct mtk_foe_mac_info l2;
  782. +};
  783. +
  784. +struct mtk_foe_ipv6 {
  785. + u32 src_ip[4];
  786. + u32 dest_ip[4];
  787. +
  788. + union {
  789. + struct {
  790. + u8 protocol;
  791. + u8 _pad[3]; /* fill with 0xa5a5a5 */
  792. + }; /* 3-tuple */
  793. + struct {
  794. + u16 dest_port;
  795. + u16 src_port;
  796. + }; /* 5-tuple */
  797. + u32 ports;
  798. + };
  799. +
  800. + u32 _rsv[3];
  801. +
  802. + u32 udf;
  803. +
  804. + u32 ib2;
  805. + struct mtk_foe_mac_info l2;
  806. +};
  807. +
  808. +struct mtk_foe_ipv6_6rd {
  809. + u32 src_ip[4];
  810. + u32 dest_ip[4];
  811. + u16 dest_port;
  812. + u16 src_port;
  813. +
  814. + u32 tunnel_src_ip;
  815. + u32 tunnel_dest_ip;
  816. +
  817. + u16 hdr_csum;
  818. + u8 dscp;
  819. + u8 ttl;
  820. +
  821. + u8 flag;
  822. + u8 pad;
  823. + u8 per_flow_6rd_id;
  824. + u8 pad2;
  825. +
  826. + u32 ib2;
  827. + struct mtk_foe_mac_info l2;
  828. +};
  829. +
  830. +struct mtk_foe_entry {
  831. + u32 ib1;
  832. +
  833. + union {
  834. + struct mtk_foe_bridge bridge;
  835. + struct mtk_foe_ipv4 ipv4;
  836. + struct mtk_foe_ipv4_dslite dslite;
  837. + struct mtk_foe_ipv6 ipv6;
  838. + struct mtk_foe_ipv6_6rd ipv6_6rd;
  839. + u32 data[19];
  840. + };
  841. +};
  842. +
  843. +enum {
  844. + MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
  845. + MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
  846. + MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
  847. + MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
  848. + MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
  849. + MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
  850. + MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
  851. + MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
  852. + MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
  853. + MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
  854. + MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
  855. + MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
  856. + MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
  857. + MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
  858. + MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
  859. + MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
  860. + MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
  861. + MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
  862. + MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
  863. + MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
  864. + MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
  865. + MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
  866. + MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
  867. + MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
  868. + MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
  869. + MTK_PPE_CPU_REASON_INVALID = 0x1f,
  870. +};
  871. +
  872. +struct mtk_ppe {
  873. + struct device *dev;
  874. + void __iomem *base;
  875. + int version;
  876. +
  877. + struct mtk_foe_entry *foe_table;
  878. + dma_addr_t foe_phys;
  879. +
  880. + void *acct_table;
  881. +};
  882. +
  883. +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
  884. + int version);
  885. +int mtk_ppe_start(struct mtk_ppe *ppe);
  886. +int mtk_ppe_stop(struct mtk_ppe *ppe);
  887. +
  888. +static inline void
  889. +mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
  890. +{
  891. + ppe->foe_table[hash].ib1 = 0;
  892. + dma_wmb();
  893. +}
  894. +
  895. +static inline int
  896. +mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
  897. +{
  898. + u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
  899. +
  900. + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
  901. + return -1;
  902. +
  903. + return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
  904. +}
  905. +
  906. +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
  907. + u8 pse_port, u8 *src_mac, u8 *dest_mac);
  908. +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
  909. +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
  910. + __be32 src_addr, __be16 src_port,
  911. + __be32 dest_addr, __be16 dest_port);
  912. +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
  913. + __be32 *src_addr, __be16 src_port,
  914. + __be32 *dest_addr, __be16 dest_port);
  915. +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
  916. +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
  917. +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
  918. +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
  919. + u16 timestamp);
  920. +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
  921. +
  922. +#endif
  923. --- /dev/null
  924. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  925. @@ -0,0 +1,217 @@
  926. +// SPDX-License-Identifier: GPL-2.0-only
  927. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  928. +
  929. +#include <linux/kernel.h>
  930. +#include <linux/debugfs.h>
  931. +#include "mtk_eth_soc.h"
  932. +
  933. +struct mtk_flow_addr_info
  934. +{
  935. + void *src, *dest;
  936. + u16 *src_port, *dest_port;
  937. + bool ipv6;
  938. +};
  939. +
  940. +static const char *mtk_foe_entry_state_str(int state)
  941. +{
  942. + static const char * const state_str[] = {
  943. + [MTK_FOE_STATE_INVALID] = "INV",
  944. + [MTK_FOE_STATE_UNBIND] = "UNB",
  945. + [MTK_FOE_STATE_BIND] = "BND",
  946. + [MTK_FOE_STATE_FIN] = "FIN",
  947. + };
  948. +
  949. + if (state >= ARRAY_SIZE(state_str) || !state_str[state])
  950. + return "UNK";
  951. +
  952. + return state_str[state];
  953. +}
  954. +
  955. +static const char *mtk_foe_pkt_type_str(int type)
  956. +{
  957. + static const char * const type_str[] = {
  958. + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
  959. + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
  960. + [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
  961. + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
  962. + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
  963. + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
  964. + [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
  965. + };
  966. +
  967. + if (type >= ARRAY_SIZE(type_str) || !type_str[type])
  968. + return "UNKNOWN";
  969. +
  970. + return type_str[type];
  971. +}
  972. +
  973. +static void
  974. +mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
  975. +{
  976. + u32 n_addr[4];
  977. + int i;
  978. +
  979. + if (!ipv6) {
  980. + seq_printf(m, "%pI4h", addr);
  981. + return;
  982. + }
  983. +
  984. + for (i = 0; i < ARRAY_SIZE(n_addr); i++)
  985. + n_addr[i] = htonl(addr[i]);
  986. + seq_printf(m, "%pI6", n_addr);
  987. +}
  988. +
  989. +static void
  990. +mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
  991. +{
  992. + mtk_print_addr(m, ai->src, ai->ipv6);
  993. + if (ai->src_port)
  994. + seq_printf(m, ":%d", *ai->src_port);
  995. + seq_printf(m, "->");
  996. + mtk_print_addr(m, ai->dest, ai->ipv6);
  997. + if (ai->dest_port)
  998. + seq_printf(m, ":%d", *ai->dest_port);
  999. +}
  1000. +
  1001. +static int
  1002. +mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
  1003. +{
  1004. + struct mtk_ppe *ppe = m->private;
  1005. + int i, count;
  1006. +
  1007. + for (i = 0, count = 0; i < MTK_PPE_ENTRIES; i++) {
  1008. + struct mtk_foe_entry *entry = &ppe->foe_table[i];
  1009. + struct mtk_foe_mac_info *l2;
  1010. + struct mtk_flow_addr_info ai = {};
  1011. + unsigned char h_source[ETH_ALEN];
  1012. + unsigned char h_dest[ETH_ALEN];
  1013. + int type, state;
  1014. + u32 ib2;
  1015. +
  1017. + state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
  1018. + if (!state)
  1019. + continue;
  1020. +
  1021. + if (bind && state != MTK_FOE_STATE_BIND)
  1022. + continue;
  1023. +
  1024. + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
  1025. + seq_printf(m, "%05x %s %7s", i,
  1026. + mtk_foe_entry_state_str(state),
  1027. + mtk_foe_pkt_type_str(type));
  1028. +
  1029. + switch (type) {
  1030. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  1031. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  1032. + ai.src_port = &entry->ipv4.orig.src_port;
  1033. + ai.dest_port = &entry->ipv4.orig.dest_port;
  1034. + fallthrough;
  1035. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  1036. + ai.src = &entry->ipv4.orig.src_ip;
  1037. + ai.dest = &entry->ipv4.orig.dest_ip;
  1038. + break;
  1039. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
  1040. + ai.src_port = &entry->ipv6.src_port;
  1041. + ai.dest_port = &entry->ipv6.dest_port;
  1042. + fallthrough;
  1043. + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
  1044. + case MTK_PPE_PKT_TYPE_IPV6_6RD:
  1045. + ai.src = &entry->ipv6.src_ip;
  1046. + ai.dest = &entry->ipv6.dest_ip;
  1047. + ai.ipv6 = true;
  1048. + break;
  1049. + }
  1050. +
  1051. + seq_printf(m, " orig=");
  1052. + mtk_print_addr_info(m, &ai);
  1053. +
  1054. + switch (type) {
  1055. + case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
  1056. + case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
  1057. + ai.src_port = &entry->ipv4.new.src_port;
  1058. + ai.dest_port = &entry->ipv4.new.dest_port;
  1059. + fallthrough;
  1060. + case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
  1061. + ai.src = &entry->ipv4.new.src_ip;
  1062. + ai.dest = &entry->ipv4.new.dest_ip;
  1063. + seq_printf(m, " new=");
  1064. + mtk_print_addr_info(m, &ai);
  1065. + break;
  1066. + }
  1067. +
  1068. + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
  1069. + l2 = &entry->ipv6.l2;
  1070. + ib2 = entry->ipv6.ib2;
  1071. + } else {
  1072. + l2 = &entry->ipv4.l2;
  1073. + ib2 = entry->ipv4.ib2;
  1074. + }
  1075. +
  1076. + *((__be32 *)h_source) = htonl(l2->src_mac_hi);
  1077. + *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
  1078. + *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
  1079. + *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
  1080. +
  1081. + seq_printf(m, " eth=%pM->%pM etype=%04x"
  1082. + " vlan=%d,%d ib1=%08x ib2=%08x\n",
  1083. + h_source, h_dest, ntohs(l2->etype),
  1084. + l2->vlan1, l2->vlan2, entry->ib1, ib2);
  1085. + }
  1086. +
  1087. + return 0;
  1088. +}
  1089. +
  1090. +static int
  1091. +mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
  1092. +{
  1093. + return mtk_ppe_debugfs_foe_show(m, private, false);
  1094. +}
  1095. +
  1096. +static int
  1097. +mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
  1098. +{
  1099. + return mtk_ppe_debugfs_foe_show(m, private, true);
  1100. +}
  1101. +
  1102. +static int
  1103. +mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
  1104. +{
  1105. + return single_open(file, mtk_ppe_debugfs_foe_show_all,
  1106. + inode->i_private);
  1107. +}
  1108. +
  1109. +static int
  1110. +mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
  1111. +{
  1112. + return single_open(file, mtk_ppe_debugfs_foe_show_bind,
  1113. + inode->i_private);
  1114. +}
  1115. +
  1116. +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
  1117. +{
  1118. + static const struct file_operations fops_all = {
  1119. + .open = mtk_ppe_debugfs_foe_open_all,
  1120. + .read = seq_read,
  1121. + .llseek = seq_lseek,
  1122. + .release = single_release,
  1123. + };
  1124. +
  1125. + static const struct file_operations fops_bind = {
  1126. + .open = mtk_ppe_debugfs_foe_open_bind,
  1127. + .read = seq_read,
  1128. + .llseek = seq_lseek,
  1129. + .release = single_release,
  1130. + };
  1131. +
  1132. + struct dentry *root;
  1133. +
  1134. + root = debugfs_create_dir("mtk_ppe", NULL);
  1135. + if (!root)
  1136. + return -ENOMEM;
  1137. +
  1138. + debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
  1139. + debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
  1140. +
  1141. + return 0;
  1142. +}
  1143. --- /dev/null
  1144. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
  1145. @@ -0,0 +1,144 @@
  1146. +// SPDX-License-Identifier: GPL-2.0-only
  1147. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  1148. +
  1149. +#ifndef __MTK_PPE_REGS_H
  1150. +#define __MTK_PPE_REGS_H
  1151. +
  1152. +#define MTK_PPE_GLO_CFG 0x200
  1153. +#define MTK_PPE_GLO_CFG_EN BIT(0)
  1154. +#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
  1155. +#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
  1156. +#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
  1157. +#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
  1158. +#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
  1159. +#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
  1160. +#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
  1161. +#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
  1162. +#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
  1163. +#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
  1164. +#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
  1165. +#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12)
  1166. +#define MTK_PPE_GLO_CFG_BUSY BIT(31)
  1167. +
  1168. +#define MTK_PPE_FLOW_CFG 0x204
  1169. +#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
  1170. +#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
  1171. +#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
  1172. +#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
  1173. +#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
  1174. +#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
  1175. +#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
  1176. +#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
  1177. +#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
  1178. +#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
  1179. +#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
  1180. +#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
  1181. +#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
  1182. +#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
  1183. +
  1184. +#define MTK_PPE_IP_PROTO_CHK 0x208
  1185. +#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
  1186. +#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
  1187. +
  1188. +#define MTK_PPE_TB_CFG 0x21c
  1189. +#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
  1190. +#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
  1191. +#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
  1192. +#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
  1193. +#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
  1194. +#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
  1195. +#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
  1196. +#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
  1197. +#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
  1198. +#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
  1199. +#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
  1200. +#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
  1201. +#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
  1202. +
  1203. +enum {
  1204. + MTK_PPE_SCAN_MODE_DISABLED,
  1205. + MTK_PPE_SCAN_MODE_CHECK_AGE,
  1206. + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
  1207. +};
  1208. +
  1209. +enum {
  1210. + MTK_PPE_KEEPALIVE_DISABLE,
  1211. + MTK_PPE_KEEPALIVE_UNICAST_CPU,
  1212. + MTK_PPE_KEEPALIVE_DUP_CPU = 3,
  1213. +};
  1214. +
  1215. +enum {
  1216. + MTK_PPE_SEARCH_MISS_ACTION_DROP,
  1217. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
  1218. + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
  1219. +};
  1220. +
  1221. +#define MTK_PPE_TB_BASE 0x220
  1222. +
  1223. +#define MTK_PPE_TB_USED 0x224
  1224. +#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
  1225. +
  1226. +#define MTK_PPE_BIND_RATE 0x228
  1227. +#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
  1228. +#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
  1229. +
  1230. +#define MTK_PPE_BIND_LIMIT0 0x22c
  1231. +#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
  1232. +#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
  1233. +
  1234. +#define MTK_PPE_BIND_LIMIT1 0x230
  1235. +#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
  1236. +#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
  1237. +
  1238. +#define MTK_PPE_KEEPALIVE 0x234
  1239. +#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
  1240. +#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
  1241. +#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
  1242. +
  1243. +#define MTK_PPE_UNBIND_AGE 0x238
  1244. +#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
  1245. +#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
  1246. +
  1247. +#define MTK_PPE_BIND_AGE0 0x23c
  1248. +#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
  1249. +#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
  1250. +
  1251. +#define MTK_PPE_BIND_AGE1 0x240
  1252. +#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
  1253. +#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
  1254. +
  1255. +#define MTK_PPE_HASH_SEED 0x244
  1256. +
  1257. +#define MTK_PPE_DEFAULT_CPU_PORT 0x248
  1258. +#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
  1259. +
  1260. +#define MTK_PPE_MTU_DROP 0x308
  1261. +
  1262. +#define MTK_PPE_VLAN_MTU0 0x30c
  1263. +#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
  1264. +#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
  1265. +
  1266. +#define MTK_PPE_VLAN_MTU1 0x310
  1267. +#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
  1268. +#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
  1269. +
  1270. +#define MTK_PPE_VPM_TPID 0x318
  1271. +
  1272. +#define MTK_PPE_CACHE_CTL 0x320
  1273. +#define MTK_PPE_CACHE_CTL_EN BIT(0)
  1274. +#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
  1275. +#define MTK_PPE_CACHE_CTL_REQ BIT(8)
  1276. +#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
  1277. +#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
  1278. +
  1279. +#define MTK_PPE_MIB_CFG 0x334
  1280. +#define MTK_PPE_MIB_CFG_EN BIT(0)
  1281. +#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
  1282. +
  1283. +#define MTK_PPE_MIB_TB_BASE 0x338
  1284. +
  1285. +#define MTK_PPE_MIB_CACHE_CTL 0x350
  1286. +#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
  1287. +#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
  1288. +
  1289. +#endif