610-v5.13-33-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch
From: Felix Fietkau <[email protected]>
Date: Wed, 24 Mar 2021 02:30:54 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add flow offloading support

This adds support for offloading IPv4 routed flows, including SNAT/DNAT,
one VLAN, PPPoE and DSA.

Signed-off-by: Felix Fietkau <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
---
 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c
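
The hook is reached through .ndo_setup_tc (TC_SETUP_FT). Only a narrow
class of flower rules is accepted for offload: a rule must carry META,
CONTROL, BASIC, PORTS and IPv4 address keys, and its action list is
limited to redirect, mangle (Ethernet, IPv4, TCP/UDP ports), checksum, a
single 802.1q VLAN push and a PPPoE push. Anything else is rejected with
-EOPNOTSUPP and stays in the software path.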
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -4,5 +4,5 @@
 #
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
 obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2858,6 +2858,7 @@ static const struct net_device_ops mtk_n
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= mtk_poll_controller,
 #endif
+	.ndo_setup_tc		= mtk_eth_setup_tc,
 };
 
 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3116,6 +3117,10 @@ static int mtk_probe(struct platform_dev
 				   eth->base + MTK_ETH_PPE_BASE, 2);
 		if (err)
 			goto err_free_dev;
+
+		err = mtk_eth_offload_init(eth);
+		if (err)
+			goto err_free_dev;
 	}
 
 	for (i = 0; i < MTK_MAX_DEVS; i++) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,6 +15,7 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/refcount.h>
 #include <linux/phylink.h>
+#include <linux/rhashtable.h>
 #include "mtk_ppe.h"
 
 #define MTK_QDMA_PAGE_SIZE	2048
@@ -40,7 +41,8 @@
 				 NETIF_F_HW_VLAN_CTAG_RX | \
 				 NETIF_F_SG | NETIF_F_TSO | \
 				 NETIF_F_TSO6 | \
-				 NETIF_F_IPV6_CSUM)
+				 NETIF_F_IPV6_CSUM |\
+				 NETIF_F_HW_TC)
 #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
 
 #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
@@ -929,6 +931,7 @@ struct mtk_eth {
 	int				ip_align;
 
 	struct mtk_ppe			ppe;
+	struct rhashtable		flow_table;
 };
 
 /* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -973,4 +976,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk
 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
 
+int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data);
+
+
 #endif /* MTK_ETH_H */
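
The flow_table added above keeps one entry per offloaded flow, keyed by
the cookie that the flower classifier hands over in struct
flow_cls_offload. A minimal sketch of the lifecycle, condensed from
mtk_ppe_offload.c below (kernel-style C, identifiers from this patch,
error handling trimmed):

	struct mtk_flow_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	entry->cookie = f->cookie;	/* rhashtable key */
	entry->hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
	rhashtable_insert_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);

	/* FLOW_CLS_DESTROY and FLOW_CLS_STATS find the entry again by cookie */
	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);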
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Felix Fietkau <[email protected]>
+ */
+
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
+
+struct mtk_flow_data {
+	struct ethhdr eth;
+
+	union {
+		struct {
+			__be32 src_addr;
+			__be32 dst_addr;
+		} v4;
+	};
+
+	__be16 src_port;
+	__be16 dst_port;
+
+	struct {
+		u16 id;
+		__be16 proto;
+		u8 num;
+	} vlan;
+	struct {
+		u16 sid;
+		u8 num;
+	} pppoe;
+};
+
+struct mtk_flow_entry {
+	struct rhash_head node;
+	unsigned long cookie;
+	u16 hash;
+};
+
+static const struct rhashtable_params mtk_flow_ht_params = {
+	.head_offset = offsetof(struct mtk_flow_entry, node),
+	.key_offset = offsetof(struct mtk_flow_entry, cookie),
+	.key_len = sizeof(unsigned long),
+	.automatic_shrinking = true,
+};
+
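+/* Read the current hardware timestamp (register 0x0010), masked to the
+ * width of the bind-timestamp field stored in each FOE entry.
+ */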
+static u32
+mtk_eth_timestamp(struct mtk_eth *eth)
+{
+	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static int
+mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
+		       bool egress)
+{
+	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+					    data->v4.src_addr, data->src_port,
+					    data->v4.dst_addr, data->dst_port);
+}
+
+static void
+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+	void *dest = eth + act->mangle.offset;
+	const void *src = &act->mangle.val;
+
+	if (act->mangle.offset > 8)
+		return;
+
+	if (act->mangle.mask == 0xffff) {
+		src += 2;
+		dest += 2;
+	}
+
+	memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+
+static int
+mtk_flow_mangle_ports(const struct flow_action_entry *act,
+		      struct mtk_flow_data *data)
+{
+	u32 val = ntohl(act->mangle.val);
+
+	switch (act->mangle.offset) {
+	case 0:
+		if (act->mangle.mask == ~htonl(0xffff))
+			data->dst_port = cpu_to_be16(val);
+		else
+			data->src_port = cpu_to_be16(val >> 16);
+		break;
+	case 2:
+		data->dst_port = cpu_to_be16(val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
+		     struct mtk_flow_data *data)
+{
+	__be32 *dest;
+
+	switch (act->mangle.offset) {
+	case offsetof(struct iphdr, saddr):
+		dest = &data->v4.src_addr;
+		break;
+	case offsetof(struct iphdr, daddr):
+		dest = &data->v4.dst_addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(dest, &act->mangle.val, sizeof(u32));
+
+	return 0;
+}
+
+static int
+mtk_flow_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+	struct dsa_port *dp;
+
+	dp = dsa_port_from_netdev(*dev);
+	if (IS_ERR(dp))
+		return -ENODEV;
+
+	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
+		return -ENODEV;
+
+	*dev = dp->cpu_dp->master;
+
+	return dp->index;
+#else
+	return -ENODEV;
+#endif
+}
+
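+/* Map the egress netdevice to a PSE port: GMAC1 -> port 1, GMAC2 -> port 2.
+ * For an MTK DSA switch port, mtk_flow_get_dsa_port() rewrites @dev to the
+ * CPU master device and the entry is additionally tagged with the DSA port.
+ */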
+static int
+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+			   struct net_device *dev)
+{
+	int pse_port, dsa_port;
+
+	dsa_port = mtk_flow_get_dsa_port(&dev);
+	if (dsa_port >= 0)
+		mtk_foe_entry_set_dsa(foe, dsa_port);
+
+	if (dev == eth->netdev[0])
+		pse_port = 1;
+	else if (dev == eth->netdev[1])
+		pse_port = 2;
+	else
+		return -EOPNOTSUPP;
+
+	mtk_foe_entry_set_pse_port(foe, pse_port);
+
+	return 0;
+}
+
+static int
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct flow_action_entry *act;
+	struct mtk_flow_data data = {};
+	struct mtk_foe_entry foe;
+	struct net_device *odev = NULL;
+	struct mtk_flow_entry *entry;
+	int offload_type = 0;
+	u16 addr_type = 0;
+	u32 timestamp;
+	u8 l4proto = 0;
+	int err = 0;
+	int hash;
+	int i;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+		struct flow_match_meta match;
+
+		flow_rule_match_meta(rule, &match);
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
+
+		flow_rule_match_control(rule, &match);
+		addr_type = match.key->addr_type;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		l4proto = match.key->ip_proto;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		switch (act->id) {
+		case FLOW_ACTION_MANGLE:
+			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+				mtk_flow_offload_mangle_eth(act, &data.eth);
+			break;
+		case FLOW_ACTION_REDIRECT:
+			odev = act->dev;
+			break;
+		case FLOW_ACTION_CSUM:
+			break;
+		case FLOW_ACTION_VLAN_PUSH:
+			if (data.vlan.num == 1 ||
+			    act->vlan.proto != htons(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan.id = act->vlan.vid;
+			data.vlan.proto = act->vlan.proto;
+			data.vlan.num++;
+			break;
+		case FLOW_ACTION_PPPOE_PUSH:
+			if (data.pppoe.num == 1)
+				return -EOPNOTSUPP;
+
+			data.pppoe.sid = act->pppoe.sid;
+			data.pppoe.num++;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+
+	switch (addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (!is_valid_ether_addr(data.eth.h_source) ||
+	    !is_valid_ether_addr(data.eth.h_dest))
+		return -EINVAL;
+
+	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
+				    data.eth.h_source,
+				    data.eth.h_dest);
+	if (err)
+		return err;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports ports;
+
+		flow_rule_match_ports(rule, &ports);
+		data.src_port = ports.key->src;
+		data.dst_port = ports.key->dst;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs addrs;
+
+		flow_rule_match_ipv4_addrs(rule, &addrs);
+
+		data.v4.src_addr = addrs.key->src;
+		data.v4.dst_addr = addrs.key->dst;
+
+		mtk_flow_set_ipv4_addr(&foe, &data, false);
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		if (act->id != FLOW_ACTION_MANGLE)
+			continue;
+
+		switch (act->mangle.htype) {
+		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+			err = mtk_flow_mangle_ports(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+			err = mtk_flow_mangle_ipv4(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+			/* handled earlier */
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		if (err)
+			return err;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+		if (err)
+			return err;
+	}
+
+	if (data.vlan.num == 1) {
+		if (data.vlan.proto != htons(ETH_P_8021Q))
+			return -EOPNOTSUPP;
+
+		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+	}
+	if (data.pppoe.num == 1)
+		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+
+	err = mtk_flow_set_output_device(eth, &foe, odev);
+	if (err)
+		return err;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cookie = f->cookie;
+	timestamp = mtk_eth_timestamp(eth);
+	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
+	if (hash < 0) {
+		err = hash;
+		goto free;
+	}
+
+	entry->hash = hash;
+	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
+				     mtk_flow_ht_params);
+	if (err < 0)
+		goto clear_flow;
+
+	return 0;
+clear_flow:
+	mtk_foe_entry_clear(&eth->ppe, hash);
+free:
+	kfree(entry);
+	return err;
+}
+
+static int
+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct mtk_flow_entry *entry;
+
+	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+				  mtk_flow_ht_params);
+	if (!entry)
+		return -ENOENT;
+
+	mtk_foe_entry_clear(&eth->ppe, entry->hash);
+	rhashtable_remove_fast(&eth->flow_table, &entry->node,
+			       mtk_flow_ht_params);
+	kfree(entry);
+
+	return 0;
+}
+
+static int
+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct mtk_flow_entry *entry;
+	int timestamp;
+	u32 idle;
+
+	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+				  mtk_flow_ht_params);
+	if (!entry)
+		return -ENOENT;
+
+	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
+	if (timestamp < 0)
+		return -ETIMEDOUT;
+
+	idle = mtk_eth_timestamp(eth) - timestamp;
+	f->stats.lastused = jiffies - idle * HZ;
+
+	return 0;
+}
+
+static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct flow_cls_offload *cls = type_data;
+	struct net_device *dev = cb_priv;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	if (!tc_can_offload(dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	switch (cls->command) {
+	case FLOW_CLS_REPLACE:
+		return mtk_flow_offload_replace(eth, cls);
+	case FLOW_CLS_DESTROY:
+		return mtk_flow_offload_destroy(eth, cls);
+	case FLOW_CLS_STATS:
+		return mtk_flow_offload_stats(eth, cls);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int
+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	static LIST_HEAD(block_cb_list);
+	struct flow_block_cb *block_cb;
+	flow_setup_cb_t *cb;
+
+	if (!eth->ppe.foe_table)
+		return -EOPNOTSUPP;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	cb = mtk_eth_setup_tc_block_cb;
+	f->driver_block_list = &block_cb_list;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (block_cb) {
+			flow_block_cb_incref(block_cb);
+			return 0;
+		}
+		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &block_cb_list);
+		return 0;
+	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		if (flow_block_cb_decref(block_cb)) {
+			flow_block_cb_remove(block_cb, f);
+			list_del(&block_cb->driver_list);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data)
+{
+	if (type == TC_SETUP_FT)
+		return mtk_eth_setup_tc_block(dev, type_data);
+
+	return -EOPNOTSUPP;
+}
+
+int mtk_eth_offload_init(struct mtk_eth *eth)
+{
+	if (!eth->ppe.foe_table)
+		return 0;
+
+	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
+}
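
The lastused value reported to the flower stats callback is derived purely
from timestamp arithmetic. Below is a standalone demo of that arithmetic
(plain userspace C; HZ and all values are made-up assumptions). It treats
one PPE timestamp tick as one second, which is what the idle * HZ
conversion in mtk_flow_offload_stats() implies:

	/* gcc -o idle_demo idle_demo.c && ./idle_demo */
	#include <stdio.h>

	#define HZ 100				/* assumed CONFIG_HZ */

	int main(void)
	{
		unsigned long jiffies = 500000;	/* pretend current jiffies */
		unsigned int now = 1200;	/* mtk_eth_timestamp(): current tick */
		unsigned int bind = 1140;	/* timestamp stored in the FOE entry */
		unsigned int idle = now - bind;	/* ticks since the flow last matched */
		unsigned long lastused = jiffies - (unsigned long)idle * HZ;

		printf("idle %u ticks -> lastused = %lu jiffies\n", idle, lastused);
		return 0;
	}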