- From: Felix Fietkau <[email protected]>
- Date: Wed, 24 Mar 2021 02:30:54 +0100
- Subject: [PATCH] net: ethernet: mtk_eth_soc: add flow offloading support
- This adds support for offloading IPv4 routed flows, including SNAT/DNAT,
- one VLAN, PPPoE and DSA.
- Signed-off-by: Felix Fietkau <[email protected]>
- Signed-off-by: Pablo Neira Ayuso <[email protected]>
- ---
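-
- Flows reach this driver through the netfilter flowtable infrastructure: a
- flowtable created with "flags offload" binds an ingress flow block to the
- device via TC_SETUP_FT, and established connections are then pushed down as
- FLOW_CLS_REPLACE requests. A minimal nftables sketch that exercises this path
- (interface names lan0/wan are placeholders for the DSA user port and the WAN
- port):
-
-   table inet filter {
-     flowtable ft {
-       hook ingress priority 0; devices = { lan0, wan };
-       flags offload;
-     }
-     chain forward {
-       type filter hook forward priority 0; policy accept;
-       ip protocol { tcp, udp } flow add @ft
-     }
-   }
-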
- create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c
- --- a/drivers/net/ethernet/mediatek/Makefile
- +++ b/drivers/net/ethernet/mediatek/Makefile
- @@ -4,5 +4,5 @@
- #
-
- obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
- -mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
- +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
- obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
- --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- @@ -2858,6 +2858,7 @@ static const struct net_device_ops mtk_n
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mtk_poll_controller,
- #endif
- + .ndo_setup_tc = mtk_eth_setup_tc,
- };
-
- static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
- @@ -3116,6 +3117,10 @@ static int mtk_probe(struct platform_dev
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
- goto err_free_dev;
- +
- + err = mtk_eth_offload_init(eth);
- + if (err)
- + goto err_free_dev;
- }
-
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
- +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
- @@ -15,6 +15,7 @@
- #include <linux/u64_stats_sync.h>
- #include <linux/refcount.h>
- #include <linux/phylink.h>
- +#include <linux/rhashtable.h>
- #include "mtk_ppe.h"
-
- #define MTK_QDMA_PAGE_SIZE 2048
- @@ -40,7 +41,8 @@
- NETIF_F_HW_VLAN_CTAG_RX | \
- NETIF_F_SG | NETIF_F_TSO | \
- NETIF_F_TSO6 | \
- - NETIF_F_IPV6_CSUM)
- + NETIF_F_IPV6_CSUM |\
- + NETIF_F_HW_TC)
- #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
- #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
-
- @@ -929,6 +931,7 @@ struct mtk_eth {
- int ip_align;
-
- struct mtk_ppe ppe;
- + struct rhashtable flow_table;
- };
-
- /* struct mtk_mac - the structure that holds the info about the MACs of the
- @@ -973,4 +976,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk
- int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
- int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
-
- +int mtk_eth_offload_init(struct mtk_eth *eth);
- +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
- + void *type_data);
- +
- +
- #endif /* MTK_ETH_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
- @@ -0,0 +1,485 @@
- +// SPDX-License-Identifier: GPL-2.0-only
- +/*
- + * Copyright (C) 2020 Felix Fietkau <[email protected]>
- + */
- +
- +#include <linux/if_ether.h>
- +#include <linux/rhashtable.h>
- +#include <linux/ip.h>
- +#include <net/flow_offload.h>
- +#include <net/pkt_cls.h>
- +#include <net/dsa.h>
- +#include "mtk_eth_soc.h"
- +
- +struct mtk_flow_data {
- + struct ethhdr eth;
- +
- + union {
- + struct {
- + __be32 src_addr;
- + __be32 dst_addr;
- + } v4;
- + };
- +
- + __be16 src_port;
- + __be16 dst_port;
- +
- + struct {
- + u16 id;
- + __be16 proto;
- + u8 num;
- + } vlan;
- + struct {
- + u16 sid;
- + u8 num;
- + } pppoe;
- +};
- +
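- +/* One offloaded flow: maps the flow cookie to the hash of its FOE entry in the PPE table */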
- +struct mtk_flow_entry {
- + struct rhash_head node;
- + unsigned long cookie;
- + u16 hash;
- +};
- +
- +static const struct rhashtable_params mtk_flow_ht_params = {
- + .head_offset = offsetof(struct mtk_flow_entry, node),
- + .key_offset = offsetof(struct mtk_flow_entry, cookie),
- + .key_len = sizeof(unsigned long),
- + .automatic_shrinking = true,
- +};
- +
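- +/* Current PPE timestamp, used when binding entries and to compute flow idle time */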
- +static u32
- +mtk_eth_timestamp(struct mtk_eth *eth)
- +{
- + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
- +}
- +
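- +/* Write the IPv4 address/port tuple into the FOE entry; the egress flag selects the original vs. translated half */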
- +static int
- +mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- + bool egress)
- +{
- + return mtk_foe_entry_set_ipv4_tuple(foe, egress,
- + data->v4.src_addr, data->src_port,
- + data->v4.dst_addr, data->dst_port);
- +}
- +
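- +/* Apply an ethernet header pedit: values arrive as 32-bit words, a half-word mask means only 16 bits are rewritten */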
- +static void
- +mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
- +{
- + void *dest = eth + act->mangle.offset;
- + const void *src = &act->mangle.val;
- +
- + if (act->mangle.offset > 8)
- + return;
- +
- + if (act->mangle.mask == 0xffff) {
- + src += 2;
- + dest += 2;
- + }
- +
- + memcpy(dest, src, act->mangle.mask ? 2 : 4);
- +}
- +
- +
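- +/* Convert a TCP/UDP port pedit into the source/destination ports of the offloaded tuple */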
- +static int
- +mtk_flow_mangle_ports(const struct flow_action_entry *act,
- + struct mtk_flow_data *data)
- +{
- + u32 val = ntohl(act->mangle.val);
- +
- + switch (act->mangle.offset) {
- + case 0:
- + if (act->mangle.mask == ~htonl(0xffff))
- + data->dst_port = cpu_to_be16(val);
- + else
- + data->src_port = cpu_to_be16(val >> 16);
- + break;
- + case 2:
- + data->dst_port = cpu_to_be16(val);
- + break;
- + default:
- + return -EINVAL;
- + }
- +
- + return 0;
- +}
- +
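- +/* Convert an IPv4 saddr/daddr pedit into the offloaded tuple */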
- +static int
- +mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
- + struct mtk_flow_data *data)
- +{
- + __be32 *dest;
- +
- + switch (act->mangle.offset) {
- + case offsetof(struct iphdr, saddr):
- + dest = &data->v4.src_addr;
- + break;
- + case offsetof(struct iphdr, daddr):
- + dest = &data->v4.dst_addr;
- + break;
- + default:
- + return -EINVAL;
- + }
- +
- + memcpy(dest, &act->mangle.val, sizeof(u32));
- +
- + return 0;
- +}
- +
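- +/* If the target is a DSA user port on the MediaTek switch, redirect to the CPU port's master device and return the switch port index */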
- +static int
- +mtk_flow_get_dsa_port(struct net_device **dev)
- +{
- +#if IS_ENABLED(CONFIG_NET_DSA)
- + struct dsa_port *dp;
- +
- + dp = dsa_port_from_netdev(*dev);
- + if (IS_ERR(dp))
- + return -ENODEV;
- +
- + if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
- + return -ENODEV;
- +
- + *dev = dp->cpu_dp->master;
- +
- + return dp->index;
- +#else
- + return -ENODEV;
- +#endif
- +}
- +
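- +/* Select the PSE destination port for the egress GMAC and, behind DSA, tag the entry with the switch port */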
- +static int
- +mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- + struct net_device *dev)
- +{
- + int pse_port, dsa_port;
- +
- + dsa_port = mtk_flow_get_dsa_port(&dev);
- + if (dsa_port >= 0)
- + mtk_foe_entry_set_dsa(foe, dsa_port);
- +
- + if (dev == eth->netdev[0])
- + pse_port = 1;
- + else if (dev == eth->netdev[1])
- + pse_port = 2;
- + else
- + return -EOPNOTSUPP;
- +
- + mtk_foe_entry_set_pse_port(foe, pse_port);
- +
- + return 0;
- +}
- +
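- +/* Translate a flower rule into a FOE entry and commit it to the PPE table */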
- +static int
- +mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
- +{
- + struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- + struct flow_action_entry *act;
- + struct mtk_flow_data data = {};
- + struct mtk_foe_entry foe;
- + struct net_device *odev = NULL;
- + struct mtk_flow_entry *entry;
- + int offload_type = 0;
- + u16 addr_type = 0;
- + u32 timestamp;
- + u8 l4proto = 0;
- + int err = 0;
- + int hash;
- + int i;
- +
- + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
- + struct flow_match_meta match;
- +
- + flow_rule_match_meta(rule, &match);
- + } else {
- + return -EOPNOTSUPP;
- + }
- +
- + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- + struct flow_match_control match;
- +
- + flow_rule_match_control(rule, &match);
- + addr_type = match.key->addr_type;
- + } else {
- + return -EOPNOTSUPP;
- + }
- +
- + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
- + struct flow_match_basic match;
- +
- + flow_rule_match_basic(rule, &match);
- + l4proto = match.key->ip_proto;
- + } else {
- + return -EOPNOTSUPP;
- + }
- +
- + flow_action_for_each(i, act, &rule->action) {
- + switch (act->id) {
- + case FLOW_ACTION_MANGLE:
- + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
- + mtk_flow_offload_mangle_eth(act, &data.eth);
- + break;
- + case FLOW_ACTION_REDIRECT:
- + odev = act->dev;
- + break;
- + case FLOW_ACTION_CSUM:
- + break;
- + case FLOW_ACTION_VLAN_PUSH:
- + if (data.vlan.num == 1 ||
- + act->vlan.proto != htons(ETH_P_8021Q))
- + return -EOPNOTSUPP;
- +
- + data.vlan.id = act->vlan.vid;
- + data.vlan.proto = act->vlan.proto;
- + data.vlan.num++;
- + break;
- + case FLOW_ACTION_PPPOE_PUSH:
- + if (data.pppoe.num == 1)
- + return -EOPNOTSUPP;
- +
- + data.pppoe.sid = act->pppoe.sid;
- + data.pppoe.num++;
- + break;
- + default:
- + return -EOPNOTSUPP;
- + }
- + }
- +
- + switch (addr_type) {
- + case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- + break;
- + default:
- + return -EOPNOTSUPP;
- + }
- +
- + if (!is_valid_ether_addr(data.eth.h_source) ||
- + !is_valid_ether_addr(data.eth.h_dest))
- + return -EINVAL;
- +
- + err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- + data.eth.h_source,
- + data.eth.h_dest);
- + if (err)
- + return err;
- +
- + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
- + struct flow_match_ports ports;
- +
- + flow_rule_match_ports(rule, &ports);
- + data.src_port = ports.key->src;
- + data.dst_port = ports.key->dst;
- + } else {
- + return -EOPNOTSUPP;
- + }
- +
- + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- + struct flow_match_ipv4_addrs addrs;
- +
- + flow_rule_match_ipv4_addrs(rule, &addrs);
- +
- + data.v4.src_addr = addrs.key->src;
- + data.v4.dst_addr = addrs.key->dst;
- +
- + mtk_flow_set_ipv4_addr(&foe, &data, false);
- + }
- +
- + flow_action_for_each(i, act, &rule->action) {
- + if (act->id != FLOW_ACTION_MANGLE)
- + continue;
- +
- + switch (act->mangle.htype) {
- + case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
- + case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- + err = mtk_flow_mangle_ports(act, &data);
- + break;
- + case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
- + err = mtk_flow_mangle_ipv4(act, &data);
- + break;
- + case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
- + /* handled earlier */
- + break;
- + default:
- + return -EOPNOTSUPP;
- + }
- +
- + if (err)
- + return err;
- + }
- +
- + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- + err = mtk_flow_set_ipv4_addr(&foe, &data, true);
- + if (err)
- + return err;
- + }
- +
- + if (data.vlan.num == 1) {
- + if (data.vlan.proto != htons(ETH_P_8021Q))
- + return -EOPNOTSUPP;
- +
- + mtk_foe_entry_set_vlan(&foe, data.vlan.id);
- + }
- + if (data.pppoe.num == 1)
- + mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
- +
- + err = mtk_flow_set_output_device(eth, &foe, odev);
- + if (err)
- + return err;
- +
- + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- + if (!entry)
- + return -ENOMEM;
- +
- + entry->cookie = f->cookie;
- + timestamp = mtk_eth_timestamp(eth);
- + hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- + if (hash < 0) {
- + err = hash;
- + goto free;
- + }
- +
- + entry->hash = hash;
- + err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
- + mtk_flow_ht_params);
- + if (err < 0)
- + goto clear_flow;
- +
- + return 0;
- +clear_flow:
- + mtk_foe_entry_clear(&eth->ppe, hash);
- +free:
- + kfree(entry);
- + return err;
- +}
- +
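- +/* Tear down a flow: invalidate the hardware FOE entry and drop the software flow table entry */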
- +static int
- +mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
- +{
- + struct mtk_flow_entry *entry;
- +
- + entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
- + mtk_flow_ht_params);
- + if (!entry)
- + return -ENOENT;
- +
- + mtk_foe_entry_clear(&eth->ppe, entry->hash);
- + rhashtable_remove_fast(&eth->flow_table, &entry->node,
- + mtk_flow_ht_params);
- + kfree(entry);
- +
- + return 0;
- +}
- +
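- +/* Estimate lastused from how long the hardware entry has been idle (current PPE timestamp minus the entry's timestamp) */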
- +static int
- +mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
- +{
- + struct mtk_flow_entry *entry;
- + int timestamp;
- + u32 idle;
- +
- + entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
- + mtk_flow_ht_params);
- + if (!entry)
- + return -ENOENT;
- +
- + timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- + if (timestamp < 0)
- + return -ETIMEDOUT;
- +
- + idle = mtk_eth_timestamp(eth) - timestamp;
- + f->stats.lastused = jiffies - idle * HZ;
- +
- + return 0;
- +}
- +
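- +/* Flow block callback: dispatch flower replace/destroy/stats commands for a bound device */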
- +static int
- +mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
- +{
- + struct flow_cls_offload *cls = type_data;
- + struct net_device *dev = cb_priv;
- + struct mtk_mac *mac = netdev_priv(dev);
- + struct mtk_eth *eth = mac->hw;
- +
- + if (!tc_can_offload(dev))
- + return -EOPNOTSUPP;
- +
- + if (type != TC_SETUP_CLSFLOWER)
- + return -EOPNOTSUPP;
- +
- + switch (cls->command) {
- + case FLOW_CLS_REPLACE:
- + return mtk_flow_offload_replace(eth, cls);
- + case FLOW_CLS_DESTROY:
- + return mtk_flow_offload_destroy(eth, cls);
- + case FLOW_CLS_STATS:
- + return mtk_flow_offload_stats(eth, cls);
- + default:
- + return -EOPNOTSUPP;
- + }
- +
- + return 0;
- +}
- +
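- +/* Bind or unbind the ingress flow block, keeping a per-device refcount on the callback */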
- +static int
- +mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
- +{
- + struct mtk_mac *mac = netdev_priv(dev);
- + struct mtk_eth *eth = mac->hw;
- + static LIST_HEAD(block_cb_list);
- + struct flow_block_cb *block_cb;
- + flow_setup_cb_t *cb;
- +
- + if (!eth->ppe.foe_table)
- + return -EOPNOTSUPP;
- +
- + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- + return -EOPNOTSUPP;
- +
- + cb = mtk_eth_setup_tc_block_cb;
- + f->driver_block_list = &block_cb_list;
- +
- + switch (f->command) {
- + case FLOW_BLOCK_BIND:
- + block_cb = flow_block_cb_lookup(f->block, cb, dev);
- + if (block_cb) {
- + flow_block_cb_incref(block_cb);
- + return 0;
- + }
- + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
- + if (IS_ERR(block_cb))
- + return PTR_ERR(block_cb);
- +
- + flow_block_cb_add(block_cb, f);
- + list_add_tail(&block_cb->driver_list, &block_cb_list);
- + return 0;
- + case FLOW_BLOCK_UNBIND:
- + block_cb = flow_block_cb_lookup(f->block, cb, dev);
- + if (!block_cb)
- + return -ENOENT;
- +
- + if (flow_block_cb_decref(block_cb)) {
- + flow_block_cb_remove(block_cb, f);
- + list_del(&block_cb->driver_list);
- + }
- + return 0;
- + default:
- + return -EOPNOTSUPP;
- + }
- +}
- +
- +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
- + void *type_data)
- +{
- + if (type == TC_SETUP_FT)
- + return mtk_eth_setup_tc_block(dev, type_data);
- +
- + return -EOPNOTSUPP;
- +}
- +
- +int mtk_eth_offload_init(struct mtk_eth *eth)
- +{
- + if (!eth->ppe.foe_table)
- + return 0;
- +
- + return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
- +}