600-bridge_offload.patch
  1. --- a/include/linux/if_bridge.h
  2. +++ b/include/linux/if_bridge.h
  3. @@ -59,6 +59,7 @@ struct br_ip_list {
  4. #define BR_MRP_LOST_IN_CONT BIT(19)
  5. #define BR_TX_FWD_OFFLOAD BIT(20)
  6. #define BR_BPDU_FILTER BIT(21)
  7. +#define BR_OFFLOAD BIT(22)
  8. #define BR_DEFAULT_AGEING_TIME (300 * HZ)
  9. --- a/net/bridge/Makefile
  10. +++ b/net/bridge/Makefile
  11. @@ -5,7 +5,7 @@
  12. obj-$(CONFIG_BRIDGE) += bridge.o
  13. -bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
  14. +bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
  15. br_ioctl.o br_stp.o br_stp_bpdu.o \
  16. br_stp_if.o br_stp_timer.o br_netlink.o \
  17. br_netlink_tunnel.o br_arp_nd_proxy.o
  18. --- a/net/bridge/br.c
  19. +++ b/net/bridge/br.c
  20. @@ -18,6 +18,7 @@
  21. #include <net/switchdev.h>
  22. #include "br_private.h"
  23. +#include "br_private_offload.h"
  24. /*
  25. * Handle changes in state of network devices enslaved to a bridge.
  26. @@ -381,6 +382,10 @@ static int __init br_init(void)
  27. if (err)
  28. goto err_out;
  29. + err = br_offload_init();
  30. + if (err)
  31. + goto err_out0;
  32. +
  33. err = register_pernet_subsys(&br_net_ops);
  34. if (err)
  35. goto err_out1;
  36. @@ -430,6 +435,8 @@ err_out3:
  37. err_out2:
  38. unregister_pernet_subsys(&br_net_ops);
  39. err_out1:
  40. + br_offload_fini();
  41. +err_out0:
  42. br_fdb_fini();
  43. err_out:
  44. stp_proto_unregister(&br_stp_proto);
  45. @@ -452,6 +459,7 @@ static void __exit br_deinit(void)
  46. #if IS_ENABLED(CONFIG_ATM_LANE)
  47. br_fdb_test_addr_hook = NULL;
  48. #endif
  49. + br_offload_fini();
  50. br_fdb_fini();
  51. }
  52. --- a/net/bridge/br_device.c
  53. +++ b/net/bridge/br_device.c
  54. @@ -524,6 +524,8 @@ void br_dev_setup(struct net_device *dev
  55. br->bridge_hello_time = br->hello_time = 2 * HZ;
  56. br->bridge_forward_delay = br->forward_delay = 15 * HZ;
  57. br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
  58. + br->offload_cache_size = 128;
  59. + br->offload_cache_reserved = 8;
  60. dev->max_mtu = ETH_MAX_MTU;
  61. br_netfilter_rtable_init(br);
  62. --- a/net/bridge/br_fdb.c
  63. +++ b/net/bridge/br_fdb.c
  64. @@ -23,6 +23,7 @@
  65. #include <net/switchdev.h>
  66. #include <trace/events/bridge.h>
  67. #include "br_private.h"
  68. +#include "br_private_offload.h"
  69. static const struct rhashtable_params br_fdb_rht_params = {
  70. .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
  71. @@ -518,6 +519,8 @@ static struct net_bridge_fdb_entry *fdb_
  72. fdb->key.vlan_id = vid;
  73. fdb->flags = flags;
  74. fdb->updated = fdb->used = jiffies;
  75. + INIT_HLIST_HEAD(&fdb->offload_in);
  76. + INIT_HLIST_HEAD(&fdb->offload_out);
  77. if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
  78. &fdb->rhnode,
  79. br_fdb_rht_params)) {
  80. @@ -794,6 +797,8 @@ static void fdb_notify(struct net_bridge
  81. struct sk_buff *skb;
  82. int err = -ENOBUFS;
  83. + br_offload_fdb_update(fdb);
  84. +
  85. if (swdev_notify)
  86. br_switchdev_fdb_notify(br, fdb, type);
  87. --- a/net/bridge/br_forward.c
  88. +++ b/net/bridge/br_forward.c
  89. @@ -16,6 +16,7 @@
  90. #include <linux/if_vlan.h>
  91. #include <linux/netfilter_bridge.h>
  92. #include "br_private.h"
  93. +#include "br_private_offload.h"
  94. /* Don't forward packets to originating port or forwarding disabled */
  95. static inline int should_deliver(const struct net_bridge_port *p,
  96. @@ -32,6 +33,8 @@ static inline int should_deliver(const s
  97. int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
  98. {
  99. + br_offload_output(skb);
  100. +
  101. skb_push(skb, ETH_HLEN);
  102. if (!is_skb_forwardable(skb->dev, skb))
  103. goto drop;
  104. --- a/net/bridge/br_if.c
  105. +++ b/net/bridge/br_if.c
  106. @@ -25,6 +25,7 @@
  107. #include <net/net_namespace.h>
  108. #include "br_private.h"
  109. +#include "br_private_offload.h"
  110. /*
  111. * Determine initial path cost based on speed.
  112. @@ -428,7 +429,7 @@ static struct net_bridge_port *new_nbp(s
  113. p->path_cost = port_cost(dev);
  114. p->priority = 0x8000 >> BR_PORT_BITS;
  115. p->port_no = index;
  116. - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
  117. + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
  118. br_init_port(p);
  119. br_set_state(p, BR_STATE_DISABLED);
  120. br_stp_port_timer_init(p);
  121. @@ -771,6 +772,9 @@ void br_port_flags_change(struct net_bri
  122. if (mask & BR_NEIGH_SUPPRESS)
  123. br_recalculate_neigh_suppress_enabled(br);
  124. +
  125. + if (mask & BR_OFFLOAD)
  126. + br_offload_port_state(p);
  127. }
  128. bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
  129. --- a/net/bridge/br_input.c
  130. +++ b/net/bridge/br_input.c
  131. @@ -22,6 +22,7 @@
  132. #include <linux/rculist.h>
  133. #include "br_private.h"
  134. #include "br_private_tunnel.h"
  135. +#include "br_private_offload.h"
  136. static int
  137. br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
  138. @@ -164,6 +165,7 @@ int br_handle_frame_finish(struct net *n
  139. dst->used = now;
  140. br_forward(dst->dst, skb, local_rcv, false);
  141. } else {
  142. + br_offload_skb_disable(skb);
  143. if (!mcast_hit)
  144. br_flood(br, skb, pkt_type, local_rcv, false);
  145. else
  146. @@ -297,6 +299,9 @@ static rx_handler_result_t br_handle_fra
  147. memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
  148. p = br_port_get_rcu(skb->dev);
  149. + if (br_offload_input(p, skb))
  150. + return RX_HANDLER_CONSUMED;
  151. +
  152. if (p->flags & BR_VLAN_TUNNEL)
  153. br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));
  154. --- /dev/null
  155. +++ b/net/bridge/br_offload.c
  156. @@ -0,0 +1,438 @@
  157. +// SPDX-License-Identifier: GPL-2.0-only
  158. +#include <linux/kernel.h>
  159. +#include <linux/workqueue.h>
  160. +#include "br_private.h"
  161. +#include "br_private_offload.h"
  162. +
  163. +static DEFINE_SPINLOCK(offload_lock);
  164. +
  165. +struct bridge_flow_key {
  166. + u8 dest[ETH_ALEN];
  167. + u8 src[ETH_ALEN];
  168. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  169. + u16 vlan_tag;
  170. + bool vlan_present;
  171. +#endif
  172. +};
  173. +
  174. +struct bridge_flow {
  175. + struct net_bridge_port *port;
  176. + struct rhash_head node;
  177. + struct bridge_flow_key key;
  178. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  179. + bool vlan_out_present;
  180. + u16 vlan_out;
  181. +#endif
  182. +
  183. + unsigned long used;
  184. + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
  185. + struct hlist_node fdb_list_in, fdb_list_out;
  186. +
  187. + struct rcu_head rcu;
  188. +};
  189. +
  190. +static const struct rhashtable_params flow_params = {
  191. + .automatic_shrinking = true,
  192. + .head_offset = offsetof(struct bridge_flow, node),
  193. + .key_len = sizeof(struct bridge_flow_key),
  194. + .key_offset = offsetof(struct bridge_flow, key),
  195. +};
  196. +
  197. +static struct kmem_cache *offload_cache __read_mostly;
  198. +
  199. +static void
  200. +flow_rcu_free(struct rcu_head *head)
  201. +{
  202. + struct bridge_flow *flow;
  203. +
  204. + flow = container_of(head, struct bridge_flow, rcu);
  205. + kmem_cache_free(offload_cache, flow);
  206. +}
  207. +
  208. +static void
  209. +__br_offload_flow_free(struct bridge_flow *flow)
  210. +{
  211. + flow->used = 0;
  212. + hlist_del(&flow->fdb_list_in);
  213. + hlist_del(&flow->fdb_list_out);
  214. +
  215. + call_rcu(&flow->rcu, flow_rcu_free);
  216. +}
  217. +
  218. +static void
  219. +br_offload_flow_free(struct bridge_flow *flow)
  220. +{
  221. + if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
  222. + flow_params) != 0)
  223. + return;
  224. +
  225. + __br_offload_flow_free(flow);
  226. +}
  227. +
  228. +static bool
  229. +br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
  230. + struct net_bridge_fdb_entry *fdb)
  231. +{
  232. + if (!time_after(flow->used, fdb->updated))
  233. + return false;
  234. +
  235. + fdb->updated = flow->used;
  236. +
  237. + return true;
  238. +}
  239. +
  240. +
  241. +static void
  242. +br_offload_flow_refresh_time(struct bridge_flow *flow)
  243. +{
  244. + br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
  245. + br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
  246. +}
  247. +
  248. +static void
  249. +br_offload_destroy_cb(void *ptr, void *arg)
  250. +{
  251. + struct bridge_flow *flow = ptr;
  252. +
  253. + __br_offload_flow_free(flow);
  254. +}
  255. +
  256. +static bool
  257. +br_offload_need_gc(struct net_bridge_port *p)
  258. +{
  259. + return (atomic_read(&p->offload.rht.nelems) +
  260. + p->br->offload_cache_reserved) >= p->br->offload_cache_size;
  261. +}
  262. +
  263. +static void
  264. +br_offload_gc_work(struct work_struct *work)
  265. +{
  266. + struct rhashtable_iter hti;
  267. + struct net_bridge_port *p;
  268. + struct bridge_flow *gc_flow = NULL;
  269. + struct bridge_flow *flow;
  270. + unsigned long gc_used;
  271. +
  272. + p = container_of(work, struct net_bridge_port, offload.gc_work);
  273. +
  274. + if (!br_offload_need_gc(p))
  275. + return;
  276. +
  277. + rhashtable_walk_enter(&p->offload.rht, &hti);
  278. + rhashtable_walk_start(&hti);
  279. + while ((flow = rhashtable_walk_next(&hti)) != NULL) {
  280. + unsigned long used;
  281. +
  282. + if (IS_ERR(flow))
  283. + continue;
  284. +
  285. + used = READ_ONCE(flow->used);
  286. + if (!used)
  287. + continue;
  288. +
  289. + if (gc_flow && !time_before(used, gc_used))
  290. + continue;
  291. +
  292. + gc_flow = flow;
  293. + gc_used = used;
  294. + }
  295. + rhashtable_walk_stop(&hti);
  296. + rhashtable_walk_exit(&hti);
  297. +
  298. + if (!gc_flow)
  299. + return;
  300. +
  301. + spin_lock_bh(&offload_lock);
  302. + if (br_offload_need_gc(p) && gc_flow &&
  303. + gc_flow->used == gc_used)
  304. + br_offload_flow_free(gc_flow);
  305. + if (p->offload.enabled && br_offload_need_gc(p))
  306. + queue_work(system_long_wq, work);
  307. + spin_unlock_bh(&offload_lock);
  308. +
  309. +}
  310. +
  311. +void br_offload_port_state(struct net_bridge_port *p)
  312. +{
  313. + struct net_bridge_port_offload *o = &p->offload;
  314. + bool enabled = true;
  315. + bool flush = false;
  316. +
  317. + if (p->state != BR_STATE_FORWARDING ||
  318. + !(p->flags & BR_OFFLOAD))
  319. + enabled = false;
  320. +
  321. + spin_lock_bh(&offload_lock);
  322. + if (o->enabled == enabled)
  323. + goto out;
  324. +
  325. + if (enabled) {
  326. + if (!o->gc_work.func)
  327. + INIT_WORK(&o->gc_work, br_offload_gc_work);
  328. + rhashtable_init(&o->rht, &flow_params);
  329. + } else {
  330. + flush = true;
  331. + rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
  332. + }
  333. +
  334. + o->enabled = enabled;
  335. +
  336. +out:
  337. + spin_unlock_bh(&offload_lock);
  338. +
  339. + if (flush)
  340. + flush_work(&o->gc_work);
  341. +}
  342. +
  343. +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
  344. +{
  345. + struct bridge_flow *f;
  346. + struct hlist_node *tmp;
  347. +
  348. + spin_lock_bh(&offload_lock);
  349. +
  350. + hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
  351. + br_offload_flow_free(f);
  352. +
  353. + hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
  354. + br_offload_flow_free(f);
  355. +
  356. + spin_unlock_bh(&offload_lock);
  357. +}
  358. +
  359. +static void
  360. +br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
  361. + struct sk_buff *skb)
  362. +{
  363. + memset(key, 0, sizeof(*key));
  364. + memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
  365. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  366. + if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
  367. + return;
  368. +
  369. + if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
  370. + return;
  371. +
  372. + key->vlan_present = true;
  373. + key->vlan_tag = skb_vlan_tag_get_id(skb);
  374. +#endif
  375. +}
  376. +
  377. +void br_offload_output(struct sk_buff *skb)
  378. +{
  379. + struct net_bridge_port_offload *o;
  380. + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
  381. + struct net_bridge_port *p, *inp;
  382. + struct net_device *dev;
  383. + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
  384. + struct net_bridge_vlan_group *vg;
  385. + struct bridge_flow_key key;
  386. + struct bridge_flow *flow;
  387. + u16 vlan;
  388. +
  389. + if (!cb->offload)
  390. + return;
  391. +
  392. + rcu_read_lock();
  393. +
  394. + p = br_port_get_rcu(skb->dev);
  395. + if (!p)
  396. + goto out;
  397. +
  398. + o = &p->offload;
  399. + if (!o->enabled)
  400. + goto out;
  401. +
  402. + if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
  403. + goto out;
  404. +
  405. + dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
  406. + if (!dev)
  407. + goto out;
  408. +
  409. + inp = br_port_get_rcu(dev);
  410. + if (!inp)
  411. + goto out;
  412. +
  413. + vg = nbp_vlan_group_rcu(inp);
  414. + vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
  415. + fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
  416. + if (!fdb_in || !fdb_in->dst)
  417. + goto out;
  418. +
  419. + vg = nbp_vlan_group_rcu(p);
  420. + vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
  421. + fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
  422. + if (!fdb_out || !fdb_out->dst)
  423. + goto out;
  424. +
  425. + br_offload_prepare_key(p, &key, skb);
  426. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  427. + key.vlan_present = cb->input_vlan_present;
  428. + key.vlan_tag = cb->input_vlan_tag;
  429. +#endif
  430. +
  431. + flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
  432. + flow->port = inp;
  433. + memcpy(&flow->key, &key, sizeof(key));
  434. +
  435. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  436. + flow->vlan_out_present = skb_vlan_tag_present(skb);
  437. + flow->vlan_out = skb_vlan_tag_get(skb);
  438. +#endif
  439. +
  440. + flow->fdb_in = fdb_in;
  441. + flow->fdb_out = fdb_out;
  442. + flow->used = jiffies;
  443. +
  444. + spin_lock_bh(&offload_lock);
  445. + if (!o->enabled ||
  446. + atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
  447. + rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
  448. + kmem_cache_free(offload_cache, flow);
  449. + goto out_unlock;
  450. + }
  451. +
  452. + hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
  453. + hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
  454. +
  455. + if (br_offload_need_gc(p))
  456. + queue_work(system_long_wq, &p->offload.gc_work);
  457. +
  458. +out_unlock:
  459. + spin_unlock_bh(&offload_lock);
  460. +
  461. +out:
  462. + rcu_read_unlock();
  463. +}
  464. +
  465. +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
  466. +{
  467. + struct net_bridge_port_offload *o = &p->offload;
  468. + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
  469. + struct bridge_flow_key key;
  470. + struct net_bridge_port *dst;
  471. + struct bridge_flow *flow;
  472. + unsigned long now = jiffies;
  473. + bool ret = false;
  474. +
  475. + if (skb->len < sizeof(key))
  476. + return false;
  477. +
  478. + if (!o->enabled)
  479. + return false;
  480. +
  481. + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
  482. + return false;
  483. +
  484. + br_offload_prepare_key(p, &key, skb);
  485. +
  486. + rcu_read_lock();
  487. + flow = rhashtable_lookup(&o->rht, &key, flow_params);
  488. + if (!flow) {
  489. + cb->offload = 1;
  490. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  491. + cb->input_vlan_present = key.vlan_present != 0;
  492. + cb->input_vlan_tag = key.vlan_tag;
  493. +#endif
  494. + cb->input_ifindex = p->dev->ifindex;
  495. + goto out;
  496. + }
  497. +
  498. + if (flow->fdb_in->dst != p)
  499. + goto out;
  500. +
  501. + dst = flow->fdb_out->dst;
  502. + if (!dst)
  503. + goto out;
  504. +
  505. + ret = true;
  506. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  507. + if (!flow->vlan_out_present && key.vlan_present) {
  508. + __vlan_hwaccel_clear_tag(skb);
  509. + } else if (flow->vlan_out_present) {
  510. + if (skb_vlan_tag_present(skb) &&
  511. + skb->vlan_proto != p->br->vlan_proto) {
  512. + /* Protocol-mismatch, empty out vlan_tci for new tag */
  513. + skb_push(skb, ETH_HLEN);
  514. + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
  515. + skb_vlan_tag_get(skb));
  516. + if (unlikely(!skb))
  517. + goto out;
  518. +
  519. + skb_pull(skb, ETH_HLEN);
  520. + skb_reset_mac_len(skb);
  521. + }
  522. +
  523. + __vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
  524. + flow->vlan_out);
  525. + }
  526. +#endif
  527. +
  528. + skb->dev = dst->dev;
  529. + skb_push(skb, ETH_HLEN);
  530. +
  531. + if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
  532. + kfree_skb(skb);
  533. + goto out;
  534. + }
  535. +
  536. + if (now - flow->used >= HZ) {
  537. + flow->used = now;
  538. + br_offload_flow_refresh_time(flow);
  539. + }
  540. +
  541. + skb_forward_csum(skb);
  542. + dev_queue_xmit(skb);
  543. +
  544. +out:
  545. + rcu_read_unlock();
  546. + return ret;
  547. +}
  548. +
  549. +static void
  550. +br_offload_check_gc(struct net_bridge *br)
  551. +{
  552. + struct net_bridge_port *p;
  553. +
  554. + spin_lock_bh(&br->lock);
  555. + list_for_each_entry(p, &br->port_list, list)
  556. + if (br_offload_need_gc(p))
  557. + queue_work(system_long_wq, &p->offload.gc_work);
  558. + spin_unlock_bh(&br->lock);
  559. +}
  560. +
  561. +
  562. +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
  563. + struct netlink_ext_ack *extack)
  564. +{
  565. + br->offload_cache_size = val;
  566. + br_offload_check_gc(br);
  567. +
  568. + return 0;
  569. +}
  570. +
  571. +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
  572. + struct netlink_ext_ack *extack)
  573. +{
  574. + br->offload_cache_reserved = val;
  575. + br_offload_check_gc(br);
  576. +
  577. + return 0;
  578. +}
  579. +
  580. +int __init br_offload_init(void)
  581. +{
  582. + offload_cache = kmem_cache_create("bridge_offload_cache",
  583. + sizeof(struct bridge_flow),
  584. + 0, SLAB_HWCACHE_ALIGN, NULL);
  585. + if (!offload_cache)
  586. + return -ENOMEM;
  587. +
  588. + return 0;
  589. +}
  590. +
  591. +void br_offload_fini(void)
  592. +{
  593. + kmem_cache_destroy(offload_cache);
  594. +}
  595. --- a/net/bridge/br_private.h
  596. +++ b/net/bridge/br_private.h
  597. @@ -268,7 +268,13 @@ struct net_bridge_fdb_entry {
  598. unsigned long updated ____cacheline_aligned_in_smp;
  599. unsigned long used;
  600. - struct rcu_head rcu;
  601. + union {
  602. + struct {
  603. + struct hlist_head offload_in;
  604. + struct hlist_head offload_out;
  605. + };
  606. + struct rcu_head rcu;
  607. + };
  608. };
  609. #define MDB_PG_FLAGS_PERMANENT BIT(0)
  610. @@ -343,6 +349,12 @@ struct net_bridge_mdb_entry {
  611. struct rcu_head rcu;
  612. };
  613. +struct net_bridge_port_offload {
  614. + struct rhashtable rht;
  615. + struct work_struct gc_work;
  616. + bool enabled;
  617. +};
  618. +
  619. struct net_bridge_port {
  620. struct net_bridge *br;
  621. struct net_device *dev;
  622. @@ -403,6 +415,7 @@ struct net_bridge_port {
  623. u16 backup_redirected_cnt;
  624. struct bridge_stp_xstats stp_xstats;
  625. + struct net_bridge_port_offload offload;
  626. };
  627. #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
  628. @@ -519,6 +532,9 @@ struct net_bridge {
  629. struct kobject *ifobj;
  630. u32 auto_cnt;
  631. + u32 offload_cache_size;
  632. + u32 offload_cache_reserved;
  633. +
  634. #ifdef CONFIG_NET_SWITCHDEV
  635. /* Counter used to make sure that hardware domains get unique
  636. * identifiers in case a bridge spans multiple switchdev instances.
  637. @@ -553,6 +569,10 @@ struct br_input_skb_cb {
  638. #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
  639. u8 br_netfilter_broute:1;
  640. #endif
  641. + u8 offload:1;
  642. + u8 input_vlan_present:1;
  643. + u16 input_vlan_tag;
  644. + int input_ifindex;
  645. #ifdef CONFIG_NET_SWITCHDEV
  646. /* Set if TX data plane offloading is used towards at least one
  647. --- /dev/null
  648. +++ b/net/bridge/br_private_offload.h
  649. @@ -0,0 +1,23 @@
  650. +#ifndef __BR_OFFLOAD_H
  651. +#define __BR_OFFLOAD_H
  652. +
  653. +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
  654. +void br_offload_output(struct sk_buff *skb);
  655. +void br_offload_port_state(struct net_bridge_port *p);
  656. +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
  657. +int br_offload_init(void);
  658. +void br_offload_fini(void);
  659. +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
  660. + struct netlink_ext_ack *extack);
  661. +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
  662. + struct netlink_ext_ack *extack);
  663. +
  664. +static inline void br_offload_skb_disable(struct sk_buff *skb)
  665. +{
  666. + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
  667. +
  668. + if (cb->offload)
  669. + cb->offload = 0;
  670. +}
  671. +
  672. +#endif
  673. --- a/net/bridge/br_stp.c
  674. +++ b/net/bridge/br_stp.c
  675. @@ -12,6 +12,7 @@
  676. #include "br_private.h"
  677. #include "br_private_stp.h"
  678. +#include "br_private_offload.h"
  679. /* since time values in bpdu are in jiffies and then scaled (1/256)
  680. * before sending, make sure that is at least one STP tick.
  681. @@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
  682. (unsigned int) p->port_no, p->dev->name,
  683. br_port_state_names[p->state]);
  684. + br_offload_port_state(p);
  685. +
  686. if (p->br->stp_enabled == BR_KERNEL_STP) {
  687. switch (p->state) {
  688. case BR_STATE_BLOCKING:
  689. --- a/net/bridge/br_sysfs_br.c
  690. +++ b/net/bridge/br_sysfs_br.c
  691. @@ -18,6 +18,7 @@
  692. #include <linux/sched/signal.h>
  693. #include "br_private.h"
  694. +#include "br_private_offload.h"
  695. /* IMPORTANT: new bridge options must be added with netlink support only
  696. * please do not add new sysfs entries
  697. @@ -930,6 +931,38 @@ static ssize_t vlan_stats_per_port_store
  698. static DEVICE_ATTR_RW(vlan_stats_per_port);
  699. #endif
  700. +static ssize_t offload_cache_size_show(struct device *d,
  701. + struct device_attribute *attr,
  702. + char *buf)
  703. +{
  704. + struct net_bridge *br = to_bridge(d);
  705. + return sprintf(buf, "%u\n", br->offload_cache_size);
  706. +}
  707. +
  708. +static ssize_t offload_cache_size_store(struct device *d,
  709. + struct device_attribute *attr,
  710. + const char *buf, size_t len)
  711. +{
  712. + return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
  713. +}
  714. +static DEVICE_ATTR_RW(offload_cache_size);
  715. +
  716. +static ssize_t offload_cache_reserved_show(struct device *d,
  717. + struct device_attribute *attr,
  718. + char *buf)
  719. +{
  720. + struct net_bridge *br = to_bridge(d);
  721. + return sprintf(buf, "%u\n", br->offload_cache_reserved);
  722. +}
  723. +
  724. +static ssize_t offload_cache_reserved_store(struct device *d,
  725. + struct device_attribute *attr,
  726. + const char *buf, size_t len)
  727. +{
  728. + return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
  729. +}
  730. +static DEVICE_ATTR_RW(offload_cache_reserved);
  731. +
  732. static struct attribute *bridge_attrs[] = {
  733. &dev_attr_forward_delay.attr,
  734. &dev_attr_hello_time.attr,
  735. @@ -984,6 +1017,8 @@ static struct attribute *bridge_attrs[]
  736. &dev_attr_vlan_stats_enabled.attr,
  737. &dev_attr_vlan_stats_per_port.attr,
  738. #endif
  739. + &dev_attr_offload_cache_size.attr,
  740. + &dev_attr_offload_cache_reserved.attr,
  741. NULL
  742. };
  743. --- a/net/bridge/br_sysfs_if.c
  744. +++ b/net/bridge/br_sysfs_if.c
  745. @@ -241,6 +241,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
  746. BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
  747. BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
  748. BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
  749. +BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
  750. #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
  751. static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
  752. @@ -295,6 +296,7 @@ static const struct brport_attribute *br
  753. &brport_attr_isolated,
  754. &brport_attr_bpdu_filter,
  755. &brport_attr_backup_port,
  756. + &brport_attr_offload,
  757. NULL
  758. };
  759. --- a/net/bridge/br_vlan_tunnel.c
  760. +++ b/net/bridge/br_vlan_tunnel.c
  761. @@ -15,6 +15,7 @@
  762. #include "br_private.h"
  763. #include "br_private_tunnel.h"
  764. +#include "br_private_offload.h"
  765. static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
  766. const void *ptr)
  767. @@ -180,6 +181,7 @@ void br_handle_ingress_vlan_tunnel(struc
  768. skb_dst_drop(skb);
  769. __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
  770. + br_offload_skb_disable(skb);
  771. }
  772. int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
  773. @@ -201,6 +203,7 @@ int br_handle_egress_vlan_tunnel(struct
  774. if (err)
  775. return err;
  776. + br_offload_skb_disable(skb);
  777. tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
  778. if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
  779. skb_dst_set(skb, &tunnel_dst->dst);