600-bridge_offload.patch 20 KB

Adds a software fast-path flow cache ("offload") to the Linux bridge: unicast flows are learned on egress and matched on ingress, bypassing the normal bridge input path. Cache limits are tunable via sysfs (offload_cache_size / offload_cache_reserved); the per-port "offload" flag enables the feature.
  1. --- a/include/linux/if_bridge.h
  2. +++ b/include/linux/if_bridge.h
  3. @@ -57,6 +57,7 @@ struct br_ip_list {
  4. #define BR_MRP_LOST_CONT BIT(18)
  5. #define BR_MRP_LOST_IN_CONT BIT(19)
  6. #define BR_BPDU_FILTER BIT(20)
  7. +#define BR_OFFLOAD BIT(21)
  8. #define BR_DEFAULT_AGEING_TIME (300 * HZ)
  9. --- a/net/bridge/Makefile
  10. +++ b/net/bridge/Makefile
  11. @@ -5,7 +5,7 @@
  12. obj-$(CONFIG_BRIDGE) += bridge.o
  13. -bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
  14. +bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
  15. br_ioctl.o br_stp.o br_stp_bpdu.o \
  16. br_stp_if.o br_stp_timer.o br_netlink.o \
  17. br_netlink_tunnel.o br_arp_nd_proxy.o
  18. --- a/net/bridge/br.c
  19. +++ b/net/bridge/br.c
  20. @@ -18,6 +18,7 @@
  21. #include <net/switchdev.h>
  22. #include "br_private.h"
  23. +#include "br_private_offload.h"
  24. /*
  25. * Handle changes in state of network devices enslaved to a bridge.
  26. @@ -332,6 +333,10 @@ static int __init br_init(void)
  27. if (err)
  28. goto err_out;
  29. + err = br_offload_init();
  30. + if (err)
  31. + goto err_out0;
  32. +
  33. err = register_pernet_subsys(&br_net_ops);
  34. if (err)
  35. goto err_out1;
  36. @@ -375,6 +380,8 @@ err_out3:
  37. err_out2:
  38. unregister_pernet_subsys(&br_net_ops);
  39. err_out1:
  40. + br_offload_fini();
  41. +err_out0:
  42. br_fdb_fini();
  43. err_out:
  44. stp_proto_unregister(&br_stp_proto);
  45. @@ -396,6 +403,7 @@ static void __exit br_deinit(void)
  46. #if IS_ENABLED(CONFIG_ATM_LANE)
  47. br_fdb_test_addr_hook = NULL;
  48. #endif
  49. + br_offload_fini();
  50. br_fdb_fini();
  51. }
  52. --- a/net/bridge/br_device.c
  53. +++ b/net/bridge/br_device.c
  54. @@ -529,6 +529,8 @@ void br_dev_setup(struct net_device *dev
  55. br->bridge_hello_time = br->hello_time = 2 * HZ;
  56. br->bridge_forward_delay = br->forward_delay = 15 * HZ;
  57. br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
  58. + br->offload_cache_size = 128;
  59. + br->offload_cache_reserved = 8;
  60. dev->max_mtu = ETH_MAX_MTU;
  61. br_netfilter_rtable_init(br);
  62. --- a/net/bridge/br_fdb.c
  63. +++ b/net/bridge/br_fdb.c
  64. @@ -23,6 +23,7 @@
  65. #include <net/switchdev.h>
  66. #include <trace/events/bridge.h>
  67. #include "br_private.h"
  68. +#include "br_private_offload.h"
  69. static const struct rhashtable_params br_fdb_rht_params = {
  70. .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
  71. @@ -513,6 +514,8 @@ static struct net_bridge_fdb_entry *fdb_
  72. fdb->key.vlan_id = vid;
  73. fdb->flags = flags;
  74. fdb->updated = fdb->used = jiffies;
  75. + INIT_HLIST_HEAD(&fdb->offload_in);
  76. + INIT_HLIST_HEAD(&fdb->offload_out);
  77. if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
  78. &fdb->rhnode,
  79. br_fdb_rht_params)) {
  80. @@ -734,6 +737,8 @@ static void fdb_notify(struct net_bridge
  81. struct sk_buff *skb;
  82. int err = -ENOBUFS;
  83. + br_offload_fdb_update(fdb);
  84. +
  85. if (swdev_notify)
  86. br_switchdev_fdb_notify(br, fdb, type);
  87. --- a/net/bridge/br_forward.c
  88. +++ b/net/bridge/br_forward.c
  89. @@ -16,6 +16,7 @@
  90. #include <linux/if_vlan.h>
  91. #include <linux/netfilter_bridge.h>
  92. #include "br_private.h"
  93. +#include "br_private_offload.h"
  94. /* Don't forward packets to originating port or forwarding disabled */
  95. static inline int should_deliver(const struct net_bridge_port *p,
  96. @@ -32,6 +33,8 @@ static inline int should_deliver(const s
  97. int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
  98. {
  99. + br_offload_output(skb);
  100. +
  101. skb_push(skb, ETH_HLEN);
  102. if (!is_skb_forwardable(skb->dev, skb))
  103. goto drop;
  104. --- a/net/bridge/br_if.c
  105. +++ b/net/bridge/br_if.c
  106. @@ -25,6 +25,7 @@
  107. #include <net/net_namespace.h>
  108. #include "br_private.h"
  109. +#include "br_private_offload.h"
  110. /*
  111. * Determine initial path cost based on speed.
  112. @@ -427,7 +428,7 @@ static struct net_bridge_port *new_nbp(s
  113. p->path_cost = port_cost(dev);
  114. p->priority = 0x8000 >> BR_PORT_BITS;
  115. p->port_no = index;
  116. - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
  117. + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
  118. br_init_port(p);
  119. br_set_state(p, BR_STATE_DISABLED);
  120. br_stp_port_timer_init(p);
  121. @@ -777,6 +778,9 @@ void br_port_flags_change(struct net_bri
  122. if (mask & BR_NEIGH_SUPPRESS)
  123. br_recalculate_neigh_suppress_enabled(br);
  124. +
  125. + if (mask & BR_OFFLOAD)
  126. + br_offload_port_state(p);
  127. }
  128. bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
  129. --- a/net/bridge/br_input.c
  130. +++ b/net/bridge/br_input.c
  131. @@ -22,6 +22,7 @@
  132. #include <linux/rculist.h>
  133. #include "br_private.h"
  134. #include "br_private_tunnel.h"
  135. +#include "br_private_offload.h"
  136. static int
  137. br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
  138. @@ -169,6 +170,7 @@ int br_handle_frame_finish(struct net *n
  139. dst->used = now;
  140. br_forward(dst->dst, skb, local_rcv, false);
  141. } else {
  142. + br_offload_skb_disable(skb);
  143. if (!mcast_hit)
  144. br_flood(br, skb, pkt_type, local_rcv, false);
  145. else
  146. @@ -287,6 +289,9 @@ static rx_handler_result_t br_handle_fra
  147. memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
  148. p = br_port_get_rcu(skb->dev);
  149. + if (br_offload_input(p, skb))
  150. + return RX_HANDLER_CONSUMED;
  151. +
  152. if (p->flags & BR_VLAN_TUNNEL) {
  153. if (br_handle_ingress_vlan_tunnel(skb, p,
  154. nbp_vlan_group_rcu(p)))
  155. --- /dev/null
  156. +++ b/net/bridge/br_offload.c
  157. @@ -0,0 +1,436 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include "br_private.h"
#include "br_private_offload.h"

/* Serializes all flow-cache insertion, removal and teardown across ports. */
static DEFINE_SPINLOCK(offload_lock);

/*
 * Lookup key for an offloaded flow: destination + source MAC, plus the
 * ingress VLAN tag when VLAN filtering is compiled in.  dest/src must
 * remain the first two, adjacent members: br_offload_prepare_key()
 * fills both with a single 2 * ETH_ALEN memcpy from the Ethernet header.
 */
struct bridge_flow_key {
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	u16 vlan_tag;
	bool vlan_present;
#endif
};

/*
 * One cached unicast forwarding decision.  Hashed in the ingress port's
 * rhashtable and linked into both the source and destination fdb entry
 * so that fdb updates can invalidate it.
 */
struct bridge_flow {
	struct net_bridge_port *port;	/* ingress port owning this entry */
	struct rhash_head node;
	struct bridge_flow_key key;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	bool vlan_out_present;		/* retag on egress? */
	u16 vlan_out;
#endif

	unsigned long used;		/* last-use jiffies; 0 marks a dead entry */
	struct net_bridge_fdb_entry *fdb_in, *fdb_out;
	struct hlist_node fdb_list_in, fdb_list_out;

	struct rcu_head rcu;
};

static const struct rhashtable_params flow_params = {
	.automatic_shrinking = true,
	.head_offset = offsetof(struct bridge_flow, node),
	.key_len = sizeof(struct bridge_flow_key),
	.key_offset = offsetof(struct bridge_flow, key),
};

/* Slab cache for struct bridge_flow; created in br_offload_init(). */
static struct kmem_cache *offload_cache __read_mostly;
  199. +
  200. +static void
  201. +flow_rcu_free(struct rcu_head *head)
  202. +{
  203. + struct bridge_flow *flow;
  204. +
  205. + flow = container_of(head, struct bridge_flow, rcu);
  206. + kmem_cache_free(offload_cache, flow);
  207. +}
  208. +
/*
 * Unlink a flow from both fdb lists and schedule its RCU free.
 * Caller holds offload_lock; the entry must already be out of (or about
 * to be removed from) its port's rhashtable.
 */
static void
__br_offload_flow_free(struct bridge_flow *flow)
{
	/* used == 0 tells the lockless GC walk this entry is dead */
	flow->used = 0;
	hlist_del(&flow->fdb_list_in);
	hlist_del(&flow->fdb_list_out);

	call_rcu(&flow->rcu, flow_rcu_free);
}
  218. +
/*
 * Remove a flow from its port's hashtable and free it.  Caller holds
 * offload_lock.  A failed rhashtable_remove_fast() means another path
 * already unhashed the entry; bail out to avoid a double free.
 */
static void
br_offload_flow_free(struct bridge_flow *flow)
{
	if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
				   flow_params) != 0)
		return;

	__br_offload_flow_free(flow);
}
  228. +
  229. +static bool
  230. +br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
  231. + struct net_bridge_fdb_entry *fdb)
  232. +{
  233. + if (!time_after(flow->used, fdb->updated))
  234. + return false;
  235. +
  236. + fdb->updated = flow->used;
  237. +
  238. + return true;
  239. +}
  240. +
  241. +
/* Refresh both fdb entries backing a flow (source and destination). */
static void
br_offload_flow_refresh_time(struct bridge_flow *flow)
{
	br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
	br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
}
  248. +
  249. +static void
  250. +br_offload_destroy_cb(void *ptr, void *arg)
  251. +{
  252. + struct bridge_flow *flow = ptr;
  253. +
  254. + __br_offload_flow_free(flow);
  255. +}
  256. +
  257. +static bool
  258. +br_offload_need_gc(struct net_bridge_port *p)
  259. +{
  260. + return (atomic_read(&p->offload.rht.nelems) +
  261. + p->br->offload_cache_reserved) >= p->br->offload_cache_size;
  262. +}
  263. +
/*
 * Deferred garbage collection for one port's flow cache: find and evict
 * the least-recently-used flow while the cache is within the reserved
 * headroom of its size limit; requeue itself if still over the limit.
 */
static void
br_offload_gc_work(struct work_struct *work)
{
	struct rhashtable_iter hti;
	struct net_bridge_port *p;
	struct bridge_flow *gc_flow = NULL;
	struct bridge_flow *flow;
	unsigned long gc_used;

	p = container_of(work, struct net_bridge_port, offload.gc_work);

	if (!br_offload_need_gc(p))
		return;

	/* lockless scan for the entry with the oldest 'used' timestamp */
	rhashtable_walk_enter(&p->offload.rht, &hti);
	rhashtable_walk_start(&hti);
	while ((flow = rhashtable_walk_next(&hti)) != NULL) {
		unsigned long used;

		if (IS_ERR(flow))
			continue;

		used = READ_ONCE(flow->used);
		if (!used)	/* already freed via __br_offload_flow_free() */
			continue;

		if (gc_flow && !time_before(used, gc_used))
			continue;

		gc_flow = flow;
		gc_used = used;
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	if (!gc_flow)
		return;

	/*
	 * Re-validate under the lock: the candidate may have been freed or
	 * refreshed since the walk; only evict if its timestamp is unchanged.
	 */
	spin_lock_bh(&offload_lock);
	if (br_offload_need_gc(p) && gc_flow &&
	    gc_flow->used == gc_used)
		br_offload_flow_free(gc_flow);
	if (p->offload.enabled && br_offload_need_gc(p))
		queue_work(system_long_wq, work);
	spin_unlock_bh(&offload_lock);

}
  311. +
/*
 * (Re)evaluate whether offloading is active on a port: it must be in
 * BR_STATE_FORWARDING and carry the BR_OFFLOAD flag.  Called on STP
 * state transitions and port flag changes; sets up or tears down the
 * port's flow table accordingly.
 */
void br_offload_port_state(struct net_bridge_port *p)
{
	struct net_bridge_port_offload *o = &p->offload;
	bool enabled = true;
	bool flush = false;

	if (p->state != BR_STATE_FORWARDING ||
	    !(p->flags & BR_OFFLOAD))
		enabled = false;

	spin_lock_bh(&offload_lock);
	if (o->enabled == enabled)
		goto out;

	if (enabled) {
		/* gc_work.func doubles as a "work already initialized" marker */
		if (!o->gc_work.func)
			INIT_WORK(&o->gc_work, br_offload_gc_work);
		/* NOTE(review): rhashtable_init() allocates and its return
		 * value is ignored, both while holding a BH spinlock —
		 * confirm this is safe in this context. */
		rhashtable_init(&o->rht, &flow_params);
	} else {
		flush = true;
		rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
	}

	o->enabled = enabled;

out:
	spin_unlock_bh(&offload_lock);

	/* wait out a GC pass that may still reference the destroyed table */
	if (flush)
		flush_work(&o->gc_work);
}
  343. +
/*
 * An fdb entry changed (learned on a new port, deleted, ...): drop every
 * cached flow that references it as source or destination.  _safe
 * iteration is required because br_offload_flow_free() unlinks nodes.
 */
void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
{
	struct bridge_flow *f;
	struct hlist_node *tmp;

	spin_lock_bh(&offload_lock);

	hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
		br_offload_flow_free(f);

	hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
		br_offload_flow_free(f);

	spin_unlock_bh(&offload_lock);
}
  359. +
/*
 * Build the flow lookup key from the skb's Ethernet header.  Relies on
 * dest/src being the first two adjacent members of the key struct.  The
 * VLAN part is only filled in when the bridge has VLAN filtering
 * enabled and the tag matches the bridge's VLAN protocol.
 */
static void
br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
		       struct sk_buff *skb)
{
	memset(key, 0, sizeof(*key));
	memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
		return;

	if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
		return;

	key->vlan_present = true;
	key->vlan_tag = skb_vlan_tag_get_id(skb);
#endif
}
  377. +
  378. +void br_offload_output(struct sk_buff *skb)
  379. +{
  380. + struct net_bridge_port_offload *o;
  381. + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
  382. + struct net_bridge_port *p, *inp;
  383. + struct net_device *dev;
  384. + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
  385. + struct net_bridge_vlan_group *vg;
  386. + struct bridge_flow_key key;
  387. + struct bridge_flow *flow;
  388. + u16 vlan;
  389. +
  390. + if (!cb->offload)
  391. + return;
  392. +
  393. + rcu_read_lock();
  394. +
  395. + p = br_port_get_rcu(skb->dev);
  396. + if (!p)
  397. + goto out;
  398. +
  399. + o = &p->offload;
  400. + if (!o->enabled)
  401. + goto out;
  402. +
  403. + if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
  404. + goto out;
  405. +
  406. + dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
  407. + if (!dev)
  408. + goto out;
  409. +
  410. + inp = br_port_get_rcu(dev);
  411. + if (!inp)
  412. + goto out;
  413. +
  414. + vg = nbp_vlan_group_rcu(inp);
  415. + vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
  416. + fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
  417. + if (!fdb_in || !fdb_in->dst)
  418. + goto out;
  419. +
  420. + vg = nbp_vlan_group_rcu(p);
  421. + vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
  422. + fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
  423. + if (!fdb_out || !fdb_out->dst)
  424. + goto out;
  425. +
  426. + br_offload_prepare_key(p, &key, skb);
  427. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  428. + key.vlan_present = cb->input_vlan_present;
  429. + key.vlan_tag = cb->input_vlan_tag;
  430. +#endif
  431. +
  432. + flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
  433. + flow->port = inp;
  434. + memcpy(&flow->key, &key, sizeof(key));
  435. +
  436. +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
  437. + flow->vlan_out_present = skb_vlan_tag_present(skb);
  438. + flow->vlan_out = skb_vlan_tag_get(skb);
  439. +#endif
  440. +
  441. + flow->fdb_in = fdb_in;
  442. + flow->fdb_out = fdb_out;
  443. + flow->used = jiffies;
  444. +
  445. + spin_lock_bh(&offload_lock);
  446. + if (!o->enabled ||
  447. + atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
  448. + rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
  449. + kmem_cache_free(offload_cache, flow);
  450. + goto out_unlock;
  451. + }
  452. +
  453. + hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
  454. + hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
  455. +
  456. + if (br_offload_need_gc(p))
  457. + queue_work(system_long_wq, &p->offload.gc_work);
  458. +
  459. +out_unlock:
  460. + spin_unlock_bh(&offload_lock);
  461. +
  462. +out:
  463. + rcu_read_unlock();
  464. +}
  465. +
/*
 * Bridge ingress fast path.  Returns true when the packet was fully
 * handled here (forwarded or dropped) and the caller must stop
 * processing it.  On a cache miss the skb is marked as an offload
 * candidate so br_offload_output() can learn the flow on egress.
 */
bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	struct net_bridge_port_offload *o = &p->offload;
	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
	struct bridge_flow_key key;
	struct net_bridge_port *dst;
	struct bridge_flow *flow;
	unsigned long now = jiffies;
	bool ret = false;

	if (skb->len < sizeof(key))
		return false;

	if (!o->enabled)
		return false;

	/* only unicast flows are cached */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	br_offload_prepare_key(p, &key, skb);

	rcu_read_lock();
	flow = rhashtable_lookup(&o->rht, &key, flow_params);
	if (!flow) {
		/* miss: record ingress context for flow learning on egress */
		cb->offload = 1;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
		cb->input_vlan_present = key.vlan_present != 0;
		cb->input_vlan_tag = key.vlan_tag;
#endif
		cb->input_ifindex = p->dev->ifindex;
		goto out;
	}

	/* stale entry: the source address has moved to a different port */
	if (flow->fdb_in->dst != p)
		goto out;

	dst = flow->fdb_out->dst;
	if (!dst)
		goto out;

	/* from here on the skb is consumed either way (xmit or free) */
	ret = true;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (!flow->vlan_out_present && key.vlan_present) {
		__vlan_hwaccel_clear_tag(skb);
	} else if (flow->vlan_out_present) {
		if (skb_vlan_tag_present(skb) &&
		    skb->vlan_proto != p->br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			/* vlan_insert_tag_set_proto() frees the skb on error */
			if (unlikely(!skb))
				goto out;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
		}

		__vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
				       flow->vlan_out);
	}
#endif

	skb->dev = dst->dev;
	skb_push(skb, ETH_HLEN);

	if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
		goto out;
	}

	/* refresh at most once per second to limit fdb timestamp writes */
	if (now - flow->used >= HZ) {
		flow->used = now;
		br_offload_flow_refresh_time(flow);
	}

	skb_forward_csum(skb);
	dev_queue_xmit(skb);

out:
	rcu_read_unlock();
	return ret;
}
  549. +
  550. +static void
  551. +br_offload_check_gc(struct net_bridge *br)
  552. +{
  553. + struct net_bridge_port *p;
  554. +
  555. + spin_lock_bh(&br->lock);
  556. + list_for_each_entry(p, &br->port_list, list)
  557. + if (br_offload_need_gc(p))
  558. + queue_work(system_long_wq, &p->offload.gc_work);
  559. + spin_unlock_bh(&br->lock);
  560. +}
  561. +
  562. +
  563. +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val)
  564. +{
  565. + br->offload_cache_size = val;
  566. + br_offload_check_gc(br);
  567. +
  568. + return 0;
  569. +}
  570. +
  571. +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val)
  572. +{
  573. + br->offload_cache_reserved = val;
  574. + br_offload_check_gc(br);
  575. +
  576. + return 0;
  577. +}
  578. +
  579. +int __init br_offload_init(void)
  580. +{
  581. + offload_cache = kmem_cache_create("bridge_offload_cache",
  582. + sizeof(struct bridge_flow),
  583. + 0, SLAB_HWCACHE_ALIGN, NULL);
  584. + if (!offload_cache)
  585. + return -ENOMEM;
  586. +
  587. + return 0;
  588. +}
  589. +
  590. +void br_offload_fini(void)
  591. +{
  592. + kmem_cache_destroy(offload_cache);
  593. +}
  594. --- a/net/bridge/br_private.h
  595. +++ b/net/bridge/br_private.h
  596. @@ -207,7 +207,13 @@ struct net_bridge_fdb_entry {
  597. unsigned long updated ____cacheline_aligned_in_smp;
  598. unsigned long used;
  599. - struct rcu_head rcu;
  600. + union {
  601. + struct {
  602. + struct hlist_head offload_in;
  603. + struct hlist_head offload_out;
  604. + };
  605. + struct rcu_head rcu;
  606. + };
  607. };
  608. #define MDB_PG_FLAGS_PERMANENT BIT(0)
  609. @@ -280,6 +286,12 @@ struct net_bridge_mdb_entry {
  610. struct rcu_head rcu;
  611. };
  612. +struct net_bridge_port_offload {
  613. + struct rhashtable rht;
  614. + struct work_struct gc_work;
  615. + bool enabled;
  616. +};
  617. +
  618. struct net_bridge_port {
  619. struct net_bridge *br;
  620. struct net_device *dev;
  621. @@ -337,6 +349,7 @@ struct net_bridge_port {
  622. u16 backup_redirected_cnt;
  623. struct bridge_stp_xstats stp_xstats;
  624. + struct net_bridge_port_offload offload;
  625. };
  626. #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
  627. @@ -475,6 +488,9 @@ struct net_bridge {
  628. struct kobject *ifobj;
  629. u32 auto_cnt;
  630. + u32 offload_cache_size;
  631. + u32 offload_cache_reserved;
  632. +
  633. #ifdef CONFIG_NET_SWITCHDEV
  634. int offload_fwd_mark;
  635. #endif
  636. @@ -501,6 +517,10 @@ struct br_input_skb_cb {
  637. #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
  638. u8 br_netfilter_broute:1;
  639. #endif
  640. + u8 offload:1;
  641. + u8 input_vlan_present:1;
  642. + u16 input_vlan_tag;
  643. + int input_ifindex;
  644. #ifdef CONFIG_NET_SWITCHDEV
  645. int offload_fwd_mark;
  646. --- /dev/null
  647. +++ b/net/bridge/br_private_offload.h
  648. @@ -0,0 +1,21 @@
#ifndef __BR_OFFLOAD_H
#define __BR_OFFLOAD_H

/* Bridge software fast-path ("offload") interface; see br_offload.c. */

/* Ingress fast path; true means the skb was consumed here. */
bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
/* Egress hook that learns new flows from offload-candidate skbs. */
void br_offload_output(struct sk_buff *skb);
/* Re-evaluate per-port offload enablement after state/flag changes. */
void br_offload_port_state(struct net_bridge_port *p);
/* Invalidate cached flows referencing a changed fdb entry. */
void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
int br_offload_init(void);
void br_offload_fini(void);
/* sysfs setters for the flow-cache size and GC headroom. */
int br_offload_set_cache_size(struct net_bridge *br, unsigned long val);
int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val);

/* Clear the offload-candidate mark when the packet takes a path
 * (flooding, vlan tunnel, ...) that must not be learned as a flow. */
static inline void br_offload_skb_disable(struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;

	if (cb->offload)
		cb->offload = 0;
}

#endif
  670. --- a/net/bridge/br_stp.c
  671. +++ b/net/bridge/br_stp.c
  672. @@ -12,6 +12,7 @@
  673. #include "br_private.h"
  674. #include "br_private_stp.h"
  675. +#include "br_private_offload.h"
  676. /* since time values in bpdu are in jiffies and then scaled (1/256)
  677. * before sending, make sure that is at least one STP tick.
  678. @@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
  679. (unsigned int) p->port_no, p->dev->name,
  680. br_port_state_names[p->state]);
  681. + br_offload_port_state(p);
  682. +
  683. if (p->br->stp_enabled == BR_KERNEL_STP) {
  684. switch (p->state) {
  685. case BR_STATE_BLOCKING:
  686. --- a/net/bridge/br_sysfs_br.c
  687. +++ b/net/bridge/br_sysfs_br.c
  688. @@ -18,6 +18,7 @@
  689. #include <linux/sched/signal.h>
  690. #include "br_private.h"
  691. +#include "br_private_offload.h"
  692. #define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
  693. @@ -842,6 +843,38 @@ static ssize_t vlan_stats_per_port_store
  694. static DEVICE_ATTR_RW(vlan_stats_per_port);
  695. #endif
  696. +static ssize_t offload_cache_size_show(struct device *d,
  697. + struct device_attribute *attr,
  698. + char *buf)
  699. +{
  700. + struct net_bridge *br = to_bridge(d);
  701. + return sprintf(buf, "%u\n", br->offload_cache_size);
  702. +}
  703. +
  704. +static ssize_t offload_cache_size_store(struct device *d,
  705. + struct device_attribute *attr,
  706. + const char *buf, size_t len)
  707. +{
  708. + return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
  709. +}
  710. +static DEVICE_ATTR_RW(offload_cache_size);
  711. +
  712. +static ssize_t offload_cache_reserved_show(struct device *d,
  713. + struct device_attribute *attr,
  714. + char *buf)
  715. +{
  716. + struct net_bridge *br = to_bridge(d);
  717. + return sprintf(buf, "%u\n", br->offload_cache_reserved);
  718. +}
  719. +
  720. +static ssize_t offload_cache_reserved_store(struct device *d,
  721. + struct device_attribute *attr,
  722. + const char *buf, size_t len)
  723. +{
  724. + return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
  725. +}
  726. +static DEVICE_ATTR_RW(offload_cache_reserved);
  727. +
  728. static struct attribute *bridge_attrs[] = {
  729. &dev_attr_forward_delay.attr,
  730. &dev_attr_hello_time.attr,
  731. @@ -896,6 +929,8 @@ static struct attribute *bridge_attrs[]
  732. &dev_attr_vlan_stats_enabled.attr,
  733. &dev_attr_vlan_stats_per_port.attr,
  734. #endif
  735. + &dev_attr_offload_cache_size.attr,
  736. + &dev_attr_offload_cache_reserved.attr,
  737. NULL
  738. };
  739. --- a/net/bridge/br_sysfs_if.c
  740. +++ b/net/bridge/br_sysfs_if.c
  741. @@ -234,6 +234,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
  742. BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
  743. BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
  744. BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
  745. +BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
  746. #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
  747. static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
  748. @@ -288,6 +289,7 @@ static const struct brport_attribute *br
  749. &brport_attr_isolated,
  750. &brport_attr_bpdu_filter,
  751. &brport_attr_backup_port,
  752. + &brport_attr_offload,
  753. NULL
  754. };
  755. --- a/net/bridge/br_vlan_tunnel.c
  756. +++ b/net/bridge/br_vlan_tunnel.c
  757. @@ -15,6 +15,7 @@
  758. #include "br_private.h"
  759. #include "br_private_tunnel.h"
  760. +#include "br_private_offload.h"
  761. static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
  762. const void *ptr)
  763. @@ -180,6 +181,7 @@ int br_handle_ingress_vlan_tunnel(struct
  764. skb_dst_drop(skb);
  765. __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
  766. + br_offload_skb_disable(skb);
  767. return 0;
  768. }
  769. @@ -203,6 +205,7 @@ int br_handle_egress_vlan_tunnel(struct
  770. if (err)
  771. return err;
  772. + br_offload_skb_disable(skb);
  773. tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
  774. if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
  775. skb_dst_set(skb, &tunnel_dst->dst);