680-net-add-TCP-fraglist-GRO-support.patch

From: Felix Fietkau <[email protected]>
Date: Tue, 23 Apr 2024 11:23:03 +0200
Subject: [PATCH] net: add TCP fraglist GRO support

When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.

One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.
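
Fraglist GRO keeps the received skbs chained on the head skb's frag_list
instead of merging their payload, so the forwarding path can later split
the aggregate back into the original frames cheaply. The path is only
taken when the receiving device has NETIF_F_GRO_FRAGLIST enabled, which
can be toggled from userspace, e.g. (interface name illustrative):

    ethtool -K eth0 rx-gro-list on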

Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.

rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on:  770 Mbit/s, CPU 40% idle
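
For reference, a 1Gbps cake shaper of the kind used in this test can be
set up roughly as follows (device name illustrative; the exact options
used in the test are not part of this patch):

    tc qdisc replace dev eth1 root cake bandwidth 1gbit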

Signed-off-by: Felix Fietkau <[email protected]>
---
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -439,6 +439,7 @@ static inline __wsum ip6_gro_compute_pse
 }
 
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
 
 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
 static inline void gro_normal_list(struct napi_struct *napi)
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2084,7 +2084,10 @@ void tcp_v4_destroy_sock(struct sock *sk
 
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features);
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+				struct tcphdr *th);
 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -233,6 +233,33 @@ done:
 	return 0;
 }
 
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+	if (unlikely(p->len + skb->len >= 65536))
+		return -E2BIG;
+
+	if (NAPI_GRO_CB(p)->last == p)
+		skb_shinfo(p)->frag_list = skb;
+	else
+		NAPI_GRO_CB(p)->last->next = skb;
+
+	skb_pull(skb, skb_gro_offset(skb));
+
+	NAPI_GRO_CB(p)->last = skb;
+	NAPI_GRO_CB(p)->count++;
+	p->data_len += skb->len;
+
+	/* sk ownership - if any - completely transferred to the aggregated packet */
+	skb->destructor = NULL;
+	skb->sk = NULL;
+	p->truesize += skb->truesize;
+	p->len += skb->len;
+
+	NAPI_GRO_CB(skb)->same_flow = 1;
+
+	return 0;
+}
+
 static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -28,6 +28,70 @@ static void tcp_gso_tstamp(struct sk_buf
 	}
 }
 
+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
+				     __be32 *oldip, __be32 newip,
+				     __be16 *oldport, __be16 newport)
+{
+	struct tcphdr *th;
+	struct iphdr *iph;
+
+	if (*oldip == newip && *oldport == newport)
+		return;
+
+	th = tcp_hdr(seg);
+	iph = ip_hdr(seg);
+
+	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
+	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+	*oldport = newport;
+
+	csum_replace4(&iph->check, *oldip, newip);
+	*oldip = newip;
+}
+
+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+	const struct tcphdr *th;
+	const struct iphdr *iph;
+	struct sk_buff *seg;
+	struct tcphdr *th2;
+	struct iphdr *iph2;
+
+	seg = segs;
+	th = tcp_hdr(seg);
+	iph = ip_hdr(seg);
+	th2 = tcp_hdr(seg->next);
+	iph2 = ip_hdr(seg->next);
+
+	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
+		return segs;
+
+	while ((seg = seg->next)) {
+		th2 = tcp_hdr(seg);
+		iph2 = ip_hdr(seg);
+
+		__tcpv4_gso_segment_csum(seg,
+					 &iph2->saddr, iph->saddr,
+					 &th2->source, th->source);
+		__tcpv4_gso_segment_csum(seg,
+					 &iph2->daddr, iph->daddr,
+					 &th2->dest, th->dest);
+	}
+
+	return segs;
+}
+
+static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
+					       netdev_features_t features)
+{
+	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+	if (IS_ERR(skb))
+		return skb;
+
+	return __tcpv4_gso_segment_list_csum(skb);
+}
+
 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
@@ -37,6 +101,9 @@ static struct sk_buff *tcp4_gso_segment(
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		return ERR_PTR(-EINVAL);
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+		return __tcp4_gso_segment_list(skb, features);
+
 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 		const struct iphdr *iph = ip_hdr(skb);
 		struct tcphdr *th = tcp_hdr(skb);
@@ -181,61 +248,76 @@ out:
 	return segs;
 }
 
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
 {
-	struct sk_buff *pp = NULL;
+	struct tcphdr *th2;
 	struct sk_buff *p;
+
+	list_for_each_entry(p, head, list) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		th2 = tcp_hdr(p);
+		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		return p;
+	}
+
+	return NULL;
+}
+
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
+{
+	unsigned int thlen, hlen, off;
 	struct tcphdr *th;
-	struct tcphdr *th2;
-	unsigned int len;
-	unsigned int thlen;
-	__be32 flags;
-	unsigned int mss = 1;
-	unsigned int hlen;
-	unsigned int off;
-	int flush = 1;
-	int i;
 
 	off = skb_gro_offset(skb);
 	hlen = off + sizeof(*th);
 	th = skb_gro_header(skb, hlen, off);
 	if (unlikely(!th))
-		goto out;
+		return NULL;
 
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
-		goto out;
+		return NULL;
 
 	hlen = off + thlen;
 	if (skb_gro_header_hard(skb, hlen)) {
 		th = skb_gro_header_slow(skb, hlen, off);
 		if (unlikely(!th))
-			goto out;
+			return NULL;
 	}
 
 	skb_gro_pull(skb, thlen);
 
-	len = skb_gro_len(skb);
-	flags = tcp_flag_word(th);
-
-	list_for_each_entry(p, head, list) {
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
+	return th;
+}
 
-		th2 = tcp_hdr(p);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+				struct tcphdr *th)
+{
+	unsigned int thlen = th->doff * 4;
+	struct sk_buff *pp = NULL;
+	struct sk_buff *p;
+	struct tcphdr *th2;
+	unsigned int len;
+	__be32 flags;
+	unsigned int mss = 1;
+	int flush = 1;
+	int i;
 
-		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
-			NAPI_GRO_CB(p)->same_flow = 0;
-			continue;
-		}
+	len = skb_gro_len(skb);
+	flags = tcp_flag_word(th);
 
-		goto found;
-	}
-	p = NULL;
-	goto out_check_final;
+	p = tcp_gro_lookup(head, th);
+	if (!p)
+		goto out_check_final;
 
-found:
 	/* Include the IP ID check below from the inner most IP hdr */
+	th2 = tcp_hdr(p);
 	flush = NAPI_GRO_CB(p)->flush;
 	flush |= (__force int)(flags & TCP_FLAG_CWR);
 	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
@@ -272,6 +354,19 @@ found:
 	flush |= p->decrypted ^ skb->decrypted;
 #endif
 
+	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
+		flush |= (__force int)(flags ^ tcp_flag_word(th2));
+		flush |= skb->ip_summed != p->ip_summed;
+		flush |= skb->csum_level != p->csum_level;
+		flush |= !pskb_may_pull(skb, skb_gro_offset(skb));
+		flush |= NAPI_GRO_CB(p)->count >= 64;
+
+		if (flush || skb_gro_receive_list(p, skb))
+			mss = 1;
+
+		goto out_check_final;
+	}
+
 	if (flush || skb_gro_receive(p, skb)) {
 		mss = 1;
 		goto out_check_final;
@@ -293,7 +388,6 @@ out_check_final:
 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
 		pp = p;
 
-out:
 	NAPI_GRO_CB(skb)->flush |= (flush != 0);
 
 	return pp;
@@ -317,18 +411,58 @@ void tcp_gro_complete(struct sk_buff *sk
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+				    struct tcphdr *th)
+{
+	const struct iphdr *iph;
+	struct sk_buff *p;
+	struct sock *sk;
+	struct net *net;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet_get_iif_sdif(skb, &iif, &sdif);
+	iph = skb_gro_network_header(skb);
+	net = dev_net(skb->dev);
+	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+				       iph->saddr, th->source,
+				       iph->daddr, ntohs(th->dest),
+				       iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
+	struct tcphdr *th;
+
 	/* Don't bother verifying checksum if we're going to flush anyway. */
 	if (!NAPI_GRO_CB(skb)->flush &&
 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
-				      inet_gro_compute_pseudo)) {
-		NAPI_GRO_CB(skb)->flush = 1;
-		return NULL;
-	}
+				      inet_gro_compute_pseudo))
+		goto flush;
 
-	return tcp_gro_receive(head, skb);
+	th = tcp_gro_pull_header(skb);
+	if (!th)
+		goto flush;
+
+	tcp4_check_fraglist_gro(head, skb, th);
+
+	return tcp_gro_receive(head, skb, th);
+
+flush:
+	NAPI_GRO_CB(skb)->flush = 1;
+	return NULL;
 }
 
 INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
@@ -336,6 +470,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_com
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 
+	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
+		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+		__skb_incr_checksum_unnecessary(skb);
+
+		return 0;
+	}
+
 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
 				  iph->daddr, 0);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -451,33 +451,6 @@ out:
 	return segs;
 }
 
-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
-	if (unlikely(p->len + skb->len >= 65536))
-		return -E2BIG;
-
-	if (NAPI_GRO_CB(p)->last == p)
-		skb_shinfo(p)->frag_list = skb;
-	else
-		NAPI_GRO_CB(p)->last->next = skb;
-
-	skb_pull(skb, skb_gro_offset(skb));
-
-	NAPI_GRO_CB(p)->last = skb;
-	NAPI_GRO_CB(p)->count++;
-	p->data_len += skb->len;
-
-	/* sk ownership - if any - completely transferred to the aggregated packet */
-	skb->destructor = NULL;
-	skb->sk = NULL;
-	p->truesize += skb->truesize;
-	p->len += skb->len;
-
-	NAPI_GRO_CB(skb)->same_flow = 1;
-
-	return 0;
-}
-
 #define UDP_GRO_CNT_MAX 64
 
 static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,24 +7,67 @@
  */
 #include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
 #include <net/gro.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+				    struct tcphdr *th)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	const struct ipv6hdr *hdr;
+	struct sk_buff *p;
+	struct sock *sk;
+	struct net *net;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet6_get_iif_sdif(skb, &iif, &sdif);
+	hdr = skb_gro_network_header(skb);
+	net = dev_net(skb->dev);
+	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+					&hdr->saddr, th->source,
+					&hdr->daddr, ntohs(th->dest),
+					iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
+	struct tcphdr *th;
+
 	/* Don't bother verifying checksum if we're going to flush anyway. */
 	if (!NAPI_GRO_CB(skb)->flush &&
 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
-				      ip6_gro_compute_pseudo)) {
-		NAPI_GRO_CB(skb)->flush = 1;
-		return NULL;
-	}
+				      ip6_gro_compute_pseudo))
+		goto flush;
 
-	return tcp_gro_receive(head, skb);
+	th = tcp_gro_pull_header(skb);
+	if (!th)
+		goto flush;
+
+	tcp6_check_fraglist_gro(head, skb, th);
+
+	return tcp_gro_receive(head, skb, th);
+
+flush:
+	NAPI_GRO_CB(skb)->flush = 1;
+	return NULL;
 }
 
 INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
@@ -32,6 +75,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 
+	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
+		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+		__skb_incr_checksum_unnecessary(skb);
+
+		return 0;
+	}
+
 	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
 				  &iph->daddr, 0);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
@@ -40,6 +92,61 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
 	return 0;
 }
 
+static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+				     __be16 *oldport, __be16 newport)
+{
+	struct tcphdr *th;
+
+	if (*oldport == newport)
+		return;
+
+	th = tcp_hdr(seg);
+	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+	*oldport = newport;
+}
+
+static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+	const struct tcphdr *th;
+	const struct ipv6hdr *iph;
+	struct sk_buff *seg;
+	struct tcphdr *th2;
+	struct ipv6hdr *iph2;
+
+	seg = segs;
+	th = tcp_hdr(seg);
+	iph = ipv6_hdr(seg);
+	th2 = tcp_hdr(seg->next);
+	iph2 = ipv6_hdr(seg->next);
+
+	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+		return segs;
+
+	while ((seg = seg->next)) {
+		th2 = tcp_hdr(seg);
+		iph2 = ipv6_hdr(seg);
+
+		iph2->saddr = iph->saddr;
+		iph2->daddr = iph->daddr;
+		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
+		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
+	}
+
+	return segs;
+}
+
+static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
+					       netdev_features_t features)
+{
+	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+	if (IS_ERR(skb))
+		return skb;
+
+	return __tcpv6_gso_segment_list_csum(skb);
+}
+
 static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
@@ -51,6 +158,9 @@ static struct sk_buff *tcp6_gso_segment(
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		return ERR_PTR(-EINVAL);
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+		return __tcp6_gso_segment_list(skb, features);
+
 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 		struct tcphdr *th = tcp_hdr(skb);