@@ -1,4362 +0,0 @@
-From b0ca5138c0fc0e528a4a06437eedf136e75af5e9 Mon Sep 17 00:00:00 2001
-From: kiddin9 <[email protected]>
-Date: Fri, 28 Feb 2025 23:20:43 +0800
-Subject: [PATCH] update
-
----
- ...ss-output-TCP-BBRv3-diag-information.patch | 165 +
- ...den-app-limited-rate-sample-detectio.patch | 51 +
- ...hrink-delivered_mstamp-first_tx_msta.patch | 73 +
- ...napshot-packets-in-flight-at-transmi.patch | 108 +
- ...ount-packets-lost-over-TCP-rate-samp.patch | 69 +
- ...xport-FLAG_ECE-in-rate_sample.is_ece.patch | 37 +
- ...ntroduce-ca_ops-skb_marked_lost-CC-m.patch | 56 +
- ...djust-skb-tx.in_flight-upon-merge-in.patch | 58 +
- ...djust-skb-tx.in_flight-upon-split-in.patch | 96 +
- ...ca-opts-flag-TCP_CONG_WANTS_CE_EVENT.patch | 72 +
- ...alize-TSO-sizing-in-TCP-CC-module-AP.patch | 117 +
- ..._ack_mode-1-skip-rwin-check-in-tcp_f.patch | 71 +
- ...ecord-app-limited-status-of-TLP-repa.patch | 44 +
- ...nform-CC-module-of-losses-repaired-b.patch | 44 +
- ...ntroduce-is_acking_tlp_retrans_seq-i.patch | 72 +
- ...r-route-feature-RTAX_FEATURE_ECN_LOW.patch | 108 +
- ...pdate-TCP-bbr-congestion-control-mod.patch | 2822 +++++++++++++++++
- ...nsure-ECN-enabled-BBR-flows-set-ECT-.patch | 58 +
- ...OPT_ECN_LOW-in-tcp_info-tcpi_options.patch | 37 +
- ...hange-u64-to-unsigned-long-for-bytes.patch | 36 +
- 21 files changed, 4198 insertions(+), 4 deletions(-)
- create mode 100644 package/network/utils/iproute2/patches/500-ss-output-TCP-BBRv3-diag-information.patch
- create mode 100644 target/linux/generic/hack-6.6/601-01-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-broaden-app-limited-rate-sample-detectio.patch
- create mode 100644 target/linux/generic/hack-6.6/601-02-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-shrink-delivered_mstamp-first_tx_msta.patch
- create mode 100644 target/linux/generic/hack-6.6/601-03-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-snapshot-packets-in-flight-at-transmi.patch
- create mode 100644 target/linux/generic/hack-6.6/601-04-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-count-packets-lost-over-TCP-rate-samp.patch
- create mode 100644 target/linux/generic/hack-6.6/601-05-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-export-FLAG_ECE-in-rate_sample.is_ece.patch
- create mode 100644 target/linux/generic/hack-6.6/601-06-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-ca_ops-skb_marked_lost-CC-m.patch
- create mode 100644 target/linux/generic/hack-6.6/601-07-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-merge-in.patch
- create mode 100644 target/linux/generic/hack-6.6/601-08-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-split-in.patch
- create mode 100644 target/linux/generic/hack-6.6/601-09-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-new-ca-opts-flag-TCP_CONG_WANTS_CE_EVENT.patch
- create mode 100644 target/linux/generic/hack-6.6/601-10-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-re-generalize-TSO-sizing-in-TCP-CC-module-AP.patch
- create mode 100644 target/linux/generic/hack-6.6/601-11-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-fast_ack_mode-1-skip-rwin-check-in-tcp_f.patch
- create mode 100644 target/linux/generic/hack-6.6/601-12-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-record-app-limited-status-of-TLP-repa.patch
- create mode 100644 target/linux/generic/hack-6.6/601-13-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-inform-CC-module-of-losses-repaired-b.patch
- create mode 100644 target/linux/generic/hack-6.6/601-14-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-is_acking_tlp_retrans_seq-i.patch
- create mode 100644 target/linux/generic/hack-6.6/601-15-bbr-v3-upstream-prep-2024-02-19-01-tcp-introduce-per-route-feature-RTAX_FEATURE_ECN_LOW.patch
- create mode 100644 target/linux/generic/hack-6.6/601-16-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-update-TCP-bbr-congestion-control-mod.patch
- create mode 100644 target/linux/generic/hack-6.6/601-17-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-ensure-ECN-enabled-BBR-flows-set-ECT-.patch
- create mode 100644 target/linux/generic/hack-6.6/601-18-bbr-v3-upstream-prep-2024-02-19-01-tcp-export-TCPI_OPT_ECN_LOW-in-tcp_info-tcpi_options.patch
- create mode 100644 target/linux/generic/hack-6.6/601-19-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-change-u64-to-unsigned-long-for-bytes.patch
-
-diff --git a/package/network/utils/iproute2/patches/500-ss-output-TCP-BBRv3-diag-information.patch b/package/network/utils/iproute2/patches/500-ss-output-TCP-BBRv3-diag-information.patch
-new file mode 100644
-index 00000000000000..6e20dc451b4b26
---- /dev/null
-+++ b/package/network/utils/iproute2/patches/500-ss-output-TCP-BBRv3-diag-information.patch
-@@ -0,0 +1,165 @@
-+From ca7f11ebc4d4a99ccfd44be8555d505b26996c12 Mon Sep 17 00:00:00 2001
-+From: Arjun Roy <[email protected]>
-+Date: Mon, 25 Jul 2022 12:49:35 -0400
-+Subject: [PATCH 2/2] ss: output TCP BBRv3 diag information
-+
-+Add logic for printing diag information for TCP BBRv3 congestion
-+control. This commit leaves in place the support for printing the
-+earlier TCP BBRv1 congestion control information.
-+
-+Both BBRv1 and BBRv3 are using the same enum value. The BBRv3 struct
-+starts with the same data as BBRv1, so it is backward-compatible
-+with BBRv1, to allow older ss binaries to print basic information for
-+BBRv3. We use the size of the returned data and the version field to
-+check the version of the data.
-+
-+Signed-off-by: Arjun Roy <[email protected]>
-+Signed-off-by: Neal Cardwell <[email protected]>
-+Signed-off-by: David Morley <[email protected]>
-+---
-+ include/uapi/linux/inet_diag.h | 23 ++++++++++++
-+ misc/ss.c | 66 +++++++++++++++++++++++++++++++++-
-+ 2 files changed, 88 insertions(+), 1 deletion(-)
-+
-+diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
-+index d81cb69a26a9..dca833ecb783 100644
-+--- a/include/uapi/linux/inet_diag.h
-++++ b/include/uapi/linux/inet_diag.h
-+@@ -229,6 +229,29 @@ struct tcp_bbr_info {
-+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
-+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
-+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
-++ __u32 bbr_bw_hi_lsb; /* lower 32 bits of bw_hi */
-++ __u32 bbr_bw_hi_msb; /* upper 32 bits of bw_hi */
-++ __u32 bbr_bw_lo_lsb; /* lower 32 bits of bw_lo */
-++ __u32 bbr_bw_lo_msb; /* upper 32 bits of bw_lo */
-++ __u8 bbr_mode; /* current bbr_mode in state machine */
-++ __u8 bbr_phase; /* current state machine phase */
-++ __u8 unused1; /* alignment padding; not used yet */
-++ __u8 bbr_version; /* BBR algorithm version */
-++ __u32 bbr_inflight_lo; /* lower short-term data volume bound */
-++ __u32 bbr_inflight_hi; /* higher long-term data volume bound */
-++ __u32 bbr_extra_acked; /* max excess packets ACKed in epoch */
-++};
-++
-++/* TCP BBR congestion control bbr_phase as reported in netlink/ss stats. */
-++enum tcp_bbr_phase {
-++ BBR_PHASE_INVALID = 0,
-++ BBR_PHASE_STARTUP = 1,
-++ BBR_PHASE_DRAIN = 2,
-++ BBR_PHASE_PROBE_RTT = 3,
-++ BBR_PHASE_PROBE_BW_UP = 4,
-++ BBR_PHASE_PROBE_BW_DOWN = 5,
-++ BBR_PHASE_PROBE_BW_CRUISE = 6,
-++ BBR_PHASE_PROBE_BW_REFILL = 7,
-+ };
-+
-+ union tcp_cc_info {
-+diff --git a/misc/ss.c b/misc/ss.c
-+index e9d813596b91..5f413118f0dd 100644
-+--- a/misc/ss.c
-++++ b/misc/ss.c
-+@@ -912,6 +912,7 @@ struct tcpstat {
-+ bool app_limited;
-+ struct dctcpstat *dctcp;
-+ struct tcp_bbr_info *bbr_info;
-++ unsigned int bbr_info_len;
-+ };
-+
-+ /* SCTP assocs share the same inode number with their parent endpoint. So if we
-+@@ -2585,6 +2586,29 @@ static void sctp_stats_print(struct sctp_info *s)
-+ out(" fraginl:%d", s->sctpi_s_frag_interleave);
-+ }
-+
-++static const char* bbr_phase_to_str(enum tcp_bbr_phase phase)
-++{
-++ switch (phase) {
-++ case BBR_PHASE_STARTUP:
-++ return "STARTUP";
-++ case BBR_PHASE_DRAIN:
-++ return "DRAIN";
-++ case BBR_PHASE_PROBE_RTT:
-++ return "PROBE_RTT";
-++ case BBR_PHASE_PROBE_BW_UP:
-++ return "PROBE_BW_UP";
-++ case BBR_PHASE_PROBE_BW_DOWN:
-++ return "PROBE_BW_DOWN";
-++ case BBR_PHASE_PROBE_BW_CRUISE:
-++ return "PROBE_BW_CRUISE";
-++ case BBR_PHASE_PROBE_BW_REFILL:
-++ return "PROBE_BW_REFILL";
-++ case BBR_PHASE_INVALID:
-++ default:
-++ return "INVALID";
-++ }
-++}
-++
-+ static void tcp_stats_print(struct tcpstat *s)
-+ {
-+ char b1[64];
-+@@ -2658,7 +2682,14 @@ static void tcp_stats_print(struct tcpstat *s)
-+ }
-+
-+ if (s->bbr_info) {
-+- __u64 bw;
-++ /* All versions of the BBR algorithm use the INET_DIAG_BBRINFO
-++ * enum value. Later versions of the tcp_bbr_info struct are
-++ * backward-compatible with earlier versions, to allow older ss
-++ * binaries to print basic information for newer versions of
-++ * the algorithm. We use the size of the returned tcp_bbr_info
-++ * struct to decide how much to print.
-++ */
-++ __u64 bw, bw_hi, bw_lo;
-+
-+ bw = s->bbr_info->bbr_bw_hi;
-+ bw <<= 32;
-+@@ -2673,6 +2704,38 @@ static void tcp_stats_print(struct tcpstat *s)
-+ if (s->bbr_info->bbr_cwnd_gain)
-+ out(",cwnd_gain:%g",
-+ (double)s->bbr_info->bbr_cwnd_gain / 256.0);
-++
-++ if (s->bbr_info_len >=
-++ (offsetof(struct tcp_bbr_info, bbr_extra_acked) +
-++ sizeof(__u32))) {
-++
-++ bw_hi = s->bbr_info->bbr_bw_hi_msb;
-++ bw_hi <<= 32;
-++ bw_hi |= s->bbr_info->bbr_bw_hi_lsb;
-++
-++ bw_lo = s->bbr_info->bbr_bw_lo_msb;
-++ bw_lo <<= 32;
-++ bw_lo |= s->bbr_info->bbr_bw_lo_lsb;
-++
-++ out(",version:%u", s->bbr_info->bbr_version);
-++ if (bw_hi != ~0UL)
-++ out(",bw_hi:%sbps", sprint_bw(b1, bw_hi * 8.0));
-++ if (bw_lo != ~0UL)
-++ out(",bw_lo:%sbps", sprint_bw(b1, bw_lo * 8.0));
-++ if (s->bbr_info->bbr_inflight_hi != ~0U)
-++ out(",inflight_hi:%u",
-++ s->bbr_info->bbr_inflight_hi);
-++ if (s->bbr_info->bbr_inflight_lo != ~0U)
-++ out(",inflight_lo:%u",
-++ s->bbr_info->bbr_inflight_lo);
-++ out(",extra_acked:%u", s->bbr_info->bbr_extra_acked);
-++ out(",mode:%d", (int)s->bbr_info->bbr_mode);
-++ out(",phase:%s",
-++ bbr_phase_to_str(
-++ (enum tcp_bbr_phase)
-++ s->bbr_info->bbr_phase));
-++ }
-++
-+ out(")");
-+ }
-+
-+@@ -3147,6 +3210,7 @@ static void tcp_show_info(const struct nlmsghdr *nlh, struct inet_diag_msg *r,
-+ s.bbr_info = calloc(1, sizeof(*s.bbr_info));
-+ if (s.bbr_info && bbr_info)
-+ memcpy(s.bbr_info, bbr_info, len);
-++ s.bbr_info_len = len;
-+ }
-+
-+ if (rtt > 0 && info->tcpi_snd_mss && info->tcpi_snd_cwnd) {
-+--
-+2.41.0.487.g6d72f3e995-goog
-+
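
Note on the compatibility scheme above: because both BBR versions share the INET_DIAG_BBRINFO attribute, the only version signal a consumer has is the payload length (plus the bbr_version field once the length proves it exists). A minimal user-space sketch of that check, assuming the patched inet_diag.h above; the helper name and attr_len parameter are illustrative, not part of the patch:

#include <stddef.h>
#include <linux/inet_diag.h>

/* attr_len is the netlink payload length of INET_DIAG_BBRINFO
 * (e.g. RTA_PAYLOAD() of the attribute). This is the same test the
 * patched ss performs before printing bw_hi/bw_lo/inflight_*. */
static int bbr_info_has_v3_fields(size_t attr_len)
{
	return attr_len >= offsetof(struct tcp_bbr_info, bbr_extra_acked) +
			   sizeof(__u32);
}
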
-diff --git a/target/linux/generic/hack-6.6/601-01-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-broaden-app-limited-rate-sample-detectio.patch b/target/linux/generic/hack-6.6/601-01-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-broaden-app-limited-rate-sample-detectio.patch
-new file mode 100644
-index 00000000000000..37b43a9c036891
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-01-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-broaden-app-limited-rate-sample-detectio.patch
-@@ -0,0 +1,51 @@
-+From 304c6ff637e53c8f3530a0bb50ba95e532d681b8 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Tue, 11 Jun 2019 12:26:55 -0400
-+Subject: [PATCH 01/19] net-tcp_bbr: broaden app-limited rate sample detection
-+
-+This commit is a bug fix for the Linux TCP app-limited
-+(application-limited) logic that is used for collecting rate
-+(bandwidth) samples.
-+
-+Previously the app-limited logic only looked for "bubbles" of
-+silence in between application writes, by checking at the start
-+of each sendmsg. But "bubbles" of silence can also happen before
-+retransmits: e.g. bubbles can happen between an application write
-+and a retransmit, or between two retransmits.
-+
-+Retransmits are triggered by ACKs or timers. So this commit checks
-+for bubbles of app-limited silence upon ACKs or timers.
-+
-+Why does this commit check for app-limited state at the start of
-+ACKs and timer handling? Because at that point we know whether
-+inflight was fully using the cwnd. During processing the ACK or
-+timer event we often change the cwnd; after changing the cwnd we
-+can't know whether inflight was fully using the old cwnd.
-+
-+Origin-9xx-SHA1: 3fe9b53291e018407780fb8c356adb5666722cbc
-+Change-Id: I37221506f5166877c2b110753d39bb0757985e68
-+---
-+ net/ipv4/tcp_input.c | 1 +
-+ net/ipv4/tcp_timer.c | 1 +
-+ 2 files changed, 2 insertions(+)
-+
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -3915,6 +3915,7 @@ static int tcp_ack(struct sock *sk, cons
-+
-+ prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
-+ rs.prior_in_flight = tcp_packets_in_flight(tp);
-++ tcp_rate_check_app_limited(sk);
-+
-+ /* ts_recent update must be made after we are sure that the packet
-+ * is in window.
-+--- a/net/ipv4/tcp_timer.c
-++++ b/net/ipv4/tcp_timer.c
-+@@ -677,6 +677,7 @@ void tcp_write_timer_handler(struct sock
-+ return;
-+ }
-+
-++ tcp_rate_check_app_limited(sk);
-+ tcp_mstamp_refresh(tcp_sk(sk));
-+ event = icsk->icsk_pending;
-+
-diff --git a/target/linux/generic/hack-6.6/601-02-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-shrink-delivered_mstamp-first_tx_msta.patch b/target/linux/generic/hack-6.6/601-02-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-shrink-delivered_mstamp-first_tx_msta.patch
-new file mode 100644
-index 00000000000000..ee7195893eeaab
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-02-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-shrink-delivered_mstamp-first_tx_msta.patch
-@@ -0,0 +1,73 @@
-+From 5a47eb49ad406b439a00b90f5285359cd1e876f4 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Sun, 24 Jun 2018 21:55:59 -0400
-+Subject: [PATCH 02/19] net-tcp_bbr: v2: shrink delivered_mstamp,
-+ first_tx_mstamp to u32 to free up 8 bytes
-+
-+Free up some space for tracking inflight and losses for each
-+bw sample, in upcoming commits.
-+
-+These timestamps are in microseconds, and are now stored in 32
-+bits. So they can only hold time intervals up to roughly 2^12 = 4096
-+seconds. But Linux TCP RTT and RTO tracking has the same 32-bit
-+microsecond implementation approach and resulting deployment
-+limitations. So this is not introducing a new limit. And these should
-+not be a limitation for the foreseeable future.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 238a7e6b5d51625fef1ce7769826a7b21b02ae55
-+Change-Id: I3b779603797263b52a61ad57c565eb91fe42680c
-+---
-+ include/net/tcp.h | 9 +++++++--
-+ net/ipv4/tcp_rate.c | 7 ++++---
-+ 2 files changed, 11 insertions(+), 5 deletions(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -822,6 +822,11 @@ static inline u32 tcp_stamp_us_delta(u64
-+ return max_t(s64, t1 - t0, 0);
-+ }
-+
-++static inline u32 tcp_stamp32_us_delta(u32 t1, u32 t0)
-++{
-++ return max_t(s32, t1 - t0, 0);
-++}
-++
-+ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
-+ {
-+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
-+@@ -897,9 +902,9 @@ struct tcp_skb_cb {
-+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
-+ __u32 delivered;
-+ /* start of send pipeline phase */
-+- u64 first_tx_mstamp;
-++ u32 first_tx_mstamp;
-+ /* when we reached the "delivered" count */
-+- u64 delivered_mstamp;
-++ u32 delivered_mstamp;
-+ } tx; /* only used for outgoing skbs */
-+ union {
-+ struct inet_skb_parm h4;
-+--- a/net/ipv4/tcp_rate.c
-++++ b/net/ipv4/tcp_rate.c
-+@@ -101,8 +101,9 @@ void tcp_rate_skb_delivered(struct sock
-+ /* Record send time of most recently ACKed packet: */
-+ tp->first_tx_mstamp = tx_tstamp;
-+ /* Find the duration of the "send phase" of this window: */
-+- rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
-+- scb->tx.first_tx_mstamp);
-++ rs->interval_us = tcp_stamp32_us_delta(
-++ tp->first_tx_mstamp,
-++ scb->tx.first_tx_mstamp);
-+
-+ }
-+ /* Mark off the skb delivered once it's sacked to avoid being
-+@@ -155,7 +156,7 @@ void tcp_rate_gen(struct sock *sk, u32 d
-+ * longer phase.
-+ */
-+ snd_us = rs->interval_us; /* send phase */
-+- ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
-++ ack_us = tcp_stamp32_us_delta(tp->tcp_mstamp,
-+ rs->prior_mstamp); /* ack phase */
-+ rs->interval_us = max(snd_us, ack_us);
-+
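
The u32 shrink above relies on two's-complement wraparound: as long as the true interval is below ~2^31 microseconds (~35 minutes), the unsigned difference cast to s32 recovers it even across a u32 wrap. A standalone sketch with illustrative values, using plain C instead of the kernel's max_t():

#include <assert.h>
#include <stdint.h>

static uint32_t stamp32_us_delta(uint32_t t1, uint32_t t0)
{
	int32_t delta = (int32_t)(t1 - t0);	/* subtraction wraps mod 2^32 */

	return delta > 0 ? (uint32_t)delta : 0;	/* clamp reordering to 0 */
}

int main(void)
{
	/* t0 is 16 us before the u32 wrap, t1 is 5 us after it;
	 * the true 21 us delta still comes out. */
	assert(stamp32_us_delta(5u, 0xFFFFFFF0u) == 21);
	return 0;
}
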
-diff --git a/target/linux/generic/hack-6.6/601-03-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-snapshot-packets-in-flight-at-transmi.patch b/target/linux/generic/hack-6.6/601-03-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-snapshot-packets-in-flight-at-transmi.patch
-new file mode 100644
-index 00000000000000..f0e4e9b392ebeb
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-03-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-snapshot-packets-in-flight-at-transmi.patch
-@@ -0,0 +1,108 @@
-+From de4ff1045b5dec6998549c1a1d7d5eae3500360a Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Sat, 5 Aug 2017 11:49:50 -0400
-+Subject: [PATCH 03/19] net-tcp_bbr: v2: snapshot packets in flight at transmit
-+ time and pass in rate_sample
-+
-+CC algorithms may want to snapshot the number of packets in flight at
-+transmit time and pass in rate_sample, to understand the relationship
-+between inflight and losses or ECN signals, to try to find the highest
-+inflight value that has acceptable levels of loss/ECN marking.
-+
-+We split out the code to set an skb's tx.in_flight field into its own
-+function, so that this code can be used for the TCP_REPAIR "fake send"
-+code path that inserts skbs into the rtx queue without sending them.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: b3eb4f2d20efab4ca001f32c9294739036c493ea
-+Origin-9xx-SHA1: e880fc907d06ea7354333f60f712748ebce9497b
-+Origin-9xx-SHA1: 330f825a08a6fe92cef74d799cc468864c479f63
-+Change-Id: I7314047d0ff14dd261a04b1969a46dc658c8836a
-+---
-+ include/net/tcp.h | 6 ++++++
-+ net/ipv4/tcp_output.c | 1 +
-+ net/ipv4/tcp_rate.c | 20 ++++++++++++++++++++
-+ 3 files changed, 27 insertions(+)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -905,6 +905,10 @@ struct tcp_skb_cb {
-+ u32 first_tx_mstamp;
-+ /* when we reached the "delivered" count */
-+ u32 delivered_mstamp;
-++#define TCPCB_IN_FLIGHT_BITS 20
-++#define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
-++ u32 in_flight:20, /* packets in flight at transmit */
-++ unused2:12;
-+ } tx; /* only used for outgoing skbs */
-+ union {
-+ struct inet_skb_parm h4;
-+@@ -1052,6 +1056,7 @@ struct rate_sample {
-+ u64 prior_mstamp; /* starting timestamp for interval */
-+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
-+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
-++ u32 tx_in_flight; /* packets in flight at starting timestamp */
-+ s32 delivered; /* number of packets delivered over interval */
-+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
-+ long interval_us; /* time for tp->delivered to incr "delivered" */
-+@@ -1174,6 +1179,7 @@ static inline void tcp_ca_event(struct s
-+ void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
-+
-+ /* From tcp_rate.c */
-++void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb);
-+ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
-+ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-+ struct rate_sample *rs);
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -2701,6 +2701,7 @@ static bool tcp_write_xmit(struct sock *
-+ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
-+ list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
-+ tcp_init_tso_segs(skb, mss_now);
-++ tcp_set_tx_in_flight(sk, skb);
-+ goto repair; /* Skip network transmission */
-+ }
-+
-+--- a/net/ipv4/tcp_rate.c
-++++ b/net/ipv4/tcp_rate.c
-+@@ -34,6 +34,24 @@
-+ * ready to send in the write queue.
-+ */
-+
-++void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ u32 in_flight;
-++
-++ /* Check, sanitize, and record packets in flight after skb was sent. */
-++ in_flight = tcp_packets_in_flight(tp) + tcp_skb_pcount(skb);
-++ if (WARN_ONCE(in_flight > TCPCB_IN_FLIGHT_MAX,
-++ "insane in_flight %u cc %s mss %u "
-++ "cwnd %u pif %u %u %u %u\n",
-++ in_flight, inet_csk(sk)->icsk_ca_ops->name,
-++ tp->mss_cache, tp->snd_cwnd,
-++ tp->packets_out, tp->retrans_out,
-++ tp->sacked_out, tp->lost_out))
-++ in_flight = TCPCB_IN_FLIGHT_MAX;
-++ TCP_SKB_CB(skb)->tx.in_flight = in_flight;
-++}
-++
-+ /* Snapshot the current delivery information in the skb, to generate
-+ * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
-+ */
-+@@ -67,6 +85,7 @@ void tcp_rate_skb_sent(struct sock *sk,
-+ TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
-+ TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
-+ TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
-++ tcp_set_tx_in_flight(sk, skb);
-+ }
-+
-+ /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
-+@@ -96,6 +115,7 @@ void tcp_rate_skb_delivered(struct sock
-+ rs->prior_mstamp = scb->tx.delivered_mstamp;
-+ rs->is_app_limited = scb->tx.is_app_limited;
-+ rs->is_retrans = scb->sacked & TCPCB_RETRANS;
-++ rs->tx_in_flight = scb->tx.in_flight;
-++ rs->last_end_seq = scb->end_seq;
-+
-+ /* Record send time of most recently ACKed packet: */
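
For scale, the 20-bit in_flight field above is generous. A worked check of the cap in ordinary C, with an illustrative MSS:

#include <assert.h>

int main(void)
{
	unsigned int max_pkts = (1u << 20) - 1;	/* TCPCB_IN_FLIGHT_MAX */

	assert(max_pkts == 1048575);
	/* At a 1448-byte MSS that is roughly 1.5 GB outstanding, so the
	 * WARN_ONCE()/clamp in tcp_set_tx_in_flight() is a safety net
	 * rather than a limit hit in normal operation. */
	return 0;
}
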
-diff --git a/target/linux/generic/hack-6.6/601-04-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-count-packets-lost-over-TCP-rate-samp.patch b/target/linux/generic/hack-6.6/601-04-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-count-packets-lost-over-TCP-rate-samp.patch
-new file mode 100644
-index 00000000000000..39c037e6544b78
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-04-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-count-packets-lost-over-TCP-rate-samp.patch
-@@ -0,0 +1,69 @@
-+From f370a83a7972e08799316b0de5fb67b63b14c5e4 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Thu, 12 Oct 2017 23:44:27 -0400
-+Subject: [PATCH 04/19] net-tcp_bbr: v2: count packets lost over TCP rate
-+ sampling interval
-+
-+For understanding the relationship between inflight and packet loss
-+signals, to try to find the highest inflight value that has acceptable
-+levels of packet losses.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 4527e26b2bd7756a88b5b9ef1ada3da33dd609ab
-+Change-Id: I594c2500868d9c530770e7ddd68ffc87c57f4fd5
-+---
-+ include/net/tcp.h | 5 ++++-
-+ net/ipv4/tcp_rate.c | 3 +++
-+ 2 files changed, 7 insertions(+), 1 deletion(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -909,6 +909,7 @@ struct tcp_skb_cb {
-+ #define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
-+ u32 in_flight:20, /* packets in flight at transmit */
-+ unused2:12;
-++ u32 lost; /* packets lost so far upon tx of skb */
-+ } tx; /* only used for outgoing skbs */
-+ union {
-+ struct inet_skb_parm h4;
-+@@ -1054,11 +1055,13 @@ struct ack_sample {
-+ */
-+ struct rate_sample {
-+ u64 prior_mstamp; /* starting timestamp for interval */
-++ u32 prior_lost; /* tp->lost at "prior_mstamp" */
-+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
-+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
-+ u32 tx_in_flight; /* packets in flight at starting timestamp */
-++ s32 lost; /* number of packets lost over interval */
-+ s32 delivered; /* number of packets delivered over interval */
-+- s32 delivered_ce; /* number of packets delivered w/ CE marks*/
-++ s32 delivered_ce; /* packets delivered w/ CE mark over interval */
-+ long interval_us; /* time for tp->delivered to incr "delivered" */
-+ u32 snd_interval_us; /* snd interval for delivered packets */
-+ u32 rcv_interval_us; /* rcv interval for delivered packets */
-+--- a/net/ipv4/tcp_rate.c
-++++ b/net/ipv4/tcp_rate.c
-+@@ -84,6 +84,7 @@ void tcp_rate_skb_sent(struct sock *sk,
-+ TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
-+ TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
-+ TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
-++ TCP_SKB_CB(skb)->tx.lost = tp->lost;
-+ TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
-+ tcp_set_tx_in_flight(sk, skb);
-+ }
-+@@ -110,6 +111,7 @@ void tcp_rate_skb_delivered(struct sock
-+ if (!rs->prior_delivered ||
-+ tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
-+ scb->end_seq, rs->last_end_seq)) {
-++ rs->prior_lost = scb->tx.lost;
-+ rs->prior_delivered_ce = scb->tx.delivered_ce;
-+ rs->prior_delivered = scb->tx.delivered;
-+ rs->prior_mstamp = scb->tx.delivered_mstamp;
-+@@ -165,6 +167,7 @@ void tcp_rate_gen(struct sock *sk, u32 d
-+ return;
-+ }
-+ rs->delivered = tp->delivered - rs->prior_delivered;
-++ rs->lost = tp->lost - rs->prior_lost;
-+
-+ rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
-+ /* delivered_ce occupies less than 32 bits in the skb control block */
-diff --git a/target/linux/generic/hack-6.6/601-05-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-export-FLAG_ECE-in-rate_sample.is_ece.patch b/target/linux/generic/hack-6.6/601-05-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-export-FLAG_ECE-in-rate_sample.is_ece.patch
-new file mode 100644
-index 00000000000000..066f276d93fd64
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-05-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-export-FLAG_ECE-in-rate_sample.is_ece.patch
-@@ -0,0 +1,37 @@
-+From 39cad6f5ef38be886ef73766b8e01ab80a2ee6c6 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Mon, 19 Nov 2018 13:48:36 -0500
-+Subject: [PATCH 05/19] net-tcp_bbr: v2: export FLAG_ECE in rate_sample.is_ece
-+
-+For understanding the relationship between inflight and ECN signals,
-+to try to find the highest inflight value that has acceptable levels
-+of ECN marking.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 3eba998f2898541406c2666781182200934965a8
-+Change-Id: I3a964e04cee83e11649a54507043d2dfe769a3b3
-+---
-+ include/net/tcp.h | 1 +
-+ net/ipv4/tcp_input.c | 1 +
-+ 2 files changed, 2 insertions(+)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1073,6 +1073,7 @@ struct rate_sample {
-+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
-+ bool is_retrans; /* is sample from retransmission? */
-+ bool is_ack_delayed; /* is this (likely) a delayed ACK? */
-++ bool is_ece; /* did this ACK have ECN marked? */
-+ };
-+
-+ struct tcp_congestion_ops {
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -4014,6 +4014,7 @@ static int tcp_ack(struct sock *sk, cons
-+ delivered = tcp_newly_delivered(sk, delivered, flag);
-+ lost = tp->lost - lost; /* freshly marked lost */
-+ rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
-++ rs.is_ece = !!(flag & FLAG_ECE);
-+ tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
-+ tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
-+ tcp_xmit_recovery(sk, rexmit);
-diff --git a/target/linux/generic/hack-6.6/601-06-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-ca_ops-skb_marked_lost-CC-m.patch b/target/linux/generic/hack-6.6/601-06-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-ca_ops-skb_marked_lost-CC-m.patch
-new file mode 100644
-index 00000000000000..c1c3a706a01500
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-06-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-ca_ops-skb_marked_lost-CC-m.patch
-@@ -0,0 +1,56 @@
-+From 0dfc73a322b79b9bcc49ff662055051e2016e8d5 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Tue, 7 Aug 2018 21:52:06 -0400
-+Subject: [PATCH 06/19] net-tcp_bbr: v2: introduce ca_ops->skb_marked_lost() CC
-+ module callback API
-+
-+For connections experiencing reordering, RACK can mark packets lost
-+long after we receive the SACKs/ACKs hinting that the packets were
-+actually lost.
-+
-+This means that CC modules cannot easily learn the volume of inflight
-+data at which packet loss happens by looking at the current inflight
-+or even the packets in flight when the most recently SACKed packet was
-+sent. To learn this, CC modules need to know how many packets were in
-+flight at the time lost packets were sent. This new callback, combined
-+with TCP_SKB_CB(skb)->tx.in_flight, allows them to learn this.
-+
-+This also provides a consistent callback that is invoked whether
-+packets are marked lost upon ACK processing, using the RACK reordering
-+timer, or at RTO time.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: afcbebe3374e4632ac6714d39e4dc8a8455956f4
-+Change-Id: I54826ab53df636be537e5d3c618a46145d12d51a
-+---
-+ include/net/tcp.h | 3 +++
-+ net/ipv4/tcp_input.c | 5 +++++
-+ 2 files changed, 8 insertions(+)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1100,6 +1100,9 @@ struct tcp_congestion_ops {
-+ /* override sysctl_tcp_min_tso_segs */
-+ u32 (*min_tso_segs)(struct sock *sk);
-+
-++ /* react to a specific lost skb (optional) */
-++ void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
-++
-+ /* call when packets are delivered to update cwnd and pacing rate,
-+ * after all the ca_state processing. (optional)
-+ */
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -1103,7 +1103,12 @@ static void tcp_verify_retransmit_hint(s
-+ */
-+ static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
-+ {
-++ struct sock *sk = (struct sock *)tp;
-++ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
-++
-+ tp->lost += tcp_skb_pcount(skb);
-++ if (ca_ops->skb_marked_lost)
-++ ca_ops->skb_marked_lost(sk, skb);
-+ }
-+
-+ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
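
A sketch of the kind of consumer this callback enables; the module below is hypothetical, not code from this series. It pairs the callback with the tx.in_flight snapshot added in patch 03 to learn how much data was in flight when a now-lost skb was originally sent:

#include <net/tcp.h>

/* Hypothetical: track the lowest transmit-time inflight that later
 * suffered a loss, as a candidate ceiling for future inflight probing
 * (a real module would keep this per-socket in icsk_ca_priv). */
static u32 hypo_lowest_lossy_inflight = ~0U;

static void hypo_skb_marked_lost(struct sock *sk, const struct sk_buff *skb)
{
	u32 tx_in_flight = TCP_SKB_CB(skb)->tx.in_flight;

	if (tx_in_flight && tx_in_flight < hypo_lowest_lossy_inflight)
		hypo_lowest_lossy_inflight = tx_in_flight;
}
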
-diff --git a/target/linux/generic/hack-6.6/601-07-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-merge-in.patch b/target/linux/generic/hack-6.6/601-07-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-merge-in.patch
-new file mode 100644
-index 00000000000000..086a6e6d07aad2
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-07-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-merge-in.patch
-@@ -0,0 +1,58 @@
-+From 558995126fb090c0428904cde7fa70fdd30027be Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Wed, 1 May 2019 20:16:33 -0400
-+Subject: [PATCH 07/19] net-tcp_bbr: v2: adjust skb tx.in_flight upon merge in
-+ tcp_shifted_skb()
-+
-+When tcp_shifted_skb() updates state as adjacent SACKed skbs are
-+coalesced, previously the tx.in_flight was not adjusted, so we could
-+get contradictory state where the skb's recorded pcount was bigger
-+than the tx.in_flight (the number of segments that were in_flight
-+after sending the skb).
-+
-+Normally, having a SACKed skb with contradictory pcount/tx.in_flight
-+would not matter. However, with SACK reneging, the SACKed bit is
-+removed, and an skb once again becomes eligible for retransmitting,
-+fragmenting, SACKing, etc. Packetdrill testing verified the following
-+sequence is possible in a kernel that does not have this commit:
-+
-+ - skb N is SACKed
-+ - skb N+1 is SACKed and combined with skb N using tcp_shifted_skb()
-+ - tcp_shifted_skb() will increase the pcount of prev,
-+ but leave tx.in_flight as-is
-+ - so prev skb can have pcount > tx.in_flight
-+ - RTO, tcp_timeout_mark_lost(), detect reneg,
-+ remove "SACKed" bit, mark skb N as lost
-+ - find pcount of skb N is greater than its tx.in_flight
-+
-+I suspect this issue is what caused the bbr2_inflight_hi_from_lost_skb():
-+ WARN_ON_ONCE(inflight_prev < 0)
-+to fire in production machines using bbr2.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 1a3e997e613d2dcf32b947992882854ebe873715
-+Change-Id: I1b0b75c27519953430c7db51c6f358f104c7af55
-+---
-+ net/ipv4/tcp_input.c | 11 +++++++++++
-+ 1 file changed, 11 insertions(+)
-+
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -1489,6 +1489,17 @@ static bool tcp_shifted_skb(struct sock
-+ WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
-+ tcp_skb_pcount_add(skb, -pcount);
-+
-++ /* Adjust tx.in_flight as pcount is shifted from skb to prev. */
-++ if (WARN_ONCE(TCP_SKB_CB(skb)->tx.in_flight < pcount,
-++ "prev in_flight: %u skb in_flight: %u pcount: %u",
-++ TCP_SKB_CB(prev)->tx.in_flight,
-++ TCP_SKB_CB(skb)->tx.in_flight,
-++ pcount))
-++ TCP_SKB_CB(skb)->tx.in_flight = 0;
-++ else
-++ TCP_SKB_CB(skb)->tx.in_flight -= pcount;
-++ TCP_SKB_CB(prev)->tx.in_flight += pcount;
-++
-+ /* When we're adding to gso_segs == 1, gso_size will be zero,
-+ * in theory this shouldn't be necessary but as long as DSACK
-+ * code can come after this skb later on it's better to keep
-diff --git a/target/linux/generic/hack-6.6/601-08-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-split-in.patch b/target/linux/generic/hack-6.6/601-08-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-split-in.patch
-new file mode 100644
-index 00000000000000..db05aa94cedf75
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-08-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-adjust-skb-tx.in_flight-upon-split-in.patch
-@@ -0,0 +1,96 @@
-+From cc06e420c17a12db64fd8c460d00cc60d5f5106f Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Wed, 1 May 2019 20:16:25 -0400
-+Subject: [PATCH 08/19] net-tcp_bbr: v2: adjust skb tx.in_flight upon split in
-+ tcp_fragment()
-+
-+When we fragment an skb that has already been sent, we need to update
-+the tx.in_flight for the first skb in the resulting pair ("buff").
-+
-+Because we were not updating the tx.in_flight, the tx.in_flight value
-+was inconsistent with the pcount of the "buff" skb (tx.in_flight would
-+be too high). That meant that if the "buff" skb was lost, then
-+bbr2_inflight_hi_from_lost_skb() would calculate an inflight_hi value
-+that is too high. This could result in longer queues and higher packet
-+loss.
-+
-+Packetdrill testing verified that without this commit, when the second
-+half of an skb is SACKed and then later the first half of that skb is
-+marked lost, the calculated inflight_hi was incorrect.
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 385f1ddc610798fab2837f9f372857438b25f874
-+Origin-9xx-SHA1: a0eb099690af net-tcp_bbr: v2: fix tcp_fragment() tx.in_flight recomputation [prod feb 8 2021; use as a fixup]
-+Origin-9xx-SHA1: 885503228153ff0c9114e net-tcp_bbr: v2: introduce tcp_skb_tx_in_flight_is_suspicious() helper for warnings
-+Change-Id: I617f8cab4e9be7a0b8e8d30b047bf8645393354d
-+---
-+ include/net/tcp.h | 15 +++++++++++++++
-+ net/ipv4/tcp_output.c | 26 +++++++++++++++++++++++++-
-+ 2 files changed, 40 insertions(+), 1 deletion(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1199,6 +1199,21 @@ static inline bool tcp_skb_sent_after(u6
-+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
-+ }
-+
-++/* If a retransmit failed due to local qdisc congestion or other local issues,
-++ * then we may have called tcp_set_skb_tso_segs() to increase the number of
-++ * segments in the skb without increasing the tx.in_flight. In all other cases,
-++ * the tx.in_flight should be at least as big as the pcount of the sk_buff. We
-++ * do not have the state to know whether a retransmit failed due to local qdisc
-++ * congestion or other local issues, so to avoid spurious warnings we consider
-++ * that any skb marked lost may have suffered that fate.
-++ */
-++static inline bool tcp_skb_tx_in_flight_is_suspicious(u32 skb_pcount,
-++ u32 skb_sacked_flags,
-++ u32 tx_in_flight)
-++{
-++ return (skb_pcount > tx_in_flight) && !(skb_sacked_flags & TCPCB_LOST);
-++}
-++
-+ /* These functions determine how the current flow behaves in respect of SACK
-+ * handling. SACK is negotiated with the peer, and therefore it can vary
-+ * between different flows.
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -1548,7 +1548,7 @@ int tcp_fragment(struct sock *sk, enum t
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+ struct sk_buff *buff;
-+- int old_factor;
-++ int old_factor, inflight_prev;
-+ long limit;
-+ int nlen;
-+ u8 flags;
-+@@ -1623,6 +1623,30 @@ int tcp_fragment(struct sock *sk, enum t
-+
-+ if (diff)
-+ tcp_adjust_pcount(sk, skb, diff);
-++
-++ inflight_prev = TCP_SKB_CB(skb)->tx.in_flight - old_factor;
-++ if (inflight_prev < 0) {
-++ WARN_ONCE(tcp_skb_tx_in_flight_is_suspicious(
-++ old_factor,
-++ TCP_SKB_CB(skb)->sacked,
-++ TCP_SKB_CB(skb)->tx.in_flight),
-++ "inconsistent: tx.in_flight: %u "
-++ "old_factor: %d mss: %u sacked: %u "
-++ "1st pcount: %d 2nd pcount: %d "
-++ "1st len: %u 2nd len: %u ",
-++ TCP_SKB_CB(skb)->tx.in_flight, old_factor,
-++ mss_now, TCP_SKB_CB(skb)->sacked,
-++ tcp_skb_pcount(skb), tcp_skb_pcount(buff),
-++ skb->len, buff->len);
-++ inflight_prev = 0;
-++ }
-++ /* Set 1st tx.in_flight as if 1st were sent by itself: */
-++ TCP_SKB_CB(skb)->tx.in_flight = inflight_prev +
-++ tcp_skb_pcount(skb);
-++ /* Set 2nd tx.in_flight with new 1st and 2nd pcounts: */
-++ TCP_SKB_CB(buff)->tx.in_flight = inflight_prev +
-++ tcp_skb_pcount(skb) +
-++ tcp_skb_pcount(buff);
-+ }
-+
-+ /* Link BUFF into the send queue. */
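
The arithmetic in the hunk above is easier to see with concrete numbers. A standalone sketch with illustrative values: an skb of 10 segments was sent when tx.in_flight was 30, and tcp_fragment() splits it 4 + 6:

#include <assert.h>

int main(void)
{
	int old_factor = 10;		/* pcount of the skb before the split */
	int tx_in_flight = 30;		/* snapshot taken when it was sent */
	int first = 4, second = 6;	/* pcounts after the split */

	/* Packets in flight just before this skb was sent: */
	int inflight_prev = tx_in_flight - old_factor;

	assert(inflight_prev == 20);
	/* 1st half, as if it had been sent by itself: */
	assert(inflight_prev + first == 24);
	/* 2nd half saw both halves already in flight: */
	assert(inflight_prev + first + second == 30);
	return 0;
}
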
-diff --git a/target/linux/generic/hack-6.6/601-09-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-new-ca-opts-flag-TCP_CONG_WANTS_CE_EVENT.patch b/target/linux/generic/hack-6.6/601-09-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-new-ca-opts-flag-TCP_CONG_WANTS_CE_EVENT.patch
-new file mode 100644
-index 00000000000000..97b7b6d4f4d960
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-09-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-new-ca-opts-flag-TCP_CONG_WANTS_CE_EVENT.patch
-@@ -0,0 +1,72 @@
-+From f9303263a3dedd48b477b226e15ebe171ff5ed09 Mon Sep 17 00:00:00 2001
-+From: Yousuk Seung <[email protected]>
-+Date: Wed, 23 May 2018 17:55:54 -0700
-+Subject: [PATCH 09/19] net-tcp: add new ca opts flag TCP_CONG_WANTS_CE_EVENTS
-+
-+Add a new ca opts flag TCP_CONG_WANTS_CE_EVENTS that allows a
-+congestion control module to receive CE events.
-+
-+Currently congestion control modules have to set the TCP_CONG_NEEDS_ECN
-+bit in opts flag to receive CE events but this may incur changes in ECN
-+behavior elsewhere. This patch adds a new bit TCP_CONG_WANTS_CE_EVENTS
-+that allows congestion control modules to receive CE events
-+independently of TCP_CONG_NEEDS_ECN.
-+
-+Effort: net-tcp
-+Origin-9xx-SHA1: 9f7e14716cde760bc6c67ef8ef7e1ee48501d95b
-+Change-Id: I2255506985242f376d910c6fd37daabaf4744f24
-+---
-+ include/net/tcp.h | 14 +++++++++++++-
-+ net/ipv4/tcp_input.c | 4 ++--
-+ 2 files changed, 15 insertions(+), 3 deletions(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1035,7 +1035,11 @@ enum tcp_ca_ack_event_flags {
-+ #define TCP_CONG_NON_RESTRICTED 0x1
-+ /* Requires ECN/ECT set on all packets */
-+ #define TCP_CONG_NEEDS_ECN 0x2
-+-#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
-++/* Wants notification of CE events (CA_EVENT_ECN_IS_CE, CA_EVENT_ECN_NO_CE). */
-++#define TCP_CONG_WANTS_CE_EVENTS 0x4
-++#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | \
-++ TCP_CONG_NEEDS_ECN | \
-++ TCP_CONG_WANTS_CE_EVENTS)
-+
-+ union tcp_cc_info;
-+
-+@@ -1167,6 +1171,14 @@ static inline char *tcp_ca_get_name_by_k
-+ }
-+ #endif
-+
-++static inline bool tcp_ca_wants_ce_events(const struct sock *sk)
-++{
-++ const struct inet_connection_sock *icsk = inet_csk(sk);
-++
-++ return icsk->icsk_ca_ops->flags & (TCP_CONG_NEEDS_ECN |
-++ TCP_CONG_WANTS_CE_EVENTS);
-++}
-++
-+ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
-+ {
-+ const struct inet_connection_sock *icsk = inet_csk(sk);
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -376,7 +376,7 @@ static void __tcp_ecn_check_ce(struct so
-+ tcp_enter_quickack_mode(sk, 2);
-+ break;
-+ case INET_ECN_CE:
-+- if (tcp_ca_needs_ecn(sk))
-++ if (tcp_ca_wants_ce_events(sk))
-+ tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
-+
-+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
-+@@ -387,7 +387,7 @@ static void __tcp_ecn_check_ce(struct so
-+ tp->ecn_flags |= TCP_ECN_SEEN;
-+ break;
-+ default:
-+- if (tcp_ca_needs_ecn(sk))
-++ if (tcp_ca_wants_ce_events(sk))
-+ tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
-+ tp->ecn_flags |= TCP_ECN_SEEN;
-+ break;
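
What opting in looks like from a module's side, as a hedged sketch: a hypothetical congestion control ops table (not part of this series) that asks for CE events while leaving ECN negotiation untouched:

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops hypo_ce_cc __read_mostly = {
	/* WANTS_CE_EVENTS delivers CA_EVENT_ECN_IS_CE/CA_EVENT_ECN_NO_CE
	 * to cwnd_event() without requesting ECT on outgoing packets,
	 * which TCP_CONG_NEEDS_ECN would additionally imply. */
	.flags		= TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
	.name		= "hypo_ce",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
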
-diff --git a/target/linux/generic/hack-6.6/601-10-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-re-generalize-TSO-sizing-in-TCP-CC-module-AP.patch b/target/linux/generic/hack-6.6/601-10-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-re-generalize-TSO-sizing-in-TCP-CC-module-AP.patch
-new file mode 100644
-index 00000000000000..3675602dcd5ea5
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-10-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-re-generalize-TSO-sizing-in-TCP-CC-module-AP.patch
-@@ -0,0 +1,117 @@
-+From eb5151a8c0aba2b3cffc6fe370c7594f9bf39164 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Fri, 27 Sep 2019 17:10:26 -0400
-+Subject: [PATCH 10/19] net-tcp: re-generalize TSO sizing in TCP CC module API
-+
-+Reorganize the API for CC modules so that the CC module once again
-+gets complete control of the TSO sizing decision. This is how the API
-+was set up around 2016, at the time of the initial BBRv1 upstreaming.
-+Later Eric Dumazet simplified it. But with wider testing it now seems
-+that to avoid CPU regressions BBR needs to have a different TSO sizing
-+function.
-+
-+This is necessary to handle cases where there are many flows
-+bottlenecked on the sender host's NIC, in which case BBR's pacing rate
-+is much lower than CUBIC/Reno/DCTCP's. Why does this happen? Because
-+BBR's pacing rate adapts to the low bandwidth share each flow sees. By
-+contrast, CUBIC/Reno/DCTCP see no loss or ECN, so they grow a very
-+large cwnd, and thus large pacing rate and large TSO burst size.
-+
-+Change-Id: Ic8ccfdbe4010ee8d4bf6a6334c48a2fceb2171ea
-+---
-+ include/net/tcp.h | 4 ++--
-+ net/ipv4/tcp_bbr.c | 37 ++++++++++++++++++++++++++-----------
-+ net/ipv4/tcp_output.c | 11 +++++------
-+ 3 files changed, 33 insertions(+), 19 deletions(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1101,8 +1101,8 @@ struct tcp_congestion_ops {
-+ /* hook for packet ack accounting (optional) */
-+ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
-+
-+- /* override sysctl_tcp_min_tso_segs */
-+- u32 (*min_tso_segs)(struct sock *sk);
-++ /* pick target number of segments per TSO/GSO skb (optional): */
-++ u32 (*tso_segs)(struct sock *sk, unsigned int mss_now);
-+
-+ /* react to a specific lost skb (optional) */
-+ void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
-+--- a/net/ipv4/tcp_bbr.c
-++++ b/net/ipv4/tcp_bbr.c
-+@@ -300,20 +300,35 @@ __bpf_kfunc static u32 bbr_min_tso_segs(
-+ return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
-+ }
-+
-++/* Return the number of segments BBR would like in a TSO/GSO skb, given
-++ * a particular max gso size as a constraint.
-++ */
-++static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
-++ u32 gso_max_size)
-++{
-++ u32 segs;
-++ u64 bytes;
-++
-++ /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
-++ bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
-++
-++ bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
-++ segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
-++ return segs;
-++}
-++
-++/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
-++static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
-++{
-++ return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
-++}
-++
-++/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
-+ static u32 bbr_tso_segs_goal(struct sock *sk)
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+- u32 segs, bytes;
-+-
-+- /* Sort of tcp_tso_autosize() but ignoring
-+- * driver provided sk_gso_max_size.
-+- */
-+- bytes = min_t(unsigned long,
-+- sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
-+- GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
-+- segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
-+
-+- return min(segs, 0x7FU);
-++ return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
-+ }
-+
-+ /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
-+@@ -1149,7 +1164,7 @@ static struct tcp_congestion_ops tcp_bbr
-+ .undo_cwnd = bbr_undo_cwnd,
-+ .cwnd_event = bbr_cwnd_event,
-+ .ssthresh = bbr_ssthresh,
-+- .min_tso_segs = bbr_min_tso_segs,
-++ .tso_segs = bbr_tso_segs,
-+ .get_info = bbr_get_info,
-+ .set_state = bbr_set_state,
-+ };
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -2022,13 +2022,12 @@ static u32 tcp_tso_autosize(const struct
-+ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
-+ {
-+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
-+- u32 min_tso, tso_segs;
-++ u32 tso_segs;
-+
-+- min_tso = ca_ops->min_tso_segs ?
-+- ca_ops->min_tso_segs(sk) :
-+- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
-+-
-+- tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
-++ tso_segs = ca_ops->tso_segs ?
-++ ca_ops->tso_segs(sk, mss_now) :
-++ tcp_tso_autosize(sk, mss_now,
-++ sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
-+ return min_t(u32, tso_segs, sk->sk_gso_max_segs);
-+ }
-+
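
The pacing-based budget in bbr_tso_segs_generic() above reduces to simple arithmetic. A standalone worked example with illustrative values, ignoring the gso_max_size clamp (which does not bind here):

#include <assert.h>

int main(void)
{
	unsigned long pacing_rate = 12000000;	/* bytes/sec, ~96 Mbit/s */
	unsigned int pacing_shift = 10;		/* budget ~1/1024 sec of data */
	unsigned int mss = 1448;

	unsigned long bytes = pacing_rate >> pacing_shift;	/* 11718 */
	unsigned int segs = bytes / mss;			/* 8 */

	assert(segs == 8);
	/* A CUBIC flow with a huge cwnd would budget far more per skb,
	 * which is why BBR supplies its own tso_segs() hook. */
	return 0;
}
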
-diff --git a/target/linux/generic/hack-6.6/601-11-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-fast_ack_mode-1-skip-rwin-check-in-tcp_f.patch b/target/linux/generic/hack-6.6/601-11-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-fast_ack_mode-1-skip-rwin-check-in-tcp_f.patch
-new file mode 100644
-index 00000000000000..9f98ca87a03acf
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-11-bbr-v3-upstream-prep-2024-02-19-01-net-tcp-add-fast_ack_mode-1-skip-rwin-check-in-tcp_f.patch
-@@ -0,0 +1,71 @@
-+From 9a61c63fcbda3770a3d582069b3f20d5d02000fa Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Sat, 16 Nov 2019 13:16:25 -0500
-+Subject: [PATCH 11/19] net-tcp: add fast_ack_mode=1: skip rwin check in
-+ tcp_fast_ack_mode__tcp_ack_snd_check()
-+
-+Add logic for an experimental TCP connection behavior, enabled with
-+tp->fast_ack_mode = 1, which disables checking the receive window
-+before sending an ack in __tcp_ack_snd_check(). If this behavior is
-+enabled, the data receiver sends an ACK if the amount of data is >
-+RCV.MSS.
-+
-+Change-Id: Iaa0a0fd7108221f883137a79d5bfa724f1b096d4
-+---
-+ include/linux/tcp.h | 3 ++-
-+ net/ipv4/tcp.c | 1 +
-+ net/ipv4/tcp_cong.c | 1 +
-+ net/ipv4/tcp_input.c | 5 +++--
-+ 4 files changed, 7 insertions(+), 3 deletions(-)
-+
-+--- a/include/linux/tcp.h
-++++ b/include/linux/tcp.h
-+@@ -257,7 +257,8 @@ struct tcp_sock {
-+ u8 compressed_ack;
-+ u8 dup_ack_counter:2,
-+ tlp_retrans:1, /* TLP is a retransmission */
-+- unused:5;
-++ fast_ack_mode:2, /* which fast ack mode ? */
-++ unused:3;
-+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
-+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
-+ u8 chrono_type:2, /* current chronograph type */
-+--- a/net/ipv4/tcp.c
-++++ b/net/ipv4/tcp.c
-+@@ -3099,6 +3099,7 @@ int tcp_disconnect(struct sock *sk, int
-+ tp->rx_opt.dsack = 0;
-+ tp->rx_opt.num_sacks = 0;
-+ tp->rcv_ooopack = 0;
-++ tp->fast_ack_mode = 0;
-+
-+
-+ /* Clean up fastopen related fields */
-+--- a/net/ipv4/tcp_cong.c
-++++ b/net/ipv4/tcp_cong.c
-+@@ -240,6 +240,7 @@ void tcp_init_congestion_control(struct
-+ struct inet_connection_sock *icsk = inet_csk(sk);
-+
-+ tcp_sk(sk)->prior_ssthresh = 0;
-++ tcp_sk(sk)->fast_ack_mode = 0;
-+ if (icsk->icsk_ca_ops->init)
-+ icsk->icsk_ca_ops->init(sk);
-+ if (tcp_ca_needs_ecn(sk))
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -5662,13 +5662,14 @@ static void __tcp_ack_snd_check(struct s
-+
-+ /* More than one full frame received... */
-+ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
-++ (tp->fast_ack_mode == 1 ||
-+ /* ... and right edge of window advances far enough.
-+ * (tcp_recvmsg() will send ACK otherwise).
-+ * If application uses SO_RCVLOWAT, we want send ack now if
-+ * we have not received enough bytes to satisfy the condition.
-+ */
-+- (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
-+- __tcp_select_window(sk) >= tp->rcv_wnd)) ||
-++ (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
-++ __tcp_select_window(sk) >= tp->rcv_wnd))) ||
-+ /* We ACK each frame or... */
-+ tcp_in_quickack_mode(sk) ||
-+ /* Protocol state mandates a one-time immediate ACK */
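
To make the reshaped predicate easier to follow outside the diff context, here is a minimal user-space sketch of the condition this patch creates in __tcp_ack_snd_check(); should_ack_now() and rwin_ok are illustrative names, not kernel symbols, and the two receive-window checks are collapsed into a single flag.

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the patched predicate: fast_ack_mode == 1 short-circuits
 * the receive-window checks, so more than one full frame (rcv_mss) of
 * received-but-unacked data is by itself enough to trigger an ACK.
 */
static bool should_ack_now(uint32_t rcv_nxt, uint32_t rcv_wup,
			   uint32_t rcv_mss, unsigned int fast_ack_mode,
			   bool rwin_ok)
{
	return (rcv_nxt - rcv_wup) > rcv_mss &&
	       (fast_ack_mode == 1 || rwin_ok);
}
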
-diff --git a/target/linux/generic/hack-6.6/601-12-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-record-app-limited-status-of-TLP-repa.patch b/target/linux/generic/hack-6.6/601-12-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-record-app-limited-status-of-TLP-repa.patch
-new file mode 100644
-index 00000000000000..16550efca59cf7
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-12-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-record-app-limited-status-of-TLP-repa.patch
-@@ -0,0 +1,44 @@
-+From cec5a43a23d36dd7cfcaddb9aa40a90989017331 Mon Sep 17 00:00:00 2001
-+From: Jianfeng Wang <[email protected]>
-+Date: Fri, 19 Jun 2020 17:33:45 +0000
-+Subject: [PATCH 12/19] net-tcp_bbr: v2: record app-limited status of
-+ TLP-repaired flight
-+
-+When sending a TLP retransmit, record whether the outstanding flight
-+of data is application limited. This is important for congestion
-+control modules that want to respond to losses repaired by TLP
-+retransmits. This matters because the following scenarios convey
-+very different information:
-+ (1) a packet loss with a small number of packets in flight;
-+ (2) a packet loss with the maximum amount of data in flight allowed
-+ by the CC module;
-+
-+Effort: net-tcp_bbr
-+Change-Id: Ic8ae567caa4e4bfd5fd82c3d4be12a5d9171655e
-+---
-+ include/linux/tcp.h | 3 ++-
-+ net/ipv4/tcp_output.c | 1 +
-+ 2 files changed, 3 insertions(+), 1 deletion(-)
-+
-+--- a/include/linux/tcp.h
-++++ b/include/linux/tcp.h
-+@@ -258,7 +258,8 @@ struct tcp_sock {
-+ u8 dup_ack_counter:2,
-+ tlp_retrans:1, /* TLP is a retransmission */
-+ fast_ack_mode:2, /* which fast ack mode ? */
-+- unused:3;
-++ tlp_orig_data_app_limited:1, /* app-limited before TLP rtx? */
-++ unused:2;
-+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
-+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
-+ u8 chrono_type:2, /* current chronograph type */
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -2938,6 +2938,7 @@ void tcp_send_loss_probe(struct sock *sk
-+ if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
-+ goto rearm_timer;
-+
-++ tp->tlp_orig_data_app_limited = TCP_SKB_CB(skb)->tx.is_app_limited;
-+ if (__tcp_retransmit_skb(sk, skb, 1))
-+ goto rearm_timer;
-+
-diff --git a/target/linux/generic/hack-6.6/601-13-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-inform-CC-module-of-losses-repaired-b.patch b/target/linux/generic/hack-6.6/601-13-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-inform-CC-module-of-losses-repaired-b.patch
-new file mode 100644
-index 00000000000000..cca0cb6d11341f
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-13-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-inform-CC-module-of-losses-repaired-b.patch
-@@ -0,0 +1,44 @@
-+From 087d67017b72863366d5352616bb2a363e970117 Mon Sep 17 00:00:00 2001
-+From: Jianfeng Wang <[email protected]>
-+Date: Tue, 16 Jun 2020 17:41:19 +0000
-+Subject: [PATCH 13/19] net-tcp_bbr: v2: inform CC module of losses repaired by
-+ TLP probe
-+
-+Before this commit, when a packet loss creates a sequence
-+hole that is filled by a TLP loss probe, tcp_process_tlp_ack()
-+only informs the congestion control (CC) module via a back-to-back entry
-+and exit of CWR. But some congestion control modules (e.g. BBR) do not
-+respond to CWR events.
-+
-+This commit adds a new CA event with which the core TCP stack notifies
-+the CC module when a loss is repaired by a TLP. This will allow CC
-+modules that do not use the CWR mechanism to have a custom handler for
-+such TLP recoveries.
-+
-+Effort: net-tcp_bbr
-+Change-Id: Ieba72332b401b329bff5a641d2b2043a3fb8f632
-+---
-+ include/net/tcp.h | 1 +
-+ net/ipv4/tcp_input.c | 1 +
-+ 2 files changed, 2 insertions(+)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1013,6 +1013,7 @@ enum tcp_ca_event {
-+ CA_EVENT_LOSS, /* loss timeout */
-+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
-+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
-++ CA_EVENT_TLP_RECOVERY, /* a lost segment was repaired by TLP probe */
-+ };
-+
-+ /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -3813,6 +3813,7 @@ static void tcp_process_tlp_ack(struct s
-+ /* ACK advances: there was a loss, so reduce cwnd. Reset
-+ * tlp_high_seq in tcp_init_cwnd_reduction()
-+ */
-++ tcp_ca_event(sk, CA_EVENT_TLP_RECOVERY);
-+ tcp_init_cwnd_reduction(sk);
-+ tcp_set_ca_state(sk, TCP_CA_CWR);
-+ tcp_end_cwnd_reduction(sk);
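
Taken together with the previous patch, a CC module that ignores CWR can now react to TLP-repaired holes directly. A hedged sketch of such a handler follows; my_ca_cwnd_event() and my_ca_note_loss() are hypothetical names, not part of this series.

#include <net/tcp.h>

static void my_ca_note_loss(struct sock *sk);	/* hypothetical helper */

/* Hypothetical .cwnd_event hook: consume the new CA event, using the
 * app-limited flag recorded by the previous patch to judge how much
 * the repaired loss says about path capacity.
 */
static void my_ca_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event != CA_EVENT_TLP_RECOVERY)
		return;
	if (!tcp_sk(sk)->tlp_orig_data_app_limited)
		my_ca_note_loss(sk);	/* flight was filling the pipe */
}
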
-diff --git a/target/linux/generic/hack-6.6/601-14-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-is_acking_tlp_retrans_seq-i.patch b/target/linux/generic/hack-6.6/601-14-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-is_acking_tlp_retrans_seq-i.patch
-new file mode 100644
-index 00000000000000..66375196cdee6a
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-14-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v2-introduce-is_acking_tlp_retrans_seq-i.patch
-@@ -0,0 +1,72 @@
-+From 0388564bd3250ae87983d2b40440f4b5fdee7853 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Mon, 21 Sep 2020 14:46:26 -0400
-+Subject: [PATCH 14/19] net-tcp_bbr: v2: introduce is_acking_tlp_retrans_seq
-+ into rate_sample
-+
-+Introduce is_acking_tlp_retrans_seq into rate_sample. This bool
-+exports to the CC module whether the current ACK
-+matched a TLP retransmit.
-+
-+Note that when this bool is true, we cannot yet tell (in general) whether
-+this ACK is for the original or the TLP retransmit.
-+
-+Effort: net-tcp_bbr
-+Change-Id: I2e6494332167e75efcbdc99bd5c119034e9c39b4
-+---
-+ include/net/tcp.h | 1 +
-+ net/ipv4/tcp_input.c | 12 +++++++++---
-+ 2 files changed, 10 insertions(+), 3 deletions(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -1077,6 +1077,7 @@ struct rate_sample {
-+ u32 last_end_seq; /* end_seq of most recently ACKed packet */
-+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
-+ bool is_retrans; /* is sample from retransmission? */
-++ bool is_acking_tlp_retrans_seq; /* ACKed a TLP retransmit sequence? */
-+ bool is_ack_delayed; /* is this (likely) a delayed ACK? */
-+ bool is_ece; /* did this ACK have ECN marked? */
-+ };
-+--- a/net/ipv4/tcp_input.c
-++++ b/net/ipv4/tcp_input.c
-+@@ -3796,7 +3796,8 @@ static void tcp_replace_ts_recent(struct
-+ /* This routine deals with acks during a TLP episode and ends an episode by
-+ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
-+ */
-+-static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
-++static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag,
-++ struct rate_sample *rs)
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+
-+@@ -3824,6 +3825,11 @@ static void tcp_process_tlp_ack(struct s
-+ FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
-+ /* Pure dupack: original and TLP probe arrived; no loss */
-+ tp->tlp_high_seq = 0;
-++ } else {
-++ /* This ACK matches a TLP retransmit. We cannot yet tell if
-++ * this ACK is for the original or the TLP retransmit.
-++ */
-++ rs->is_acking_tlp_retrans_seq = 1;
-+ }
-+ }
-+
-+@@ -4007,7 +4013,7 @@ static int tcp_ack(struct sock *sk, cons
-+ tcp_rack_update_reo_wnd(sk, &rs);
-+
-+ if (tp->tlp_high_seq)
-+- tcp_process_tlp_ack(sk, ack, flag);
-++ tcp_process_tlp_ack(sk, ack, flag, &rs);
-+
-+ if (tcp_ack_is_dubious(sk, flag)) {
-+ if (!(flag & (FLAG_SND_UNA_ADVANCED |
-+@@ -4051,7 +4057,7 @@ no_queue:
-+ tcp_ack_probe(sk);
-+
-+ if (tp->tlp_high_seq)
-+- tcp_process_tlp_ack(sk, ack, flag);
-++ tcp_process_tlp_ack(sk, ack, flag, &rs);
-+ return 1;
-+
-+ old_ack:
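
A sketch of how a module might consume the new flag on each ACK; my_ca_on_ack() is an illustrative function, not the exact ca_ops hook signature.

#include <net/tcp.h>

/* Since rs->is_acking_tlp_retrans_seq only says the ACK covers a TLP
 * retransmit's sequence range (original vs. probe is unknowable here),
 * a module should treat it as a hint rather than a definite signal.
 */
static void my_ca_on_ack(struct sock *sk, const struct rate_sample *rs)
{
	if (rs->is_acking_tlp_retrans_seq) {
		/* e.g. defer shrinking the module's inflight bound until
		 * the next unambiguous round of feedback
		 */
	}
}
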
-diff --git a/target/linux/generic/hack-6.6/601-15-bbr-v3-upstream-prep-2024-02-19-01-tcp-introduce-per-route-feature-RTAX_FEATURE_ECN_LOW.patch b/target/linux/generic/hack-6.6/601-15-bbr-v3-upstream-prep-2024-02-19-01-tcp-introduce-per-route-feature-RTAX_FEATURE_ECN_LOW.patch
-new file mode 100644
-index 00000000000000..ec870077a31181
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-15-bbr-v3-upstream-prep-2024-02-19-01-tcp-introduce-per-route-feature-RTAX_FEATURE_ECN_LOW.patch
-@@ -0,0 +1,108 @@
-+From a3a96f4abe4e8ae8b243ab5e490334f19fe64c7a Mon Sep 17 00:00:00 2001
-+From: David Morley <[email protected]>
-+Date: Fri, 14 Jul 2023 11:07:56 -0400
-+Subject: [PATCH 15/19] tcp: introduce per-route feature RTAX_FEATURE_ECN_LOW
-+
-+Define and implement a new per-route feature, RTAX_FEATURE_ECN_LOW.
-+
-+This feature indicates that the given destination network is a
-+low-latency ECN environment, meaning both that ECN CE marks are
-+applied by the network using a low-latency marking threshold and also
-+that TCP endpoints provide precise per-data-segment ECN feedback in
-+ACKs (where the ACK ECE flag echoes the received CE status of all
-+newly-acknowledged data segments). This feature indication can be used
-+by congestion control algorithms to decide how to interpret ECN
-+signals over the given destination network.
-+
-+This feature is appropriate for datacenter-style ECN marking, such as
-+the ECN marking approach expected by DCTCP or BBR congestion control
-+modules.
-+
-+Signed-off-by: David Morley <[email protected]>
-+Signed-off-by: Neal Cardwell <[email protected]>
-+Signed-off-by: Yuchung Cheng <[email protected]>
-+Tested-by: David Morley <[email protected]>
-+Change-Id: I6bc06e9c6cb426fbae7243fc71c9a8c18175f5d3
-+---
-+ include/net/tcp.h | 10 ++++++++++
-+ include/uapi/linux/rtnetlink.h | 4 +++-
-+ net/ipv4/tcp_minisocks.c | 2 ++
-+ net/ipv4/tcp_output.c | 6 ++++--
-+ 4 files changed, 19 insertions(+), 3 deletions(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -372,6 +372,7 @@ static inline void tcp_dec_quickack_mode
-+ #define TCP_ECN_QUEUE_CWR 2
-+ #define TCP_ECN_DEMAND_CWR 4
-+ #define TCP_ECN_SEEN 8
-++#define TCP_ECN_LOW 16
-+
-+ enum tcp_tw_status {
-+ TCP_TW_SUCCESS = 0,
-+@@ -724,6 +725,15 @@ static inline void tcp_fast_path_check(s
-+ tcp_fast_path_on(tp);
-+ }
-+
-++static inline void tcp_set_ecn_low_from_dst(struct sock *sk,
-++ const struct dst_entry *dst)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++
-++ if (dst_feature(dst, RTAX_FEATURE_ECN_LOW))
-++ tp->ecn_flags |= TCP_ECN_LOW;
-++}
-++
-+ u32 tcp_delack_max(const struct sock *sk);
-+
-+ /* Compute the actual rto_min value */
-+--- a/include/uapi/linux/rtnetlink.h
-++++ b/include/uapi/linux/rtnetlink.h
-+@@ -509,9 +509,11 @@ enum {
-+ #define RTAX_FEATURE_SACK (1 << 1)
-+ #define RTAX_FEATURE_TIMESTAMP (1 << 2)
-+ #define RTAX_FEATURE_ALLFRAG (1 << 3)
-++#define RTAX_FEATURE_ECN_LOW (1 << 4)
-+
-+ #define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
-+- RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
-++ RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG \
-++ | RTAX_FEATURE_ECN_LOW)
-+
-+ struct rta_session {
-+ __u8 proto;
-+--- a/net/ipv4/tcp_minisocks.c
-++++ b/net/ipv4/tcp_minisocks.c
-+@@ -434,6 +434,8 @@ void tcp_ca_openreq_child(struct sock *s
-+ u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
-+ bool ca_got_dst = false;
-+
-++ tcp_set_ecn_low_from_dst(sk, dst);
-++
-+ if (ca_key != TCP_CA_UNSPEC) {
-+ const struct tcp_congestion_ops *ca;
-+
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -334,10 +334,9 @@ static void tcp_ecn_send_syn(struct sock
-+ bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
-+ bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
-+ tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
-++ const struct dst_entry *dst = __sk_dst_get(sk);
-+
-+ if (!use_ecn) {
-+- const struct dst_entry *dst = __sk_dst_get(sk);
-+-
-+ if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
-+ use_ecn = true;
-+ }
-+@@ -349,6 +348,9 @@ static void tcp_ecn_send_syn(struct sock
-+ tp->ecn_flags = TCP_ECN_OK;
-+ if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
-+ INET_ECN_xmit(sk);
-++
-++ if (dst)
-++ tcp_set_ecn_low_from_dst(sk, dst);
-+ }
-+ }
-+
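
The BBRv3 module patch later in this series gates all of its ECN logic on exactly this bit; condensed from its bbr_can_use_ecn() (see the tcp_bbr.c hunks below):

#include <net/tcp.h>

/* ECN signals are trusted only if the handshake negotiated ECN
 * (TCP_ECN_OK) and the route declared a low-latency ECN environment
 * (TCP_ECN_LOW, set from RTAX_FEATURE_ECN_LOW by this patch).
 */
static bool ca_can_use_ecn(const struct sock *sk)
{
	return (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) &&
	       (tcp_sk(sk)->ecn_flags & TCP_ECN_LOW);
}
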
-diff --git a/target/linux/generic/hack-6.6/601-16-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-update-TCP-bbr-congestion-control-mod.patch b/target/linux/generic/hack-6.6/601-16-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-update-TCP-bbr-congestion-control-mod.patch
-new file mode 100644
-index 00000000000000..1a206c3e2f63e4
---- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-16-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-update-TCP-bbr-congestion-control-mod.patch
-@@ -0,0 +1,2822 @@
-+From ba71521b6189a839b1152c9e015718e3e2494614 Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Tue, 11 Jun 2019 12:54:22 -0400
-+Subject: [PATCH 16/19] net-tcp_bbr: v3: update TCP "bbr" congestion control
-+ module to BBRv3
-+
-+BBR v3 is an enhancement to the BBR v1 algorithm. It's designed to aim for lower
-+queues, lower loss, and better Reno/CUBIC coexistence than BBR v1.
-+
-+BBR v3 maintains the core of BBR v1: an explicit model of the network
-+path that is two-dimensional, adapting to estimate the (a) maximum
-+available bandwidth and (b) maximum safe volume of data a flow can
-+keep in-flight in the network. It maintains the estimated BDP as a
-+core guide for estimating an appropriate level of in-flight data.
-+
-+BBR v3 makes several key enhancements:
-+
-+o Its bandwidth-probing time scale is adapted, within bounds, to allow improved
-+coexistence with Reno and CUBIC. The bandwidth-probing time scale is (a)
-+extended dynamically based on estimated BDP to improve coexistence with
-+Reno/CUBIC; (b) bounded by an interactive wall-clock time-scale to be more
-+scalable and responsive than Reno and CUBIC.
-+
-+o Rather than being largely agnostic to loss and ECN marks, it explicitly uses
-+loss and (DCTCP-style) ECN signals to maintain its model.
-+
-+o It aims for lower losses than v1 by adjusting its model to attempt to stay
-+within loss rate and ECN mark rate bounds (loss_thresh and ecn_thresh,
-+respectively).
-+
-+o It adapts to loss/ECN signals even when the application is running out of
-+data ("application-limited"), in case the "application-limited" flow is also
-+"network-limited" (the bw and/or inflight available to this flow is lower than
-+previously estimated when the flow ran out of data).
-+
-+o It has a three-part model: the model explicitly tracks three operating points,
-+where an operating point is a tuple: (bandwidth, inflight). The three operating
-+points are:
-+
-+ o latest: the latest measurement from the current round trip
-+ o upper bound: robust, optimistic, long-term upper bound
-+ o lower bound: robust, conservative, short-term lower bound
-+
-+These are stored in the following state variables:
-+
-+ o latest: bw_latest, inflight_latest
-+ o lo: bw_lo, inflight_lo
-+ o hi: bw_hi[2], inflight_hi
-+
-+To gain intuition about the meaning of the three operating points, it
-+may help to consider the analogs in CUBIC, which has a somewhat
-+analogous three-part model used by its probing state machine:
-+
-+ BBR param CUBIC param
-+ ----------- -------------
-+ latest ~ cwnd
-+ lo ~ ssthresh
-+ hi ~ last_max_cwnd
-+
-+The analogy is only a loose one, though, since the BBR operating
-+points are calculated differently, and are 2-dimensional (bw,inflight)
-+rather than CUBIC's one-dimensional notion of operating point
-+(inflight).
-+
-+o It uses the three-part model to adapt the magnitude of its bandwidth
-+probing to match the estimated space available in the buffer, rather than (as
-+in BBR v1) assuming that it was always acceptable to place 0.25*BDP in
-+the bottleneck buffer when probing (commodity datacenter switches
-+commonly do not have that much buffer for WAN flows). When BBR v3
-+estimates it hit a buffer limit during probing, its bandwidth probing
-+then starts gently in case little space is still available in the
-+buffer, and then accelerates, slowly at first and then rapidly if it
-+can grow inflight without seeing congestion signals. In such cases,
-+probing is bounded by inflight_hi + inflight_probe, where
-+inflight_probe grows as: [0, 1, 2, 4, 8, 16,...]. This allows BBR to
-+keep losses low and bounded if a bottleneck remains congested, while
-+rapidly/scalably utilizing free bandwidth when it becomes available.
-+
-+o It has a slightly revised state machine, to achieve the goals above.
-+ BBR_BW_PROBE_UP: pushes up inflight to probe for bw/vol
-+ BBR_BW_PROBE_DOWN: drain excess inflight from the queue
-+ BBR_BW_PROBE_CRUISE: use pipe, w/ headroom in queue/pipe
-+ BBR_BW_PROBE_REFILL: try to refill the pipe again to 100%, leaving queue empty
-+
-+o The estimated BDP: BBR v3 continues to maintain an estimate of the
-+path's two-way propagation delay, by tracking a windowed min_rtt, and
-+coordinating (on an as-needed basis) to try to expose the two-way
-+propagation delay by draining the bottleneck queue.
-+
-+BBR v3 continues to use its min_rtt and (currently-applicable) bandwidth
-+estimate to estimate the current bandwidth-delay product. The estimated BDP
-+still provides one important guideline for bounding inflight data. However,
-+because any min-filtered RTT and max-filtered bw inherently tend to both
-+overestimate, the estimated BDP is often too high; in this case loss or ECN
-+marks can ensue, in which case BBR v3 adjusts inflight_hi and inflight_lo to
-+adapt its sending rate and inflight down to match the available capacity of the
-+path.
-+
-+o Space: Note that ICSK_CA_PRIV_SIZE increased. This is because BBR v3
-+requires more space. Note that much of the space is due to support for
-+per-socket parameterization and debugging in this research release.
-+With that state removed, the full "struct bbr" is 140
-+bytes, or 144 with padding. This is an increase of 40 bytes over the
-+existing ca_priv space.
-+
-+o Code: BBR v3 reuses many pieces from BBR v1. But it omits the following
-+ significant pieces:
-+
-+ o "packet conservation" (bbr_set_cwnd_to_recover_or_restore(),
-+ bbr_can_grow_inflight())
-+ o long-term bandwidth estimator ("policer mode")
-+
-+ The code layout tries to keep BBR v3 code near the bottom of the
-+ file, so that v1-applicable code in the top does not accidentally
-+ refer to v3 code.
-+
-+o Docs:
-+ See the following docs for more details and diagrams describing the BBR v3
-+ algorithm:
-+ https://datatracker.ietf.org/meeting/104/materials/slides-104-iccrg-an-update-on-bbr-00
-+ https://datatracker.ietf.org/meeting/102/materials/slides-102-iccrg-an-update-on-bbr-work-at-google-00
-+
-+o Internal notes:
-+ For this upstream rebase, Neal started from:
-+ git show fed518041ac6:net/ipv4/tcp_bbr.c > net/ipv4/tcp_bbr.c
-+ then removed dev instrumentation (dynamic get/set for parameters)
-+ and code that was only used by BBRv1
-+
-+Effort: net-tcp_bbr
-+Origin-9xx-SHA1: 2c84098e60bed6d67dde23cd7538c51dee273102
-+Change-Id: I125cf26ba2a7a686f2fa5e87f4c2afceb65f7a05
-+---
-+ include/net/inet_connection_sock.h | 4 +-
-+ include/net/tcp.h | 2 +-
-+ include/uapi/linux/inet_diag.h | 23 +
-+ net/ipv4/Kconfig | 21 +-
-+ net/ipv4/tcp_bbr.c | 2217 +++++++++++++++++++++-------
-+ 5 files changed, 1742 insertions(+), 525 deletions(-)
-+
-+--- a/include/net/inet_connection_sock.h
-++++ b/include/net/inet_connection_sock.h
-+@@ -135,8 +135,8 @@ struct inet_connection_sock {
-+ u32 icsk_probes_tstamp;
-+ u32 icsk_user_timeout;
-+
-+- u64 icsk_ca_priv[104 / sizeof(u64)];
-+-#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv)
-++#define ICSK_CA_PRIV_SIZE (144)
-++ u64 icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u64)];
-+ };
-+
-+ #define ICSK_TIME_RETRANS 1 /* Retransmit timer */
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -2272,7 +2272,7 @@ struct tcp_plb_state {
-+ u8 consec_cong_rounds:5, /* consecutive congested rounds */
-+ unused:3;
-+ u32 pause_until; /* jiffies32 when PLB can resume rerouting */
-+-};
-++} __attribute__ ((__packed__));
-+
-+ static inline void tcp_plb_init(const struct sock *sk,
-+ struct tcp_plb_state *plb)
-+--- a/include/uapi/linux/inet_diag.h
-++++ b/include/uapi/linux/inet_diag.h
-+@@ -229,6 +229,29 @@ struct tcp_bbr_info {
-+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
-+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
-+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
-++ __u32 bbr_bw_hi_lsb; /* lower 32 bits of bw_hi */
-++ __u32 bbr_bw_hi_msb; /* upper 32 bits of bw_hi */
-++ __u32 bbr_bw_lo_lsb; /* lower 32 bits of bw_lo */
-++ __u32 bbr_bw_lo_msb; /* upper 32 bits of bw_lo */
-++ __u8 bbr_mode; /* current bbr_mode in state machine */
-++ __u8 bbr_phase; /* current state machine phase */
-++ __u8 unused1; /* alignment padding; not used yet */
-++ __u8 bbr_version; /* BBR algorithm version */
-++ __u32 bbr_inflight_lo; /* lower short-term data volume bound */
-++ __u32 bbr_inflight_hi; /* higher long-term data volume bound */
-++ __u32 bbr_extra_acked; /* max excess packets ACKed in epoch */
-++};
-++
-++/* TCP BBR congestion control bbr_phase as reported in netlink/ss stats. */
-++enum tcp_bbr_phase {
-++ BBR_PHASE_INVALID = 0,
-++ BBR_PHASE_STARTUP = 1,
-++ BBR_PHASE_DRAIN = 2,
-++ BBR_PHASE_PROBE_RTT = 3,
-++ BBR_PHASE_PROBE_BW_UP = 4,
-++ BBR_PHASE_PROBE_BW_DOWN = 5,
-++ BBR_PHASE_PROBE_BW_CRUISE = 6,
-++ BBR_PHASE_PROBE_BW_REFILL = 7,
-+ };
-+
-+ union tcp_cc_info {
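
Because the diag attributes here are 32-bit, the two 64-bit model bandwidths are split into lsb/msb halves. A sketch of how a diag consumer (such as the patched ss from this series) could reassemble them, assuming headers with this patch applied; print_bbr_bounds() is an illustrative name, not part of iproute2, and the unit is whatever the module's get_info exports (likely bytes per second, via the bbr_bw_bytes_per_sec() helper added further down).

#include <stdint.h>
#include <stdio.h>
#include <linux/inet_diag.h>

/* Illustrative diag-consumer helper: rebuild the 64-bit model bounds
 * from the split fields added above.
 */
static void print_bbr_bounds(const struct tcp_bbr_info *info)
{
	uint64_t bw_hi = ((uint64_t)info->bbr_bw_hi_msb << 32) |
			 info->bbr_bw_hi_lsb;
	uint64_t bw_lo = ((uint64_t)info->bbr_bw_lo_msb << 32) |
			 info->bbr_bw_lo_lsb;

	printf("bbr: bw_hi=%llu bw_lo=%llu (phase %u)\n",
	       (unsigned long long)bw_hi, (unsigned long long)bw_lo,
	       info->bbr_phase);
}
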
-+--- a/net/ipv4/Kconfig
-++++ b/net/ipv4/Kconfig
-+@@ -668,15 +668,18 @@ config TCP_CONG_BBR
-+ default n
-+ help
-+
-+- BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
-+- maximize network utilization and minimize queues. It builds an explicit
-+- model of the bottleneck delivery rate and path round-trip propagation
-+- delay. It tolerates packet loss and delay unrelated to congestion. It
-+- can operate over LAN, WAN, cellular, wifi, or cable modem links. It can
-+- coexist with flows that use loss-based congestion control, and can
-+- operate with shallow buffers, deep buffers, bufferbloat, policers, or
-+- AQM schemes that do not provide a delay signal. It requires the fq
-+- ("Fair Queue") pacing packet scheduler.
-++ BBR (Bottleneck Bandwidth and RTT) TCP congestion control is a
-++ model-based congestion control algorithm that aims to maximize
-++ network utilization, keep queues and retransmit rates low, and to be
-++ able to coexist with Reno/CUBIC in common scenarios. It builds an
-++ explicit model of the network path. It tolerates a targeted degree
-++ of random packet loss and delay. It can operate over LAN, WAN,
-++ cellular, wifi, or cable modem links, and can use shallow-threshold
-++ ECN signals. It can coexist to some degree with flows that use
-++ loss-based congestion control, and can operate with shallow buffers,
-++ deep buffers, bufferbloat, policers, or AQM schemes that do not
-++ provide a delay signal. It requires pacing, using either TCP internal
-++ pacing or the fq ("Fair Queue") pacing packet scheduler.
-+
-+ choice
-+ prompt "Default TCP congestion control"
-+--- a/net/ipv4/tcp_bbr.c
-++++ b/net/ipv4/tcp_bbr.c
-+@@ -1,18 +1,19 @@
-+-/* Bottleneck Bandwidth and RTT (BBR) congestion control
-++/* BBR (Bottleneck Bandwidth and RTT) congestion control
-+ *
-+- * BBR congestion control computes the sending rate based on the delivery
-+- * rate (throughput) estimated from ACKs. In a nutshell:
-++ * BBR is a model-based congestion control algorithm that aims for low queues,
-++ * low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model of the
-++ * network path, it uses measurements of bandwidth and RTT, as well as (if they
-++ * occur) packet loss and/or shallow-threshold ECN signals. Note that although
-++ * it can use ECN or loss signals explicitly, it does not require either; it
-++ * can bound its in-flight data based on its estimate of the BDP.
-+ *
-+- * On each ACK, update our model of the network path:
-+- * bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
-+- * min_rtt = windowed_min(rtt, 10 seconds)
-+- * pacing_rate = pacing_gain * bottleneck_bandwidth
-+- * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
-+- *
-+- * The core algorithm does not react directly to packet losses or delays,
-+- * although BBR may adjust the size of next send per ACK when loss is
-+- * observed, or adjust the sending rate if it estimates there is a
-+- * traffic policer, in order to keep the drop rate reasonable.
-++ * The model has both higher and lower bounds for the operating range:
-++ * lo: bw_lo, inflight_lo: conservative short-term lower bound
-++ * hi: bw_hi, inflight_hi: robust long-term upper bound
-++ * The bandwidth-probing time scale is (a) extended dynamically based on
-++ * estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
-++ * an interactive wall-clock time-scale to be more scalable and responsive
-++ * than Reno and CUBIC.
-+ *
-+ * Here is a state transition diagram for BBR:
-+ *
-+@@ -65,6 +66,13 @@
-+ #include <linux/random.h>
-+ #include <linux/win_minmax.h>
-+
-++#include <trace/events/tcp.h>
-++#include "tcp_dctcp.h"
-++
-++#define BBR_VERSION 3
-++
-++#define bbr_param(sk,name) (bbr_ ## name)
-++
-+ /* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
-+ * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
-+ * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
-+@@ -85,36 +93,41 @@ enum bbr_mode {
-+ BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */
-+ };
-+
-++/* How does the incoming ACK stream relate to our bandwidth probing? */
-++enum bbr_ack_phase {
-++ BBR_ACKS_INIT, /* not probing; not getting probe feedback */
-++ BBR_ACKS_REFILLING, /* sending at est. bw to fill pipe */
-++ BBR_ACKS_PROBE_STARTING, /* inflight rising to probe bw */
-++ BBR_ACKS_PROBE_FEEDBACK, /* getting feedback from bw probing */
-++ BBR_ACKS_PROBE_STOPPING, /* stopped probing; still getting feedback */
-++};
-++
-+ /* BBR congestion control block */
-+ struct bbr {
-+ u32 min_rtt_us; /* min RTT in min_rtt_win_sec window */
-+ u32 min_rtt_stamp; /* timestamp of min_rtt_us */
-+ u32 probe_rtt_done_stamp; /* end time for BBR_PROBE_RTT mode */
-+- struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
-+- u32 rtt_cnt; /* count of packet-timed rounds elapsed */
-++ u32 probe_rtt_min_us; /* min RTT in probe_rtt_win_ms win */
-++ u32 probe_rtt_min_stamp; /* timestamp of probe_rtt_min_us*/
-+ u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
-+ u64 cycle_mstamp; /* time of this cycle phase start */
-+- u32 mode:3, /* current bbr_mode in state machine */
-++ u32 mode:2, /* current bbr_mode in state machine */
-+ prev_ca_state:3, /* CA state on previous ACK */
-+- packet_conservation:1, /* use packet conservation? */
-+ round_start:1, /* start of packet-timed tx->ack round? */
-++ ce_state:1, /* If most recent data has CE bit set */
-++ bw_probe_up_rounds:5, /* cwnd-limited rounds in PROBE_UP */
-++ try_fast_path:1, /* can we take fast path? */
-+ idle_restart:1, /* restarting after idle? */
-+ probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
-+- unused:13,
-+- lt_is_sampling:1, /* taking long-term ("LT") samples now? */
-+- lt_rtt_cnt:7, /* round trips in long-term interval */
-+- lt_use_bw:1; /* use lt_bw as our bw estimate? */
-+- u32 lt_bw; /* LT est delivery rate in pkts/uS << 24 */
-+- u32 lt_last_delivered; /* LT intvl start: tp->delivered */
-+- u32 lt_last_stamp; /* LT intvl start: tp->delivered_mstamp */
-+- u32 lt_last_lost; /* LT intvl start: tp->lost */
-++ init_cwnd:7, /* initial cwnd */
-++ unused_1:10;
-+ u32 pacing_gain:10, /* current gain for setting pacing rate */
-+ cwnd_gain:10, /* current gain for setting cwnd */
-+ full_bw_reached:1, /* reached full bw in Startup? */
-+ full_bw_cnt:2, /* number of rounds without large bw gains */
-+- cycle_idx:3, /* current index in pacing_gain cycle array */
-++ cycle_idx:2, /* current index in pacing_gain cycle array */
-+ has_seen_rtt:1, /* have we seen an RTT sample yet? */
-+- unused_b:5;
-++ unused_2:6;
-+ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
-+ u32 full_bw; /* recent bw, to estimate if pipe is full */
-+
-+@@ -124,19 +137,67 @@ struct bbr {
-+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
-+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
-+ extra_acked_win_idx:1, /* current index in extra_acked array */
-+- unused_c:6;
-++ /* BBR v3 state: */
-++ full_bw_now:1, /* recently reached full bw plateau? */
-++ startup_ecn_rounds:2, /* consecutive hi ECN STARTUP rounds */
-++ loss_in_cycle:1, /* packet loss in this cycle? */
-++ ecn_in_cycle:1, /* ECN in this cycle? */
-++ unused_3:1;
-++ u32 loss_round_delivered; /* scb->tx.delivered ending loss round */
-++ u32 undo_bw_lo; /* bw_lo before latest losses */
-++ u32 undo_inflight_lo; /* inflight_lo before latest losses */
-++ u32 undo_inflight_hi; /* inflight_hi before latest losses */
-++ u32 bw_latest; /* max delivered bw in last round trip */
-++ u32 bw_lo; /* lower bound on sending bandwidth */
-++ u32 bw_hi[2]; /* max recent measured bw sample */
-++ u32 inflight_latest; /* max delivered data in last round trip */
-++ u32 inflight_lo; /* lower bound of inflight data range */
-++ u32 inflight_hi; /* upper bound of inflight data range */
-++ u32 bw_probe_up_cnt; /* packets delivered per inflight_hi incr */
-++ u32 bw_probe_up_acks; /* packets (S)ACKed since inflight_hi incr */
-++ u32 probe_wait_us; /* PROBE_DOWN until next clock-driven probe */
-++ u32 prior_rcv_nxt; /* tp->rcv_nxt when CE state last changed */
-++ u32 ecn_eligible:1, /* sender can use ECN (RTT, handshake)? */
-++ ecn_alpha:9, /* EWMA delivered_ce/delivered; 0..256 */
-++ bw_probe_samples:1, /* rate samples reflect bw probing? */
-++ prev_probe_too_high:1, /* did last PROBE_UP go too high? */
-++ stopped_risky_probe:1, /* last PROBE_UP stopped due to risk? */
-++ rounds_since_probe:8, /* packet-timed rounds since probed bw */
-++ loss_round_start:1, /* loss_round_delivered round trip? */
-++ loss_in_round:1, /* loss marked in this round trip? */
-++ ecn_in_round:1, /* ECN marked in this round trip? */
-++ ack_phase:3, /* bbr_ack_phase: meaning of ACKs */
-++ loss_events_in_round:4,/* losses in STARTUP round */
-++ initialized:1; /* has bbr_init() been called? */
-++ u32 alpha_last_delivered; /* tp->delivered at alpha update */
-++ u32 alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
-++
-++ u8 unused_4; /* to preserve alignment */
-++ struct tcp_plb_state plb;
-+ };
-+
-+-#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
-++struct bbr_context {
-++ u32 sample_bw;
-++};
-+
-+-/* Window length of bw filter (in rounds): */
-+-static const int bbr_bw_rtts = CYCLE_LEN + 2;
-+ /* Window length of min_rtt filter (in sec): */
-+ static const u32 bbr_min_rtt_win_sec = 10;
-+ /* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
-+ static const u32 bbr_probe_rtt_mode_ms = 200;
-+-/* Skip TSO below the following bandwidth (bits/sec): */
-+-static const int bbr_min_tso_rate = 1200000;
-++/* Window length of probe_rtt_min_us filter (in ms), and consequently the
-++ * typical interval between PROBE_RTT mode entries. The default is 5000ms.
-++ * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
-++ */
-++static const u32 bbr_probe_rtt_win_ms = 5000;
-++/* Proportion of cwnd to estimated BDP in PROBE_RTT, in units of BBR_UNIT: */
-++static const u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
-++
-++/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
-++ * in bigger TSO bursts. We cut the RTT-based allowance in half
-++ * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
-++ * is below 1500 bytes after 6 * ~500 usec = 3ms.
-++ */
-++static const u32 bbr_tso_rtt_shift = 9;
-+
-+ /* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
-+ * In order to help drive the network toward lower queues and low latency while
-+@@ -146,13 +207,15 @@ static const int bbr_min_tso_rate = 1200
-+ */
-+ static const int bbr_pacing_margin_percent = 1;
-+
-+-/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
-++/* We use a startup_pacing_gain of 4*ln(2) because it's the smallest value
-+ * that will allow a smoothly increasing pacing rate that will double each RTT
-+ * and send the same number of packets per RTT that an un-paced, slow-starting
-+ * Reno or CUBIC flow would:
-+ */
-+-static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
-+-/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
-++static const int bbr_startup_pacing_gain = BBR_UNIT * 277 / 100 + 1;
-++/* The gain for deriving startup cwnd: */
-++static const int bbr_startup_cwnd_gain = BBR_UNIT * 2;
-++/* The pacing gain in BBR_DRAIN is calculated to typically drain
-+ * the queue created in BBR_STARTUP in a single round:
-+ */
-+ static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
-+@@ -160,13 +223,17 @@ static const int bbr_drain_gain = BBR_UN
-+ static const int bbr_cwnd_gain = BBR_UNIT * 2;
-+ /* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
-+ static const int bbr_pacing_gain[] = {
-+- BBR_UNIT * 5 / 4, /* probe for more available bw */
-+- BBR_UNIT * 3 / 4, /* drain queue and/or yield bw to other flows */
-+- BBR_UNIT, BBR_UNIT, BBR_UNIT, /* cruise at 1.0*bw to utilize pipe, */
-+- BBR_UNIT, BBR_UNIT, BBR_UNIT /* without creating excess queue... */
-++ BBR_UNIT * 5 / 4, /* UP: probe for more available bw */
-++ BBR_UNIT * 91 / 100, /* DOWN: drain queue and/or yield bw */
-++ BBR_UNIT, /* CRUISE: try to use pipe w/ some headroom */
-++ BBR_UNIT, /* REFILL: refill pipe to estimated 100% */
-++};
-++enum bbr_pacing_gain_phase {
-++ BBR_BW_PROBE_UP = 0, /* push up inflight to probe for bw/vol */
-++ BBR_BW_PROBE_DOWN = 1, /* drain excess inflight from the queue */
-++ BBR_BW_PROBE_CRUISE = 2, /* use pipe, w/ headroom in queue/pipe */
-++ BBR_BW_PROBE_REFILL = 3, /* v2: refill the pipe again to 100% */
-+ };
-+-/* Randomize the starting gain cycling phase over N phases: */
-+-static const u32 bbr_cycle_rand = 7;
-+
-+ /* Try to keep at least this many packets in flight, if things go smoothly. For
-+ * smooth functioning, a sliding window protocol ACKing every other packet
-+@@ -174,24 +241,12 @@ static const u32 bbr_cycle_rand = 7;
-+ */
-+ static const u32 bbr_cwnd_min_target = 4;
-+
-+-/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
-++/* To estimate if BBR_STARTUP or BBR_BW_PROBE_UP has filled pipe... */
-+ /* If bw has increased significantly (1.25x), there may be more bw available: */
-+ static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
-+ /* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
-+ static const u32 bbr_full_bw_cnt = 3;
-+
-+-/* "long-term" ("LT") bandwidth estimator parameters... */
-+-/* The minimum number of rounds in an LT bw sampling interval: */
-+-static const u32 bbr_lt_intvl_min_rtts = 4;
-+-/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
-+-static const u32 bbr_lt_loss_thresh = 50;
-+-/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
-+-static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
-+-/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
-+-static const u32 bbr_lt_bw_diff = 4000 / 8;
-+-/* If we estimate we're policed, use lt_bw for this many round trips: */
-+-static const u32 bbr_lt_bw_max_rtts = 48;
-+-
-+ /* Gain factor for adding extra_acked to target cwnd: */
-+ static const int bbr_extra_acked_gain = BBR_UNIT;
-+ /* Window length of extra_acked window. */
-+@@ -201,8 +256,121 @@ static const u32 bbr_ack_epoch_acked_res
-+ /* Time period for clamping cwnd increment due to ack aggregation */
-+ static const u32 bbr_extra_acked_max_us = 100 * 1000;
-+
-++/* Flags to control BBR ECN-related behavior... */
-++
-++/* Ensure ACKs only ACK packets with consistent ECN CE status? */
-++static const bool bbr_precise_ece_ack = true;
-++
-++/* Max RTT (in usec) at which to use sender-side ECN logic.
-++ * Disabled when 0 (ECN allowed at any RTT).
-++ */
-++static const u32 bbr_ecn_max_rtt_us = 5000;
-++
-++/* On losses, scale down inflight and pacing rate by beta scaled by BBR_SCALE.
-++ * No loss response when 0.
-++ */
-++static const u32 bbr_beta = BBR_UNIT * 30 / 100;
-++
-++/* Gain factor for ECN mark ratio samples, scaled by BBR_SCALE (1/16 = 6.25%) */
-++static const u32 bbr_ecn_alpha_gain = BBR_UNIT * 1 / 16;
-++
-++/* The initial value for ecn_alpha; 1.0 allows a flow to respond quickly
-++ * to congestion if the bottleneck is congested when the flow starts up.
-++ */
-++static const u32 bbr_ecn_alpha_init = BBR_UNIT;
-++
-++/* On ECN, cut inflight_lo to (1 - ecn_factor * ecn_alpha) scaled by BBR_SCALE.
-++ * No ECN based bounding when 0.
-++ */
-++static const u32 bbr_ecn_factor = BBR_UNIT * 1 / 3; /* 1/3 = 33% */
-++
-++/* Estimate bw probing has gone too far if CE ratio exceeds this threshold.
-++ * Scaled by BBR_SCALE. Disabled when 0.
-++ */
-++static const u32 bbr_ecn_thresh = BBR_UNIT * 1 / 2; /* 1/2 = 50% */
-++
-++/* If non-zero, if in a cycle with no losses but some ECN marks, after ECN
-++ * clears then make the first round's increment to inflight_hi the following
-++ * fraction of inflight_hi.
-++ */
-++static const u32 bbr_ecn_reprobe_gain = BBR_UNIT * 1 / 2;
-++
-++/* Estimate bw probing has gone too far if loss rate exceeds this level. */
-++static const u32 bbr_loss_thresh = BBR_UNIT * 2 / 100; /* 2% loss */
-++
-++/* Slow down for a packet loss recovered by TLP? */
-++static const bool bbr_loss_probe_recovery = true;
-++
-++/* Exit STARTUP if number of loss marking events in a Recovery round is >= N,
-++ * and loss rate is higher than bbr_loss_thresh.
-++ * Disabled if 0.
-++ */
-++static const u32 bbr_full_loss_cnt = 6;
-++
-++/* Exit STARTUP if number of round trips with ECN mark rate above ecn_thresh
-++ * meets this count.
-++ */
-++static const u32 bbr_full_ecn_cnt = 2;
-++
-++/* Fraction of unutilized headroom to try to leave in path upon high loss. */
-++static const u32 bbr_inflight_headroom = BBR_UNIT * 15 / 100;
-++
-++/* How much do we increase cwnd_gain when probing for bandwidth in
-++ * BBR_BW_PROBE_UP? This specifies the increment in units of
-++ * BBR_UNIT/4. The default is 1, meaning 0.25.
-++ * The min value is 0 (meaning 0.0); max is 3 (meaning 0.75).
-++ */
-++static const u32 bbr_bw_probe_cwnd_gain = 1;
-++
-++/* Max number of packet-timed rounds to wait before probing for bandwidth. If
-++ * we want to tolerate 1% random loss per round, and not have this cut our
-++ * inflight too much, we must probe for bw periodically on roughly this scale.
-++ * If low, limits Reno/CUBIC coexistence; if high, limits loss tolerance.
-++ * We aim to be fair with Reno/CUBIC up to a BDP of at least:
-++ * BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
-++ */
-++static const u32 bbr_bw_probe_max_rounds = 63;
-++
-++/* Max amount of randomness to inject in round counting for Reno-coexistence.
-++ */
-++static const u32 bbr_bw_probe_rand_rounds = 2;
-++
-++/* Use BBR-native probe time scale starting at this many usec.
-++ * We aim to be fair with Reno/CUBIC up to an inter-loss time epoch of at least:
-++ * BDP*RTT = 25Mbps * .030sec /(1514bytes) * 0.030sec = 1.9 secs
-++ */
-++static const u32 bbr_bw_probe_base_us = 2 * USEC_PER_SEC; /* 2 secs */
-++
-++/* Use BBR-native probes spread over this many usec: */
-++static const u32 bbr_bw_probe_rand_us = 1 * USEC_PER_SEC; /* 1 secs */
-++
-++/* Use fast path if app-limited, no loss/ECN, and target cwnd was reached? */
-++static const bool bbr_fast_path = true;
-++
-++/* Use fast ack mode? */
-++static const bool bbr_fast_ack_mode = true;
-++
-++static u32 bbr_max_bw(const struct sock *sk);
-++static u32 bbr_bw(const struct sock *sk);
-++static void bbr_exit_probe_rtt(struct sock *sk);
-++static void bbr_reset_congestion_signals(struct sock *sk);
-++static void bbr_run_loss_probe_recovery(struct sock *sk);
-++
-+ static void bbr_check_probe_rtt_done(struct sock *sk);
-+
-++/* This connection can use ECN if both endpoints have signaled ECN support in
-++ * the handshake and the per-route settings indicated this is a
-++ * shallow-threshold ECN environment, meaning both:
-++ * (a) ECN CE marks indicate low-latency/shallow-threshold congestion, and
-++ * (b) TCP endpoints provide precise ACKs that only ACK data segments
-++ * with consistent ECN CE status
-++ */
-++static bool bbr_can_use_ecn(const struct sock *sk)
-++{
-++ return (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) &&
-++ (tcp_sk(sk)->ecn_flags & TCP_ECN_LOW);
-++}
-++
-+ /* Do we estimate that STARTUP filled the pipe? */
-+ static bool bbr_full_bw_reached(const struct sock *sk)
-+ {
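
The arithmetic behind the bbr_bw_probe_max_rounds comment checks out; a standalone verification:

#include <stdio.h>

/* BDP = 25 Mbit/s * 30 ms / 1514-byte packets
 *     = (25e6 / 8) bytes/s * 0.030 s / 1514 bytes ~= 61.9 packets,
 * hence probing at least every 63 rounds keeps BBR roughly fair with
 * Reno/CUBIC up to that BDP.
 */
int main(void)
{
	double bdp_pkts = (25e6 / 8) * 0.030 / 1514;

	printf("BDP ~= %.1f packets\n", bdp_pkts);	/* 61.9 */
	return 0;
}
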
-+@@ -214,17 +382,17 @@ static bool bbr_full_bw_reached(const st
-+ /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
-+ static u32 bbr_max_bw(const struct sock *sk)
-+ {
-+- struct bbr *bbr = inet_csk_ca(sk);
-++ const struct bbr *bbr = inet_csk_ca(sk);
-+
-+- return minmax_get(&bbr->bw);
-++ return max(bbr->bw_hi[0], bbr->bw_hi[1]);
-+ }
-+
-+ /* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
-+ static u32 bbr_bw(const struct sock *sk)
-+ {
-+- struct bbr *bbr = inet_csk_ca(sk);
-++ const struct bbr *bbr = inet_csk_ca(sk);
-+
-+- return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
-++ return min(bbr_max_bw(sk), bbr->bw_lo);
-+ }
-+
-+ /* Return maximum extra acked in past k-2k round trips,
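
The two hunks above are the whole of the new bound structure; restated as plain functions over the model fields (a sketch, not kernel code):

#include <stdint.h>

/* The long-term estimate is the max of the two bw_hi filter samples;
 * the bw actually used is that estimate capped by bw_lo, the
 * conservative short-term bound (bw_lo is held at "infinity", ~0U,
 * while there is no recent loss/ECN, so the cap only bites during a
 * loss response).
 */
static uint32_t model_max_bw(const uint32_t bw_hi[2])
{
	return bw_hi[0] > bw_hi[1] ? bw_hi[0] : bw_hi[1];
}

static uint32_t model_bw(const uint32_t bw_hi[2], uint32_t bw_lo)
{
	uint32_t hi = model_max_bw(bw_hi);

	return hi < bw_lo ? hi : bw_lo;
}
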
-+@@ -241,15 +409,23 @@ static u16 bbr_extra_acked(const struct
-+ * The order here is chosen carefully to avoid overflow of u64. This should
-+ * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
-+ */
-+-static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
-++static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
-++ int margin)
-+ {
-+ unsigned int mss = tcp_sk(sk)->mss_cache;
-+
-+ rate *= mss;
-+ rate *= gain;
-+ rate >>= BBR_SCALE;
-+- rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
-+- return rate >> BW_SCALE;
-++ rate *= USEC_PER_SEC / 100 * (100 - margin);
-++ rate >>= BW_SCALE;
-++ rate = max(rate, 1ULL);
-++ return rate;
-++}
-++
-++static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
-++{
-++ return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, 0);
-+ }
-+
-+ /* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
-+@@ -257,12 +433,13 @@ static unsigned long bbr_bw_to_pacing_ra
-+ {
-+ u64 rate = bw;
-+
-+- rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-++ rate = bbr_rate_bytes_per_sec(sk, rate, gain,
-++ bbr_pacing_margin_percent);
-+ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-+ return rate;
-+ }
-+
-+-/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-++/* Initialize pacing rate to: startup_pacing_gain * init_cwnd / RTT. */
-+ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+@@ -278,7 +455,8 @@ static void bbr_init_pacing_rate_from_rt
-+ }
-+ bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
-+ do_div(bw, rtt_us);
-+- sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
-++ sk->sk_pacing_rate =
-++ bbr_bw_to_pacing_rate(sk, bw, bbr_param(sk, startup_pacing_gain));
-+ }
-+
-+ /* Pace using current bw estimate and a gain factor. */
-+@@ -294,31 +472,38 @@ static void bbr_set_pacing_rate(struct s
-+ sk->sk_pacing_rate = rate;
-+ }
-+
-+-/* override sysctl_tcp_min_tso_segs */
-+-__bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
-+-{
-+- return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
-+-}
-+-
-+-/* Return the number of segments BBR would like in a TSO/GSO skb, given
-+- * a particular max gso size as a constraint.
-++/* Return the number of segments BBR would like in a TSO/GSO skb, given a
-++ * particular max gso size as a constraint. TODO: make this simpler and more
-++ * consistent by switching bbr to just call tcp_tso_autosize().
-+ */
-+ static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
-+ u32 gso_max_size)
-+ {
-+- u32 segs;
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 segs, r;
-+ u64 bytes;
-+
-+ /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
-+ bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
-+
-++ /* Budget a TSO/GSO burst size allowance based on min_rtt. For every
-++ * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
-++ * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
-++ */
-++ if (bbr_param(sk, tso_rtt_shift)) {
-++ r = bbr->min_rtt_us >> bbr_param(sk, tso_rtt_shift);
-++ if (r < BITS_PER_TYPE(u32)) /* prevent undefined behavior */
-++ bytes += GSO_LEGACY_MAX_SIZE >> r;
-++ }
-++
-+ bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
-+- segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
-++ segs = max_t(u32, bytes / mss_now,
-++ sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
-+ return segs;
-+ }
-+
-+ /* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
-+-static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
-++__bpf_kfunc static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
-+ {
-+ return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
-+ }
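
A worked example of the new min_rtt-based allowance in bbr_tso_segs_generic() above, runnable in user space (GSO_LEGACY_MAX_SIZE is 65536 in the kernel; redefined here only so the sketch is self-contained):

#include <stdio.h>
#include <stdint.h>

#define GSO_LEGACY_MAX_SIZE	65536u
static const uint32_t tso_rtt_shift = 9;

/* 64 KBytes, halved for every 2^tso_rtt_shift (= 512) usec of min_rtt,
 * with the same shift guard as the kernel code above.
 */
static uint32_t rtt_burst_allowance(uint32_t min_rtt_us)
{
	uint32_t r = min_rtt_us >> tso_rtt_shift;

	return r < 32 ? GSO_LEGACY_MAX_SIZE >> r : 0;
}

int main(void)
{
	/* ~64 KB on a LAN; under 1500 bytes once min_rtt exceeds ~3 ms */
	printf("%u %u %u\n", rtt_burst_allowance(100),		/* 65536 */
	       rtt_burst_allowance(1000),			/* 32768 */
	       rtt_burst_allowance(3100));			/* 1024 */
	return 0;
}
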
-+@@ -328,7 +513,7 @@ static u32 bbr_tso_segs_goal(struct sock
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+
-+- return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
-++ return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_LEGACY_MAX_SIZE);
-+ }
-+
-+ /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
-+@@ -348,7 +533,9 @@ __bpf_kfunc static void bbr_cwnd_event(s
-+ struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+
-+- if (event == CA_EVENT_TX_START && tp->app_limited) {
-++ if (event == CA_EVENT_TX_START) {
-++ if (!tp->app_limited)
-++ return;
-+ bbr->idle_restart = 1;
-+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
-+ bbr->ack_epoch_acked = 0;
-+@@ -359,6 +546,16 @@ __bpf_kfunc static void bbr_cwnd_event(s
-+ bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
-+ else if (bbr->mode == BBR_PROBE_RTT)
-+ bbr_check_probe_rtt_done(sk);
-++ } else if ((event == CA_EVENT_ECN_IS_CE ||
-++ event == CA_EVENT_ECN_NO_CE) &&
-++ bbr_can_use_ecn(sk) &&
-++ bbr_param(sk, precise_ece_ack)) {
-++ u32 state = bbr->ce_state;
-++ dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
-++ bbr->ce_state = state;
-++ } else if (event == CA_EVENT_TLP_RECOVERY &&
-++ bbr_param(sk, loss_probe_recovery)) {
-++ bbr_run_loss_probe_recovery(sk);
-+ }
-+ }
-+
-+@@ -381,10 +578,10 @@ static u32 bbr_bdp(struct sock *sk, u32
-+ * default. This should only happen when the connection is not using TCP
-+ * timestamps and has retransmitted all of the SYN/SYNACK/data packets
-+ * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
-+- * case we need to slow-start up toward something safe: TCP_INIT_CWND.
-++ * case we need to slow-start up toward something safe: initial cwnd.
-+ */
-+ if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */
-+- return TCP_INIT_CWND; /* be safe: cap at default initial cwnd*/
-++ return bbr->init_cwnd; /* be safe: cap at initial cwnd */
-+
-+ w = (u64)bw * bbr->min_rtt_us;
-+
-+@@ -401,23 +598,23 @@ static u32 bbr_bdp(struct sock *sk, u32
-+ * - one skb in sending host Qdisc,
-+ * - one skb in sending host TSO/GSO engine
-+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
-+- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
-+- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
-++ * Don't worry, at low rates this won't bloat cwnd because
-++ * in such cases tso_segs_goal is small. The minimum cwnd is 4 packets,
-+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
-+ * full even with ACK-every-other-packet delayed ACKs.
-+ */
-+ static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
-+ {
-+ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 tso_segs_goal;
-+
-+- /* Allow enough full-sized skbs in flight to utilize end systems. */
-+- cwnd += 3 * bbr_tso_segs_goal(sk);
-+-
-+- /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
-+- cwnd = (cwnd + 1) & ~1U;
-++ tso_segs_goal = 3 * bbr_tso_segs_goal(sk);
-+
-++ /* Allow enough full-sized skbs in flight to utilize end systems. */
-++ cwnd = max_t(u32, cwnd, tso_segs_goal);
-++ cwnd = max_t(u32, cwnd, bbr_param(sk, cwnd_min_target));
-+ /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
-+- if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
-++ if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
-+ cwnd += 2;
-+
-+ return cwnd;
-+@@ -472,10 +669,10 @@ static u32 bbr_ack_aggregation_cwnd(stru
-+ {
-+ u32 max_aggr_cwnd, aggr_cwnd = 0;
-+
-+- if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
-++ if (bbr_param(sk, extra_acked_gain)) {
-+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
-+ / BW_UNIT;
-+- aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
-++ aggr_cwnd = (bbr_param(sk, extra_acked_gain) * bbr_extra_acked(sk))
-+ >> BBR_SCALE;
-+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
-+ }
-+@@ -483,66 +680,27 @@ static u32 bbr_ack_aggregation_cwnd(stru
-+ return aggr_cwnd;
-+ }
-+
-+-/* An optimization in BBR to reduce losses: On the first round of recovery, we
-+- * follow the packet conservation principle: send P packets per P packets acked.
-+- * After that, we slow-start and send at most 2*P packets per P packets acked.
-+- * After recovery finishes, or upon undo, we restore the cwnd we had when
-+- * recovery started (capped by the target cwnd based on estimated BDP).
-+- *
-+- * TODO(ycheng/ncardwell): implement a rate-based approach.
-+- */
-+-static bool bbr_set_cwnd_to_recover_or_restore(
-+- struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
-++/* Returns the cwnd for PROBE_RTT mode. */
-++static u32 bbr_probe_rtt_cwnd(struct sock *sk)
-+ {
-+- struct tcp_sock *tp = tcp_sk(sk);
-+- struct bbr *bbr = inet_csk_ca(sk);
-+- u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
-+- u32 cwnd = tcp_snd_cwnd(tp);
-+-
-+- /* An ACK for P pkts should release at most 2*P packets. We do this
-+- * in two steps. First, here we deduct the number of lost packets.
-+- * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
-+- */
-+- if (rs->losses > 0)
-+- cwnd = max_t(s32, cwnd - rs->losses, 1);
-+-
-+- if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
-+- /* Starting 1st round of Recovery, so do packet conservation. */
-+- bbr->packet_conservation = 1;
-+- bbr->next_rtt_delivered = tp->delivered; /* start round now */
-+- /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
-+- cwnd = tcp_packets_in_flight(tp) + acked;
-+- } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
-+- /* Exiting loss recovery; restore cwnd saved before recovery. */
-+- cwnd = max(cwnd, bbr->prior_cwnd);
-+- bbr->packet_conservation = 0;
-+- }
-+- bbr->prev_ca_state = state;
-+-
|
|
|
|
-+- if (bbr->packet_conservation) {
|
|
|
|
-+- *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
|
|
|
|
-+- return true; /* yes, using packet conservation */
|
|
|
|
-+- }
|
|
|
|
-+- *new_cwnd = cwnd;
|
|
|
|
-+- return false;
|
|
|
|
-++ return max_t(u32, bbr_param(sk, cwnd_min_target),
|
|
|
|
-++ bbr_bdp(sk, bbr_bw(sk), bbr_param(sk, probe_rtt_cwnd_gain)));
|
|
|
|
-+ }
|
|
|
|
-+
|
|
|
|
-+ /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
|
|
|
|
-+ * has drawn us down below target), or snap down to target if we're above it.
|
|
|
|
-+ */
|
|
|
|
-+ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
|
|
|
|
-+- u32 acked, u32 bw, int gain)
|
|
|
|
-++ u32 acked, u32 bw, int gain, u32 cwnd,
|
|
|
|
-++ struct bbr_context *ctx)
|
|
|
|
-+ {
|
|
|
|
-+ struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+- u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
|
|
|
|
-++ u32 target_cwnd = 0;
|
|
|
|
-+
|
|
|
|
-+ if (!acked)
|
|
|
|
-+ goto done; /* no packet fully ACKed; just apply caps */
|
|
|
|
-+
|
|
|
|
-+- if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
|
|
|
|
-+- goto done;
|
|
|
|
-+-
|
|
|
|
-+ target_cwnd = bbr_bdp(sk, bw, gain);
|
|
|
|
-+
|
|
|
|
-+ /* Increment the cwnd to account for excess ACKed data that seems
|
|
|
|
-+@@ -551,74 +709,26 @@ static void bbr_set_cwnd(struct sock *sk
|
|
|
|
-+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
|
|
|
|
-+ target_cwnd = bbr_quantization_budget(sk, target_cwnd);
|
|
|
|
-+
|
|
|
|
-+- /* If we're below target cwnd, slow start cwnd toward target cwnd. */
|
|
|
|
-+- if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
|
|
|
|
-+- cwnd = min(cwnd + acked, target_cwnd);
|
|
|
|
-+- else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
|
|
|
|
-+- cwnd = cwnd + acked;
|
|
|
|
-+- cwnd = max(cwnd, bbr_cwnd_min_target);
|
|
|
|
-++ /* Update cwnd and enable fast path if cwnd reaches target_cwnd. */
|
|
|
|
-++ bbr->try_fast_path = 0;
|
|
|
|
-++ if (bbr_full_bw_reached(sk)) { /* only cut cwnd if we filled the pipe */
|
|
|
|
-++ cwnd += acked;
|
|
|
|
-++ if (cwnd >= target_cwnd) {
|
|
|
|
-++ cwnd = target_cwnd;
|
|
|
|
-++ bbr->try_fast_path = 1;
|
|
|
|
-++ }
|
|
|
|
-++ } else if (cwnd < target_cwnd || cwnd < 2 * bbr->init_cwnd) {
|
|
|
|
-++ cwnd += acked;
|
|
|
|
-++ } else {
|
|
|
|
-++ bbr->try_fast_path = 1;
|
|
|
|
-++ }
|
|
|
|
-+
|
|
|
|
-++ cwnd = max_t(u32, cwnd, bbr_param(sk, cwnd_min_target));
|
|
|
|
-+ done:
|
|
|
|
-+- tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */
|
|
|
|
-++ tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* global cap */
|
|
|
|
-+ if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
|
|
|
|
-+- tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
|
|
|
|
-+-static bool bbr_is_next_cycle_phase(struct sock *sk,
|
|
|
|
-+- const struct rate_sample *rs)
|
|
|
|
-+-{
|
|
|
|
-+- struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+- bool is_full_length =
|
|
|
|
-+- tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
|
|
|
|
-+- bbr->min_rtt_us;
|
|
|
|
-+- u32 inflight, bw;
|
|
|
|
-+-
|
|
|
|
-+- /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
|
|
|
|
-+- * use the pipe without increasing the queue.
|
|
|
|
-+- */
|
|
|
|
-+- if (bbr->pacing_gain == BBR_UNIT)
|
|
|
|
-+- return is_full_length; /* just use wall clock time */
|
|
|
|
-+-
|
|
|
|
-+- inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
|
|
|
|
-+- bw = bbr_max_bw(sk);
|
|
|
|
-+-
|
|
|
|
-+- /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
|
|
|
|
-+- * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
|
|
|
|
-+- * small (e.g. on a LAN). We do not persist if packets are lost, since
|
|
|
|
-+- * a path with small buffers may not hold that much.
|
|
|
|
-+- */
|
|
|
|
-+- if (bbr->pacing_gain > BBR_UNIT)
|
|
|
|
-+- return is_full_length &&
|
|
|
|
-+- (rs->losses || /* perhaps pacing_gain*BDP won't fit */
|
|
|
|
-+- inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
|
|
|
|
-+-
|
|
|
|
-+- /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
|
|
|
|
-+- * probing didn't find more bw. If inflight falls to match BDP then we
|
|
|
|
-+- * estimate queue is drained; persisting would underutilize the pipe.
|
|
|
|
-+- */
|
|
|
|
-+- return is_full_length ||
|
|
|
|
-+- inflight <= bbr_inflight(sk, bw, BBR_UNIT);
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-static void bbr_advance_cycle_phase(struct sock *sk)
|
|
|
|
-+-{
|
|
|
|
-+- struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+-
|
|
|
|
-+- bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
|
|
|
|
-+- bbr->cycle_mstamp = tp->delivered_mstamp;
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
|
|
|
|
-+-static void bbr_update_cycle_phase(struct sock *sk,
|
|
|
|
-+- const struct rate_sample *rs)
|
|
|
|
-+-{
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+-
|
|
|
|
-+- if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
|
|
|
|
-+- bbr_advance_cycle_phase(sk);
|
|
|
|
-++ tcp_snd_cwnd_set(tp, min_t(u32, tcp_snd_cwnd(tp),
|
|
|
|
-++ bbr_probe_rtt_cwnd(sk)));
|
|
|
|
-+ }
|
|
|
|
-+
|
|
|
|
-+ static void bbr_reset_startup_mode(struct sock *sk)
|
|
|
|
-+@@ -628,191 +738,49 @@ static void bbr_reset_startup_mode(struc
|
|
|
|
-+ bbr->mode = BBR_STARTUP;
|
|
|
|
-+ }
|
|
|
|
-+
|
|
|
|
-+-static void bbr_reset_probe_bw_mode(struct sock *sk)
|
|
|
|
-+-{
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+-
|
|
|
|
-+- bbr->mode = BBR_PROBE_BW;
|
|
|
|
-+- bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
|
|
|
|
-+- bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-static void bbr_reset_mode(struct sock *sk)
|
|
|
|
-+-{
|
|
|
|
-+- if (!bbr_full_bw_reached(sk))
|
|
|
|
-+- bbr_reset_startup_mode(sk);
|
|
|
|
-+- else
|
|
|
|
-+- bbr_reset_probe_bw_mode(sk);
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Start a new long-term sampling interval. */
|
|
|
|
-+-static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
|
|
|
|
-+-{
|
|
|
|
-+- struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+-
|
|
|
|
-+- bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
|
|
|
|
-+- bbr->lt_last_delivered = tp->delivered;
|
|
|
|
-+- bbr->lt_last_lost = tp->lost;
|
|
|
|
-+- bbr->lt_rtt_cnt = 0;
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Completely reset long-term bandwidth sampling. */
|
|
|
|
-+-static void bbr_reset_lt_bw_sampling(struct sock *sk)
|
|
|
|
-+-{
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+-
|
|
|
|
-+- bbr->lt_bw = 0;
|
|
|
|
-+- bbr->lt_use_bw = 0;
|
|
|
|
-+- bbr->lt_is_sampling = false;
|
|
|
|
-+- bbr_reset_lt_bw_sampling_interval(sk);
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Long-term bw sampling interval is done. Estimate whether we're policed. */
|
|
|
|
-+-static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
|
|
|
|
-+-{
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+- u32 diff;
|
|
|
|
-+-
|
|
|
|
-+- if (bbr->lt_bw) { /* do we have bw from a previous interval? */
|
|
|
|
-+- /* Is new bw close to the lt_bw from the previous interval? */
|
|
|
|
-+- diff = abs(bw - bbr->lt_bw);
|
|
|
|
-+- if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
|
|
|
|
-+- (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
|
|
|
|
-+- bbr_lt_bw_diff)) {
|
|
|
|
-+- /* All criteria are met; estimate we're policed. */
|
|
|
|
-+- bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
|
|
|
|
-+- bbr->lt_use_bw = 1;
|
|
|
|
-+- bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
|
|
|
|
-+- bbr->lt_rtt_cnt = 0;
|
|
|
|
-+- return;
|
|
|
|
-+- }
|
|
|
|
-+- }
|
|
|
|
-+- bbr->lt_bw = bw;
|
|
|
|
-+- bbr_reset_lt_bw_sampling_interval(sk);
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
|
|
|
|
-+- * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
|
|
|
|
-+- * explicitly models their policed rate, to reduce unnecessary losses. We
|
|
|
|
-+- * estimate that we're policed if we see 2 consecutive sampling intervals with
|
|
|
|
-+- * consistent throughput and high packet loss. If we think we're being policed,
|
|
|
|
-+- * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
|
|
|
|
-++/* See if we have reached next round trip. Upon start of the new round,
|
|
|
|
-++ * returns packets delivered since previous round start plus this ACK.
|
|
|
|
-+ */
|
|
|
|
-+-static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
|
|
|
|
-++static u32 bbr_update_round_start(struct sock *sk,
|
|
|
|
-++ const struct rate_sample *rs, struct bbr_context *ctx)
|
|
|
|
-+ {
|
|
|
|
-+ struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+- u32 lost, delivered;
|
|
|
|
-+- u64 bw;
|
|
|
|
-+- u32 t;
|
|
|
|
-+-
|
|
|
|
-+- if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
|
|
|
|
-+- if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
|
|
|
|
-+- ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
|
|
|
|
-+- bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */
|
|
|
|
-+- bbr_reset_probe_bw_mode(sk); /* restart gain cycling */
|
|
|
|
-+- }
|
|
|
|
-+- return;
|
|
|
|
-+- }
|
|
|
|
-+-
|
|
|
|
-+- /* Wait for the first loss before sampling, to let the policer exhaust
|
|
|
|
-+- * its tokens and estimate the steady-state rate allowed by the policer.
|
|
|
|
-+- * Starting samples earlier includes bursts that over-estimate the bw.
|
|
|
|
-+- */
|
|
|
|
-+- if (!bbr->lt_is_sampling) {
|
|
|
|
-+- if (!rs->losses)
|
|
|
|
-+- return;
|
|
|
|
-+- bbr_reset_lt_bw_sampling_interval(sk);
|
|
|
|
-+- bbr->lt_is_sampling = true;
|
|
|
|
-+- }
|
|
|
|
-+-
|
|
|
|
-+- /* To avoid underestimates, reset sampling if we run out of data. */
|
|
|
|
-+- if (rs->is_app_limited) {
|
|
|
|
-+- bbr_reset_lt_bw_sampling(sk);
|
|
|
|
-+- return;
|
|
|
|
-+- }
|
|
|
|
-+-
|
|
|
|
-+- if (bbr->round_start)
|
|
|
|
-+- bbr->lt_rtt_cnt++; /* count round trips in this interval */
|
|
|
|
-+- if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
|
|
|
|
-+- return; /* sampling interval needs to be longer */
|
|
|
|
-+- if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
|
|
|
|
-+- bbr_reset_lt_bw_sampling(sk); /* interval is too long */
|
|
|
|
-+- return;
|
|
|
|
-+- }
|
|
|
|
-+-
|
|
|
|
-+- /* End sampling interval when a packet is lost, so we estimate the
|
|
|
|
-+- * policer tokens were exhausted. Stopping the sampling before the
|
|
|
|
-+- * tokens are exhausted under-estimates the policed rate.
|
|
|
|
-+- */
|
|
|
|
-+- if (!rs->losses)
|
|
|
|
-+- return;
|
|
|
|
-+-
|
|
|
|
-+- /* Calculate packets lost and delivered in sampling interval. */
|
|
|
|
-+- lost = tp->lost - bbr->lt_last_lost;
|
|
|
|
-+- delivered = tp->delivered - bbr->lt_last_delivered;
|
|
|
|
-+- /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
|
|
|
|
-+- if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
|
|
|
|
-+- return;
|
|
|
|
-+-
|
|
|
|
-+- /* Find average delivery rate in this sampling interval. */
|
|
|
|
-+- t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
|
|
|
|
-+- if ((s32)t < 1)
|
|
|
|
-+- return; /* interval is less than one ms, so wait */
|
|
|
|
-+- /* Check if can multiply without overflow */
|
|
|
|
-+- if (t >= ~0U / USEC_PER_MSEC) {
|
|
|
|
-+- bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
|
|
|
|
-+- return;
|
|
|
|
-+- }
|
|
|
|
-+- t *= USEC_PER_MSEC;
|
|
|
|
-+- bw = (u64)delivered * BW_UNIT;
|
|
|
|
-+- do_div(bw, t);
|
|
|
|
-+- bbr_lt_bw_interval_done(sk, bw);
|
|
|
|
-+-}
|
|
|
|
-+-
|
|
|
|
-+-/* Estimate the bandwidth based on how fast packets are delivered */
|
|
|
|
-+-static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
|
|
|
|
-+-{
|
|
|
|
-+- struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-+- struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-+- u64 bw;
|
|
|
|
-++ u32 round_delivered = 0;
|
|
|
|
-+
|
|
|
|
-+ bbr->round_start = 0;
|
|
|
|
-+- if (rs->delivered < 0 || rs->interval_us <= 0)
|
|
|
|
-+- return; /* Not a valid observation */
|
|
|
|
-+
|
|
|
|
-+ /* See if we've reached the next RTT */
|
|
|
|
-+- if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
|
|
|
|
-++ if (rs->interval_us > 0 &&
|
|
|
|
-++ !before(rs->prior_delivered, bbr->next_rtt_delivered)) {
|
|
|
|
-++ round_delivered = tp->delivered - bbr->next_rtt_delivered;
|
|
|
|
-+ bbr->next_rtt_delivered = tp->delivered;
|
|
|
|
-+- bbr->rtt_cnt++;
|
|
|
|
-+ bbr->round_start = 1;
|
|
|
|
-+- bbr->packet_conservation = 0;
|
|
|
|
-+ }
|
|
|
|
-++ return round_delivered;
|
|
|
|
-++}
|
|
|
|
-+
|
|
|
|
-+- bbr_lt_bw_sampling(sk, rs);
|
|
|
|
-++/* Calculate the bandwidth based on how fast packets are delivered */
|
|
|
|
-++static void bbr_calculate_bw_sample(struct sock *sk,
|
|
|
|
-++ const struct rate_sample *rs, struct bbr_context *ctx)
|
|
|
|
-++{
|
|
|
|
-++ u64 bw = 0;
|
|
|
|
-+
|
|
|
|
-+ /* Divide delivered by the interval to find a (lower bound) bottleneck
|
|
|
|
-+ * bandwidth sample. Delivered is in packets and interval_us in uS and
|
|
|
|
-+ * ratio will be <<1 for most connections. So delivered is first scaled.
|
|
|
|
-++ * Round up to allow growth at low rates, even with integer division.
|
|
|
|
-+ */
|
|
|
|
-+- bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
|
|
|
|
-++ if (rs->interval_us > 0) {
|
|
|
|
-++ if (WARN_ONCE(rs->delivered < 0,
|
|
|
|
-++ "negative delivered: %d interval_us: %ld\n",
|
|
|
|
-++ rs->delivered, rs->interval_us))
|
|
|
|
-++ return;
|
|
|
|
-+
|
|
|
|
-+- /* If this sample is application-limited, it is likely to have a very
|
|
|
|
-+- * low delivered count that represents application behavior rather than
|
|
|
|
-+- * the available network rate. Such a sample could drag down estimated
|
|
|
|
-+- * bw, causing needless slow-down. Thus, to continue to send at the
|
|
|
|
-+- * last measured network rate, we filter out app-limited samples unless
|
|
|
|
-+- * they describe the path bw at least as well as our bw model.
|
|
|
|
-+- *
|
|
|
|
-+- * So the goal during app-limited phase is to proceed with the best
|
|
|
|
-+- * network rate no matter how long. We automatically leave this
|
|
|
|
-+- * phase when app writes faster than the network can deliver :)
|
|
|
|
-+- */
|
|
|
|
-+- if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
|
|
|
|
-+- /* Incorporate new sample into our max bw filter. */
|
|
|
|
-+- minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
|
|
|
|
-++ bw = DIV_ROUND_UP_ULL((u64)rs->delivered * BW_UNIT, rs->interval_us);
|
|
|
|
-+ }
|
|
|
|
-++
|
|
|
|
-++ ctx->sample_bw = bw;
|
|
|
|
-+ }
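-++/* Editor's note (illustrative example, not part of the upstream patch):
-++ * with BW_UNIT = 1 << 24, a sample of rs->delivered = 2 packets over
-++ * rs->interval_us = 40000 (40 ms) yields
-++ *   bw = DIV_ROUND_UP_ULL(2 * (1 << 24), 40000) = 839 units,
-++ * i.e. ~50 packets/sec. Plain division would give 838 and slightly
-++ * under-report, which is why the sample is rounded up.
-++ */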
-+
-+ /* Estimates the windowed max degree of ack aggregation.
-+@@ -826,7 +794,7 @@ static void bbr_update_bw(struct sock *s
-+ *
-+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
-+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
-+- * trips.
-++ * trips for non-startup phase, and 1-2 round trips for startup.
-+ */
-+ static void bbr_update_ack_aggregation(struct sock *sk,
-+ const struct rate_sample *rs)
-+@@ -834,15 +802,19 @@ static void bbr_update_ack_aggregation(s
-+ u32 epoch_us, expected_acked, extra_acked;
-+ struct bbr *bbr = inet_csk_ca(sk);
-+ struct tcp_sock *tp = tcp_sk(sk);
-++ u32 extra_acked_win_rtts_thresh = bbr_param(sk, extra_acked_win_rtts);
-+
-+- if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
-++ if (!bbr_param(sk, extra_acked_gain) || rs->acked_sacked <= 0 ||
-+ rs->delivered < 0 || rs->interval_us <= 0)
-+ return;
-+
-+ if (bbr->round_start) {
-+ bbr->extra_acked_win_rtts = min(0x1F,
-+ bbr->extra_acked_win_rtts + 1);
-+- if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
-++ if (!bbr_full_bw_reached(sk))
-++ extra_acked_win_rtts_thresh = 1;
-++ if (bbr->extra_acked_win_rtts >=
-++ extra_acked_win_rtts_thresh) {
-+ bbr->extra_acked_win_rtts = 0;
-+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
-+ 0 : 1;
-+@@ -876,49 +848,6 @@ static void bbr_update_ack_aggregation(s
-+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
-+ }
-+
-+-/* Estimate when the pipe is full, using the change in delivery rate: BBR
-+- * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
-+- * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
-+- * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
-+- * higher rwin, 3: we get higher delivery rate samples. Or transient
-+- * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
-+- * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
-+- */
-+-static void bbr_check_full_bw_reached(struct sock *sk,
-+- const struct rate_sample *rs)
-+-{
-+- struct bbr *bbr = inet_csk_ca(sk);
-+- u32 bw_thresh;
-+-
-+- if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
-+- return;
-+-
-+- bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
-+- if (bbr_max_bw(sk) >= bw_thresh) {
-+- bbr->full_bw = bbr_max_bw(sk);
-+- bbr->full_bw_cnt = 0;
-+- return;
-+- }
-+- ++bbr->full_bw_cnt;
-+- bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
-+-}
-+-
-+-/* If pipe is probably full, drain the queue and then enter steady-state. */
-+-static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
-+-{
-+- struct bbr *bbr = inet_csk_ca(sk);
-+-
-+- if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
-+- bbr->mode = BBR_DRAIN; /* drain queue we created */
-+- tcp_sk(sk)->snd_ssthresh =
-+- bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
-+- } /* fall through to check if in-flight is already small: */
-+- if (bbr->mode == BBR_DRAIN &&
-+- bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
-+- bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
-+- bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
-+-}
-+-
-+ static void bbr_check_probe_rtt_done(struct sock *sk)
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+@@ -928,9 +857,9 @@ static void bbr_check_probe_rtt_done(str
-+ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
-+ return;
-+
-+- bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
-++ bbr->probe_rtt_min_stamp = tcp_jiffies32; /* schedule next PROBE_RTT */
-+ tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
-+- bbr_reset_mode(sk);
-++ bbr_exit_probe_rtt(sk);
-+ }
-+
-+ /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
-+@@ -956,23 +885,35 @@ static void bbr_update_min_rtt(struct so
-+ {
-+ struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+- bool filter_expired;
-++ bool probe_rtt_expired, min_rtt_expired;
-++ u32 expire;
-+
-+- /* Track min RTT seen in the min_rtt_win_sec filter window: */
-+- filter_expired = after(tcp_jiffies32,
-+- bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
-++ /* Track min RTT in probe_rtt_win_ms to time next PROBE_RTT state. */
-++ expire = bbr->probe_rtt_min_stamp +
-++ msecs_to_jiffies(bbr_param(sk, probe_rtt_win_ms));
-++ probe_rtt_expired = after(tcp_jiffies32, expire);
-+ if (rs->rtt_us >= 0 &&
-+- (rs->rtt_us < bbr->min_rtt_us ||
-+- (filter_expired && !rs->is_ack_delayed))) {
-+- bbr->min_rtt_us = rs->rtt_us;
-+- bbr->min_rtt_stamp = tcp_jiffies32;
-++ (rs->rtt_us < bbr->probe_rtt_min_us ||
-++ (probe_rtt_expired && !rs->is_ack_delayed))) {
-++ bbr->probe_rtt_min_us = rs->rtt_us;
-++ bbr->probe_rtt_min_stamp = tcp_jiffies32;
-++ }
-++ /* Track min RTT seen in the min_rtt_win_sec filter window: */
-++ expire = bbr->min_rtt_stamp + bbr_param(sk, min_rtt_win_sec) * HZ;
-++ min_rtt_expired = after(tcp_jiffies32, expire);
-++ if (bbr->probe_rtt_min_us <= bbr->min_rtt_us ||
-++ min_rtt_expired) {
-++ bbr->min_rtt_us = bbr->probe_rtt_min_us;
-++ bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp;
-+ }
-+
-+- if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
-++ if (bbr_param(sk, probe_rtt_mode_ms) > 0 && probe_rtt_expired &&
-+ !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
-+ bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
-+ bbr_save_cwnd(sk); /* note cwnd so we can restore it */
-+ bbr->probe_rtt_done_stamp = 0;
-++ bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
-++ bbr->next_rtt_delivered = tp->delivered;
-+ }
-+
-+ if (bbr->mode == BBR_PROBE_RTT) {
-+@@ -981,9 +922,9 @@ static void bbr_update_min_rtt(struct so
-+ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
-+ /* Maintain min packets in flight for max(200 ms, 1 round). */
-+ if (!bbr->probe_rtt_done_stamp &&
-+- tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
-++ tcp_packets_in_flight(tp) <= bbr_probe_rtt_cwnd(sk)) {
-+ bbr->probe_rtt_done_stamp = tcp_jiffies32 +
-+- msecs_to_jiffies(bbr_probe_rtt_mode_ms);
-++ msecs_to_jiffies(bbr_param(sk, probe_rtt_mode_ms));
-+ bbr->probe_rtt_round_done = 0;
-+ bbr->next_rtt_delivered = tp->delivered;
-+ } else if (bbr->probe_rtt_done_stamp) {
-+@@ -1004,18 +945,20 @@ static void bbr_update_gains(struct sock
-+
-+ switch (bbr->mode) {
-+ case BBR_STARTUP:
-+- bbr->pacing_gain = bbr_high_gain;
-+- bbr->cwnd_gain = bbr_high_gain;
-++ bbr->pacing_gain = bbr_param(sk, startup_pacing_gain);
-++ bbr->cwnd_gain = bbr_param(sk, startup_cwnd_gain);
-+ break;
-+ case BBR_DRAIN:
-+- bbr->pacing_gain = bbr_drain_gain; /* slow, to drain */
-+- bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
-++ bbr->pacing_gain = bbr_param(sk, drain_gain); /* slow, to drain */
-++ bbr->cwnd_gain = bbr_param(sk, startup_cwnd_gain); /* keep cwnd */
-+ break;
-+ case BBR_PROBE_BW:
-+- bbr->pacing_gain = (bbr->lt_use_bw ?
-+- BBR_UNIT :
-+- bbr_pacing_gain[bbr->cycle_idx]);
-+- bbr->cwnd_gain = bbr_cwnd_gain;
-++ bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
-++ bbr->cwnd_gain = bbr_param(sk, cwnd_gain);
-++ if (bbr_param(sk, bw_probe_cwnd_gain) &&
-++ bbr->cycle_idx == BBR_BW_PROBE_UP)
-++ bbr->cwnd_gain +=
-++ BBR_UNIT * bbr_param(sk, bw_probe_cwnd_gain) / 4;
-+ break;
-+ case BBR_PROBE_RTT:
-+ bbr->pacing_gain = BBR_UNIT;
-+@@ -1027,27 +970,1108 @@ static void bbr_update_gains(struct sock
-+ }
-+ }
-+
-+-static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
-++__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
-++{
-++ /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
-++ return 3;
-++}
-++
-++/* Incorporate a new bw sample into the current window of our max filter. */
-++static void bbr_take_max_bw_sample(struct sock *sk, u32 bw)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr->bw_hi[1] = max(bw, bbr->bw_hi[1]);
-++}
-++
-++/* Keep max of last 1-2 cycles. Each PROBE_BW cycle, flip filter window. */
-++static void bbr_advance_max_bw_filter(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (!bbr->bw_hi[1])
-++ return; /* no samples in this window; remember old window */
-++ bbr->bw_hi[0] = bbr->bw_hi[1];
-++ bbr->bw_hi[1] = 0;
-++}
-++
-++/* Reset the estimator for reaching full bandwidth based on bw plateau. */
-++static void bbr_reset_full_bw(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr->full_bw = 0;
-++ bbr->full_bw_cnt = 0;
-++ bbr->full_bw_now = 0;
-++}
-++
-++/* How much do we want in flight? Our BDP, unless congestion cut cwnd. */
-++static u32 bbr_target_inflight(struct sock *sk)
-++{
-++ u32 bdp = bbr_inflight(sk, bbr_bw(sk), BBR_UNIT);
-++
-++ return min(bdp, tcp_sk(sk)->snd_cwnd);
-++}
-++
-++static bool bbr_is_probing_bandwidth(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ return (bbr->mode == BBR_STARTUP) ||
-++ (bbr->mode == BBR_PROBE_BW &&
-++ (bbr->cycle_idx == BBR_BW_PROBE_REFILL ||
-++ bbr->cycle_idx == BBR_BW_PROBE_UP));
-++}
-++
-++/* Has the given amount of time elapsed since we marked the phase start? */
-++static bool bbr_has_elapsed_in_phase(const struct sock *sk, u32 interval_us)
-++{
-++ const struct tcp_sock *tp = tcp_sk(sk);
-++ const struct bbr *bbr = inet_csk_ca(sk);
-++
-++ return tcp_stamp_us_delta(tp->tcp_mstamp,
-++ bbr->cycle_mstamp + interval_us) > 0;
-++}
-++
-++static void bbr_handle_queue_too_high_in_startup(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 bdp; /* estimated BDP in packets, with quantization budget */
-++
-++ bbr->full_bw_reached = 1;
-++
-++ bdp = bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
-++ bbr->inflight_hi = max(bdp, bbr->inflight_latest);
-++}
-++
-++/* Exit STARTUP upon N consecutive rounds with ECN mark rate > ecn_thresh. */
-++static void bbr_check_ecn_too_high_in_startup(struct sock *sk, u32 ce_ratio)
-+ {
-+- bbr_update_bw(sk, rs);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (bbr_full_bw_reached(sk) || !bbr->ecn_eligible ||
-++ !bbr_param(sk, full_ecn_cnt) || !bbr_param(sk, ecn_thresh))
-++ return;
-++
-++ if (ce_ratio >= bbr_param(sk, ecn_thresh))
-++ bbr->startup_ecn_rounds++;
-++ else
-++ bbr->startup_ecn_rounds = 0;
-++
-++ if (bbr->startup_ecn_rounds >= bbr_param(sk, full_ecn_cnt)) {
-++ bbr_handle_queue_too_high_in_startup(sk);
-++ return;
-++ }
-++}
-++
-++/* Updates ecn_alpha and returns ce_ratio. -1 if not available. */
-++static int bbr_update_ecn_alpha(struct sock *sk)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct net *net = sock_net(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ s32 delivered, delivered_ce;
-++ u64 alpha, ce_ratio;
-++ u32 gain;
-++ bool want_ecn_alpha;
-++
-++ /* See if we should use ECN sender logic for this connection. */
-++ if (!bbr->ecn_eligible && bbr_can_use_ecn(sk) &&
-++ bbr_param(sk, ecn_factor) &&
-++ (bbr->min_rtt_us <= bbr_ecn_max_rtt_us ||
-++ !bbr_ecn_max_rtt_us))
-++ bbr->ecn_eligible = 1;
-++
-++ /* Skip updating alpha only if not ECN-eligible and PLB is disabled. */
-++ want_ecn_alpha = (bbr->ecn_eligible ||
-++ (bbr_can_use_ecn(sk) &&
-++ READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled)));
-++ if (!want_ecn_alpha)
-++ return -1;
-++
-++ delivered = tp->delivered - bbr->alpha_last_delivered;
-++ delivered_ce = tp->delivered_ce - bbr->alpha_last_delivered_ce;
-++
-++ if (delivered == 0 || /* avoid divide by zero */
-++ WARN_ON_ONCE(delivered < 0 || delivered_ce < 0)) /* backwards? */
-++ return -1;
-++
-++ BUILD_BUG_ON(BBR_SCALE != TCP_PLB_SCALE);
-++ ce_ratio = (u64)delivered_ce << BBR_SCALE;
-++ do_div(ce_ratio, delivered);
-++
-++ gain = bbr_param(sk, ecn_alpha_gain);
-++ alpha = ((BBR_UNIT - gain) * bbr->ecn_alpha) >> BBR_SCALE;
-++ alpha += (gain * ce_ratio) >> BBR_SCALE;
-++ bbr->ecn_alpha = min_t(u32, alpha, BBR_UNIT);
-++
-++ bbr->alpha_last_delivered = tp->delivered;
-++ bbr->alpha_last_delivered_ce = tp->delivered_ce;
-++
-++ bbr_check_ecn_too_high_in_startup(sk, ce_ratio);
-++ return (int)ce_ratio;
-++}
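-++/* Editor's note (illustrative, not part of the upstream patch): the update
-++ * above is the EWMA  alpha' = (1 - g)*alpha + g*ce_ratio  in BBR_SCALE
-++ * fixed point. Assuming a gain g of 1/16 of BBR_UNIT: starting from
-++ * alpha = 0 under a sustained 50% CE mark rate, alpha covers about half
-++ * the distance to its 50% target after ~11 rounds, since
-++ * (15/16)^11 ~= 0.49.
-++ */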
-++
-++/* Protective Load Balancing (PLB). PLB rehashes outgoing data (to a new IPv6
-++ * flow label) if it encounters sustained congestion in the form of ECN marks.
-++ */
-++static void bbr_plb(struct sock *sk, const struct rate_sample *rs, int ce_ratio)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (bbr->round_start && ce_ratio >= 0)
-++ tcp_plb_update_state(sk, &bbr->plb, ce_ratio);
-++
-++ tcp_plb_check_rehash(sk, &bbr->plb);
-++}
-++
-++/* Each round trip of BBR_BW_PROBE_UP, double volume of probing data. */
-++static void bbr_raise_inflight_hi_slope(struct sock *sk)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 growth_this_round, cnt;
-++
-++ /* Calculate "slope": packets S/Acked per inflight_hi increment. */
-++ growth_this_round = 1 << bbr->bw_probe_up_rounds;
-++ bbr->bw_probe_up_rounds = min(bbr->bw_probe_up_rounds + 1, 30);
-++ cnt = tcp_snd_cwnd(tp) / growth_this_round;
-++ cnt = max(cnt, 1U);
-++ bbr->bw_probe_up_cnt = cnt;
-++}
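-++/* Editor's note (illustrative, not part of the upstream patch): since
-++ * growth_this_round doubles each round (1, 2, 4, ...), cnt shrinks as
-++ * cwnd, cwnd/2, cwnd/4, ..., so inflight_hi grows by ~1, ~2, ~4, ...
-++ * packets per round of PROBE_UP: an exponential ramp analogous to
-++ * slow-start, but applied to the probing budget rather than cwnd.
-++ */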
-++
-++/* In BBR_BW_PROBE_UP, not seeing high loss/ECN/queue, so raise inflight_hi. */
-++static void bbr_probe_inflight_hi_upward(struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 delta;
-++
-++ if (!tp->is_cwnd_limited || tcp_snd_cwnd(tp) < bbr->inflight_hi)
-++ return; /* not fully using inflight_hi, so don't grow it */
-++
-++ /* For each bw_probe_up_cnt packets ACKed, increase inflight_hi by 1. */
-++ bbr->bw_probe_up_acks += rs->acked_sacked;
-++ if (bbr->bw_probe_up_acks >= bbr->bw_probe_up_cnt) {
-++ delta = bbr->bw_probe_up_acks / bbr->bw_probe_up_cnt;
-++ bbr->bw_probe_up_acks -= delta * bbr->bw_probe_up_cnt;
-++ bbr->inflight_hi += delta;
-++ bbr->try_fast_path = 0; /* Need to update cwnd */
-++ }
-++
-++ if (bbr->round_start)
-++ bbr_raise_inflight_hi_slope(sk);
-++}
-++
-++/* Does loss/ECN rate for this sample say inflight is "too high"?
-++ * This is used by both the bbr_check_loss_too_high_in_startup() function,
-++ * which can be used in either v1 or v2, and the PROBE_UP phase of v2, which
-++ * uses it to notice when loss/ECN rates suggest inflight is too high.
-++ */
-++static bool bbr_is_inflight_too_high(const struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ const struct bbr *bbr = inet_csk_ca(sk);
-++ u32 loss_thresh, ecn_thresh;
-++
-++ if (rs->lost > 0 && rs->tx_in_flight) {
-++ loss_thresh = (u64)rs->tx_in_flight * bbr_param(sk, loss_thresh) >>
-++ BBR_SCALE;
-++ if (rs->lost > loss_thresh) {
-++ return true;
-++ }
-++ }
-++
-++ if (rs->delivered_ce > 0 && rs->delivered > 0 &&
-++ bbr->ecn_eligible && bbr_param(sk, ecn_thresh)) {
-++ ecn_thresh = (u64)rs->delivered * bbr_param(sk, ecn_thresh) >>
-++ BBR_SCALE;
-++ if (rs->delivered_ce > ecn_thresh) {
-++ return true;
-++ }
-++ }
-++
-++ return false;
-++}
-++
-++/* Calculate the tx_in_flight level that corresponded to excessive loss.
-++ * We find "lost_prefix" segs of the skb where loss rate went too high,
-++ * by solving for "lost_prefix" in the following equation:
-++ * lost / inflight >= loss_thresh
-++ * (lost_prev + lost_prefix) / (inflight_prev + lost_prefix) >= loss_thresh
-++ * Then we take that equation, convert it to fixed point, and
-++ * round up to the nearest packet.
-++ */
-++static u32 bbr_inflight_hi_from_lost_skb(const struct sock *sk,
-++ const struct rate_sample *rs,
-++ const struct sk_buff *skb)
-++{
-++ const struct tcp_sock *tp = tcp_sk(sk);
-++ u32 loss_thresh = bbr_param(sk, loss_thresh);
-++ u32 pcount, divisor, inflight_hi;
-++ s32 inflight_prev, lost_prev;
-++ u64 loss_budget, lost_prefix;
-++
-++ pcount = tcp_skb_pcount(skb);
-++
-++ /* How much data was in flight before this skb? */
-++ inflight_prev = rs->tx_in_flight - pcount;
-++ if (inflight_prev < 0) {
-++ WARN_ONCE(tcp_skb_tx_in_flight_is_suspicious(
-++ pcount,
-++ TCP_SKB_CB(skb)->sacked,
-++ rs->tx_in_flight),
-++ "tx_in_flight: %u pcount: %u reneg: %u",
-++ rs->tx_in_flight, pcount, tcp_sk(sk)->is_sack_reneg);
-++ return ~0U;
-++ }
-++
-++ /* How much inflight data was marked lost before this skb? */
-++ lost_prev = rs->lost - pcount;
-++ if (WARN_ONCE(lost_prev < 0,
-++ "cwnd: %u ca: %d out: %u lost: %u pif: %u "
-++ "tx_in_flight: %u tx.lost: %u tp->lost: %u rs->lost: %d "
-++ "lost_prev: %d pcount: %d seq: %u end_seq: %u reneg: %u",
-++ tcp_snd_cwnd(tp), inet_csk(sk)->icsk_ca_state,
-++ tp->packets_out, tp->lost_out, tcp_packets_in_flight(tp),
-++ rs->tx_in_flight, TCP_SKB_CB(skb)->tx.lost, tp->lost,
-++ rs->lost, lost_prev, pcount,
-++ TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-++ tp->is_sack_reneg))
-++ return ~0U;
-++
-++ /* At what prefix of this lost skb did loss rate exceed loss_thresh? */
-++ loss_budget = (u64)inflight_prev * loss_thresh + BBR_UNIT - 1;
-++ loss_budget >>= BBR_SCALE;
-++ if (lost_prev >= loss_budget) {
-++ lost_prefix = 0; /* previous losses crossed loss_thresh */
-++ } else {
-++ lost_prefix = loss_budget - lost_prev;
-++ lost_prefix <<= BBR_SCALE;
-++ divisor = BBR_UNIT - loss_thresh;
-++ if (WARN_ON_ONCE(!divisor)) /* loss_thresh is 8 bits */
-++ return ~0U;
-++ do_div(lost_prefix, divisor);
-++ }
-++
-++ inflight_hi = inflight_prev + lost_prefix;
-++ return inflight_hi;
-++}
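-++/* Editor's note (worked example, not part of the upstream patch; assumes
-++ * the default loss_thresh of ~2%, i.e. BBR_UNIT * 2 / 100 = 5 with
-++ * BBR_UNIT = 256): for a lost skb with pcount = 10, rs->tx_in_flight =
-++ * 100 and rs->lost = 10: inflight_prev = 90, lost_prev = 0,
-++ * loss_budget = ceil(90 * 5 / 256) = 2, and
-++ * lost_prefix = (2 << 8) / (256 - 5) = 2, so the function returns
-++ * inflight_hi = 90 + 2 = 92 packets.
-++ */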
-++
-++/* If loss/ECN rates during probing indicated we may have overfilled a
-++ * buffer, return an operating point that tries to leave unutilized headroom in
-++ * the path for other flows, for fairness convergence and lower RTTs and loss.
-++ */
-++static u32 bbr_inflight_with_headroom(const struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 headroom, headroom_fraction;
-++
-++ if (bbr->inflight_hi == ~0U)
-++ return ~0U;
-++
-++ headroom_fraction = bbr_param(sk, inflight_headroom);
-++ headroom = ((u64)bbr->inflight_hi * headroom_fraction) >> BBR_SCALE;
-++ headroom = max(headroom, 1U);
-++ return max_t(s32, bbr->inflight_hi - headroom,
-++ bbr_param(sk, cwnd_min_target));
-++}
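-++/* Editor's note (illustrative, not part of the upstream patch; assumes
-++ * the default inflight_headroom of ~15% of BBR_UNIT): with
-++ * inflight_hi = 100 packets, roughly 15 packets are reserved as
-++ * headroom, so the returned cap is ~85-86 packets, floored at
-++ * cwnd_min_target.
-++ */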
-++
-++/* Bound cwnd to a sensible level, based on our current probing state
-++ * machine phase and model of a good inflight level (inflight_lo, inflight_hi).
-++ */
-++static void bbr_bound_cwnd_for_inflight_model(struct sock *sk)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 cap;
-++
-++ /* tcp_rcv_synsent_state_process() currently calls tcp_ack()
-++ * and thus cong_control() without first initializing us(!).
-++ */
-++ if (!bbr->initialized)
-++ return;
-++
-++ cap = ~0U;
-++ if (bbr->mode == BBR_PROBE_BW &&
-++ bbr->cycle_idx != BBR_BW_PROBE_CRUISE) {
-++ /* Probe to see if more packets fit in the path. */
-++ cap = bbr->inflight_hi;
-++ } else {
-++ if (bbr->mode == BBR_PROBE_RTT ||
-++ (bbr->mode == BBR_PROBE_BW &&
-++ bbr->cycle_idx == BBR_BW_PROBE_CRUISE))
-++ cap = bbr_inflight_with_headroom(sk);
-++ }
-++ /* Adapt to any loss/ECN since our last bw probe. */
-++ cap = min(cap, bbr->inflight_lo);
-++
-++ cap = max_t(u32, cap, bbr_param(sk, cwnd_min_target));
-++ tcp_snd_cwnd_set(tp, min(cap, tcp_snd_cwnd(tp)));
-++}
-++
-++/* How should we multiplicatively cut bw or inflight limits based on ECN? */
-++u32 bbr_ecn_cut(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ return BBR_UNIT -
-++ ((bbr->ecn_alpha * bbr_param(sk, ecn_factor)) >> BBR_SCALE);
-++}
-++
-++/* Init lower bounds if have not inited yet. */
-++static void bbr_init_lower_bounds(struct sock *sk, bool init_bw)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (init_bw && bbr->bw_lo == ~0U)
-++ bbr->bw_lo = bbr_max_bw(sk);
-++ if (bbr->inflight_lo == ~0U)
-++ bbr->inflight_lo = tcp_snd_cwnd(tp);
-++}
-++
-++/* Reduce bw and inflight to (1 - beta). */
-++static void bbr_loss_lower_bounds(struct sock *sk, u32 *bw, u32 *inflight)
-++{
-++ struct bbr* bbr = inet_csk_ca(sk);
-++ u32 loss_cut = BBR_UNIT - bbr_param(sk, beta);
-++
-++ *bw = max_t(u32, bbr->bw_latest,
-++ (u64)bbr->bw_lo * loss_cut >> BBR_SCALE);
-++ *inflight = max_t(u32, bbr->inflight_latest,
-++ (u64)bbr->inflight_lo * loss_cut >> BBR_SCALE);
-++}
-++
-++/* Reduce inflight to (1 - alpha*ecn_factor). */
-++static void bbr_ecn_lower_bounds(struct sock *sk, u32 *inflight)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 ecn_cut = bbr_ecn_cut(sk);
-++
-++ *inflight = (u64)bbr->inflight_lo * ecn_cut >> BBR_SCALE;
-++}
-++
-++/* Estimate a short-term lower bound on the capacity available now, based
-++ * on measurements of the current delivery process and recent history. When we
-++ * are seeing loss/ECN at times when we are not probing bw, then conservatively
-++ * move toward flow balance by multiplicatively cutting our short-term
-++ * estimated safe rate and volume of data (bw_lo and inflight_lo). We use a
-++ * multiplicative decrease in order to converge to a lower capacity in time
-++ * logarithmic in the magnitude of the decrease.
-++ *
-++ * However, we do not cut our short-term estimates lower than the current rate
-++ * and volume of delivered data from this round trip, since from the current
-++ * delivery process we can estimate the measured capacity available now.
-++ *
-++ * Anything faster than that approach would knowingly risk high loss, which can
-++ * cause low bw for Reno/CUBIC and high loss recovery latency for
-++ * request/response flows using any congestion control.
-++ */
-++static void bbr_adapt_lower_bounds(struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 ecn_inflight_lo = ~0U;
-++
-++ /* We only use lower-bound estimates when not probing bw.
-++ * When probing we need to push inflight higher to probe bw.
-++ */
-++ if (bbr_is_probing_bandwidth(sk))
-++ return;
-++
-++ /* ECN response. */
-++ if (bbr->ecn_in_round && bbr_param(sk, ecn_factor)) {
-++ bbr_init_lower_bounds(sk, false);
-++ bbr_ecn_lower_bounds(sk, &ecn_inflight_lo);
-++ }
-++
-++ /* Loss response. */
-++ if (bbr->loss_in_round) {
-++ bbr_init_lower_bounds(sk, true);
-++ bbr_loss_lower_bounds(sk, &bbr->bw_lo, &bbr->inflight_lo);
-++ }
-++
-++ /* Adjust to the lower of the levels implied by loss/ECN. */
-++ bbr->inflight_lo = min(bbr->inflight_lo, ecn_inflight_lo);
-++ bbr->bw_lo = max(1U, bbr->bw_lo);
-++}
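-++/* Editor's note (illustrative, not part of the upstream patch; assumes
-++ * the default beta of 0.3 * BBR_UNIT): on a round trip with loss,
-++ * bbr_loss_lower_bounds() cuts bw_lo and inflight_lo to 70% of their
-++ * prior values, but never below bw_latest/inflight_latest, the rate and
-++ * volume actually delivered in the current round.
-++ */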
|
|
|
|
-++
|
|
|
|
-++/* Reset any short-term lower-bound adaptation to congestion, so that we can
|
|
|
|
-++ * push our inflight up.
|
|
|
|
-++ */
|
|
|
|
-++static void bbr_reset_lower_bounds(struct sock *sk)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ bbr->bw_lo = ~0U;
|
|
|
|
-++ bbr->inflight_lo = ~0U;
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* After bw probing (STARTUP/PROBE_UP), reset signals before entering a state
|
|
|
|
-++ * machine phase where we adapt our lower bound based on congestion signals.
|
|
|
|
-++ */
|
|
|
|
-++static void bbr_reset_congestion_signals(struct sock *sk)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ bbr->loss_in_round = 0;
|
|
|
|
-++ bbr->ecn_in_round = 0;
|
|
|
|
-++ bbr->loss_in_cycle = 0;
|
|
|
|
-++ bbr->ecn_in_cycle = 0;
|
|
|
|
-++ bbr->bw_latest = 0;
|
|
|
|
-++ bbr->inflight_latest = 0;
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++static void bbr_exit_loss_recovery(struct sock *sk)
|
|
|
|
-++{
|
|
|
|
-++ struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
|
|
|
|
-++ bbr->try_fast_path = 0; /* bound cwnd using latest model */
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* Update rate and volume of delivered data from latest round trip. */
|
|
|
|
-++static void bbr_update_latest_delivery_signals(
|
|
|
|
-++ struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
|
|
|
|
-++{
|
|
|
|
-++ struct tcp_sock *tp = tcp_sk(sk);
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ bbr->loss_round_start = 0;
|
|
|
|
-++ if (rs->interval_us <= 0 || !rs->acked_sacked)
|
|
|
|
-++ return; /* Not a valid observation */
|
|
|
|
-++
|
|
|
|
-++ bbr->bw_latest = max_t(u32, bbr->bw_latest, ctx->sample_bw);
|
|
|
|
-++ bbr->inflight_latest = max_t(u32, bbr->inflight_latest, rs->delivered);
|
|
|
|
-++
|
|
|
|
-++ if (!before(rs->prior_delivered, bbr->loss_round_delivered)) {
|
|
|
|
-++ bbr->loss_round_delivered = tp->delivered;
|
|
|
|
-++ bbr->loss_round_start = 1; /* mark start of new round trip */
|
|
|
|
-++ }
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* Once per round, reset filter for latest rate and volume of delivered data. */
|
|
|
|
-++static void bbr_advance_latest_delivery_signals(
|
|
|
|
-++ struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ /* If ACK matches a TLP retransmit, persist the filter. If we detect
|
|
|
|
-++ * that a TLP retransmit plugged a tail loss, we'll want to remember
|
|
|
|
-++ * how much data the path delivered before the tail loss.
|
|
|
|
-++ */
|
|
|
|
-++ if (bbr->loss_round_start && !rs->is_acking_tlp_retrans_seq) {
|
|
|
|
-++ bbr->bw_latest = ctx->sample_bw;
|
|
|
|
-++ bbr->inflight_latest = rs->delivered;
|
|
|
|
-++ }
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* Update (most of) our congestion signals: track the recent rate and volume of
|
|
|
|
-++ * delivered data, presence of loss, and EWMA degree of ECN marking.
|
|
|
|
-++ */
|
|
|
|
-++static void bbr_update_congestion_signals(
|
|
|
|
-++ struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++ u64 bw;
|
|
|
|
-++
|
|
|
|
-++ if (rs->interval_us <= 0 || !rs->acked_sacked)
|
|
|
|
-++ return; /* Not a valid observation */
|
|
|
|
-++ bw = ctx->sample_bw;
|
|
|
|
-++
|
|
|
|
-++ if (!rs->is_app_limited || bw >= bbr_max_bw(sk))
|
|
|
|
-++ bbr_take_max_bw_sample(sk, bw);
|
|
|
|
-++
|
|
|
|
-++ bbr->loss_in_round |= (rs->losses > 0);
|
|
|
|
-++
|
|
|
|
-++ if (!bbr->loss_round_start)
|
|
|
|
-++ return; /* skip the per-round-trip updates */
|
|
|
|
-++ /* Now do per-round-trip updates. */
|
|
|
|
-++ bbr_adapt_lower_bounds(sk, rs);
|
|
|
|
-++
|
|
|
|
-++ bbr->loss_in_round = 0;
|
|
|
|
-++ bbr->ecn_in_round = 0;
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* Bandwidth probing can cause loss. To help coexistence with loss-based
|
|
|
|
-++ * congestion control we spread out our probing in a Reno-conscious way. Due to
|
|
|
|
-++ * the shape of the Reno sawtooth, the time required between loss epochs for an
|
|
|
|
-++ * idealized Reno flow is a number of round trips that is the BDP of that
|
|
|
|
-++ * flow. We count packet-timed round trips directly, since measured RTT can
|
|
|
|
-++ * vary widely, and Reno is driven by packet-timed round trips.
|
|
|
|
-++ */
|
|
|
|
-++static bool bbr_is_reno_coexistence_probe_time(struct sock *sk)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++ u32 rounds;
|
|
|
|
-++
|
|
|
|
-++ /* Random loss can shave some small percentage off of our inflight
|
|
|
|
-++ * in each round. To survive this, flows need robust periodic probes.
|
|
|
|
-++ */
|
|
|
|
-++ rounds = min_t(u32, bbr_param(sk, bw_probe_max_rounds), bbr_target_inflight(sk));
|
|
|
|
-++ return bbr->rounds_since_probe >= rounds;
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* How long do we want to wait before probing for bandwidth (and risking
|
|
|
|
-++ * loss)? We randomize the wait, for better mixing and fairness convergence.
|
|
|
|
-++ *
|
|
|
|
-++ * We bound the Reno-coexistence inter-bw-probe time to be 62-63 round trips.
|
|
|
|
-++ * This is calculated to allow fairness with a 25Mbps, 30ms Reno flow,
|
|
|
|
-++ * (eg 4K video to a broadband user):
|
|
|
|
-++ * BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
|
|
|
|
-++ *
|
|
|
|
-++ * We bound the BBR-native inter-bw-probe wall clock time to be:
|
|
|
|
-++ * (a) higher than 2 sec: to try to avoid causing loss for a long enough time
|
|
|
|
-++ * to allow Reno at 30ms to get 4K video bw, the inter-bw-probe time must
|
|
|
|
-++ * be at least: 25Mbps * .030sec / (1514bytes) * 0.030sec = 1.9secs
|
|
|
|
-++ * (b) lower than 3 sec: to ensure flows can start probing in a reasonable
|
|
|
|
-++ * amount of time to discover unutilized bw on human-scale interactive
|
|
|
|
-++ * time-scales (e.g. perhaps traffic from a web page download that we
|
|
|
|
-++ * were competing with is now complete).
|
|
|
|
-++ */
|
|
|
|
-++static void bbr_pick_probe_wait(struct sock *sk)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ /* Decide the random round-trip bound for wait until probe: */
|
|
|
|
-++ bbr->rounds_since_probe =
|
|
|
|
-++ get_random_u32_below(bbr_param(sk, bw_probe_rand_rounds));
|
|
|
|
-++ /* Decide the random wall clock bound for wait until probe: */
|
|
|
|
-++ bbr->probe_wait_us = bbr_param(sk, bw_probe_base_us) +
|
|
|
|
-++ get_random_u32_below(bbr_param(sk, bw_probe_rand_us));
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++static void bbr_set_cycle_idx(struct sock *sk, int cycle_idx)
|
|
|
|
-++{
|
|
|
|
-++ struct bbr *bbr = inet_csk_ca(sk);
|
|
|
|
-++
|
|
|
|
-++ bbr->cycle_idx = cycle_idx;
|
|
|
|
-++ /* New phase, so need to update cwnd and pacing rate. */
|
|
|
|
-++ bbr->try_fast_path = 0;
|
|
|
|
-++}
|
|
|
|
-++
|
|
|
|
-++/* Send at estimated bw to fill the pipe, but not queue. We need this phase
|
|
|
|
-++ * before PROBE_UP, because as soon as we send faster than the available bw
|
|
|
|
-++ * we will start building a queue, and if the buffer is shallow we can cause
|
|
|
|
-++ * loss. If we do not fill the pipe before we cause this loss, our bw_hi and
|
|
|
|
-++ * inflight_hi estimates will underestimate.
-++ */
-++static void bbr_start_bw_probe_refill(struct sock *sk, u32 bw_probe_up_rounds)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr_reset_lower_bounds(sk);
-++ bbr->bw_probe_up_rounds = bw_probe_up_rounds;
-++ bbr->bw_probe_up_acks = 0;
-++ bbr->stopped_risky_probe = 0;
-++ bbr->ack_phase = BBR_ACKS_REFILLING;
-++ bbr->next_rtt_delivered = tp->delivered;
-++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_REFILL);
-++}
-++
-++/* Now probe max deliverable data rate and volume. */
-++static void bbr_start_bw_probe_up(struct sock *sk, struct bbr_context *ctx)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr->ack_phase = BBR_ACKS_PROBE_STARTING;
-++ bbr->next_rtt_delivered = tp->delivered;
-++ bbr->cycle_mstamp = tp->tcp_mstamp;
-++ bbr_reset_full_bw(sk);
-++ bbr->full_bw = ctx->sample_bw;
-++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_UP);
-++ bbr_raise_inflight_hi_slope(sk);
-++}
-++
-++/* Start a new PROBE_BW probing cycle of some wall clock length. Pick a wall
-++ * clock time at which to probe beyond an inflight that we think to be
-++ * safe. This will knowingly risk packet loss, so we want to do this rarely, to
-++ * keep packet loss rates low. Also start a round-trip counter, to probe faster
-++ * if we estimate a Reno flow at our BDP would probe faster.
-++ */
-++static void bbr_start_bw_probe_down(struct sock *sk)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr_reset_congestion_signals(sk);
-++ bbr->bw_probe_up_cnt = ~0U; /* not growing inflight_hi any more */
-++ bbr_pick_probe_wait(sk);
-++ bbr->cycle_mstamp = tp->tcp_mstamp; /* start wall clock */
-++ bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
-++ bbr->next_rtt_delivered = tp->delivered;
-++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_DOWN);
-++}
-++
-++/* Cruise: maintain what we estimate to be a neutral, conservative
-++ * operating point, without attempting to probe up for bandwidth or down for
-++ * RTT, and only reducing inflight in response to loss/ECN signals.
-++ */
-++static void bbr_start_bw_probe_cruise(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (bbr->inflight_lo != ~0U)
-++ bbr->inflight_lo = min(bbr->inflight_lo, bbr->inflight_hi);
-++
-++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE);
-++}
-++
-++/* Loss and/or ECN rate is too high while probing.
-++ * Adapt (once per bw probe) by cutting inflight_hi and then restarting cycle.
-++ */
-++static void bbr_handle_inflight_too_high(struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ const u32 beta = bbr_param(sk, beta);
-++
-++ bbr->prev_probe_too_high = 1;
-++ bbr->bw_probe_samples = 0; /* only react once per probe */
-++ /* If we are app-limited then we are not robustly
-++ * probing the max volume of inflight data we think
-++ * might be safe (analogous to how app-limited bw
-++ * samples are not known to be robustly probing bw).
-++ */
-++ if (!rs->is_app_limited) {
-++ bbr->inflight_hi = max_t(u32, rs->tx_in_flight,
-++ (u64)bbr_target_inflight(sk) *
-++ (BBR_UNIT - beta) >> BBR_SCALE);
-++ }
-++ if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
-++ bbr_start_bw_probe_down(sk);
-++}
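
[Illustrative aside — not part of the patch. The inflight_hi cut in
bbr_handle_inflight_too_high() above is BBR_SCALE fixed-point arithmetic.
Below is a minimal userspace sketch of the same computation; the
BBR_SCALE/BBR_UNIT definitions and the beta value of 0.3 mirror typical
BBR v3 parameters and are assumptions here, not guaranteed by this patch.]

    #include <stdint.h>
    #include <stdio.h>

    #define BBR_SCALE 8               /* fixed-point scale: 256 == 1.0 */
    #define BBR_UNIT (1 << BBR_SCALE)

    /* Mirror of max_t(u32, tx_in_flight, target * (1 - beta)) above. */
    static uint32_t cut_inflight_hi(uint32_t tx_in_flight, uint32_t target,
                                    uint32_t beta)
    {
        uint64_t cut = (uint64_t)target * (BBR_UNIT - beta) >> BBR_SCALE;

        return tx_in_flight > cut ? tx_in_flight : (uint32_t)cut;
    }

    int main(void)
    {
        /* target 100 pkts, beta 0.3 => floor of 70 pkts; prints 70 */
        printf("%u\n", cut_inflight_hi(60, 100, BBR_UNIT * 30 / 100));
        return 0;
    }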
-++
-++/* If we're seeing bw and loss samples reflecting our bw probing, adapt
-++ * using the signals we see. If loss or ECN mark rate gets too high, then adapt
-++ * inflight_hi downward. If we're able to push inflight higher without such
-++ * signals, push higher: adapt inflight_hi upward.
-++ */
-++static bool bbr_adapt_upper_bounds(struct sock *sk,
-++ const struct rate_sample *rs,
-++ struct bbr_context *ctx)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ /* Track when we'll see bw/loss samples resulting from our bw probes. */
-++ if (bbr->ack_phase == BBR_ACKS_PROBE_STARTING && bbr->round_start)
-++ bbr->ack_phase = BBR_ACKS_PROBE_FEEDBACK;
-++ if (bbr->ack_phase == BBR_ACKS_PROBE_STOPPING && bbr->round_start) {
-++ /* End of samples from bw probing phase. */
-++ bbr->bw_probe_samples = 0;
-++ bbr->ack_phase = BBR_ACKS_INIT;
-++ /* At this point in the cycle, our current bw sample is also
-++ * our best recent chance at finding the highest available bw
-++ * for this flow. So now is the best time to forget the bw
-++ * samples from the previous cycle, by advancing the window.
-++ */
-++ if (bbr->mode == BBR_PROBE_BW && !rs->is_app_limited)
-++ bbr_advance_max_bw_filter(sk);
-++ /* If we had an inflight_hi, then probed and pushed inflight all
-++ * the way up to hit that inflight_hi without seeing any
-++ * high loss/ECN in all the resulting ACKs from that probing,
-++ * then probe up again, this time letting inflight persist at
-++ * inflight_hi for a round trip, then accelerating beyond.
-++ */
-++ if (bbr->mode == BBR_PROBE_BW &&
-++ bbr->stopped_risky_probe && !bbr->prev_probe_too_high) {
-++ bbr_start_bw_probe_refill(sk, 0);
-++ return true; /* yes, decided state transition */
-++ }
-++ }
-++ if (bbr_is_inflight_too_high(sk, rs)) {
-++ if (bbr->bw_probe_samples) /* sample is from bw probing? */
-++ bbr_handle_inflight_too_high(sk, rs);
-++ } else {
-++ /* Loss/ECN rate is declared safe. Adjust upper bound upward. */
-++
-++ if (bbr->inflight_hi == ~0U)
-++ return false; /* no excess queue signals yet */
-++
-++ /* To be resilient to random loss, we must raise bw/inflight_hi
-++ * if we observe in any phase that a higher level is safe.
-++ */
-++ if (rs->tx_in_flight > bbr->inflight_hi) {
-++ bbr->inflight_hi = rs->tx_in_flight;
-++ }
-++
-++ if (bbr->mode == BBR_PROBE_BW &&
-++ bbr->cycle_idx == BBR_BW_PROBE_UP)
-++ bbr_probe_inflight_hi_upward(sk, rs);
-++ }
-++
-++ return false;
-++}
-++
-++/* Check if it's time to probe for bandwidth now, and if so, kick it off. */
-++static bool bbr_check_time_to_probe_bw(struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 n;
-++
-++ /* If we seem to be at an operating point where we are not seeing loss
-++ * but we are seeing ECN marks, then when the ECN marks cease we reprobe
-++ * quickly (in case cross-traffic has ceased and freed up bw).
-++ */
-++ if (bbr_param(sk, ecn_reprobe_gain) && bbr->ecn_eligible &&
-++ bbr->ecn_in_cycle && !bbr->loss_in_cycle &&
-++ inet_csk(sk)->icsk_ca_state == TCP_CA_Open) {
-++ /* Calculate n so that when bbr_raise_inflight_hi_slope()
-++ * computes growth_this_round as 2^n it will be roughly the
-++ * desired volume of data (inflight_hi*ecn_reprobe_gain).
-++ */
-++ n = ilog2((((u64)bbr->inflight_hi *
-++ bbr_param(sk, ecn_reprobe_gain)) >> BBR_SCALE));
-++ bbr_start_bw_probe_refill(sk, n);
-++ return true;
-++ }
-++
-++ if (bbr_has_elapsed_in_phase(sk, bbr->probe_wait_us) ||
-++ bbr_is_reno_coexistence_probe_time(sk)) {
-++ bbr_start_bw_probe_refill(sk, 0);
-++ return true;
-++ }
-++ return false;
-++}
-++
-++/* Is it time to transition from PROBE_DOWN to PROBE_CRUISE? */
-++static bool bbr_check_time_to_cruise(struct sock *sk, u32 inflight, u32 bw)
-++{
-++ /* Always need to pull inflight down to leave headroom in queue. */
-++ if (inflight > bbr_inflight_with_headroom(sk))
-++ return false;
-++
-++ return inflight <= bbr_inflight(sk, bw, BBR_UNIT);
-++}
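
[Illustrative aside — not part of the patch. The ilog2() call in
bbr_check_time_to_probe_bw() picks bw_probe_up_rounds so that the
power-of-two growth computed in bbr_raise_inflight_hi_slope() lands near
the desired reprobe volume. A rough standalone sketch; the 0.5 reprobe
gain is an assumed example value, not taken from this patch.]

    #include <stdint.h>
    #include <stdio.h>

    #define BBR_SCALE 8

    /* Portable stand-in for the kernel's ilog2(): floor(log2(x)). */
    static unsigned int ilog2_u64(uint64_t x)
    {
        unsigned int n = 0;

        while (x >>= 1)
            n++;
        return n;
    }

    int main(void)
    {
        uint32_t inflight_hi = 1000;  /* packets */
        uint32_t gain = 128;          /* 0.5 in BBR_SCALE fixed point */
        uint64_t want = (uint64_t)inflight_hi * gain >> BBR_SCALE; /* 500 */
        unsigned int n = ilog2_u64(want); /* 8, since 256 <= 500 < 512 */

        printf("refill rounds n=%u (growth ~2^%u pkts per round)\n", n, n);
        return 0;
    }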
-++
-++/* PROBE_BW state machine: cruise, refill, probe for bw, or drain? */
-++static void bbr_update_cycle_phase(struct sock *sk,
-++ const struct rate_sample *rs,
-++ struct bbr_context *ctx)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ bool is_bw_probe_done = false;
-++ u32 inflight, bw;
-++
-++ if (!bbr_full_bw_reached(sk))
-++ return;
-++
-++ /* In DRAIN, PROBE_BW, or PROBE_RTT, adjust upper bounds. */
-++ if (bbr_adapt_upper_bounds(sk, rs, ctx))
-++ return; /* already decided state transition */
-++
-++ if (bbr->mode != BBR_PROBE_BW)
-++ return;
-++
-++ inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
-++ bw = bbr_max_bw(sk);
-++
-++ switch (bbr->cycle_idx) {
-++ /* First we spend most of our time cruising with a pacing_gain of 1.0,
-++ * which paces at the estimated bw, to try to fully use the pipe
-++ * without building queue. If we encounter loss/ECN marks, we adapt
-++ * by slowing down.
-++ */
-++ case BBR_BW_PROBE_CRUISE:
-++ if (bbr_check_time_to_probe_bw(sk, rs))
-++ return; /* already decided state transition */
-++ break;
-++
-++ /* After cruising, when it's time to probe, we first "refill": we send
-++ * at the estimated bw to fill the pipe, before probing higher and
-++ * knowingly risking overflowing the bottleneck buffer (causing loss).
-++ */
-++ case BBR_BW_PROBE_REFILL:
-++ if (bbr->round_start) {
-++ /* After one full round trip of sending in REFILL, we
-++ * start to see bw samples reflecting our REFILL, which
-++ * may be putting too much data in flight.
-++ */
-++ bbr->bw_probe_samples = 1;
-++ bbr_start_bw_probe_up(sk, ctx);
-++ }
-++ break;
-++
-++ /* After we refill the pipe, we probe by using a pacing_gain > 1.0, to
-++ * probe for bw. If we have not seen loss/ECN, we try to raise inflight
-++ * to at least pacing_gain*BDP; note that this may take more than
-++ * min_rtt if min_rtt is small (e.g. on a LAN).
-++ *
-++ * We terminate PROBE_UP bandwidth probing upon any of the following:
-++ *
-++ * (1) We've pushed inflight up to hit the inflight_hi target set in the
-++ * most recent previous bw probe phase. Thus we want to start
-++ * draining the queue immediately because it's very likely the most
-++ * recently sent packets will fill the queue and cause drops.
-++ * (2) If inflight_hi has not limited bandwidth growth recently, and
-++ * yet delivered bandwidth has not increased much recently
-++ * (bbr->full_bw_now).
-++ * (3) Loss filter says loss rate is "too high".
-++ * (4) ECN filter says ECN mark rate is "too high".
-++ *
-++ * (1) (2) checked here, (3) (4) checked in bbr_is_inflight_too_high()
-++ */
-++ case BBR_BW_PROBE_UP:
-++ if (bbr->prev_probe_too_high &&
-++ inflight >= bbr->inflight_hi) {
-++ bbr->stopped_risky_probe = 1;
-++ is_bw_probe_done = true;
-++ } else {
-++ if (tp->is_cwnd_limited &&
-++ tcp_snd_cwnd(tp) >= bbr->inflight_hi) {
-++ /* inflight_hi is limiting bw growth */
-++ bbr_reset_full_bw(sk);
-++ bbr->full_bw = ctx->sample_bw;
-++ } else if (bbr->full_bw_now) {
-++ /* Plateau in estimated bw. Pipe looks full. */
-++ is_bw_probe_done = true;
-++ }
-++ }
-++ if (is_bw_probe_done) {
-++ bbr->prev_probe_too_high = 0; /* no loss/ECN (yet) */
-++ bbr_start_bw_probe_down(sk); /* restart w/ down */
-++ }
-++ break;
-++
-++ /* After probing in PROBE_UP, we have usually accumulated some data in
-++ * the bottleneck buffer (if bw probing didn't find more bw). We next
-++ * enter PROBE_DOWN to try to drain any excess data from the queue. To
-++ * do this, we use a pacing_gain < 1.0. We hold this pacing gain until
-++ * our inflight is less than that target cruising point, which is the
-++ * minimum of (a) the amount needed to leave headroom, and (b) the
-++ * estimated BDP. Once inflight falls to match the target, we estimate
-++ * the queue is drained; persisting would underutilize the pipe.
-++ */
-++ case BBR_BW_PROBE_DOWN:
-++ if (bbr_check_time_to_probe_bw(sk, rs))
-++ return; /* already decided state transition */
-++ if (bbr_check_time_to_cruise(sk, inflight, bw))
-++ bbr_start_bw_probe_cruise(sk);
-++ break;
-++
-++ default:
-++ WARN_ONCE(1, "BBR invalid cycle index %u\n", bbr->cycle_idx);
-++ }
-++}
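
[Illustrative aside — not part of the patch. The inflight targets this
state machine compares against come from bbr_inflight(), defined elsewhere
in the patch; this sketch assumes it is essentially BDP = bw * min_rtt
expressed in packets. The units and MSS value below are illustrative
assumptions.]

    #include <stdint.h>
    #include <stdio.h>

    /* Rough BDP-in-packets estimate: bw [bytes/sec] * rtt [usec] / mss. */
    static uint32_t bdp_packets(uint64_t bw_bytes_per_sec,
                                uint32_t min_rtt_us, uint32_t mss)
    {
        uint64_t bdp_bytes = bw_bytes_per_sec * min_rtt_us / 1000000;

        return (uint32_t)(bdp_bytes / mss);
    }

    int main(void)
    {
        /* 100 Mbit/s (~12.5 MB/s), 20 ms RTT, 1448-byte MSS => 172 pkts */
        printf("%u packets\n", bdp_packets(12500000, 20000, 1448));
        return 0;
    }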
-++
-++/* Exiting PROBE_RTT, so return to bandwidth probing in STARTUP or PROBE_BW. */
-++static void bbr_exit_probe_rtt(struct sock *sk)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ bbr_reset_lower_bounds(sk);
-++ if (bbr_full_bw_reached(sk)) {
-++ bbr->mode = BBR_PROBE_BW;
-++ /* Raising inflight after PROBE_RTT may cause loss, so reset
-++ * the PROBE_BW clock and schedule the next bandwidth probe for
-++ * a friendly and randomized future point in time.
-++ */
-++ bbr_start_bw_probe_down(sk);
-++ /* Since we are exiting PROBE_RTT, we know inflight is
-++ * below our estimated BDP, so it is reasonable to cruise.
-++ */
-++ bbr_start_bw_probe_cruise(sk);
-++ } else {
-++ bbr->mode = BBR_STARTUP;
-++ }
-++}
-++
-++/* Exit STARTUP based on loss rate > 1% and loss gaps in round >= N. Wait until
-++ * the end of the round in recovery to get a good estimate of how many packets
-++ * have been lost, and how many we need to drain with a low pacing rate.
-++ */
-++static void bbr_check_loss_too_high_in_startup(struct sock *sk,
-++ const struct rate_sample *rs)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (bbr_full_bw_reached(sk))
-++ return;
-++
-++ /* For STARTUP exit, check the loss rate at the end of each round trip
-++ * of Recovery episodes in STARTUP. We check the loss rate at the end
-++ * of the round trip to filter out noisy/low loss and have a better
-++ * sense of inflight (extent of loss), so we can drain more accurately.
-++ */
-++ if (rs->losses && bbr->loss_events_in_round < 0xf)
-++ bbr->loss_events_in_round++; /* update saturating counter */
-++ if (bbr_param(sk, full_loss_cnt) && bbr->loss_round_start &&
-++ inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery &&
-++ bbr->loss_events_in_round >= bbr_param(sk, full_loss_cnt) &&
-++ bbr_is_inflight_too_high(sk, rs)) {
-++ bbr_handle_queue_too_high_in_startup(sk);
-++ return;
-++ }
-++ if (bbr->loss_round_start)
-++ bbr->loss_events_in_round = 0;
-++}
-++
-++/* Estimate when the pipe is full, using the change in delivery rate: BBR
-++ * estimates bw probing filled the pipe if the estimated bw hasn't changed by
-++ * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
-++ * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
-++ * higher rwin, 3: we get higher delivery rate samples. Or transient
-++ * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
-++ * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
-++ */
-++static void bbr_check_full_bw_reached(struct sock *sk,
-++ const struct rate_sample *rs,
-++ struct bbr_context *ctx)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 bw_thresh, full_cnt, thresh;
-++
-++ if (bbr->full_bw_now || rs->is_app_limited)
-++ return;
-++
-++ thresh = bbr_param(sk, full_bw_thresh);
-++ full_cnt = bbr_param(sk, full_bw_cnt);
-++ bw_thresh = (u64)bbr->full_bw * thresh >> BBR_SCALE;
-++ if (ctx->sample_bw >= bw_thresh) {
-++ bbr_reset_full_bw(sk);
-++ bbr->full_bw = ctx->sample_bw;
-++ return;
-++ }
-++ if (!bbr->round_start)
-++ return;
-++ ++bbr->full_bw_cnt;
-++ bbr->full_bw_now = bbr->full_bw_cnt >= full_cnt;
-++ bbr->full_bw_reached |= bbr->full_bw_now;
-++}
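
[Illustrative aside — not part of the patch. The plateau test in
bbr_check_full_bw_reached() reduces to: a round's bw sample must beat
full_bw by the growth threshold, else a counter ticks; three flat rounds
declare the pipe full. A minimal sketch with assumed defaults (1.25
threshold, count of 3, matching the comment above); the kernel version
also gates on round_start and app-limited samples, omitted here.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BBR_SCALE 8
    #define FULL_BW_THRESH ((1 << BBR_SCALE) * 5 / 4) /* 1.25 */
    #define FULL_BW_CNT 3

    struct full_bw_state {
        uint64_t full_bw;   /* best bw seen in current growth streak */
        uint32_t flat_cnt;  /* rounds without ~25% growth */
    };

    /* Feed one per-round bw sample; returns true once the pipe looks full. */
    static bool full_bw_check(struct full_bw_state *s, uint64_t sample_bw)
    {
        uint64_t thresh = s->full_bw * FULL_BW_THRESH >> BBR_SCALE;

        if (sample_bw >= thresh) {   /* still growing: restart the streak */
            s->full_bw = sample_bw;
            s->flat_cnt = 0;
            return false;
        }
        return ++s->flat_cnt >= FULL_BW_CNT;
    }

    int main(void)
    {
        struct full_bw_state s = { .full_bw = 1, .flat_cnt = 0 };
        uint64_t rounds[] = { 100, 200, 400, 420, 430, 435 }; /* Mbit/s */

        for (unsigned int i = 0; i < 6; i++) /* full at round 5 */
            printf("round %u: full=%d\n", i, full_bw_check(&s, rounds[i]));
        return 0;
    }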
-++
-++/* If pipe is probably full, drain the queue and then enter steady-state. */
-++static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs,
-++ struct bbr_context *ctx)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
-++ bbr->mode = BBR_DRAIN; /* drain queue we created */
-++ /* Set ssthresh to export purely for monitoring, to signal
-++ * completion of initial STARTUP by setting to a non-
-++ * TCP_INFINITE_SSTHRESH value (ssthresh is not used by BBR).
-++ */
-++ tcp_sk(sk)->snd_ssthresh =
-++ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
-++ bbr_reset_congestion_signals(sk);
-++ } /* fall through to check if in-flight is already small: */
-++ if (bbr->mode == BBR_DRAIN &&
-++ bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
-++ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT)) {
-++ bbr->mode = BBR_PROBE_BW;
-++ bbr_start_bw_probe_down(sk);
-++ }
-++}
-++
-++static void bbr_update_model(struct sock *sk, const struct rate_sample *rs,
-++ struct bbr_context *ctx)
-++{
-++ bbr_update_congestion_signals(sk, rs, ctx);
-+ bbr_update_ack_aggregation(sk, rs);
-+- bbr_update_cycle_phase(sk, rs);
-+- bbr_check_full_bw_reached(sk, rs);
-+- bbr_check_drain(sk, rs);
-++ bbr_check_loss_too_high_in_startup(sk, rs);
-++ bbr_check_full_bw_reached(sk, rs, ctx);
-++ bbr_check_drain(sk, rs, ctx);
-++ bbr_update_cycle_phase(sk, rs, ctx);
-+ bbr_update_min_rtt(sk, rs);
-+- bbr_update_gains(sk);
-+ }
-+
-+-__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
-++/* Fast path for app-limited case.
-++ *
-++ * On each ack, we execute bbr state machine, which primarily consists of:
-++ * 1) update model based on new rate sample, and
-++ * 2) update control based on updated model or state change.
-++ *
-++ * There are certain workload/scenarios, e.g. app-limited case, where
-++ * either we can skip updating model or we can skip update of both model
-++ * as well as control. This provides significant softirq cpu savings for
-++ * processing incoming acks.
-++ *
-++ * In case of app-limited, if there is no congestion (loss/ecn) and
-++ * if observed bw sample is less than current estimated bw, then we can
-++ * skip some of the computation in bbr state processing:
-++ *
-++ * - if there is no rtt/mode/phase change: In this case, since all the
-++ * parameters of the network model are constant, we can skip model
-++ * as well as control update.
-++ *
-++ * - else we can skip rest of the model update. But we still need to
-++ * update the control to account for the new rtt/mode/phase.
-++ *
-++ * Returns whether we can take fast path or not.
-++ */
-++static bool bbr_run_fast_path(struct sock *sk, bool *update_model,
-++ const struct rate_sample *rs, struct bbr_context *ctx)
-++{
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ u32 prev_min_rtt_us, prev_mode;
-++
-++ if (bbr_param(sk, fast_path) && bbr->try_fast_path &&
-++ rs->is_app_limited && ctx->sample_bw < bbr_max_bw(sk) &&
-++ !bbr->loss_in_round && !bbr->ecn_in_round) {
-++ prev_mode = bbr->mode;
-++ prev_min_rtt_us = bbr->min_rtt_us;
-++ bbr_check_drain(sk, rs, ctx);
-++ bbr_update_cycle_phase(sk, rs, ctx);
-++ bbr_update_min_rtt(sk, rs);
-++
-++ if (bbr->mode == prev_mode &&
-++ bbr->min_rtt_us == prev_min_rtt_us &&
-++ bbr->try_fast_path) {
-++ return true;
-++ }
-++
-++ /* Skip model update, but control still needs to be updated */
-++ *update_model = false;
-++ }
-++ return false;
-++}
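
[Illustrative aside — not part of the patch. The fast-path gate above is a
conjunction of cheap checks ahead of the expensive model update. One way
to read it as a hedged standalone predicate; field names mirror the patch
but the structure below is purely illustrative.]

    #include <stdbool.h>
    #include <stdint.h>

    struct fast_path_inputs {
        bool fast_path_enabled;  /* module parameter */
        bool try_fast_path;      /* set once state is steady */
        bool is_app_limited;     /* rate sample flag */
        bool loss_in_round;
        bool ecn_in_round;
        uint64_t sample_bw;
        uint64_t max_bw;         /* current bw estimate */
    };

    /* True when this ACK cannot move the model: nothing to (re)learn. */
    static bool can_skip_model_update(const struct fast_path_inputs *in)
    {
        return in->fast_path_enabled && in->try_fast_path &&
               in->is_app_limited && in->sample_bw < in->max_bw &&
               !in->loss_in_round && !in->ecn_in_round;
    }

    int main(void)
    {
        struct fast_path_inputs in = { true, true, true, false, false,
                                       100, 200 };

        return can_skip_model_update(&in) ? 0 : 1; /* exits 0 */
    }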
-++
-++__bpf_kfunc void bbr_main(struct sock *sk, const struct rate_sample *rs)
-+ {
-++ struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+- u32 bw;
-++ struct bbr_context ctx = { 0 };
-++ bool update_model = true;
-++ u32 bw, round_delivered;
-++ int ce_ratio = -1;
-++
-++ round_delivered = bbr_update_round_start(sk, rs, &ctx);
-++ if (bbr->round_start) {
-++ bbr->rounds_since_probe =
-++ min_t(s32, bbr->rounds_since_probe + 1, 0xFF);
-++ ce_ratio = bbr_update_ecn_alpha(sk);
-++ }
-++ bbr_plb(sk, rs, ce_ratio);
-++
-++ bbr->ecn_in_round |= (bbr->ecn_eligible && rs->is_ece);
-++ bbr_calculate_bw_sample(sk, rs, &ctx);
-++ bbr_update_latest_delivery_signals(sk, rs, &ctx);
-+
-+- bbr_update_model(sk, rs);
-++ if (bbr_run_fast_path(sk, &update_model, rs, &ctx))
-++ goto out;
-+
-++ if (update_model)
-++ bbr_update_model(sk, rs, &ctx);
-++
-++ bbr_update_gains(sk);
-+ bw = bbr_bw(sk);
-+ bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
-+- bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
-++ bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain,
-++ tcp_snd_cwnd(tp), &ctx);
-++ bbr_bound_cwnd_for_inflight_model(sk);
-++
-++out:
-++ bbr_advance_latest_delivery_signals(sk, rs, &ctx);
-++ bbr->prev_ca_state = inet_csk(sk)->icsk_ca_state;
-++ bbr->loss_in_cycle |= rs->lost > 0;
-++ bbr->ecn_in_cycle |= rs->delivered_ce > 0;
-+ }
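
[Illustrative aside — not part of the patch. bbr_main() above follows the
classic split: derive a bw/RTT model from the rate sample, then set the
two control outputs, pacing rate and cwnd. A schematic of that shape,
detached from kernel types; everything here, including the gain of 2 and
byte units, is an illustrative assumption.]

    #include <stdint.h>
    #include <stdio.h>

    struct model { uint64_t bw; uint32_t min_rtt_us; };
    struct control { uint64_t pacing_rate; uint32_t cwnd_bytes; };

    /* One ACK's worth of work: model update first, control second. */
    static void cong_control_step(struct model *m, struct control *c,
                                  uint64_t sample_bw, uint32_t sample_rtt_us)
    {
        /* 1) model: track max bw and min RTT over their windows */
        if (sample_bw > m->bw)
            m->bw = sample_bw;
        if (sample_rtt_us < m->min_rtt_us)
            m->min_rtt_us = sample_rtt_us;

        /* 2) control: pace at (gain * bw), cap inflight near gain * BDP */
        c->pacing_rate = m->bw;  /* gain 1.0, i.e. cruise */
        c->cwnd_bytes = (uint32_t)(m->bw * m->min_rtt_us / 1000000) * 2;
    }

    int main(void)
    {
        struct model m = { 0, ~0u };
        struct control c = { 0, 0 };

        cong_control_step(&m, &c, 12500000, 20000); /* 100 Mbit/s, 20 ms */
        printf("pace=%llu B/s cwnd=%u B\n",
               (unsigned long long)c.pacing_rate, c.cwnd_bytes);
        return 0;
    }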
-+
-+ __bpf_kfunc static void bbr_init(struct sock *sk)
-+@@ -1055,20 +2079,21 @@ __bpf_kfunc static void bbr_init(struct
-+ struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+
-+- bbr->prior_cwnd = 0;
-++ bbr->initialized = 1;
-++
-++ bbr->init_cwnd = min(0x7FU, tcp_snd_cwnd(tp));
-++ bbr->prior_cwnd = tp->prior_cwnd;
-+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-+- bbr->rtt_cnt = 0;
-+ bbr->next_rtt_delivered = tp->delivered;
-+ bbr->prev_ca_state = TCP_CA_Open;
-+- bbr->packet_conservation = 0;
-+
-+ bbr->probe_rtt_done_stamp = 0;
-+ bbr->probe_rtt_round_done = 0;
-++ bbr->probe_rtt_min_us = tcp_min_rtt(tp);
-++ bbr->probe_rtt_min_stamp = tcp_jiffies32;
-+ bbr->min_rtt_us = tcp_min_rtt(tp);
-+ bbr->min_rtt_stamp = tcp_jiffies32;
-+
-+- minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
-+-
-+ bbr->has_seen_rtt = 0;
-+ bbr_init_pacing_rate_from_rtt(sk);
-+
-+@@ -1079,7 +2104,7 @@ __bpf_kfunc static void bbr_init(struct
-+ bbr->full_bw_cnt = 0;
-+ bbr->cycle_mstamp = 0;
-+ bbr->cycle_idx = 0;
-+- bbr_reset_lt_bw_sampling(sk);
-++
-+ bbr_reset_startup_mode(sk);
-+
-+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
-+@@ -1089,78 +2114,236 @@ __bpf_kfunc static void bbr_init(struct
-+ bbr->extra_acked[0] = 0;
-+ bbr->extra_acked[1] = 0;
-+
-++ bbr->ce_state = 0;
-++ bbr->prior_rcv_nxt = tp->rcv_nxt;
-++ bbr->try_fast_path = 0;
-++
-+ cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
-++
-++ /* Start sampling ECN mark rate after first full flight is ACKed: */
-++ bbr->loss_round_delivered = tp->delivered + 1;
-++ bbr->loss_round_start = 0;
-++ bbr->undo_bw_lo = 0;
-++ bbr->undo_inflight_lo = 0;
-++ bbr->undo_inflight_hi = 0;
-++ bbr->loss_events_in_round = 0;
-++ bbr->startup_ecn_rounds = 0;
-++ bbr_reset_congestion_signals(sk);
-++ bbr->bw_lo = ~0U;
-++ bbr->bw_hi[0] = 0;
-++ bbr->bw_hi[1] = 0;
-++ bbr->inflight_lo = ~0U;
-++ bbr->inflight_hi = ~0U;
-++ bbr_reset_full_bw(sk);
-++ bbr->bw_probe_up_cnt = ~0U;
-++ bbr->bw_probe_up_acks = 0;
-++ bbr->bw_probe_up_rounds = 0;
-++ bbr->probe_wait_us = 0;
-++ bbr->stopped_risky_probe = 0;
-++ bbr->ack_phase = BBR_ACKS_INIT;
-++ bbr->rounds_since_probe = 0;
-++ bbr->bw_probe_samples = 0;
-++ bbr->prev_probe_too_high = 0;
-++ bbr->ecn_eligible = 0;
-++ bbr->ecn_alpha = bbr_param(sk, ecn_alpha_init);
-++ bbr->alpha_last_delivered = 0;
-++ bbr->alpha_last_delivered_ce = 0;
-++ bbr->plb.pause_until = 0;
-++
-++ tp->fast_ack_mode = bbr_fast_ack_mode ? 1 : 0;
-+ }
-+
-+-__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
-++/* BBR marks the current round trip as a loss round. */
-++static void bbr_note_loss(struct sock *sk)
-+ {
-+- /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
-+- return 3;
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-++ /* Capture "current" data over the full round trip of loss, to
-++ * have a better chance of observing the full capacity of the path.
-++ */
-++ if (!bbr->loss_in_round) /* first loss in this round trip? */
-++ bbr->loss_round_delivered = tp->delivered; /* set round trip */
-++ bbr->loss_in_round = 1;
-++ bbr->loss_in_cycle = 1;
-+ }
-+
-+-/* In theory BBR does not need to undo the cwnd since it does not
-+- * always reduce cwnd on losses (see bbr_main()). Keep it for now.
-+- */
-++/* Core TCP stack informs us that the given skb was just marked lost. */
-++__bpf_kfunc static void bbr_skb_marked_lost(struct sock *sk,
-++ const struct sk_buff *skb)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-++ struct rate_sample rs = {};
-++
-++ bbr_note_loss(sk);
-++
-++ if (!bbr->bw_probe_samples)
-++ return; /* not an skb sent while probing for bandwidth */
-++ if (unlikely(!scb->tx.delivered_mstamp))
-++ return; /* skb was SACKed, reneged, marked lost; ignore it */
-++ /* We are probing for bandwidth. Construct a rate sample that
-++ * estimates what happened in the flight leading up to this lost skb,
-++ * then see if the loss rate went too high, and if so at which packet.
-++ */
-++ rs.tx_in_flight = scb->tx.in_flight;
-++ rs.lost = tp->lost - scb->tx.lost;
-++ rs.is_app_limited = scb->tx.is_app_limited;
-++ if (bbr_is_inflight_too_high(sk, &rs)) {
-++ rs.tx_in_flight = bbr_inflight_hi_from_lost_skb(sk, &rs, skb);
-++ bbr_handle_inflight_too_high(sk, &rs);
-++ }
-++}
-++
-++static void bbr_run_loss_probe_recovery(struct sock *sk)
-++{
-++ struct tcp_sock *tp = tcp_sk(sk);
-++ struct bbr *bbr = inet_csk_ca(sk);
-++ struct rate_sample rs = {0};
-++
-++ bbr_note_loss(sk);
-++
-++ if (!bbr->bw_probe_samples)
-++ return; /* not sent while probing for bandwidth */
-++ /* We are probing for bandwidth. Construct a rate sample that
-++ * estimates what happened in the flight leading up to this
-++ * loss, then see if the loss rate went too high.
-++ */
-++ rs.lost = 1; /* TLP probe repaired loss of a single segment */
-++ rs.tx_in_flight = bbr->inflight_latest + rs.lost;
-++ rs.is_app_limited = tp->tlp_orig_data_app_limited;
-++ if (bbr_is_inflight_too_high(sk, &rs))
-++ bbr_handle_inflight_too_high(sk, &rs);
-++}
-++
-++/* Revert short-term model if current loss recovery event was spurious. */
-+ __bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk)
-+ {
-+ struct bbr *bbr = inet_csk_ca(sk);
-+
-+- bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
-+- bbr->full_bw_cnt = 0;
-+- bbr_reset_lt_bw_sampling(sk);
-+- return tcp_snd_cwnd(tcp_sk(sk));
-++ bbr_reset_full_bw(sk); /* spurious slow-down; reset full bw detector */
-++ bbr->loss_in_round = 0;
-++
-++ /* Revert to cwnd and other state saved before loss episode. */
-++ bbr->bw_lo = max(bbr->bw_lo, bbr->undo_bw_lo);
-++ bbr->inflight_lo = max(bbr->inflight_lo, bbr->undo_inflight_lo);
-++ bbr->inflight_hi = max(bbr->inflight_hi, bbr->undo_inflight_hi);
-++ bbr->try_fast_path = 0; /* take slow path to set proper cwnd, pacing */
-++ return bbr->prior_cwnd;
-+ }
-+
-+-/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
-++/* Entering loss recovery, so save state for when we undo recovery. */
-+ __bpf_kfunc static u32 bbr_ssthresh(struct sock *sk)
-+ {
-++ struct bbr *bbr = inet_csk_ca(sk);
-++
-+ bbr_save_cwnd(sk);
-++ /* For undo, save state that adapts based on loss signal. */
-++ bbr->undo_bw_lo = bbr->bw_lo;
-++ bbr->undo_inflight_lo = bbr->inflight_lo;
-++ bbr->undo_inflight_hi = bbr->inflight_hi;
-+ return tcp_sk(sk)->snd_ssthresh;
-+ }
-+
-++static enum tcp_bbr_phase bbr_get_phase(struct bbr *bbr)
-++{
-++ switch (bbr->mode) {
-++ case BBR_STARTUP:
-++ return BBR_PHASE_STARTUP;
-++ case BBR_DRAIN:
-++ return BBR_PHASE_DRAIN;
-++ case BBR_PROBE_BW:
-++ break;
-++ case BBR_PROBE_RTT:
-++ return BBR_PHASE_PROBE_RTT;
-++ default:
-++ return BBR_PHASE_INVALID;
-++ }
-++ switch (bbr->cycle_idx) {
-++ case BBR_BW_PROBE_UP:
-++ return BBR_PHASE_PROBE_BW_UP;
-++ case BBR_BW_PROBE_DOWN:
-++ return BBR_PHASE_PROBE_BW_DOWN;
-++ case BBR_BW_PROBE_CRUISE:
-++ return BBR_PHASE_PROBE_BW_CRUISE;
-++ case BBR_BW_PROBE_REFILL:
-++ return BBR_PHASE_PROBE_BW_REFILL;
-++ default:
-++ return BBR_PHASE_INVALID;
-++ }
-++}
-++
-+ static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
-+- union tcp_cc_info *info)
-++ union tcp_cc_info *info)
-+ {
-+ if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
-+ ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-+- struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+- u64 bw = bbr_bw(sk);
-+-
-+- bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
-+- memset(&info->bbr, 0, sizeof(info->bbr));
-+- info->bbr.bbr_bw_lo = (u32)bw;
-+- info->bbr.bbr_bw_hi = (u32)(bw >> 32);
-+- info->bbr.bbr_min_rtt = bbr->min_rtt_us;
-+- info->bbr.bbr_pacing_gain = bbr->pacing_gain;
-+- info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
-++ u64 bw = bbr_bw_bytes_per_sec(sk, bbr_bw(sk));
-++ u64 bw_hi = bbr_bw_bytes_per_sec(sk, bbr_max_bw(sk));
-++ u64 bw_lo = bbr->bw_lo == ~0U ?
-++ ~0ULL : bbr_bw_bytes_per_sec(sk, bbr->bw_lo);
-++ struct tcp_bbr_info *bbr_info = &info->bbr;
-++
-++ memset(bbr_info, 0, sizeof(*bbr_info));
-++ bbr_info->bbr_bw_lo = (u32)bw;
-++ bbr_info->bbr_bw_hi = (u32)(bw >> 32);
-++ bbr_info->bbr_min_rtt = bbr->min_rtt_us;
-++ bbr_info->bbr_pacing_gain = bbr->pacing_gain;
-++ bbr_info->bbr_cwnd_gain = bbr->cwnd_gain;
-++ bbr_info->bbr_bw_hi_lsb = (u32)bw_hi;
-++ bbr_info->bbr_bw_hi_msb = (u32)(bw_hi >> 32);
-++ bbr_info->bbr_bw_lo_lsb = (u32)bw_lo;
-++ bbr_info->bbr_bw_lo_msb = (u32)(bw_lo >> 32);
-++ bbr_info->bbr_mode = bbr->mode;
-++ bbr_info->bbr_phase = (__u8)bbr_get_phase(bbr);
-++ bbr_info->bbr_version = (__u8)BBR_VERSION;
-++ bbr_info->bbr_inflight_lo = bbr->inflight_lo;
-++ bbr_info->bbr_inflight_hi = bbr->inflight_hi;
-++ bbr_info->bbr_extra_acked = bbr_extra_acked(sk);
-+ *attr = INET_DIAG_BBRINFO;
-+- return sizeof(info->bbr);
-++ return sizeof(*bbr_info);
-+ }
-+ return 0;
-+ }
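
[Illustrative aside — not part of the patch. bbr_get_info() above packs
each 64-bit bytes-per-second rate into two 32-bit fields, which tools like
`ss` read back via inet_diag. A userspace sketch of the reassembly; the
struct below is an abbreviated stand-in, with field names following the
patch.]

    #include <stdint.h>
    #include <stdio.h>

    /* Abbreviated stand-in for the kernel's struct tcp_bbr_info. */
    struct tcp_bbr_info_lite {
        uint32_t bbr_bw_lo;   /* lower 32 bits of bw (bytes/sec) */
        uint32_t bbr_bw_hi;   /* upper 32 bits of bw (bytes/sec) */
        uint32_t bbr_min_rtt; /* min-filtered RTT in usec */
    };

    int main(void)
    {
        struct tcp_bbr_info_lite info = { 0x540be400u, 0x2u, 5000 };
        uint64_t bw = ((uint64_t)info.bbr_bw_hi << 32) | info.bbr_bw_lo;

        /* prints bw=10000000000 bytes/sec, min_rtt=5000 us */
        printf("bw=%llu bytes/sec, min_rtt=%u us\n",
               (unsigned long long)bw, info.bbr_min_rtt);
        return 0;
    }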
-+
-+ __bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state)
-+ {
-++ struct tcp_sock *tp = tcp_sk(sk);
-+ struct bbr *bbr = inet_csk_ca(sk);
-+
-+ if (new_state == TCP_CA_Loss) {
-+- struct rate_sample rs = { .losses = 1 };
-+
-+ bbr->prev_ca_state = TCP_CA_Loss;
-+- bbr->full_bw = 0;
-+- bbr->round_start = 1; /* treat RTO like end of a round */
-+- bbr_lt_bw_sampling(sk, &rs);
-++ tcp_plb_update_state_upon_rto(sk, &bbr->plb);
-++ /* The tcp_write_timeout() call to sk_rethink_txhash() likely
-++ * repathed this flow, so re-learn the min network RTT on the
-++ * new path:
-++ */
-++ bbr_reset_full_bw(sk);
-++ if (!bbr_is_probing_bandwidth(sk) && bbr->inflight_lo == ~0U) {
-++ /* bbr_adapt_lower_bounds() needs cwnd before
-++ * we suffered an RTO, to update inflight_lo:
-++ */
-++ bbr->inflight_lo =
-++ max(tcp_snd_cwnd(tp), bbr->prior_cwnd);
-++ }
-++ } else if (bbr->prev_ca_state == TCP_CA_Loss &&
-++ new_state != TCP_CA_Loss) {
-++ bbr_exit_loss_recovery(sk);
-+ }
-+ }
-+
-++
-+ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
-+- .flags = TCP_CONG_NON_RESTRICTED,
-++ .flags = TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
-+ .name = "bbr",
-+ .owner = THIS_MODULE,
-+ .init = bbr_init,
-+ .cong_control = bbr_main,
-+ .sndbuf_expand = bbr_sndbuf_expand,
-++ .skb_marked_lost = bbr_skb_marked_lost,
-+ .undo_cwnd = bbr_undo_cwnd,
-+ .cwnd_event = bbr_cwnd_event,
-+ .ssthresh = bbr_ssthresh,
-+@@ -1175,10 +2358,11 @@ BTF_SET8_START(tcp_bbr_check_kfunc_ids)
-+ BTF_ID_FLAGS(func, bbr_init)
-+ BTF_ID_FLAGS(func, bbr_main)
-+ BTF_ID_FLAGS(func, bbr_sndbuf_expand)
-++BTF_ID_FLAGS(func, bbr_skb_marked_lost)
-+ BTF_ID_FLAGS(func, bbr_undo_cwnd)
-+ BTF_ID_FLAGS(func, bbr_cwnd_event)
-+ BTF_ID_FLAGS(func, bbr_ssthresh)
-+-BTF_ID_FLAGS(func, bbr_min_tso_segs)
-++BTF_ID_FLAGS(func, bbr_tso_segs)
-+ BTF_ID_FLAGS(func, bbr_set_state)
-+ #endif
-+ #endif
-+@@ -1213,5 +2397,12 @@ MODULE_AUTHOR("Van Jacobson <vanj@google
-+ MODULE_AUTHOR("Neal Cardwell <[email protected]>");
-+ MODULE_AUTHOR("Yuchung Cheng <[email protected]>");
-+ MODULE_AUTHOR("Soheil Hassas Yeganeh <[email protected]>");
-++MODULE_AUTHOR("Priyaranjan Jha <[email protected]>");
-++MODULE_AUTHOR("Yousuk Seung <[email protected]>");
-++MODULE_AUTHOR("Kevin Yang <[email protected]>");
-++MODULE_AUTHOR("Arjun Roy <[email protected]>");
-++MODULE_AUTHOR("David Morley <[email protected]>");
-++
-+ MODULE_LICENSE("Dual BSD/GPL");
-+ MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
-++MODULE_VERSION(__stringify(BBR_VERSION));
-diff --git a/target/linux/generic/hack-6.6/601-17-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-ensure-ECN-enabled-BBR-flows-set-ECT-.patch b/target/linux/generic/hack-6.6/601-17-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-ensure-ECN-enabled-BBR-flows-set-ECT-.patch
-new file mode 100644
-index 00000000000000..a19a77079639cd
--- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-17-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-ensure-ECN-enabled-BBR-flows-set-ECT-.patch
-@@ -0,0 +1,58 @@
-+From f17f10618636cc35a0b8c3ccee3023dc4c62b956 Mon Sep 17 00:00:00 2001
-+From: Adithya Abraham Philip <[email protected]>
-+Date: Fri, 11 Jun 2021 21:56:10 +0000
-+Subject: [PATCH 17/19] net-tcp_bbr: v3: ensure ECN-enabled BBR flows set ECT
-+ on retransmits
-+
-+Adds a new flag TCP_ECN_ECT_PERMANENT that is used by CCAs to
-+indicate that retransmitted packets and pure ACKs must have the
-+ECT bit set. This is necessary for BBR, which when using
-+ECN expects ECT to be set even on retransmitted packets and ACKs.
-+
-+Previous to this addition of TCP_ECN_ECT_PERMANENT, CCAs which can use
-+ECN but don't "need" it did not have a way to indicate that ECT should
-+be set on retransmissions/ACKs.
-+
-+Signed-off-by: Adithya Abraham Philip <[email protected]>
-+Signed-off-by: Neal Cardwell <[email protected]>
-+Change-Id: I8b048eaab35e136fe6501ef6cd89fd9faa15e6d2
-+---
-+ include/net/tcp.h | 1 +
-+ net/ipv4/tcp_bbr.c | 3 +++
-+ net/ipv4/tcp_output.c | 3 ++-
-+ 3 files changed, 6 insertions(+), 1 deletion(-)
-+
-+--- a/include/net/tcp.h
-++++ b/include/net/tcp.h
-+@@ -373,6 +373,7 @@ static inline void tcp_dec_quickack_mode
-+ #define TCP_ECN_DEMAND_CWR 4
-+ #define TCP_ECN_SEEN 8
-+ #define TCP_ECN_LOW 16
-++#define TCP_ECN_ECT_PERMANENT 32
-+
-+ enum tcp_tw_status {
-+ TCP_TW_SUCCESS = 0,
-+--- a/net/ipv4/tcp_bbr.c
-++++ b/net/ipv4/tcp_bbr.c
-+@@ -2151,6 +2151,9 @@ __bpf_kfunc static void bbr_init(struct
-+ bbr->plb.pause_until = 0;
-+
-+ tp->fast_ack_mode = bbr_fast_ack_mode ? 1 : 0;
-++
-++ if (bbr_can_use_ecn(sk))
-++ tp->ecn_flags |= TCP_ECN_ECT_PERMANENT;
-+ }
-+
-+ /* BBR marks the current round trip as a loss round. */
-+--- a/net/ipv4/tcp_output.c
-++++ b/net/ipv4/tcp_output.c
-+@@ -388,7 +388,8 @@ static void tcp_ecn_send(struct sock *sk
-+ th->cwr = 1;
-+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-+ }
-+- } else if (!tcp_ca_needs_ecn(sk)) {
-++ } else if (!(tp->ecn_flags & TCP_ECN_ECT_PERMANENT) &&
-++ !tcp_ca_needs_ecn(sk)) {
-+ /* ACK or retransmitted segment: clear ECT|CE */
-+ INET_ECN_dontxmit(sk);
-+ }
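
[Illustrative aside — not part of the patch. TCP_ECN_ECT_PERMANENT joins
the existing ecn_flags bits (4, 8, 16, 32, as defined in the hunk above),
so the tcp_ecn_send() change is a plain bit test. A toy illustration of
the same mask logic:]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCP_ECN_DEMAND_CWR    4
    #define TCP_ECN_SEEN          8
    #define TCP_ECN_LOW          16
    #define TCP_ECN_ECT_PERMANENT 32

    /* Mirrors the tcp_ecn_send() condition: only clear ECT when neither
     * the permanent-ECT flag nor a CA that needs ECN is in effect.
     */
    static bool may_clear_ect(uint32_t ecn_flags, bool ca_needs_ecn)
    {
        return !(ecn_flags & TCP_ECN_ECT_PERMANENT) && !ca_needs_ecn;
    }

    int main(void)
    {
        uint32_t flags = TCP_ECN_SEEN | TCP_ECN_ECT_PERMANENT;

        printf("clear ECT on retransmit? %s\n",
               may_clear_ect(flags, false) ? "yes" : "no"); /* no */
        return 0;
    }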
-diff --git a/target/linux/generic/hack-6.6/601-18-bbr-v3-upstream-prep-2024-02-19-01-tcp-export-TCPI_OPT_ECN_LOW-in-tcp_info-tcpi_options.patch b/target/linux/generic/hack-6.6/601-18-bbr-v3-upstream-prep-2024-02-19-01-tcp-export-TCPI_OPT_ECN_LOW-in-tcp_info-tcpi_options.patch
-new file mode 100644
-index 00000000000000..7440a33eb41d1a
--- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-18-bbr-v3-upstream-prep-2024-02-19-01-tcp-export-TCPI_OPT_ECN_LOW-in-tcp_info-tcpi_options.patch
-@@ -0,0 +1,37 @@
-+From 00a264b290051753e189ec6eadbd0cad3067b77d Mon Sep 17 00:00:00 2001
-+From: Neal Cardwell <[email protected]>
-+Date: Sun, 23 Jul 2023 23:25:34 -0400
-+Subject: [PATCH 18/19] tcp: export TCPI_OPT_ECN_LOW in tcp_info tcpi_options
-+ field
-+
-+Analogous to other important ECN information, export TCPI_OPT_ECN_LOW
-+in tcp_info tcpi_options field.
-+
-+Signed-off-by: Neal Cardwell <[email protected]>
-+Change-Id: I08d8d8c7e8780e6e37df54038ee50301ac5a0320
-+---
-+ include/uapi/linux/tcp.h | 1 +
-+ net/ipv4/tcp.c | 2 ++
-+ 2 files changed, 3 insertions(+)
-+
-+--- a/include/uapi/linux/tcp.h
-++++ b/include/uapi/linux/tcp.h
-+@@ -170,6 +170,7 @@ enum tcp_fastopen_client_fail {
-+ #define TCPI_OPT_ECN 8 /* ECN was negociated at TCP session init */
-+ #define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
-+ #define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
-++#define TCPI_OPT_ECN_LOW 64 /* Low-latency ECN configured at init */
-+
-+ /*
-+ * Sender's congestion state indicating normal or abnormal situations
-+--- a/net/ipv4/tcp.c
-++++ b/net/ipv4/tcp.c
-+@@ -3791,6 +3791,8 @@ void tcp_get_info(struct sock *sk, struc
-+ info->tcpi_options |= TCPI_OPT_ECN;
-+ if (tp->ecn_flags & TCP_ECN_SEEN)
-+ info->tcpi_options |= TCPI_OPT_ECN_SEEN;
-++ if (tp->ecn_flags & TCP_ECN_LOW)
-++ info->tcpi_options |= TCPI_OPT_ECN_LOW;
-+ if (tp->syn_data_acked)
-+ info->tcpi_options |= TCPI_OPT_SYN_DATA;
-+
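
[Illustrative aside — not part of the patch. With the hunk above applied,
userspace can observe low-latency ECN via the tcpi_options bitmask
returned by getsockopt(TCP_INFO). A hedged sketch; TCPI_OPT_ECN_LOW = 64
per the patch, and struct tcp_info with its tcpi_options field comes from
the system headers.]

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef TCPI_OPT_ECN_LOW
    #define TCPI_OPT_ECN_LOW 64   /* as added by the patch above */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        /* On a connected socket this reflects the live flow's ECN state. */
        if (fd >= 0 && getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
            printf("ECN low latency: %s\n",
                   (ti.tcpi_options & TCPI_OPT_ECN_LOW) ? "on" : "off");
        return 0;
    }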
-diff --git a/target/linux/generic/hack-6.6/601-19-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-change-u64-to-unsigned-long-for-bytes.patch b/target/linux/generic/hack-6.6/601-19-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-change-u64-to-unsigned-long-for-bytes.patch
-new file mode 100644
-index 00000000000000..f1d55df7a5d41f
--- /dev/null
-+++ b/target/linux/generic/hack-6.6/601-19-bbr-v3-upstream-prep-2024-02-19-01-net-tcp_bbr-v3-change-u64-to-unsigned-long-for-bytes.patch
-@@ -0,0 +1,36 @@
-+From 5ccaffc5456985926897afe8c79fa21a612eec70 Mon Sep 17 00:00:00 2001
-+From: Chen Minqiang <[email protected]>
-+Date: Sat, 21 Sep 2024 23:51:24 +0800
-+Subject: [PATCH 19/19] net-tcp_bbr: v3: change `u64` to `unsigned long` for
-+ `bytes` in `bbr_tso_segs_generic`
-+
-+This change addresses a build failure on 32-bit systems due to undefined division symbols:
-+
-+arm:
-+ERROR: modpost: "__aeabi_uldivmod" [net/ipv4/tcp_bbr.ko] undefined!
-+ERROR: modpost: "__aeabi_ldivmod" [net/ipv4/tcp_bbr.ko] undefined!
-+
-+x86, mips, ppc:
-+ERROR: modpost: "__udivdi3" [net/ipv4/tcp_bbr.ko] undefined!
-+ERROR: modpost: "__divdi3" [net/ipv4/tcp_bbr.ko] undefined!
-+
-+Since `sk->sk_pacing_rate` is already an `unsigned long`, the `bytes` variable is
-+updated to `unsigned long` to resolve these division issues and ensure compatibility
-+across both 32-bit and 64-bit platforms.
-+
-+Signed-off-by: Chen Minqiang <[email protected]>
-+---
-+ net/ipv4/tcp_bbr.c | 2 +-
-+ 1 file changed, 1 insertion(+), 1 deletion(-)
-+
-+--- a/net/ipv4/tcp_bbr.c
-++++ b/net/ipv4/tcp_bbr.c
-+@@ -481,7 +481,7 @@ static u32 bbr_tso_segs_generic(struct s
-+ {
-+ struct bbr *bbr = inet_csk_ca(sk);
-+ u32 segs, r;
-+- u64 bytes;
-++ unsigned long bytes;
-+
-+ /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
-+ bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
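
[Illustrative aside — not part of the patch. The type change above works
because dividing an `unsigned long` compiles to native instructions on
both 32- and 64-bit targets, while 64-bit division on a 32-bit kernel
must go through div_u64(); the libgcc helpers named in the modpost errors
are not linked into the kernel. A kernel-style sketch of the two safe
patterns, with an assumed 1448-byte divisor for illustration:]

    #include <linux/math64.h>   /* div_u64() */
    #include <linux/types.h>

    static unsigned long budget_native(unsigned long pacing_rate, int shift)
    {
        /* unsigned long: native '/' is fine on 32-bit and 64-bit */
        return (pacing_rate >> shift) / 1448;
    }

    static u64 budget_u64(u64 pacing_rate, int shift)
    {
        /* u64 on a 32-bit kernel: must use div_u64(), never plain '/' */
        return div_u64(pacing_rate >> shift, 1448);
    }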