							- From: Eric Dumazet <[email protected]>
 
- Date: Sat, 4 Jun 2016 12:55:13 -0700
 
- Subject: [PATCH] fq_codel: fix NET_XMIT_CN behavior
 
- My prior attempt to fix the backlogs of parents failed.
 
- If we return NET_XMIT_CN, our parents won't increase their backlog,
 
- so our qdisc_tree_reduce_backlog() should take this into account.
 
- v2: Florian Westphal pointed out that we could drop the packet,
 
- so we need to save qdisc_pkt_len(skb) in a temp variable before
 
- calling fq_codel_drop()
 
- Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
 
- Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
 
- Reported-by: Stas Nichiporovich <[email protected]>
 
- Signed-off-by: Eric Dumazet <[email protected]>
 
- Cc: WANG Cong <[email protected]>
 
- Cc: Jamal Hadi Salim <[email protected]>
 
- ---
 
- --- a/net/sched/sch_fq_codel.c
 
- +++ b/net/sched/sch_fq_codel.c
 
- @@ -197,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
 
-  	unsigned int idx, prev_backlog, prev_qlen;
 
-  	struct fq_codel_flow *flow;
 
-  	int uninitialized_var(ret);
 
- +	unsigned int pkt_len;
 
-  	bool memory_limited;
 
-  
 
-  	idx = fq_codel_classify(skb, sch, &ret);
 
- @@ -228,6 +229,8 @@ static int fq_codel_enqueue(struct sk_bu
 
-  	prev_backlog = sch->qstats.backlog;
 
-  	prev_qlen = sch->q.qlen;
 
-  
 
- +	/* save this packet length as it might be dropped by fq_codel_drop() */
 
- +	pkt_len = qdisc_pkt_len(skb);
 
-  	/* fq_codel_drop() is quite expensive, as it performs a linear search
 
-  	 * in q->backlogs[] to find a fat flow.
 
-  	 * So instead of dropping a single packet, drop half of its backlog
 
- @@ -235,14 +238,23 @@ static int fq_codel_enqueue(struct sk_bu
 
-  	 */
 
-  	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-  
 
- -	q->drop_overlimit += prev_qlen - sch->q.qlen;
 
- +	prev_qlen -= sch->q.qlen;
 
- +	prev_backlog -= sch->qstats.backlog;
 
- +	q->drop_overlimit += prev_qlen;
 
-  	if (memory_limited)
 
- -		q->drop_overmemory += prev_qlen - sch->q.qlen;
 
- -	/* As we dropped packet(s), better let upper stack know this */
 
- -	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
 
- -				  prev_backlog - sch->qstats.backlog);
 
- +		q->drop_overmemory += prev_qlen;
 
-  
 
- -	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
 
- +	/* As we dropped packet(s), better let upper stack know this.
 
- +	 * If we dropped a packet for this flow, return NET_XMIT_CN,
 
- +	 * but in this case, our parents wont increase their backlogs.
 
- +	 */
 
- +	if (ret == idx) {
 
- +		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
 
- +					  prev_backlog - pkt_len);
 
- +		return NET_XMIT_CN;
 
- +	}
 
- +	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
 
- +	return NET_XMIT_SUCCESS;
 
-  }
 
-  
 
-  /* This is the specific function called from codel_dequeue()
 
 