
kernel: add patches by Dave Täht that reduce skb buffer sizes in longer queues to relieve memory pressure

Signed-off-by: Felix Fietkau <[email protected]>

SVN-Revision: 36301
Commit 307965026a by Felix Fietkau

target/linux/generic/patches-3.8/656-skb_reduce_truesize-helper.patch (+41, -0)

@@ -0,0 +1,41 @@
+From 4593a806e31119c5bd3faa00c7210ad862d515af Mon Sep 17 00:00:00 2001
+From: Dave Taht <[email protected]>
+Date: Mon, 31 Dec 2012 10:02:21 -0800
+Subject: [PATCH 3/7] skb_reduce_truesize: helper function for shrinking skbs
+ whenever needed
+
+On embedded devices in particular, large queues of small packets with a
+large truesize can build up on the rx path. Reducing their size can
+relieve memory pressure. skb_reduce_truesize is a helper function for
+doing this when needed.
+---
+ include/linux/skbuff.h |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1772,6 +1772,24 @@ static inline void pskb_trim_unique(stru
+ 	BUG_ON(err);
+ }
+ 
++/*
++ * Caller wants to reduce memory needs before queueing skb
++ * The (expensive) copy should not be done in the fast path.
++ */
++static inline struct sk_buff *skb_reduce_truesize(struct sk_buff *skb)
++{
++	if (skb->truesize > 2 * SKB_TRUESIZE(skb->len)) {
++		struct sk_buff *nskb;
++		nskb = skb_copy_expand(skb, skb_headroom(skb), 0,
++			GFP_ATOMIC | __GFP_NOWARN);
++		if (nskb) {
++			__kfree_skb(skb);
++			skb = nskb;
++		}
++	}
++	return skb;
++}
++
+ /**
+  *	skb_orphan - orphan a buffer
+  *	@skb: buffer to orphan
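
For reference, the helper only copies when the skb's truesize exceeds twice what a tightly sized skb of the same length would occupy; SKB_TRUESIZE(len) is the data length plus the aligned sizes of struct sk_buff and struct skb_shared_info. Below is a minimal user-space sketch of that threshold check; the 512-byte overhead and the buffer sizes are assumptions for illustration, not values from any particular kernel build.

/* User-space model of the skb_reduce_truesize threshold check.
 * ASSUMED_SKB_OVERHEAD stands in for the aligned sizes of struct sk_buff
 * plus struct skb_shared_info; it is an assumption, not a kernel value. */
#include <stdio.h>

#define ASSUMED_SKB_OVERHEAD	512
#define MODEL_SKB_TRUESIZE(len)	((len) + ASSUMED_SKB_OVERHEAD)

static int would_copy(unsigned int len, unsigned int truesize)
{
	/* Same condition as the helper: copy only when truesize is more
	 * than twice what a tightly sized skb of this length would need. */
	return truesize > 2 * MODEL_SKB_TRUESIZE(len);
}

int main(void)
{
	/* A 64-byte ACK pinned in a 2 KiB rx buffer: copying pays off. */
	printf("64B in 2KiB buffer   -> copy: %d\n",
	       would_copy(64, 2048 + ASSUMED_SKB_OVERHEAD));
	/* A full 1500-byte frame in the same buffer: already a decent fit. */
	printf("1500B in 2KiB buffer -> copy: %d\n",
	       would_copy(1500, 2048 + ASSUMED_SKB_OVERHEAD));
	return 0;
}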

target/linux/generic/patches-3.8/657-qdisc_reduce_truesize.patch (+63, -0)

@@ -0,0 +1,63 @@
+From bc9fec2f87d57bdbff30d296605e24504513f65c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Dave=20T=C3=A4ht?= <[email protected]>
+Date: Mon, 17 Sep 2012 19:20:22 -0700
+Subject: [PATCH 4/7] net: add skb_reduce_truesize support to common qdiscs
+
+Reduce skb size under load when queues begin to fill on the
+common qdiscs.
+---
+ net/sched/sch_codel.c    |    2 ++
+ net/sched/sch_fifo.c     |   12 ++++++++----
+ net/sched/sch_fq_codel.c |    2 ++
+ 3 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -96,6 +96,8 @@ static int codel_qdisc_enqueue(struct sk
+ 	struct codel_sched_data *q;
+ 
+ 	if (likely(qdisc_qlen(sch) < sch->limit)) {
++		if (qdisc_qlen(sch) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		codel_set_enqueue_time(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+ 	}
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -29,17 +29,21 @@ static int bfifo_enqueue(struct sk_buff 
+ 
+ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+-	if (likely(skb_queue_len(&sch->q) < sch->limit))
++	if (likely(skb_queue_len(&sch->q) < sch->limit)) {
++		if (skb_queue_len(&sch->q) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+-
++	}
+ 	return qdisc_reshape_fail(skb, sch);
+ }
+ 
+ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+-	if (likely(skb_queue_len(&sch->q) < sch->limit))
++	if (likely(skb_queue_len(&sch->q) < sch->limit)) {
++		if (skb_queue_len(&sch->q) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+-
++	}
+ 	/* queue full, remove one skb to fulfill the limit */
+ 	__qdisc_queue_drop_head(sch, &sch->q);
+ 	sch->qstats.drops++;
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -182,6 +182,8 @@ static int fq_codel_enqueue(struct sk_bu
+ 		return ret;
+ 	}
+ 	idx--;
++	if (sch->q.qlen > 128)
++		skb = skb_reduce_truesize(skb);
+ 
+ 	codel_set_enqueue_time(skb);
+ 	flow = &q->flows[idx];

target/linux/generic/patches-3.9/656-skb_reduce_truesize-helper.patch (+41, -0)

@@ -0,0 +1,41 @@
+From 4593a806e31119c5bd3faa00c7210ad862d515af Mon Sep 17 00:00:00 2001
+From: Dave Taht <[email protected]>
+Date: Mon, 31 Dec 2012 10:02:21 -0800
+Subject: [PATCH 3/7] skb_reduce_truesize: helper function for shrinking skbs
+ whenever needed
+
+On embedded devices in particular, large queues of small packets with a
+large truesize can build up on the rx path. Reducing their size can
+relieve memory pressure. skb_reduce_truesize is a helper function for
+doing this when needed.
+---
+ include/linux/skbuff.h |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1801,6 +1801,24 @@ static inline void pskb_trim_unique(stru
+ 	BUG_ON(err);
+ }
+ 
++/*
++ * Caller wants to reduce memory needs before queueing skb
++ * The (expensive) copy should not be done in the fast path.
++ */
++static inline struct sk_buff *skb_reduce_truesize(struct sk_buff *skb)
++{
++	if (skb->truesize > 2 * SKB_TRUESIZE(skb->len)) {
++		struct sk_buff *nskb;
++		nskb = skb_copy_expand(skb, skb_headroom(skb), 0,
++			GFP_ATOMIC | __GFP_NOWARN);
++		if (nskb) {
++			__kfree_skb(skb);
++			skb = nskb;
++		}
++	}
++	return skb;
++}
++
+ /**
+  *	skb_orphan - orphan a buffer
+  *	@skb: buffer to orphan

target/linux/generic/patches-3.9/657-qdisc_reduce_truesize.patch (+63, -0)

@@ -0,0 +1,63 @@
+From bc9fec2f87d57bdbff30d296605e24504513f65c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Dave=20T=C3=A4ht?= <[email protected]>
+Date: Mon, 17 Sep 2012 19:20:22 -0700
+Subject: [PATCH 4/7] net: add skb_reduce_truesize support to common qdiscs
+
+Reduce skb size under load when queues begin to fill on the
+common qdiscs.
+---
+ net/sched/sch_codel.c    |    2 ++
+ net/sched/sch_fifo.c     |   12 ++++++++----
+ net/sched/sch_fq_codel.c |    2 ++
+ 3 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -96,6 +96,8 @@ static int codel_qdisc_enqueue(struct sk
+ 	struct codel_sched_data *q;
+ 
+ 	if (likely(qdisc_qlen(sch) < sch->limit)) {
++		if (qdisc_qlen(sch) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		codel_set_enqueue_time(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+ 	}
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -29,17 +29,21 @@ static int bfifo_enqueue(struct sk_buff 
+ 
+ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+-	if (likely(skb_queue_len(&sch->q) < sch->limit))
++	if (likely(skb_queue_len(&sch->q) < sch->limit)) {
++		if (skb_queue_len(&sch->q) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+-
++	}
+ 	return qdisc_reshape_fail(skb, sch);
+ }
+ 
+ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+-	if (likely(skb_queue_len(&sch->q) < sch->limit))
++	if (likely(skb_queue_len(&sch->q) < sch->limit)) {
++		if (skb_queue_len(&sch->q) > 128)
++			skb = skb_reduce_truesize(skb);
+ 		return qdisc_enqueue_tail(skb, sch);
+-
++	}
+ 	/* queue full, remove one skb to fulfill the limit */
+ 	__qdisc_queue_drop_head(sch, &sch->q);
+ 	sch->qstats.drops++;
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -182,6 +182,8 @@ static int fq_codel_enqueue(struct sk_bu
+ 		return ret;
+ 	}
+ 	idx--;
++	if (sch->q.qlen > 128)
++		skb = skb_reduce_truesize(skb);
+ 
+ 	codel_set_enqueue_time(skb);
+ 	flow = &q->flows[idx];