1 From: Eric Dumazet <edumazet@google.com>
3 codel_should_drop() logic allows a packet to escape being dropped if the
4 queue size is under the max packet size.
6 In fq_codel, we have two possible backlogs: the qdisc global one, and
9 The meaningful one for codel_should_drop() should be the global backlog,
10 not the per-flow one, so that thin flows can have a non-zero drop/mark
13 Signed-off-by: Eric Dumazet <edumazet@google.com>
14 Cc: Dave Taht <dave.taht@bufferbloat.net>
15 Cc: Kathleen Nichols <nichols@pollere.com>
16 Cc: Van Jacobson <van@pollere.net>
18 include/net/codel.h | 15 +++++++--------
19 net/sched/sch_codel.c | 4 ++--
20 net/sched/sch_fq_codel.c | 5 +++--
21 3 files changed, 12 insertions(+), 12 deletions(-)
23 --- a/include/net/codel.h
24 +++ b/include/net/codel.h
25 @@ -205,7 +205,7 @@ static codel_time_t codel_control_law(co
28 static bool codel_should_drop(const struct sk_buff *skb,
29 - unsigned int *backlog,
31 struct codel_vars *vars,
32 struct codel_params *params,
33 struct codel_stats *stats,
34 @@ -219,13 +219,13 @@ static bool codel_should_drop(const stru
37 vars->ldelay = now - codel_get_enqueue_time(skb);
38 - *backlog -= qdisc_pkt_len(skb);
39 + sch->qstats.backlog -= qdisc_pkt_len(skb);
41 if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
42 stats->maxpacket = qdisc_pkt_len(skb);
44 if (codel_time_before(vars->ldelay, params->target) ||
45 - *backlog <= stats->maxpacket) {
46 + sch->qstats.backlog <= stats->maxpacket) {
47 /* went below - stay below for at least interval */
48 vars->first_above_time = 0;
50 @@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(str
51 struct codel_params *params,
52 struct codel_vars *vars,
53 struct codel_stats *stats,
54 - codel_skb_dequeue_t dequeue_func,
56 + codel_skb_dequeue_t dequeue_func)
58 struct sk_buff *skb = dequeue_func(vars, sch);
60 @@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(str
63 now = codel_get_time();
64 - drop = codel_should_drop(skb, backlog, vars, params, stats, now);
65 + drop = codel_should_drop(skb, sch, vars, params, stats, now);
68 /* sojourn time below target - leave dropping state */
69 @@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(str
72 skb = dequeue_func(vars, sch);
73 - if (!codel_should_drop(skb, backlog,
74 + if (!codel_should_drop(skb, sch,
75 vars, params, stats, now)) {
76 /* leave dropping state */
77 vars->dropping = false;
78 @@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(str
81 skb = dequeue_func(vars, sch);
82 - drop = codel_should_drop(skb, backlog, vars, params,
83 + drop = codel_should_drop(skb, sch, vars, params,
86 vars->dropping = true;
87 --- a/net/sched/sch_codel.c
88 +++ b/net/sched/sch_codel.c
89 @@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_deque
90 struct codel_sched_data *q = qdisc_priv(sch);
93 - skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
94 - dequeue, &sch->qstats.backlog);
95 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
97 /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
98 * or HTB crashes. Defer it for next round.
100 --- a/net/sched/sch_fq_codel.c
101 +++ b/net/sched/sch_fq_codel.c
102 @@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_bu
104 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
106 + struct fq_codel_sched_data *q = qdisc_priv(sch);
107 struct fq_codel_flow *flow;
108 struct sk_buff *skb = NULL;
110 flow = container_of(vars, struct fq_codel_flow, cvars);
112 skb = dequeue_head(flow);
113 - sch->qstats.backlog -= qdisc_pkt_len(skb);
114 + q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
118 @@ -256,7 +257,7 @@ begin:
119 prev_ecn_mark = q->cstats.ecn_mark;
121 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
122 - dequeue, &q->backlogs[flow - q->flows]);
125 flow->dropped += q->cstats.drop_count - prev_drop_count;
126 flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;