1 From: Eric Dumazet <edumazet@google.com>
3 codel_should_drop() logic allows a packet to escape being dropped if the queue
4 size is under the max packet size.
6 In fq_codel, we have two possible backlogs: the qdisc global one, and
9 The meaningful one for codel_should_drop() should be the global backlog,
10 not the per-flow one, so that thin flows can have a non-zero drop/mark
13 Signed-off-by: Eric Dumazet <edumazet@google.com>
14 Cc: Dave Taht <dave.taht@bufferbloat.net>
15 Cc: Kathleen Nichols <nichols@pollere.com>
16 Cc: Van Jacobson <van@pollere.net>
18 include/net/codel.h | 15 +++++++--------
19 net/sched/sch_codel.c | 4 ++--
20 net/sched/sch_fq_codel.c | 5 +++--
21 3 files changed, 12 insertions(+), 12 deletions(-)
23 diff --git a/include/net/codel.h b/include/net/codel.h
24 index 7546517..550debf 100644
25 --- a/include/net/codel.h
26 +++ b/include/net/codel.h
27 @@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
30 static bool codel_should_drop(const struct sk_buff *skb,
31 - unsigned int *backlog,
33 struct codel_vars *vars,
34 struct codel_params *params,
35 struct codel_stats *stats,
36 @@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
39 vars->ldelay = now - codel_get_enqueue_time(skb);
40 - *backlog -= qdisc_pkt_len(skb);
41 + sch->qstats.backlog -= qdisc_pkt_len(skb);
43 if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
44 stats->maxpacket = qdisc_pkt_len(skb);
46 if (codel_time_before(vars->ldelay, params->target) ||
47 - *backlog <= stats->maxpacket) {
48 + sch->qstats.backlog <= stats->maxpacket) {
49 /* went below - stay below for at least interval */
50 vars->first_above_time = 0;
52 @@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
53 struct codel_params *params,
54 struct codel_vars *vars,
55 struct codel_stats *stats,
56 - codel_skb_dequeue_t dequeue_func,
58 + codel_skb_dequeue_t dequeue_func)
60 struct sk_buff *skb = dequeue_func(vars, sch);
62 @@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
65 now = codel_get_time();
66 - drop = codel_should_drop(skb, backlog, vars, params, stats, now);
67 + drop = codel_should_drop(skb, sch, vars, params, stats, now);
70 /* sojourn time below target - leave dropping state */
71 @@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
74 skb = dequeue_func(vars, sch);
75 - if (!codel_should_drop(skb, backlog,
76 + if (!codel_should_drop(skb, sch,
77 vars, params, stats, now)) {
78 /* leave dropping state */
79 vars->dropping = false;
80 @@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
83 skb = dequeue_func(vars, sch);
84 - drop = codel_should_drop(skb, backlog, vars, params,
85 + drop = codel_should_drop(skb, sch, vars, params,
88 vars->dropping = true;
89 diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
90 index 213ef60..2f9ab17 100644
91 --- a/net/sched/sch_codel.c
92 +++ b/net/sched/sch_codel.c
93 @@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
94 struct codel_sched_data *q = qdisc_priv(sch);
97 - skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
98 - dequeue, &sch->qstats.backlog);
99 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
101 /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
102 * or HTB crashes. Defer it for next round.
104 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
105 index 337ff20..9fc1c62 100644
106 --- a/net/sched/sch_fq_codel.c
107 +++ b/net/sched/sch_fq_codel.c
108 @@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
110 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
112 + struct fq_codel_sched_data *q = qdisc_priv(sch);
113 struct fq_codel_flow *flow;
114 struct sk_buff *skb = NULL;
116 flow = container_of(vars, struct fq_codel_flow, cvars);
118 skb = dequeue_head(flow);
119 - sch->qstats.backlog -= qdisc_pkt_len(skb);
120 + q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
124 @@ -256,7 +257,7 @@ begin:
125 prev_ecn_mark = q->cstats.ecn_mark;
127 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
128 - dequeue, &q->backlogs[flow - q->flows]);
131 flow->dropped += q->cstats.drop_count - prev_drop_count;
132 flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;