generic: add upstream commit id to codel patches and refresh those
[openwrt/svn-archive/archive.git] / target / linux / generic / patches-3.3 / 040-Controlled-Delay-AQM.patch
1 From a93fd80d261f1dc2788442dba8dd5701363d3d6e Mon Sep 17 00:00:00 2001
2 From: Eric Dumazet <edumazet@google.com>
3 Date: Thu, 10 May 2012 07:51:25 +0000
4 Subject: [PATCH] codel: Controlled Delay AQM
5
6 commit 76e3cc126bb223013a6b9a0e2a51238d1ef2e409 upstream.
7
8 An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
9
10 http://queue.acm.org/detail.cfm?id=2209336
11
12 This AQM's main input is no longer queue size in bytes or packets, but the
13 delay packets stay in the (FIFO) queue.
14
15 As we don't have infinite memory, we still can drop packets in enqueue()
16 in case of massive load, but the aim of CoDel is to drop packets in
17 dequeue(), using a control law based on two simple parameters :
18
19 target : target sojourn time (default 5ms)
20 interval : width of moving time window (default 100ms)
21
22 Based on initial work from Dave Taht.
23
24 Refactored to help future codel inclusion as a plugin for other linux
25 qdisc (FQ_CODEL, ...), like RED.
26
27 include/net/codel.h contains codel algorithm as close as possible to
28 Kathleen's reference.
29
30 net/sched/sch_codel.c contains the linux qdisc specific glue.
31
32 Separate structures permit a memory efficient implementation of fq_codel
33 (to be sent as a separate work) : Each flow has its own struct
34 codel_vars.
35
36 timestamps are taken at enqueue() time with 1024 ns precision, allowing
37 a range of 2199 seconds in queue, and 100Gb links support. iproute2 uses
38 usec as base unit.
39
40 Selected packets are dropped, unless ECN is enabled and packets can get
41 ECN mark instead.
42
43 Tested from 2Mb to 10Gb speeds with no particular problems, on ixgbe and
44 tg3 drivers (BQL enabled).
45
46 Usage: tc qdisc ... codel [ limit PACKETS ] [ target TIME ]
47 [ interval TIME ] [ ecn ]
48
49 qdisc codel 10: parent 1:1 limit 2000p target 3.0ms interval 60.0ms ecn
50 Sent 13347099587 bytes 8815805 pkt (dropped 0, overlimits 0 requeues 0)
51 rate 202365Kbit 16708pps backlog 113550b 75p requeues 0
52 count 116 lastcount 98 ldelay 4.3ms dropping drop_next 816us
53 maxpacket 1514 ecn_mark 84399 drop_overlimit 0
54
55 CoDel must be seen as a base module, and should be used keeping in mind
56 there is still a FIFO queue. So a typical setup will probably need a
57 hierarchy of several qdiscs and packet classifiers to be able to meet
58 whatever constraints a user might have.
59
60 One possible example would be to use fq_codel, which combines Fair
61 Queueing and CoDel, in replacement of sfq / sfq_red.
62
63 Signed-off-by: Eric Dumazet <edumazet@google.com>
64 Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
65 Cc: Kathleen Nichols <nichols@pollere.com>
66 Cc: Van Jacobson <van@pollere.net>
67 Cc: Tom Herbert <therbert@google.com>
68 Cc: Matt Mathis <mattmathis@google.com>
69 Cc: Yuchung Cheng <ycheng@google.com>
70 Cc: Stephen Hemminger <shemminger@vyatta.com>
71 Signed-off-by: David S. Miller <davem@davemloft.net>
72 ---
73 include/linux/pkt_sched.h | 26 ++++
74 include/net/codel.h | 332 +++++++++++++++++++++++++++++++++++++++++++++
75 net/sched/Kconfig | 11 ++
76 net/sched/Makefile | 1 +
77 net/sched/sch_codel.c | 275 +++++++++++++++++++++++++++++++++++++
78 5 files changed, 645 insertions(+)
79 create mode 100644 include/net/codel.h
80 create mode 100644 net/sched/sch_codel.c
81
82 --- a/include/linux/pkt_sched.h
83 +++ b/include/linux/pkt_sched.h
84 @@ -633,4 +633,30 @@ struct tc_qfq_stats {
85 __u32 lmax;
86 };
87
88 +/* CODEL */
89 +
90 +enum {
91 + TCA_CODEL_UNSPEC,
92 + TCA_CODEL_TARGET,
93 + TCA_CODEL_LIMIT,
94 + TCA_CODEL_INTERVAL,
95 + TCA_CODEL_ECN,
96 + __TCA_CODEL_MAX
97 +};
98 +
99 +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
100 +
101 +struct tc_codel_xstats {
102 + __u32 maxpacket; /* largest packet we've seen so far */
103 + __u32 count; /* how many drops we've done since the last time we
104 + * entered dropping state
105 + */
106 + __u32 lastcount; /* count at entry to dropping state */
107 + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
108 + __s32 drop_next; /* time to drop next packet */
109 + __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
110 + __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
111 + __u32 dropping; /* are we in dropping state ? */
112 +};
113 +
114 #endif
115 --- /dev/null
116 +++ b/include/net/codel.h
117 @@ -0,0 +1,332 @@
118 +#ifndef __NET_SCHED_CODEL_H
119 +#define __NET_SCHED_CODEL_H
120 +
121 +/*
122 + * Codel - The Controlled-Delay Active Queue Management algorithm
123 + *
124 + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
125 + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
126 + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
127 + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
128 + *
129 + * Redistribution and use in source and binary forms, with or without
130 + * modification, are permitted provided that the following conditions
131 + * are met:
132 + * 1. Redistributions of source code must retain the above copyright
133 + * notice, this list of conditions, and the following disclaimer,
134 + * without modification.
135 + * 2. Redistributions in binary form must reproduce the above copyright
136 + * notice, this list of conditions and the following disclaimer in the
137 + * documentation and/or other materials provided with the distribution.
138 + * 3. The names of the authors may not be used to endorse or promote products
139 + * derived from this software without specific prior written permission.
140 + *
141 + * Alternatively, provided that this notice is retained in full, this
142 + * software may be distributed under the terms of the GNU General
143 + * Public License ("GPL") version 2, in which case the provisions of the
144 + * GPL apply INSTEAD OF those given above.
145 + *
146 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
147 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
148 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
149 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
150 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
151 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
152 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
153 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
154 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
155 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
156 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
157 + * DAMAGE.
158 + *
159 + */
160 +
161 +#include <linux/types.h>
162 +#include <linux/ktime.h>
163 +#include <linux/skbuff.h>
164 +#include <net/pkt_sched.h>
165 +#include <net/inet_ecn.h>
166 +
167 +/* Controlling Queue Delay (CoDel) algorithm
168 + * =========================================
169 + * Source : Kathleen Nichols and Van Jacobson
170 + * http://queue.acm.org/detail.cfm?id=2209336
171 + *
172 + * Implemented on linux by Dave Taht and Eric Dumazet
173 + */
174 +
175 +
176 +/* CoDel uses a 1024 nsec clock, encoded in u32
177 + * This gives a range of 2199 seconds, because of signed compares
178 + */
179 +typedef u32 codel_time_t;
180 +typedef s32 codel_tdiff_t;
181 +#define CODEL_SHIFT 10
182 +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
183 +
184 +static inline codel_time_t codel_get_time(void)
185 +{
186 + u64 ns = ktime_to_ns(ktime_get());
187 +
188 + return ns >> CODEL_SHIFT;
189 +}
190 +
191 +#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
192 +#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
193 +#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
194 +#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
195 +
196 +/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
197 +struct codel_skb_cb {
198 + codel_time_t enqueue_time;
199 +};
200 +
201 +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
202 +{
203 + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
204 + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
205 +}
206 +
207 +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
208 +{
209 + return get_codel_cb(skb)->enqueue_time;
210 +}
211 +
212 +static void codel_set_enqueue_time(struct sk_buff *skb)
213 +{
214 + get_codel_cb(skb)->enqueue_time = codel_get_time();
215 +}
216 +
217 +static inline u32 codel_time_to_us(codel_time_t val)
218 +{
219 + u64 valns = ((u64)val << CODEL_SHIFT);
220 +
221 + do_div(valns, NSEC_PER_USEC);
222 + return (u32)valns;
223 +}
224 +
225 +/**
226 + * struct codel_params - contains codel parameters
227 + * @target: target queue size (in time units)
228 + * @interval: width of moving time window
229 + * @ecn: is Explicit Congestion Notification enabled
230 + */
231 +struct codel_params {
232 + codel_time_t target;
233 + codel_time_t interval;
234 + bool ecn;
235 +};
236 +
237 +/**
238 + * struct codel_vars - contains codel variables
239 + * @count: how many drops we've done since the last time we
240 + * entered dropping state
241 + * @lastcount: count at entry to dropping state
242 + * @dropping: set to true if in dropping state
243 + * @first_above_time: when we went (or will go) continuously above target
244 + * for interval
245 + * @drop_next: time to drop next packet, or when we dropped last
246 + * @ldelay: sojourn time of last dequeued packet
247 + */
248 +struct codel_vars {
249 + u32 count;
250 + u32 lastcount;
251 + bool dropping;
252 + codel_time_t first_above_time;
253 + codel_time_t drop_next;
254 + codel_time_t ldelay;
255 +};
256 +
257 +/**
258 + * struct codel_stats - contains codel shared variables and stats
259 + * @maxpacket: largest packet we've seen so far
260 + * @drop_count: temp count of dropped packets in dequeue()
261 + * ecn_mark: number of packets we ECN marked instead of dropping
262 + */
263 +struct codel_stats {
264 + u32 maxpacket;
265 + u32 drop_count;
266 + u32 ecn_mark;
267 +};
268 +
269 +static void codel_params_init(struct codel_params *params)
270 +{
271 + params->interval = MS2TIME(100);
272 + params->target = MS2TIME(5);
273 + params->ecn = false;
274 +}
275 +
276 +static void codel_vars_init(struct codel_vars *vars)
277 +{
278 + vars->drop_next = 0;
279 + vars->first_above_time = 0;
280 + vars->dropping = false; /* exit dropping state */
281 + vars->count = 0;
282 + vars->lastcount = 0;
283 +}
284 +
285 +static void codel_stats_init(struct codel_stats *stats)
286 +{
287 + stats->maxpacket = 256;
288 +}
289 +
290 +/* return interval/sqrt(x) with good precision
291 + * relies on int_sqrt(unsigned long x) kernel implementation
292 + */
293 +static u32 codel_inv_sqrt(u32 _interval, u32 _x)
294 +{
295 + u64 interval = _interval;
296 + unsigned long x = _x;
297 +
298 + /* Scale operands for max precision */
299 +
300 +#if BITS_PER_LONG == 64
301 + x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
302 + interval <<= 16;
303 +#endif
304 +
305 + while (x < (1UL << (BITS_PER_LONG - 2))) {
306 + x <<= 2;
307 + interval <<= 1;
308 + }
309 + do_div(interval, int_sqrt(x));
310 + return (u32)interval;
311 +}
312 +
313 +static codel_time_t codel_control_law(codel_time_t t,
314 + codel_time_t interval,
315 + u32 count)
316 +{
317 + return t + codel_inv_sqrt(interval, count);
318 +}
319 +
320 +
321 +static bool codel_should_drop(struct sk_buff *skb,
322 + unsigned int *backlog,
323 + struct codel_vars *vars,
324 + struct codel_params *params,
325 + struct codel_stats *stats,
326 + codel_time_t now)
327 +{
328 + bool ok_to_drop;
329 +
330 + if (!skb) {
331 + vars->first_above_time = 0;
332 + return false;
333 + }
334 +
335 + vars->ldelay = now - codel_get_enqueue_time(skb);
336 + *backlog -= qdisc_pkt_len(skb);
337 +
338 + if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
339 + stats->maxpacket = qdisc_pkt_len(skb);
340 +
341 + if (codel_time_before(vars->ldelay, params->target) ||
342 + *backlog <= stats->maxpacket) {
343 + /* went below - stay below for at least interval */
344 + vars->first_above_time = 0;
345 + return false;
346 + }
347 + ok_to_drop = false;
348 + if (vars->first_above_time == 0) {
349 + /* just went above from below. If we stay above
350 + * for at least interval we'll say it's ok to drop
351 + */
352 + vars->first_above_time = now + params->interval;
353 + } else if (codel_time_after(now, vars->first_above_time)) {
354 + ok_to_drop = true;
355 + }
356 + return ok_to_drop;
357 +}
358 +
359 +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
360 + struct Qdisc *sch);
361 +
362 +static struct sk_buff *codel_dequeue(struct Qdisc *sch,
363 + struct codel_params *params,
364 + struct codel_vars *vars,
365 + struct codel_stats *stats,
366 + codel_skb_dequeue_t dequeue_func,
367 + u32 *backlog)
368 +{
369 + struct sk_buff *skb = dequeue_func(vars, sch);
370 + codel_time_t now;
371 + bool drop;
372 +
373 + if (!skb) {
374 + vars->dropping = false;
375 + return skb;
376 + }
377 + now = codel_get_time();
378 + drop = codel_should_drop(skb, backlog, vars, params, stats, now);
379 + if (vars->dropping) {
380 + if (!drop) {
381 + /* sojourn time below target - leave dropping state */
382 + vars->dropping = false;
383 + } else if (codel_time_after_eq(now, vars->drop_next)) {
384 + /* It's time for the next drop. Drop the current
385 + * packet and dequeue the next. The dequeue might
386 + * take us out of dropping state.
387 + * If not, schedule the next drop.
388 + * A large backlog might result in drop rates so high
389 + * that the next drop should happen now,
390 + * hence the while loop.
391 + */
392 + while (vars->dropping &&
393 + codel_time_after_eq(now, vars->drop_next)) {
394 + if (++vars->count == 0) /* avoid zero divides */
395 + vars->count = ~0U;
396 + if (params->ecn && INET_ECN_set_ce(skb)) {
397 + stats->ecn_mark++;
398 + vars->drop_next =
399 + codel_control_law(vars->drop_next,
400 + params->interval,
401 + vars->count);
402 + goto end;
403 + }
404 + qdisc_drop(skb, sch);
405 + stats->drop_count++;
406 + skb = dequeue_func(vars, sch);
407 + if (!codel_should_drop(skb, backlog,
408 + vars, params, stats, now)) {
409 + /* leave dropping state */
410 + vars->dropping = false;
411 + } else {
412 + /* and schedule the next drop */
413 + vars->drop_next =
414 + codel_control_law(vars->drop_next,
415 + params->interval,
416 + vars->count);
417 + }
418 + }
419 + }
420 + } else if (drop) {
421 + if (params->ecn && INET_ECN_set_ce(skb)) {
422 + stats->ecn_mark++;
423 + } else {
424 + qdisc_drop(skb, sch);
425 + stats->drop_count++;
426 +
427 + skb = dequeue_func(vars, sch);
428 + drop = codel_should_drop(skb, backlog, vars, params,
429 + stats, now);
430 + }
431 + vars->dropping = true;
432 + /* if min went above target close to when we last went below it
433 + * assume that the drop rate that controlled the queue on the
434 + * last cycle is a good starting point to control it now.
435 + */
436 + if (codel_time_before(now - vars->drop_next,
437 + 16 * params->interval)) {
438 + vars->count = (vars->count - vars->lastcount) | 1;
439 + } else {
440 + vars->count = 1;
441 + }
442 + vars->lastcount = vars->count;
443 + vars->drop_next = codel_control_law(now, params->interval,
444 + vars->count);
445 + }
446 +end:
447 + return skb;
448 +}
449 +#endif
450 --- a/net/sched/Kconfig
451 +++ b/net/sched/Kconfig
452 @@ -250,6 +250,17 @@ config NET_SCH_QFQ
453
454 If unsure, say N.
455
456 +config NET_SCH_CODEL
457 + tristate "Controlled Delay AQM (CODEL)"
458 + help
459 + Say Y here if you want to use the Controlled Delay (CODEL)
460 + packet scheduling algorithm.
461 +
462 + To compile this driver as a module, choose M here: the module
463 + will be called sch_codel.
464 +
465 + If unsure, say N.
466 +
467 config NET_SCH_INGRESS
468 tristate "Ingress Qdisc"
469 depends on NET_CLS_ACT
470 --- a/net/sched/Makefile
471 +++ b/net/sched/Makefile
472 @@ -36,6 +36,7 @@ obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
473 obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
474 obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
475 obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
476 +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
477
478 obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
479 obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
480 --- /dev/null
481 +++ b/net/sched/sch_codel.c
482 @@ -0,0 +1,275 @@
483 +/*
484 + * Codel - The Controlled-Delay Active Queue Management algorithm
485 + *
486 + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
487 + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
488 + *
489 + * Implemented on linux by :
490 + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
491 + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
492 + *
493 + * Redistribution and use in source and binary forms, with or without
494 + * modification, are permitted provided that the following conditions
495 + * are met:
496 + * 1. Redistributions of source code must retain the above copyright
497 + * notice, this list of conditions, and the following disclaimer,
498 + * without modification.
499 + * 2. Redistributions in binary form must reproduce the above copyright
500 + * notice, this list of conditions and the following disclaimer in the
501 + * documentation and/or other materials provided with the distribution.
502 + * 3. The names of the authors may not be used to endorse or promote products
503 + * derived from this software without specific prior written permission.
504 + *
505 + * Alternatively, provided that this notice is retained in full, this
506 + * software may be distributed under the terms of the GNU General
507 + * Public License ("GPL") version 2, in which case the provisions of the
508 + * GPL apply INSTEAD OF those given above.
509 + *
510 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
511 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
512 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
513 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
514 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
515 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
516 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
517 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
518 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
519 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
520 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
521 + * DAMAGE.
522 + *
523 + */
524 +
525 +#include <linux/module.h>
526 +#include <linux/slab.h>
527 +#include <linux/types.h>
528 +#include <linux/kernel.h>
529 +#include <linux/errno.h>
530 +#include <linux/skbuff.h>
531 +#include <net/pkt_sched.h>
532 +#include <net/codel.h>
533 +
534 +
535 +#define DEFAULT_CODEL_LIMIT 1000
536 +
537 +struct codel_sched_data {
538 + struct codel_params params;
539 + struct codel_vars vars;
540 + struct codel_stats stats;
541 + u32 drop_overlimit;
542 +};
543 +
544 +/* This is the specific function called from codel_dequeue()
545 + * to dequeue a packet from queue. Note: backlog is handled in
546 + * codel, we dont need to reduce it here.
547 + */
548 +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
549 +{
550 + struct sk_buff *skb = __skb_dequeue(&sch->q);
551 +
552 + prefetch(&skb->end); /* we'll need skb_shinfo() */
553 + return skb;
554 +}
555 +
556 +static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
557 +{
558 + struct codel_sched_data *q = qdisc_priv(sch);
559 + struct sk_buff *skb;
560 +
561 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
562 + dequeue, &sch->qstats.backlog);
563 + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
564 + * or HTB crashes. Defer it for next round.
565 + */
566 + if (q->stats.drop_count && sch->q.qlen) {
567 + qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
568 + q->stats.drop_count = 0;
569 + }
570 + if (skb)
571 + qdisc_bstats_update(sch, skb);
572 + return skb;
573 +}
574 +
575 +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
576 +{
577 + struct codel_sched_data *q;
578 +
579 + if (likely(qdisc_qlen(sch) < sch->limit)) {
580 + codel_set_enqueue_time(skb);
581 + return qdisc_enqueue_tail(skb, sch);
582 + }
583 + q = qdisc_priv(sch);
584 + q->drop_overlimit++;
585 + return qdisc_drop(skb, sch);
586 +}
587 +
588 +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
589 + [TCA_CODEL_TARGET] = { .type = NLA_U32 },
590 + [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
591 + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
592 + [TCA_CODEL_ECN] = { .type = NLA_U32 },
593 +};
594 +
595 +static int codel_change(struct Qdisc *sch, struct nlattr *opt)
596 +{
597 + struct codel_sched_data *q = qdisc_priv(sch);
598 + struct nlattr *tb[TCA_CODEL_MAX + 1];
599 + unsigned int qlen;
600 + int err;
601 +
602 + if (!opt)
603 + return -EINVAL;
604 +
605 + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
606 + if (err < 0)
607 + return err;
608 +
609 + sch_tree_lock(sch);
610 +
611 + if (tb[TCA_CODEL_TARGET]) {
612 + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
613 +
614 + q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
615 + }
616 +
617 + if (tb[TCA_CODEL_INTERVAL]) {
618 + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
619 +
620 + q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
621 + }
622 +
623 + if (tb[TCA_CODEL_LIMIT])
624 + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
625 +
626 + if (tb[TCA_CODEL_ECN])
627 + q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
628 +
629 + qlen = sch->q.qlen;
630 + while (sch->q.qlen > sch->limit) {
631 + struct sk_buff *skb = __skb_dequeue(&sch->q);
632 +
633 + sch->qstats.backlog -= qdisc_pkt_len(skb);
634 + qdisc_drop(skb, sch);
635 + }
636 + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
637 +
638 + sch_tree_unlock(sch);
639 + return 0;
640 +}
641 +
642 +static int codel_init(struct Qdisc *sch, struct nlattr *opt)
643 +{
644 + struct codel_sched_data *q = qdisc_priv(sch);
645 +
646 + sch->limit = DEFAULT_CODEL_LIMIT;
647 +
648 + codel_params_init(&q->params);
649 + codel_vars_init(&q->vars);
650 + codel_stats_init(&q->stats);
651 +
652 + if (opt) {
653 + int err = codel_change(sch, opt);
654 +
655 + if (err)
656 + return err;
657 + }
658 +
659 + if (sch->limit >= 1)
660 + sch->flags |= TCQ_F_CAN_BYPASS;
661 + else
662 + sch->flags &= ~TCQ_F_CAN_BYPASS;
663 +
664 + return 0;
665 +}
666 +
667 +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
668 +{
669 + struct codel_sched_data *q = qdisc_priv(sch);
670 + struct nlattr *opts;
671 +
672 + opts = nla_nest_start(skb, TCA_OPTIONS);
673 + if (opts == NULL)
674 + goto nla_put_failure;
675 +
676 + if (nla_put_u32(skb, TCA_CODEL_TARGET,
677 + codel_time_to_us(q->params.target)) ||
678 + nla_put_u32(skb, TCA_CODEL_LIMIT,
679 + sch->limit) ||
680 + nla_put_u32(skb, TCA_CODEL_INTERVAL,
681 + codel_time_to_us(q->params.interval)) ||
682 + nla_put_u32(skb, TCA_CODEL_ECN,
683 + q->params.ecn))
684 + goto nla_put_failure;
685 +
686 + return nla_nest_end(skb, opts);
687 +
688 +nla_put_failure:
689 + nla_nest_cancel(skb, opts);
690 + return -1;
691 +}
692 +
693 +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
694 +{
695 + const struct codel_sched_data *q = qdisc_priv(sch);
696 + struct tc_codel_xstats st = {
697 + .maxpacket = q->stats.maxpacket,
698 + .count = q->vars.count,
699 + .lastcount = q->vars.lastcount,
700 + .drop_overlimit = q->drop_overlimit,
701 + .ldelay = codel_time_to_us(q->vars.ldelay),
702 + .dropping = q->vars.dropping,
703 + .ecn_mark = q->stats.ecn_mark,
704 + };
705 +
706 + if (q->vars.dropping) {
707 + codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
708 +
709 + if (delta >= 0)
710 + st.drop_next = codel_time_to_us(delta);
711 + else
712 + st.drop_next = -codel_time_to_us(-delta);
713 + }
714 +
715 + return gnet_stats_copy_app(d, &st, sizeof(st));
716 +}
717 +
718 +static void codel_reset(struct Qdisc *sch)
719 +{
720 + struct codel_sched_data *q = qdisc_priv(sch);
721 +
722 + qdisc_reset_queue(sch);
723 + codel_vars_init(&q->vars);
724 +}
725 +
726 +static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
727 + .id = "codel",
728 + .priv_size = sizeof(struct codel_sched_data),
729 +
730 + .enqueue = codel_qdisc_enqueue,
731 + .dequeue = codel_qdisc_dequeue,
732 + .peek = qdisc_peek_dequeued,
733 + .init = codel_init,
734 + .reset = codel_reset,
735 + .change = codel_change,
736 + .dump = codel_dump,
737 + .dump_stats = codel_dump_stats,
738 + .owner = THIS_MODULE,
739 +};
740 +
741 +static int __init codel_module_init(void)
742 +{
743 + return register_qdisc(&codel_qdisc_ops);
744 +}
745 +
746 +static void __exit codel_module_exit(void)
747 +{
748 + unregister_qdisc(&codel_qdisc_ops);
749 +}
750 +
751 +module_init(codel_module_init)
752 +module_exit(codel_module_exit)
753 +
754 +MODULE_DESCRIPTION("Controlled Delay queue discipline");
755 +MODULE_AUTHOR("Dave Taht");
756 +MODULE_AUTHOR("Eric Dumazet");
757 +MODULE_LICENSE("Dual BSD/GPL");