kernel: merge pending fq_codel backlog accounting fix
author Felix Fietkau <nbd@nbd.name>
Tue, 7 Jun 2016 12:11:13 +0000 (14:11 +0200)
committer Felix Fietkau <nbd@nbd.name>
Sat, 11 Jun 2016 07:51:23 +0000 (09:51 +0200)
Signed-off-by: Felix Fietkau <nbd@nbd.name>
target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch [new file with mode: 0644]
target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch

diff --git a/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch b/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch
new file mode 100644 (file)
index 0000000..a1902fe
--- /dev/null
+++ b/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch
@@ -0,0 +1,70 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 4 Jun 2016 12:55:13 -0700
+Subject: [PATCH] fq_codel: fix NET_XMIT_CN behavior
+
+My prior attempt to fix the backlogs of parents failed.
+
+If we return NET_XMIT_CN, our parents won't increase their backlog,
+so our qdisc_tree_reduce_backlog() should take this into account.
+
+v2: Florian Westphal pointed out that we could drop the packet,
+so we need to save qdisc_pkt_len(skb) in a temp variable before
+calling fq_codel_drop()
+
+Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
+Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
+Reported-by: Stas Nichiporovich <stasn77@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: WANG Cong <xiyou.wangcong@gmail.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+---
+
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -197,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
+       unsigned int idx, prev_backlog, prev_qlen;
+       struct fq_codel_flow *flow;
+       int uninitialized_var(ret);
++      unsigned int pkt_len;
+       bool memory_limited;
+ 
+       idx = fq_codel_classify(skb, sch, &ret);
+@@ -228,6 +229,8 @@ static int fq_codel_enqueue(struct sk_bu
+       prev_backlog = sch->qstats.backlog;
+       prev_qlen = sch->q.qlen;
+ 
++      /* save this packet length as it might be dropped by fq_codel_drop() */
++      pkt_len = qdisc_pkt_len(skb);
+       /* fq_codel_drop() is quite expensive, as it performs a linear search
+        * in q->backlogs[] to find a fat flow.
+        * So instead of dropping a single packet, drop half of its backlog
+@@ -235,14 +238,23 @@ static int fq_codel_enqueue(struct sk_bu
+        */
+       ret = fq_codel_drop(sch, q->drop_batch_size);
+ 
+-      q->drop_overlimit += prev_qlen - sch->q.qlen;
++      prev_qlen -= sch->q.qlen;
++      prev_backlog -= sch->qstats.backlog;
++      q->drop_overlimit += prev_qlen;
+       if (memory_limited)
+-              q->drop_overmemory += prev_qlen - sch->q.qlen;
+-      /* As we dropped packet(s), better let upper stack know this */
+-      qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
+-                                prev_backlog - sch->qstats.backlog);
++              q->drop_overmemory += prev_qlen;
+ 
+-      return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
++      /* As we dropped packet(s), better let upper stack know this.
++       * If we dropped a packet for this flow, return NET_XMIT_CN,
++       * but in this case, our parents wont increase their backlogs.
++       */
++      if (ret == idx) {
++              qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
++                                        prev_backlog - pkt_len);
++              return NET_XMIT_CN;
++      }
++      qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
++      return NET_XMIT_SUCCESS;
+ }
+ 
+ /* This is the specific function called from codel_dequeue()
diff --git a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
index 048476ba48fa1aba24cdbab48f2495728bbbf3da..46fceffcf174f4b37a9a0aac9eba1d1e0505ef24 100644 (file)
--- a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
+++ b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
@@ -1,6 +1,6 @@
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -459,7 +459,7 @@ static int fq_codel_init(struct Qdisc *s
+@@ -471,7 +471,7 @@ static int fq_codel_init(struct Qdisc *s
  
        sch->limit = 10*1024;
        q->flows_cnt = 1024;
diff --git a/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch b/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
index 30907229ce95209f578357b25bdf27fd9c703727..3cb950c0db4c844886f6ae4a69917a0535fbaa63 100644 (file)
--- a/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
+++ b/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
@@ -1,6 +1,6 @@
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -218,7 +218,6 @@ static int fq_codel_enqueue(struct sk_bu
+@@ -219,7 +219,6 @@ static int fq_codel_enqueue(struct sk_bu
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
diff --git a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
index a56dec330a4810ea1d16b4605cc37c97699b0349..8952ab4e206b477f6392d17cf249f90de60232f4 100644 (file)
--- a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
+++ b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
@@ -13,7 +13,7 @@
          device, it has to decide which ones to send first, which ones to
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -676,7 +676,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -688,7 +688,7 @@ static const struct Qdisc_class_ops fq_c
        .walk           =       fq_codel_walk,
  };
  
@@ -22,7 +22,7 @@
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
-@@ -692,6 +692,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -704,6 +704,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
        .dump_stats =   fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
  };
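
The reasoning in the patch above is easier to follow with both levels of the qdisc hierarchy in view: a parent qdisc only accounts an enqueued packet when the child's enqueue returns NET_XMIT_SUCCESS, so a NET_XMIT_CN return already tells the parent not to count the packet that was just dropped, and qdisc_tree_reduce_backlog() has to subtract one packet and pkt_len bytes less than was actually shed. Below is a minimal userspace sketch of that bookkeeping; the toy_qdisc, child_enqueue and tree_reduce_backlog names are invented for illustration, and only the arithmetic mirrors fq_codel_enqueue().

/*
 * Minimal userspace model of the accounting fixed by
 * 035-fq_codel-fix-NET_XMIT_CN-behavior.patch. All names here are
 * made up; only the arithmetic follows fq_codel_enqueue().
 */
#include <stdbool.h>
#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_CN      0x02 /* congestion notification */

struct toy_qdisc {
	unsigned int qlen;    /* packets queued */
	unsigned int backlog; /* bytes queued */
};

/* stand-in for qdisc_tree_reduce_backlog(): propagate drops to the parent */
static void tree_reduce_backlog(struct toy_qdisc *parent,
				unsigned int pkts, unsigned int bytes)
{
	parent->qlen -= pkts;
	parent->backlog -= bytes;
}

/*
 * Over-limit enqueue on the child: queue the packet, shed a batch, then
 * tell the parent how much to subtract. own_flow_hit models ret == idx,
 * i.e. the batch drop landed on the new packet's own flow.
 */
static int child_enqueue(struct toy_qdisc *child, struct toy_qdisc *parent,
			 unsigned int pkt_len, bool own_flow_hit)
{
	child->qlen++;              /* fq_codel enqueues first ... */
	child->backlog += pkt_len;

	unsigned int prev_qlen = child->qlen;
	unsigned int prev_backlog = child->backlog;

	/* ... then drops a batch; pretend 3 packets / 3000 bytes went away */
	child->qlen -= 3;
	child->backlog -= 3000;

	prev_qlen -= child->qlen;       /* packets actually dropped */
	prev_backlog -= child->backlog; /* bytes actually dropped */

	if (own_flow_hit) {
		/*
		 * Returning NET_XMIT_CN means the parent will never account
		 * this packet, so exclude it from the reduction: this is
		 * the prev_qlen - 1 / prev_backlog - pkt_len in the fix.
		 */
		tree_reduce_backlog(parent, prev_qlen - 1,
				    prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	tree_reduce_backlog(parent, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

int main(void)
{
	struct toy_qdisc child  = { .qlen = 10, .backlog = 10000 };
	struct toy_qdisc parent = { .qlen = 10, .backlog = 10000 };
	unsigned int pkt_len = 1000;

	int ret = child_enqueue(&child, &parent, pkt_len, true);
	if (ret == NET_XMIT_SUCCESS) {
		parent.qlen++;             /* parent accounts the packet    */
		parent.backlog += pkt_len; /* only on non-drop return codes */
	}
	/* both levels agree on 8 packets / 8000 bytes either way */
	printf("child %u/%u, parent %u/%u\n",
	       child.qlen, child.backlog, parent.qlen, parent.backlog);
	return 0;
}

With own_flow_hit set, the child returns NET_XMIT_CN and reduces the parent by 2 packets / 2000 bytes; with it clear, it reduces by the full 3 packets / 3000 bytes and the parent re-adds the new packet on NET_XMIT_SUCCESS. Either way both levels end at 8 packets / 8000 bytes, which is the invariant the pre-fix code violated by always reducing the full amount while still returning NET_XMIT_CN.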