kernel: Copy patches from kernel 4.14 to 4.19
diff --git a/target/linux/generic/backport-4.19/363-v4.18-netfilter-nf_flow_table-add-support-for-sending-flow.patch b/target/linux/generic/backport-4.19/363-v4.18-netfilter-nf_flow_table-add-support-for-sending-flow.patch
new file mode 100644
index 0000000..905880f
--- /dev/null
+++ b/target/linux/generic/backport-4.19/363-v4.18-netfilter-nf_flow_table-add-support-for-sending-flow.patch
@@ -0,0 +1,99 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 25 Feb 2018 15:41:11 +0100
+Subject: [PATCH] netfilter: nf_flow_table: add support for sending flows
+ back to the slow path
+
+Reset the timeout. For TCP, also set the state so that the next
+incoming packets are used to re-initialize window tracking.
+This allows the slow path to take over again once the offload state
+has been torn down.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
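For illustration, a minimal sketch of the two ways an offload entry can
now go away; the helper name example_flow_end() and the
connection_closed condition are hypothetical, and only the flag and the
function touched by the hunks below are taken from the patch:

        /* Hypothetical caller, for illustration only. */
        static void example_flow_end(struct flow_offload *flow,
                                     bool connection_closed)
        {
                if (connection_closed) {
                        /* The connection itself is gone: with
                         * FLOW_OFFLOAD_DYING set, flow_offload_free()
                         * also deletes the conntrack entry.
                         */
                        flow->flags |= FLOW_OFFLOAD_DYING;
                } else {
                        /* Hand the flow back to the slow path: the
                         * conntrack timeout and TCP window tracking
                         * state are fixed up so the regular forwarding
                         * path can resume tracking the connection.
                         */
                        flow_offload_teardown(flow);
                }
        }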
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -100,6 +100,43 @@ err_ct_refcnt:
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_alloc);
+
++static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
++{
++      tcp->state = TCP_CONNTRACK_ESTABLISHED;
++      tcp->seen[0].td_maxwin = 0;
++      tcp->seen[1].td_maxwin = 0;
++}
++
++static void flow_offload_fixup_ct_state(struct nf_conn *ct)
++{
++      const struct nf_conntrack_l4proto *l4proto;
++      struct net *net = nf_ct_net(ct);
++      unsigned int *timeouts;
++      unsigned int timeout;
++      int l4num;
++
++      l4num = nf_ct_protonum(ct);
++      if (l4num == IPPROTO_TCP)
++              flow_offload_fixup_tcp(&ct->proto.tcp);
++
++      l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
++      if (!l4proto)
++              return;
++
++      timeouts = l4proto->get_timeouts(net);
++      if (!timeouts)
++              return;
++
++      if (l4num == IPPROTO_TCP)
++              timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
++      else if (l4num == IPPROTO_UDP)
++              timeout = timeouts[UDP_CT_REPLIED];
++      else
++              return;
++
++      ct->timeout = nfct_time_stamp + timeout;
++}
++
+ void flow_offload_free(struct flow_offload *flow)
+ {
+       struct flow_offload_entry *e;
+@@ -107,7 +144,8 @@ void flow_offload_free(struct flow_offlo
+       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
+       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
+       e = container_of(flow, struct flow_offload_entry, flow);
+-      nf_ct_delete(e->ct, 0, 0);
++      if (flow->flags & FLOW_OFFLOAD_DYING)
++              nf_ct_delete(e->ct, 0, 0);
+       nf_ct_put(e->ct);
+       kfree_rcu(e, rcu_head);
+ }
+@@ -164,6 +202,8 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
+ static void flow_offload_del(struct nf_flowtable *flow_table,
+                            struct flow_offload *flow)
+ {
++      struct flow_offload_entry *e;
++
+       rhashtable_remove_fast(&flow_table->rhashtable,
+                              &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+                              nf_flow_offload_rhash_params);
+@@ -171,12 +211,20 @@ static void flow_offload_del(struct nf_f
+                              &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+                              nf_flow_offload_rhash_params);
+
++      e = container_of(flow, struct flow_offload_entry, flow);
++      clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
++
+       flow_offload_free(flow);
+ }
+
+ void flow_offload_teardown(struct flow_offload *flow)
+ {
++      struct flow_offload_entry *e;
++
+       flow->flags |= FLOW_OFFLOAD_TEARDOWN;
++
++      e = container_of(flow, struct flow_offload_entry, flow);
++      flow_offload_fixup_ct_state(e->ct);
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
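In short, the fixup leaves the conntrack entry in a state the slow path
can resume from immediately: setting td_maxwin to 0 makes nf_conntrack's
TCP window tracking re-learn its state from the next packet seen in each
direction rather than checking packets against stale pre-offload
windows, and the timeout is re-armed to the normal established (TCP) or
replied (UDP) value so the entry does not expire before traffic reaches
the slow path again. A condensed, commented restatement of the fixup
under an illustrative name (for reading only; the authoritative code is
in the hunks above):

        /* Condensed restatement of flow_offload_fixup_ct_state(). */
        static void example_fixup_ct_state(struct nf_conn *ct)
        {
                const struct nf_conntrack_l4proto *l4proto;
                struct net *net = nf_ct_net(ct);
                unsigned int *timeouts;
                int l4num = nf_ct_protonum(ct);

                if (l4num == IPPROTO_TCP) {
                        /* td_maxwin == 0 means "no window state learned
                         * yet", so tracking is rebuilt from the next
                         * packet in each direction.
                         */
                        ct->proto.tcp.state = TCP_CONNTRACK_ESTABLISHED;
                        ct->proto.tcp.seen[0].td_maxwin = 0;
                        ct->proto.tcp.seen[1].td_maxwin = 0;
                }

                /* Re-arm the timeout as if a packet had just been
                 * tracked by the slow path.
                 */
                l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
                if (!l4proto)
                        return;

                timeouts = l4proto->get_timeouts(net);
                if (!timeouts)
                        return;

                if (l4num == IPPROTO_TCP)
                        ct->timeout = nfct_time_stamp +
                                      timeouts[TCP_CONNTRACK_ESTABLISHED];
                else if (l4num == IPPROTO_UDP)
                        ct->timeout = nfct_time_stamp +
                                      timeouts[UDP_CT_REPLIED];
        }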