kernel: backport upstream fix for CVE-2018-5390
author:    Jo-Philipp Wich <jo@mein.io>  Wed, 8 Aug 2018 09:12:18 +0000 (11:12 +0200)
committer: Jo-Philipp Wich <jo@mein.io>  Wed, 8 Aug 2018 12:50:31 +0000 (14:50 +0200)
Backport an upstream fix for CVE-2018-5390 ("SegmentSmack"), a remotely
exploitable TCP denial of service flaw affecting Linux 4.9 and later.

The fixes are included in Linux 4.14.59 and later, but have not yet
landed in the 4.9 stable series as of 4.9.118.

Signed-off-by: Jo-Philipp Wich <jo@mein.io>
target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch [new file with mode: 0644]
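
For context, the helper added by the patch below saturates the combined
gso_segs count of two coalesced skbs: each skb counts as at least one
segment, and the sum is clamped to 0xFFFF because gso_segs is a 16-bit
field. A minimal standalone sketch of that arithmetic, with plain C in
place of the kernel's max_t()/min_t() macros (coalesced_gso_segs() is an
illustrative name, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the clamping in tcp_ooo_try_coalesce(): treat a zero
     * gso_segs as 1, add the two counts in 32 bits, then saturate at
     * 0xFFFF since the gso_segs field is only 16 bits wide. */
    static uint16_t coalesced_gso_segs(uint16_t to, uint16_t from)
    {
            uint32_t sum = (uint32_t)(to ? to : 1) + (from ? from : 1);

            return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
    }

    int main(void)
    {
            printf("%u\n", coalesced_gso_segs(0, 0));         /* 2 */
            printf("%u\n", coalesced_gso_segs(40000, 40000)); /* 65535 */
            return 0;
    }

Without the clamp, two coalesced skbs whose counts sum past 65535 would
wrap the 16-bit field and make later tcp_drop() accounting understate
the number of dropped segments.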

diff --git a/target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch b/target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch
new file mode 100644 (file)
index 0000000..4641727
--- /dev/null
@@ -0,0 +1,76 @@
+From 74b120c45aebf4278e1dedc55f5fa24d8ea83cdc Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 23 Jul 2018 09:28:21 -0700
+Subject: tcp: add tcp_ooo_try_coalesce() helper
+
+commit 58152ecbbcc6a0ce7fddd5bf5f6ee535834ece0c upstream.
+
+In case an skb in the out_of_order_queue is the result of
+multiple skbs coalescing, we would like to get proper gso_segs
+counter tracking, so that a future tcp_drop() can report an
+accurate number.
+
+I chose not to implement this tracking for skbs in the receive
+queue, since they are not dropped unless the socket is disconnected.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a9be8df108b4..9d0b73aa649f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4370,6 +4370,23 @@ static bool tcp_try_coalesce(struct sock *sk,
+       return true;
+ }
+ 
++static bool tcp_ooo_try_coalesce(struct sock *sk,
++                           struct sk_buff *to,
++                           struct sk_buff *from,
++                           bool *fragstolen)
++{
++      bool res = tcp_try_coalesce(sk, to, from, fragstolen);
++
++      /* In case tcp_drop() is called later, update to->gso_segs */
++      if (res) {
++              u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
++                             max_t(u16, 1, skb_shinfo(from)->gso_segs);
++
++              skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
++      }
++      return res;
++}
++
+ static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+ {
+       sk_drops_add(sk, skb);
+@@ -4493,7 +4510,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+       /* In the typical case, we are adding an skb to the end of the list.
+        * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+        */
+-      if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
++      if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
++                               skb, &fragstolen)) {
+ coalesce_done:
+               tcp_grow_window(sk, skb);
+               kfree_skb_partial(skb, fragstolen);
+@@ -4543,7 +4561,8 @@ coalesce_done:
+                               tcp_drop(sk, skb1);
+                               goto merge_right;
+                       }
+-              } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
++              } else if (tcp_ooo_try_coalesce(sk, skb1,
++                                              skb, &fragstolen)) {
+                       goto coalesce_done;
+               }
+               p = &parent->rb_right;
+-- 
+cgit 1.2-0.3.lf.el7
+
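
The first call site rewritten above is the tail fast path described in
the in-tree comment: tp->ooo_last_skb caches the last out-of-order skb
so that the common "new data extends the tail" case coalesces without
the O(log N) rbtree lookup. A rough, self-contained sketch of that
shape, with a sorted list standing in for the kernel's rbtree and
hypothetical names throughout:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A queued range of sequence numbers, [start, end). */
    struct seg {
            uint32_t start, end;
            struct seg *next;
    };

    /* Analogue of a successful tcp_try_coalesce(): absorb 'from'
     * into 'to' when it begins exactly where 'to' ends. */
    static int try_coalesce(struct seg *to, const struct seg *from)
    {
            if (from->start != to->end)
                    return 0;
            to->end = from->end;
            return 1;
    }

    struct ooo_queue {
            struct seg *head; /* kept sorted; stands in for the rbtree */
            struct seg *last; /* cached tail, like tp->ooo_last_skb */
    };

    static void ooo_enqueue(struct ooo_queue *q, struct seg *n)
    {
            struct seg **p = &q->head;

            /* Fast path: data lands right after the tail, so merge
             * into the cached last segment and skip the ordered walk. */
            if (q->last && try_coalesce(q->last, n)) {
                    free(n);
                    return;
            }
            while (*p && (*p)->start < n->start)
                    p = &(*p)->next; /* slow path: ordered insertion */
            n->next = *p;
            *p = n;
            if (!n->next)
                    q->last = n;
    }

    int main(void)
    {
            struct ooo_queue q = { NULL, NULL };
            uint32_t r[][2] = { { 100, 200 }, { 200, 300 }, { 50, 100 } };

            for (int i = 0; i < 3; i++) {
                    struct seg *n = malloc(sizeof(*n));

                    if (!n)
                            return 1;
                    n->start = r[i][0];
                    n->end = r[i][1];
                    n->next = NULL;
                    ooo_enqueue(&q, n);
            }
            for (struct seg *s = q.head; s; s = s->next)
                    printf("[%u, %u)\n", (unsigned)s->start, (unsigned)s->end);
            return 0;
    }

The patch itself does not change this structure; it only swaps
tcp_try_coalesce() for the gso_segs-tracking wrapper at the two
out-of-order call sites, so drops from that queue are accounted
accurately.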