if (nval != oval)
continue;
-@@ -2136,7 +2136,7 @@ static bool tcp_small_queue_check(struct
+@@ -2140,7 +2140,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
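For context: this hunk is the heart of TSQ throttling — set TSQ_THROTTLED first, then re-test queue occupancy so a TX completion racing with the flag write is not lost. A rough sketch of the pattern, using the post-patch sk->sk_tsq_flags field; the helper name is illustrative, not the kernel's exact code:

/* Sketch of the set-then-recheck pattern used by tcp_small_queue_check();
 * small_queue_throttled() is an illustrative name, not the real function.
 */
static bool small_queue_throttled(struct sock *sk, unsigned int limit)
{
	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		/* A TX completion may have shrunk sk_wmem_alloc between the
		 * first read and set_bit(); the barrier orders the flag
		 * write against the re-read, closing that window.
		 */
		smp_mb__after_atomic();
		if (atomic_read(&sk->sk_wmem_alloc) > limit)
			return true;	/* still over limit: hold this skb */
	}
	return false;			/* ok to transmit now */
}

Without the barrier and second read, a completion landing in that window could leave the socket throttled with nothing left in flight to lift the throttle.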
-@@ -2234,8 +2234,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2238,8 +2238,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
if (tcp_small_queue_check(sk, skb, 0))
break;
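The break above stalls the write loop on purpose; progress resumes from the TSQ tasklet once TX completions free enough bytes. A simplified sketch of that resume step, condensed from tcp_tsq_handler() (not the exact code):

/* The skb destructor flips TSQ_THROTTLED to TSQ_QUEUED (the
 * "if (nval != oval) continue;" cmpxchg retry at the top of this
 * excerpt) and schedules the TSQ tasklet, which re-enters the
 * write loop roughly like this:
 */
static void example_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}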
-@@ -3546,8 +3546,6 @@ void __tcp_send_ack(struct sock *sk, u32
+@@ -3550,8 +3550,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
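For reference, the ~784 figure comes from truesize accounting: SKB_TRUESIZE() charges aligned skb metadata on top of the payload, and the exact total depends on the kernel config. The escape hatch the comment alludes to is skb_set_tcp_pure_ack() from include/net/tcp.h, which gives the ACK a token truesize so it barely registers in sk_wmem_alloc:

/* SKB_TRUESIZE(X) = X + SKB_DATA_ALIGN(sizeof(struct sk_buff))
 *                     + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * so even a minimal pure ACK would be charged ~784 bytes. Marking it
 * as a pure ack replaces that charge with a token value:
 */
static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}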
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
-@@ -326,7 +326,7 @@ static void tcp_delack_timer(unsigned lo
+@@ -327,7 +327,7 @@ static void tcp_delack_timer(unsigned lo
inet_csk(sk)->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* deleguate our work to tcp_release_cb() */
sock_hold(sk);
}
bh_unlock_sock(sk);
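Both timer hunks in this file, the delack hunk above and the write-timer hunk below, share one deferral shape: if a process owns the socket lock, the softirq records a deferred-event bit and pins the socket; tcp_release_cb() later claims the bits, replays the work, and drops the references. A hedged sketch of both sides, with do_deferred_work() as a hypothetical stand-in for the real handlers:

/* Producer: the timer cannot touch TCP state while a process owns the
 * socket, so it leaves a bit for tcp_release_cb() and holds a reference.
 */
static void example_timer(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		do_deferred_work(sk);			/* fast path */
	} else if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				     &sk->sk_tsq_flags)) {
		sock_hold(sk);		/* dropped in tcp_release_cb() */
	}
	bh_unlock_sock(sk);
	sock_put(sk);				/* timer's own reference */
}

/* Consumer, condensed from tcp_release_cb(): claim every deferred bit
 * atomically, then run the matching handlers.
 */
void example_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);		/* pairs with sock_hold() above */
	}
	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
}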
-@@ -609,7 +609,7 @@ static void tcp_write_timer(unsigned lon
+@@ -610,7 +610,7 @@ static void tcp_write_timer(unsigned lon
tcp_write_timer_handler(sk);
} else {
/* delegate our work to tcp_release_cb() */