/* It is possible TX completion already happened
 * before we set TSQ_THROTTLED, so we must
 * test again the condition.
 */
/* It is possible TX completion already happened
 * before we set TSQ_THROTTLED, so we must
 * test again the condition.
 */
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 * too much.
 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
 */
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 * too much.
 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
 */
inet_csk(sk)->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */
inet_csk(sk)->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */