From 7aa5470c2c09265902b5e4289afa82e4e7c2987e Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Sat, 3 Dec 2016 11:14:57 -0800
Subject: [PATCH 08/10] tcp: tsq: move tsq_flags close to sk_wmem_alloc

tsq_flags being in the same cache line as sk_wmem_alloc
makes a lot of sense. Both fields are changed from tcp_wfree()
and more generally by various TSQ related functions.

Prior patch made room in struct sock and added sk_tsq_flags,
this patch deletes tsq_flags from struct tcp_sock.

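As a rough illustration of the layout idea, here is a minimal, hypothetical
C sketch (struct sock_sketch and its members are invented names, not the
kernel's real definitions): keeping the flag word adjacent to the
write-queue accounting lets a tx-completion path that updates both touch
a single cache line instead of two.

    /* Hypothetical, simplified layout -- not the kernel's real struct sock. */
    #include <stddef.h>
    #include <stdio.h>

    struct sock_sketch {
            unsigned int  sk_wmem_alloc; /* bytes queued for transmit */
            unsigned long sk_tsq_flags;  /* TSQ flag bits, now adjacent */
            /* ... colder members follow ... */
    };

    int main(void)
    {
            /* Both hot fields land in the first 16 bytes, so they share a
             * 64-byte cache line on common CPUs; with the flag word kept in
             * struct tcp_sock it typically sat on a different line. */
            printf("sk_wmem_alloc at %zu, sk_tsq_flags at %zu\n",
                   offsetof(struct sock_sketch, sk_wmem_alloc),
                   offsetof(struct sock_sketch, sk_tsq_flags));
            return 0;
    }
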
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/tcp.h   |  1 -
 net/ipv4/tcp.c        |  4 ++--
 net/ipv4/tcp_ipv4.c   |  2 +-
 net/ipv4/tcp_output.c | 24 +++++++++++-------------
 net/ipv4/tcp_timer.c  |  4 ++--
 net/ipv6/tcp_ipv6.c   |  2 +-
 6 files changed, 17 insertions(+), 20 deletions(-)

--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -192,7 +192,6 @@ struct tcp_sock {
 	u32	tsoffset;	/* timestamp offset */
 
 	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
-	unsigned long	tsq_flags;
 
 	/* Data for direct copy to user */
 	struct {
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -665,9 +665,9 @@ static void tcp_push(struct sock *sk, in
 	if (tcp_should_autocork(sk, skb, size_goal)) {
 
 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
-		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
-			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		}
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -446,7 +446,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
 		} else {
-			if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+			if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 				sock_hold(sk);
 		}
 		goto out;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -772,14 +772,15 @@ static void tcp_tasklet_func(unsigned lo
 	list_for_each_safe(q, n, &list) {
 		tp = list_entry(q, struct tcp_sock, tsq_node);
 		list_del(&tp->tsq_node);
-		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 
 		sk = (struct sock *)tp;
+		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
 		if (!sk->sk_lock.owned &&
-		    test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) {
+		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk)) {
-				clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 				tcp_tsq_handler(sk);
 			}
 			bh_unlock_sock(sk);
@@ -802,16 +803,15 @@ static void tcp_tasklet_func(unsigned lo
  */
 void tcp_release_cb(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nflags;
 
 	/* perform an atomic operation only if at least one flag is set */
 	do {
-		flags = tp->tsq_flags;
+		flags = sk->sk_tsq_flags;
 		if (!(flags & TCP_DEFERRED_ALL))
 			return;
 		nflags = flags & ~TCP_DEFERRED_ALL;
-	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 
 	if (flags & TCPF_TSQ_DEFERRED)
 		tcp_tsq_handler(sk);
@@ -883,7 +883,7 @@ void tcp_wfree(struct sk_buff *skb)
 	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
-	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
+	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
 		bool empty;
 
@@ -891,7 +891,7 @@ void tcp_wfree(struct sk_buff *skb)
 			goto out;
 
 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
-		nval = cmpxchg(&tp->tsq_flags, oval, nval);
+		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
 		if (nval != oval)
 			continue;
 
@@ -2140,7 +2140,7 @@ static bool tcp_small_queue_check(struct
 		    skb->prev == sk->sk_write_queue.next)
 			return false;
 
-		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must
 		 * test again the condition.
@@ -2238,8 +2238,8 @@ static bool tcp_write_xmit(struct sock *
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags))
-			clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 		if (tcp_small_queue_check(sk, skb, 0))
 			break;
 
@@ -3550,8 +3550,6 @@ void __tcp_send_ack(struct sock *sk, u32
 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 	 * too much.
 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
-	 * We also avoid tcp_wfree() overhead (cache line miss accessing
-	 * tp->tsq_flags) by using regular sock_wfree()
 	 */
 	skb_set_tcp_pure_ack(buff);
 
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -327,7 +327,7 @@ static void tcp_delack_timer(unsigned lo
 		inet_csk(sk)->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
@@ -610,7 +610,7 @@ static void tcp_write_timer(unsigned lon
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -404,7 +404,7 @@ static void tcp_v6_err(struct sk_buff *s
 		if (!sock_owned_by_user(sk))
 			tcp_v6_mtu_reduced(sk);
 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
-					   &tp->tsq_flags))
+					   &sk->sk_tsq_flags))
 			sock_hold(sk);
 		goto out;
 	}