1 From a9b204d1564702b704ad6fe74f10a102c7b87ba3 Mon Sep 17 00:00:00 2001
2 From: Eric Dumazet <edumazet@google.com>
3 Date: Sat, 3 Dec 2016 11:14:53 -0800
4 Subject: [PATCH 04/10] tcp: tsq: avoid one atomic in tcp_wfree()
6 Under high load, tcp_wfree() has an atomic operation trying
7 to schedule a tasklet over and over.
9 We can schedule it only if our per cpu list was empty.
11 Signed-off-by: Eric Dumazet <edumazet@google.com>
12 Signed-off-by: David S. Miller <davem@davemloft.net>
14 net/ipv4/tcp_output.c | 5 ++++-
15 1 file changed, 4 insertions(+), 1 deletion(-)
17 --- a/net/ipv4/tcp_output.c
18 +++ b/net/ipv4/tcp_output.c
19 @@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
21 for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
22 struct tsq_tasklet *tsq;
23 + bool empty;
25 if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
27 @@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
28 /* queue this socket to tasklet queue */
29 local_irq_save(flags);
30 tsq = this_cpu_ptr(&tsq_tasklet);
31 + empty = list_empty(&tsq->head);
32 list_add(&tp->tsq_node, &tsq->head);
33 - tasklet_schedule(&tsq->tasklet);
34 + if (empty)
35 + tasklet_schedule(&tsq->tasklet);
36 local_irq_restore(flags);