08e5cbb5b9049a24e9bf77b6653ab9366fb6dae0
[openwrt/staging/wigyori.git] / package / kernel / mac80211 / patches / subsys / 312-net-fq_impl-do-not-maintain-a-backlog-sorted-list-of.patch
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Wed, 25 Nov 2020 18:10:34 +0100
3 Subject: [PATCH] net/fq_impl: do not maintain a backlog-sorted list of
4 flows
5
6 A sorted flow list is only needed to drop packets in the biggest flow when
7 hitting the overmemory condition.
8 By scanning flows only when needed, we can avoid paying the cost of
9 maintaining the list under normal conditions.
10 In order to avoid scanning lots of empty flows and touching too many cold
11 cache lines, a bitmap of flows with backlog is maintained.
12
13 Signed-off-by: Felix Fietkau <nbd@nbd.name>
14 ---
15
16 --- a/include/net/fq.h
17 +++ b/include/net/fq.h
18 @@ -19,8 +19,6 @@ struct fq_tin;
19 * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
20 * (deficit round robin) based round robin queuing similar to the one
21 * found in net/sched/sch_fq_codel.c
22 - * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
23 - * fat flows and efficient head-dropping if packet limit is reached
24 * @queue: sk_buff queue to hold packets
25 * @backlog: number of bytes pending in the queue. The number of packets can be
26 * found in @queue.qlen
27 @@ -29,7 +27,6 @@ struct fq_tin;
28 struct fq_flow {
29 struct fq_tin *tin;
30 struct list_head flowchain;
31 - struct list_head backlogchain;
32 struct sk_buff_head queue;
33 u32 backlog;
34 int deficit;
35 @@ -47,6 +44,7 @@ struct fq_flow {
36 struct fq_tin {
37 struct list_head new_flows;
38 struct list_head old_flows;
39 + struct list_head tin_list;
40 struct fq_flow default_flow;
41 u32 backlog_bytes;
42 u32 backlog_packets;
43 @@ -60,14 +58,14 @@ struct fq_tin {
44 /**
45 * struct fq - main container for fair queuing purposes
46 *
47 - * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
48 - * head-dropping when @backlog reaches @limit
49 * @limit: max number of packets that can be queued across all flows
50 * @backlog: number of packets queued across all flows
51 */
52 struct fq {
53 struct fq_flow *flows;
54 - struct list_head backlogs;
55 + unsigned long *flows_bitmap;
56 +
57 + struct list_head tin_backlog;
58 spinlock_t lock;
59 u32 flows_cnt;
60 u32 limit;
61 --- a/include/net/fq_impl.h
62 +++ b/include/net/fq_impl.h
63 @@ -17,12 +17,24 @@ __fq_adjust_removal(struct fq *fq, struc
64 unsigned int bytes, unsigned int truesize)
65 {
66 struct fq_tin *tin = flow->tin;
67 + int idx;
68
69 tin->backlog_bytes -= bytes;
70 tin->backlog_packets -= packets;
71 flow->backlog -= bytes;
72 fq->backlog -= packets;
73 fq->memory_usage -= truesize;
74 +
75 + if (flow->backlog)
76 + return;
77 +
78 + if (flow == &tin->default_flow) {
79 + list_del_init(&tin->tin_list);
80 + return;
81 + }
82 +
83 + idx = flow - fq->flows;
84 + __clear_bit(idx, fq->flows_bitmap);
85 }
86
87 static void fq_adjust_removal(struct fq *fq,
88 @@ -32,24 +44,6 @@ static void fq_adjust_removal(struct fq
89 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
90 }
91
92 -static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
93 -{
94 - struct fq_flow *i;
95 -
96 - if (flow->backlog == 0) {
97 - list_del_init(&flow->backlogchain);
98 - } else {
99 - i = flow;
100 -
101 - list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
102 - if (i->backlog < flow->backlog)
103 - break;
104 -
105 - list_move_tail(&flow->backlogchain,
106 - &i->backlogchain);
107 - }
108 -}
109 -
110 static struct sk_buff *fq_flow_dequeue(struct fq *fq,
111 struct fq_flow *flow)
112 {
113 @@ -62,7 +56,6 @@ static struct sk_buff *fq_flow_dequeue(s
114 return NULL;
115
116 fq_adjust_removal(fq, flow, skb);
117 - fq_rejigger_backlog(fq, flow);
118
119 return skb;
120 }
121 @@ -90,7 +83,6 @@ static int fq_flow_drop(struct fq *fq, s
122 } while (packets < pending);
123
124 __fq_adjust_removal(fq, flow, packets, bytes, truesize);
125 - fq_rejigger_backlog(fq, flow);
126
127 return packets;
128 }
129 @@ -170,22 +162,36 @@ static struct fq_flow *fq_flow_classify(
130 return flow;
131 }
132
133 -static void fq_recalc_backlog(struct fq *fq,
134 - struct fq_tin *tin,
135 - struct fq_flow *flow)
136 -{
137 - struct fq_flow *i;
138 -
139 - if (list_empty(&flow->backlogchain))
140 - list_add_tail(&flow->backlogchain, &fq->backlogs);
141 -
142 - i = flow;
143 - list_for_each_entry_continue_reverse(i, &fq->backlogs,
144 - backlogchain)
145 - if (i->backlog > flow->backlog)
146 - break;
147 +static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
148 +{
149 + struct fq_tin *tin;
150 + struct fq_flow *flow = NULL;
151 + u32 len = 0;
152 + int i;
153 +
154 + for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
155 + struct fq_flow *cur = &fq->flows[i];
156 + unsigned int cur_len;
157 +
158 + cur_len = cur->backlog;
159 + if (cur_len <= len)
160 + continue;
161 +
162 + flow = cur;
163 + len = cur_len;
164 + }
165
166 - list_move(&flow->backlogchain, &i->backlogchain);
167 + list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
168 + unsigned int cur_len = tin->default_flow.backlog;
169 +
170 + if (cur_len <= len)
171 + continue;
172 +
173 + flow = &tin->default_flow;
174 + len = cur_len;
175 + }
176 +
177 + return flow;
178 }
179
180 static void fq_tin_enqueue(struct fq *fq,
181 @@ -200,6 +206,13 @@ static void fq_tin_enqueue(struct fq *fq
182
183 flow = fq_flow_classify(fq, tin, idx, skb);
184
185 + if (!flow->backlog) {
186 + if (flow != &tin->default_flow)
187 + __set_bit(idx, fq->flows_bitmap);
188 + else if (list_empty(&tin->tin_list))
189 + list_add(&tin->tin_list, &fq->tin_backlog);
190 + }
191 +
192 flow->tin = tin;
193 flow->backlog += skb->len;
194 tin->backlog_bytes += skb->len;
195 @@ -207,8 +220,6 @@ static void fq_tin_enqueue(struct fq *fq
196 fq->memory_usage += skb->truesize;
197 fq->backlog++;
198
199 - fq_recalc_backlog(fq, tin, flow);
200 -
201 if (list_empty(&flow->flowchain)) {
202 flow->deficit = fq->quantum;
203 list_add_tail(&flow->flowchain,
204 @@ -218,9 +229,7 @@ static void fq_tin_enqueue(struct fq *fq
205 __skb_queue_tail(&flow->queue, skb);
206 oom = (fq->memory_usage > fq->memory_limit);
207 while (fq->backlog > fq->limit || oom) {
208 - flow = list_first_entry_or_null(&fq->backlogs,
209 - struct fq_flow,
210 - backlogchain);
211 + flow = fq_find_fattest_flow(fq);
212 if (!flow)
213 return;
214
215 @@ -255,8 +264,6 @@ static void fq_flow_filter(struct fq *fq
216 fq_adjust_removal(fq, flow, skb);
217 free_func(fq, tin, flow, skb);
218 }
219 -
220 - fq_rejigger_backlog(fq, flow);
221 }
222
223 static void fq_tin_filter(struct fq *fq,
224 @@ -279,16 +286,18 @@ static void fq_flow_reset(struct fq *fq,
225 struct fq_flow *flow,
226 fq_skb_free_t free_func)
227 {
228 + struct fq_tin *tin = flow->tin;
229 struct sk_buff *skb;
230
231 while ((skb = fq_flow_dequeue(fq, flow)))
232 - free_func(fq, flow->tin, flow, skb);
233 + free_func(fq, tin, flow, skb);
234
235 - if (!list_empty(&flow->flowchain))
236 + if (!list_empty(&flow->flowchain)) {
237 list_del_init(&flow->flowchain);
238 -
239 - if (!list_empty(&flow->backlogchain))
240 - list_del_init(&flow->backlogchain);
241 + if (list_empty(&tin->new_flows) &&
242 + list_empty(&tin->old_flows))
243 + list_del_init(&tin->tin_list);
244 + }
245
246 flow->tin = NULL;
247
248 @@ -314,6 +323,7 @@ static void fq_tin_reset(struct fq *fq,
249 fq_flow_reset(fq, flow, free_func);
250 }
251
252 + WARN_ON_ONCE(!list_empty(&tin->tin_list));
253 WARN_ON_ONCE(tin->backlog_bytes);
254 WARN_ON_ONCE(tin->backlog_packets);
255 }
256 @@ -321,7 +331,6 @@ static void fq_tin_reset(struct fq *fq,
257 static void fq_flow_init(struct fq_flow *flow)
258 {
259 INIT_LIST_HEAD(&flow->flowchain);
260 - INIT_LIST_HEAD(&flow->backlogchain);
261 __skb_queue_head_init(&flow->queue);
262 }
263
264 @@ -329,6 +338,7 @@ static void fq_tin_init(struct fq_tin *t
265 {
266 INIT_LIST_HEAD(&tin->new_flows);
267 INIT_LIST_HEAD(&tin->old_flows);
268 + INIT_LIST_HEAD(&tin->tin_list);
269 fq_flow_init(&tin->default_flow);
270 }
271
272 @@ -337,8 +347,8 @@ static int fq_init(struct fq *fq, int fl
273 int i;
274
275 memset(fq, 0, sizeof(fq[0]));
276 - INIT_LIST_HEAD(&fq->backlogs);
277 spin_lock_init(&fq->lock);
278 + INIT_LIST_HEAD(&fq->tin_backlog);
279 fq->flows_cnt = max_t(u32, flows_cnt, 1);
280 fq->quantum = 300;
281 fq->limit = 8192;
282 @@ -348,6 +358,14 @@ static int fq_init(struct fq *fq, int fl
283 if (!fq->flows)
284 return -ENOMEM;
285
286 + fq->flows_bitmap = kcalloc(BITS_TO_LONGS(fq->flows_cnt), sizeof(long),
287 + GFP_KERNEL);
288 + if (!fq->flows_bitmap) {
289 + kvfree(fq->flows);
290 + fq->flows = NULL;
291 + return -ENOMEM;
292 + }
293 +
294 for (i = 0; i < fq->flows_cnt; i++)
295 fq_flow_init(&fq->flows[i]);
296
297 @@ -364,6 +382,9 @@ static void fq_reset(struct fq *fq,
298
299 kvfree(fq->flows);
300 fq->flows = NULL;
301 +
302 + kfree(fq->flows_bitmap);
303 + fq->flows_bitmap = NULL;
304 }
305
306 #endif
307 --- a/net/mac80211/tx.c
308 +++ b/net/mac80211/tx.c
309 @@ -3364,8 +3364,6 @@ out_recalc:
310 if (head->len != orig_len) {
311 flow->backlog += head->len - orig_len;
312 tin->backlog_bytes += head->len - orig_len;
313 -
314 - fq_recalc_backlog(fq, tin, flow);
315 }
316 out:
317 spin_unlock_bh(&fq->lock);