openwrt/openwrt.git: target/linux/generic/backport-5.15/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch
From 530716d008ca26315f246cd70dc1cefc636beaa4 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Sun, 18 Sep 2022 02:00:09 -0600
Subject: [PATCH 12/29] mm: multi-gen LRU: debugfs interface
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add /sys/kernel/debug/lru_gen for working set estimation and proactive
reclaim. These techniques are commonly used to optimize job scheduling
(bin packing) in data centers [1][2].
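
For illustration, the write interface implemented by lru_gen_seq_write()
and run_cmd() below takes '+' (aging) and '-' (eviction) commands; the
IDs, generation numbers and optional fields here are placeholders rather
than values from a real system:

    # age: max_gen_nr must match the current maximum generation number;
    # larger values are rejected and smaller ones are a no-op
    echo '+ memcg_id node_id max_gen_nr [can_swap [force_scan]]' \
        >/sys/kernel/debug/lru_gen

    # evict: reclaim generations no newer than min_gen_nr until
    # nr_to_reclaim pages are reclaimed or a signal is pending
    echo '- memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]]' \
        >/sys/kernel/debug/lru_gen

Multiple commands can be separated by ',', ';' or newlines.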

Compared with the page table-based approach and the PFN-based
approach, this lruvec-based approach has the following advantages:
1. It offers better choices because it is aware of memcgs, NUMA nodes,
   shared mappings and unmapped page cache.
2. It is more scalable because it is O(nr_hot_pages), whereas the
   PFN-based approach is O(nr_total_pages).

Add /sys/kernel/debug/lru_gen_full for debugging.
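
As a sketch of the layout produced by lru_gen_seq_show() below, where the
field names are descriptive placeholders rather than literal output, each
memcg/node pair is listed with one line per generation:

    memcg  memcg_id  memcg_path
     node  node_id
         gen_nr  age_in_ms  nr_anon_pages  nr_file_pages
         ...

lru_gen_full additionally appends, per generation, the per-tier refault
statistics and the mm walk stats printed by lru_gen_seq_show_full().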

[1] https://dl.acm.org/doi/10.1145/3297858.3304053
[2] https://dl.acm.org/doi/10.1145/3503222.3507731

Link: https://lkml.kernel.org/r/20220918080010.2920238-13-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/nodemask.h |   1 +
 mm/vmscan.c              | 411 ++++++++++++++++++++++++++++++++++++++-
 2 files changed, 402 insertions(+), 10 deletions(-)

--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -485,6 +485,7 @@ static inline int num_node_state(enum no
 #define first_online_node	0
 #define first_memory_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
+#define next_memory_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1U
 #define nr_online_nodes		1U

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,6 +53,7 @@
 #include <linux/pagewalk.h>
 #include <linux/shmem_fs.h>
 #include <linux/ctype.h>
+#include <linux/debugfs.h>

 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -3968,12 +3969,40 @@ static void clear_mm_walk(void)
 	kfree(walk);
 }

-static void inc_min_seq(struct lruvec *lruvec, int type)
+static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
 {
+	int zone;
+	int remaining = MAX_LRU_BATCH;
 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
+
+	if (type == LRU_GEN_ANON && !can_swap)
+		goto done;
+
+	/* prevent cold/hot inversion if force_scan is true */
+	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+		struct list_head *head = &lrugen->lists[old_gen][type][zone];
+
+		while (!list_empty(head)) {
+			struct page *page = lru_to_page(head);
+
+			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
+			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
+			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
+			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);

+			new_gen = page_inc_gen(lruvec, page, false);
+			list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
+
+			if (!--remaining)
+				return false;
+		}
+	}
+done:
 	reset_ctrl_pos(lruvec, type, true);
 	WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
+
+	return true;
 }

 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
@@ -4019,7 +4048,7 @@ next:
 	return success;
 }

-static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
+static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
 {
 	int prev, next;
 	int type, zone;
@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *l
 		if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
 			continue;

-		VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap);
+		VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));

-		inc_min_seq(lruvec, type);
+		while (!inc_min_seq(lruvec, type, can_swap)) {
+			spin_unlock_irq(&lruvec->lru_lock);
+			cond_resched();
+			spin_lock_irq(&lruvec->lru_lock);
+		}
 	}

 	/*
@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *l
 }

 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
-			       struct scan_control *sc, bool can_swap)
+			       struct scan_control *sc, bool can_swap, bool force_scan)
 {
 	bool success;
 	struct lru_gen_mm_walk *walk;
@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lr
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
+	if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
 		goto done;
 	}
@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lr
 	walk->lruvec = lruvec;
 	walk->max_seq = max_seq;
 	walk->can_swap = can_swap;
-	walk->force_scan = false;
+	walk->force_scan = force_scan;

 	do {
 		success = iterate_mm_list(lruvec, walk, &mm);
@@ -4127,7 +4160,7 @@ done:

 	VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));

-	inc_max_seq(lruvec, can_swap);
+	inc_max_seq(lruvec, can_swap, force_scan);
 	/* either this sees any waiters or they will see updated max_seq */
 	if (wq_has_sleeper(&lruvec->mm_state.wait))
 		wake_up_all(&lruvec->mm_state.wait);
@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lr
 	}

 	if (need_aging)
-		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
+		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);

 	return true;
 }
@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(stru
 	if (current_is_kswapd())
 		return 0;

-	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap))
+	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
 		return nr_to_scan;
 done:
 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
@@ -5124,6 +5157,361 @@ static struct attribute_group lru_gen_at
 };

 /******************************************************************************
+ *                          debugfs interface
+ ******************************************************************************/
+
+static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
+{
+	struct mem_cgroup *memcg;
+	loff_t nr_to_skip = *pos;
+
+	m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
+	if (!m->private)
+		return ERR_PTR(-ENOMEM);
+
+	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	do {
+		int nid;
+
+		for_each_node_state(nid, N_MEMORY) {
+			if (!nr_to_skip--)
+				return get_lruvec(memcg, nid);
+		}
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+	return NULL;
+}
+
+static void lru_gen_seq_stop(struct seq_file *m, void *v)
+{
+	if (!IS_ERR_OR_NULL(v))
+		mem_cgroup_iter_break(NULL, lruvec_memcg(v));
+
+	kvfree(m->private);
+	m->private = NULL;
+}
+
+static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	int nid = lruvec_pgdat(v)->node_id;
+	struct mem_cgroup *memcg = lruvec_memcg(v);
+
+	++*pos;
+
+	nid = next_memory_node(nid);
+	if (nid == MAX_NUMNODES) {
+		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+		if (!memcg)
+			return NULL;
+
+		nid = first_memory_node;
+	}
+
+	return get_lruvec(memcg, nid);
+}
+
+static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
+				  unsigned long max_seq, unsigned long *min_seq,
+				  unsigned long seq)
+{
+	int i;
+	int type, tier;
+	int hist = lru_hist_from_seq(seq);
+	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
+		seq_printf(m, "            %10d", tier);
+		for (type = 0; type < ANON_AND_FILE; type++) {
+			const char *s = "   ";
+			unsigned long n[3] = {};
+
+			if (seq == max_seq) {
+				s = "RT ";
+				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
+				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
+			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
+				s = "rep";
+				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+				n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
+				if (tier)
+					n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
+			}
+
+			for (i = 0; i < 3; i++)
+				seq_printf(m, " %10lu%c", n[i], s[i]);
+		}
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "                      ");
+	for (i = 0; i < NR_MM_STATS; i++) {
+		const char *s = "      ";
+		unsigned long n = 0;
+
+		if (seq == max_seq && NR_HIST_GENS == 1) {
+			s = "LOYNFA";
+			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+		} else if (seq != max_seq && NR_HIST_GENS > 1) {
+			s = "loynfa";
+			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+		}
+
+		seq_printf(m, " %10lu%c", n, s[i]);
+	}
+	seq_putc(m, '\n');
+}
+
+static int lru_gen_seq_show(struct seq_file *m, void *v)
+{
+	unsigned long seq;
+	bool full = !debugfs_real_fops(m->file)->write;
+	struct lruvec *lruvec = v;
+	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	int nid = lruvec_pgdat(lruvec)->node_id;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MAX_SEQ(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	if (nid == first_memory_node) {
+		const char *path = memcg ? m->private : "";
+
+#ifdef CONFIG_MEMCG
+		if (memcg)
+			cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
+#endif
+		seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
+	}
+
+	seq_printf(m, " node %5d\n", nid);
+
+	if (!full)
+		seq = min_seq[LRU_GEN_ANON];
+	else if (max_seq >= MAX_NR_GENS)
+		seq = max_seq - MAX_NR_GENS + 1;
+	else
+		seq = 0;
+
+	for (; seq <= max_seq; seq++) {
+		int type, zone;
+		int gen = lru_gen_from_seq(seq);
+		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+		seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
+
+		for (type = 0; type < ANON_AND_FILE; type++) {
+			unsigned long size = 0;
+			char mark = full && seq < min_seq[type] ? 'x' : ' ';
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			seq_printf(m, " %10lu%c", size, mark);
+		}
+
+		seq_putc(m, '\n');
+
+		if (full)
+			lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
+	}
+
+	return 0;
+}
+
+static const struct seq_operations lru_gen_seq_ops = {
+	.start = lru_gen_seq_start,
+	.stop = lru_gen_seq_stop,
+	.next = lru_gen_seq_next,
+	.show = lru_gen_seq_show,
+};
+
+static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+		     bool can_swap, bool force_scan)
+{
+	DEFINE_MAX_SEQ(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	if (seq < max_seq)
+		return 0;
+
+	if (seq > max_seq)
+		return -EINVAL;
+
+	if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
+		return -ERANGE;
+
+	try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
+
+	return 0;
+}
+
+static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+			int swappiness, unsigned long nr_to_reclaim)
+{
+	DEFINE_MAX_SEQ(lruvec);
+
+	if (seq + MIN_NR_GENS > max_seq)
+		return -EINVAL;
+
+	sc->nr_reclaimed = 0;
+
+	while (!signal_pending(current)) {
+		DEFINE_MIN_SEQ(lruvec);
+
+		if (seq < min_seq[!swappiness])
+			return 0;
+
+		if (sc->nr_reclaimed >= nr_to_reclaim)
+			return 0;
+
+		if (!evict_pages(lruvec, sc, swappiness, NULL))
+			return 0;
+
+		cond_resched();
+	}
+
+	return -EINTR;
+}
+
+static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
+		   struct scan_control *sc, int swappiness, unsigned long opt)
+{
+	struct lruvec *lruvec;
+	int err = -EINVAL;
+	struct mem_cgroup *memcg = NULL;
+
+	if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
+		return -EINVAL;
+
+	if (!mem_cgroup_disabled()) {
+		rcu_read_lock();
+		memcg = mem_cgroup_from_id(memcg_id);
+#ifdef CONFIG_MEMCG
+		if (memcg && !css_tryget(&memcg->css))
+			memcg = NULL;
+#endif
+		rcu_read_unlock();
+
+		if (!memcg)
+			return -EINVAL;
+	}
+
+	if (memcg_id != mem_cgroup_id(memcg))
+		goto done;
+
+	lruvec = get_lruvec(memcg, nid);
+
+	if (swappiness < 0)
+		swappiness = get_swappiness(lruvec, sc);
+	else if (swappiness > 200)
+		goto done;
+
+	switch (cmd) {
+	case '+':
+		err = run_aging(lruvec, seq, sc, swappiness, opt);
+		break;
+	case '-':
+		err = run_eviction(lruvec, seq, sc, swappiness, opt);
+		break;
+	}
+done:
+	mem_cgroup_put(memcg);
+
+	return err;
+}
+
+static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
+				 size_t len, loff_t *pos)
+{
+	void *buf;
+	char *cur, *next;
+	unsigned int flags;
+	struct blk_plug plug;
+	int err = -EINVAL;
+	struct scan_control sc = {
+		.may_writepage = true,
+		.may_unmap = true,
+		.may_swap = true,
+		.reclaim_idx = MAX_NR_ZONES - 1,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	buf = kvmalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, src, len)) {
+		kvfree(buf);
+		return -EFAULT;
+	}
+
+	set_task_reclaim_state(current, &sc.reclaim_state);
+	flags = memalloc_noreclaim_save();
+	blk_start_plug(&plug);
+	if (!set_mm_walk(NULL)) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	next = buf;
+	next[len] = '\0';
+
+	while ((cur = strsep(&next, ",;\n"))) {
+		int n;
+		int end;
+		char cmd;
+		unsigned int memcg_id;
+		unsigned int nid;
+		unsigned long seq;
+		unsigned int swappiness = -1;
+		unsigned long opt = -1;
+
+		cur = skip_spaces(cur);
+		if (!*cur)
+			continue;
+
+		n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
+			   &seq, &end, &swappiness, &end, &opt, &end);
+		if (n < 4 || cur[end]) {
+			err = -EINVAL;
+			break;
+		}
+
+		err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
+		if (err)
+			break;
+	}
+done:
+	clear_mm_walk();
+	blk_finish_plug(&plug);
+	memalloc_noreclaim_restore(flags);
+	set_task_reclaim_state(current, NULL);
+
+	kvfree(buf);
+
+	return err ? : len;
+}
+
+static int lru_gen_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &lru_gen_seq_ops);
+}
+
+static const struct file_operations lru_gen_rw_fops = {
+	.open = lru_gen_seq_open,
+	.read = seq_read,
+	.write = lru_gen_seq_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations lru_gen_ro_fops = {
+	.open = lru_gen_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+/******************************************************************************
  *                          initialization
  ******************************************************************************/

@@ -5180,6 +5568,9 @@ static int __init init_lru_gen(void)
 	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
 		pr_err("lru_gen: failed to create sysfs group\n");

+	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
+	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+
 	return 0;
 };
 late_initcall(init_lru_gen);