kernel: 6.1: Synchronize MGLRU patches with upstream
[openwrt/staging/hauke.git] target/linux/generic/backport-6.1/020-v6.3-05-UPSTREAM-mm-multi-gen-LRU-shuffle-should_run_aging.patch
From eca3858631e0cbad2ca6e40f788892749428e4cb Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:03 -0700
Subject: [PATCH 05/19] UPSTREAM: mm: multi-gen LRU: shuffle should_run_aging()

Move should_run_aging() next to its only caller left.

Link: https://lkml.kernel.org/r/20221222041905.2431096-6-yuzhao@google.com
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Bug: 274865848
(cherry picked from commit 77d4459a4a1a472b7309e475f962dda87d950abd)
Signed-off-by: T.J. Mercier <tjmercier@google.com>
Change-Id: I3b0383fe16b93a783b4d8c0b3a0b325160392576
Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
 1 file changed, 62 insertions(+), 62 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a2e83e673232..0c47952714b26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4454,68 +4454,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	return true;
 }
 
-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
-			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
-	int gen, type, zone;
-	unsigned long old = 0;
-	unsigned long young = 0;
-	unsigned long total = 0;
-	struct lru_gen_folio *lrugen = &lruvec->lrugen;
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-	DEFINE_MIN_SEQ(lruvec);
-
-	/* whether this lruvec is completely out of cold folios */
-	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
-		*nr_to_scan = 0;
-		return true;
-	}
-
-	for (type = !can_swap; type < ANON_AND_FILE; type++) {
-		unsigned long seq;
-
-		for (seq = min_seq[type]; seq <= max_seq; seq++) {
-			unsigned long size = 0;
-
-			gen = lru_gen_from_seq(seq);
-
-			for (zone = 0; zone < MAX_NR_ZONES; zone++)
-				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
-			total += size;
-			if (seq == max_seq)
-				young += size;
-			else if (seq + MIN_NR_GENS == max_seq)
-				old += size;
-		}
-	}
-
-	/* try to scrape all its memory if this memcg was deleted */
-	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
-	/*
-	 * The aging tries to be lazy to reduce the overhead, while the eviction
-	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
-	 * ideal number of generations is MIN_NR_GENS+1.
-	 */
-	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
-		return false;
-
-	/*
-	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
-	 * of the total number of pages for each generation. A reasonable range
-	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
-	 * aging cares about the upper bound of hot pages, while the eviction
-	 * cares about the lower bound of cold pages.
-	 */
-	if (young * MIN_NR_GENS > total)
-		return true;
-	if (old * (MIN_NR_GENS + 2) < total)
-		return true;
-
-	return false;
-}
-
 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
 {
 	int gen, type, zone;
@@ -5099,6 +5037,68 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	return scanned;
 }
 
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+	int gen, type, zone;
+	unsigned long old = 0;
+	unsigned long young = 0;
+	unsigned long total = 0;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	/* whether this lruvec is completely out of cold folios */
+	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+		*nr_to_scan = 0;
+		return true;
+	}
+
+	for (type = !can_swap; type < ANON_AND_FILE; type++) {
+		unsigned long seq;
+
+		for (seq = min_seq[type]; seq <= max_seq; seq++) {
+			unsigned long size = 0;
+
+			gen = lru_gen_from_seq(seq);
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			total += size;
+			if (seq == max_seq)
+				young += size;
+			else if (seq + MIN_NR_GENS == max_seq)
+				old += size;
+		}
+	}
+
+	/* try to scrape all its memory if this memcg was deleted */
+	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+	/*
+	 * The aging tries to be lazy to reduce the overhead, while the eviction
+	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+	 * ideal number of generations is MIN_NR_GENS+1.
+	 */
+	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+		return false;
+
+	/*
+	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+	 * of the total number of pages for each generation. A reasonable range
+	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+	 * aging cares about the upper bound of hot pages, while the eviction
+	 * cares about the lower bound of cold pages.
+	 */
+	if (young * MIN_NR_GENS > total)
+		return true;
+	if (old * (MIN_NR_GENS + 2) < total)
+		return true;
+
+	return false;
+}
+
 /*
  * For future optimizations:
  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
-- 
2.40.1
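As a reading aid, the heuristic at the heart of the moved function can be replayed in isolation. With the kernel's MIN_NR_GENS = 2, the two final checks ask whether young pages make up more than 1/MIN_NR_GENS = 1/2 of the total (young * MIN_NR_GENS > total), or old pages less than 1/(MIN_NR_GENS + 2) = 1/4 of the total (old * (MIN_NR_GENS + 2) < total), against the ideal spread of 1/(MIN_NR_GENS + 1) = 1/3 per generation described in the comment. The standalone C sketch below is illustrative only, not kernel code: the names lru_counts and want_aging are hypothetical, and the real function's per-type, per-zone accounting and min_seq checks are collapsed into plain inputs.

/*
 * Minimal userspace sketch of the decision at the end of
 * should_run_aging(); not kernel code. "lru_counts" and "want_aging"
 * are hypothetical names. In the kernel, young/old/total are summed
 * from lrugen->nr_pages[gen][type][zone] across generations.
 */
#include <stdbool.h>
#include <stdio.h>

#define MIN_NR_GENS 2UL	/* matches the kernel's MIN_NR_GENS */

struct lru_counts {
	unsigned long young;	/* pages in the newest generation */
	unsigned long old;	/* pages in the oldest generation eviction keeps */
	unsigned long total;	/* pages across all generations */
};

static bool want_aging(const struct lru_counts *c)
{
	/* too many hot pages: more than 1/MIN_NR_GENS of the total is young */
	if (c->young * MIN_NR_GENS > c->total)
		return true;
	/* too few cold pages: less than 1/(MIN_NR_GENS + 2) of the total is old */
	if (c->old * (MIN_NR_GENS + 2) < c->total)
		return true;
	return false;
}

int main(void)
{
	/* with 1000 pages total, aging runs if young > 500 or old < 250 */
	struct lru_counts even = { .young = 300, .old = 300, .total = 1000 };
	struct lru_counts hot  = { .young = 600, .old = 300, .total = 1000 };
	struct lru_counts cold = { .young = 300, .old = 200, .total = 1000 };

	printf("even: %d hot: %d cold: %d\n",
	       want_aging(&even), want_aging(&hot), want_aging(&cold));
	return 0;
}

Compiled and run, this prints "even: 0 hot: 1 cold: 1": the evenly spread lruvec is left alone, while the hot-heavy and cold-starved ones request aging, matching the roles the patch's comment assigns to aging (upper bound of hot pages) and eviction (lower bound of cold pages).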