kernel: Update MGLRU patchset
[openwrt/openwrt.git] target/linux/generic/backport-5.15/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch
From 73d1ff551760f0c79c47ab70faa4c2ca91413f5c Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Sun, 18 Sep 2022 02:00:08 -0600
Subject: [PATCH 11/29] mm: multi-gen LRU: thrashing prevention
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add /sys/kernel/mm/lru_gen/min_ttl_ms for thrashing prevention, as
requested by many desktop users [1].

When set to value N, it prevents the working set of N milliseconds from
getting evicted. The OOM killer is triggered if this working set cannot
be kept in memory. Based on the average human detectable lag (~100ms),
N=1000 usually eliminates intolerable lags due to thrashing. Larger
values like N=3000 make lags less noticeable at the risk of premature OOM
kills.
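
For example, with this patch applied, protecting roughly the last
second of the working set comes down to writing the value in
milliseconds to the new sysfs file (as root):

    echo 1000 >/sys/kernel/mm/lru_gen/min_ttl_ms

Writing 0 (the default) disables the protection again.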

Compared with the size-based approach [2], this time-based approach
has the following advantages:

1. It is easier to configure because it is agnostic to applications
   and memory sizes.
2. It is more reliable because it is directly wired to the OOM killer.

[1] https://lore.kernel.org/r/Ydza%2FzXKY9ATRoh6@google.com/
[2] https://lore.kernel.org/r/20101028191523.GA14972@google.com/

Link: https://lkml.kernel.org/r/20220918080010.2920238-12-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/mmzone.h |  2 ++
 mm/vmscan.c            | 74 ++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 73 insertions(+), 3 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index edaf035503ed..6b85ba1f4e18 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -399,6 +399,8 @@ struct lru_gen_struct {
 	unsigned long max_seq;
 	/* the eviction increments the oldest generation numbers */
 	unsigned long min_seq[ANON_AND_FILE];
+	/* the birth time of each generation in jiffies */
+	unsigned long timestamps[MAX_NR_GENS];
 	/* the multi-gen LRU lists, lazily sorted on eviction */
 	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the multi-gen LRU sizes, eventually consistent */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index be37d996bc92..642ee7bef61d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4064,6 +4064,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
 	for (type = 0; type < ANON_AND_FILE; type++)
 		reset_ctrl_pos(lruvec, type, false);
 
+	WRITE_ONCE(lrugen->timestamps[next], jiffies);
 	/* make sure preceding modifications appear */
 	smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
 
@@ -4193,7 +4194,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig
 	return false;
 }
 
-static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
 {
 	bool need_aging;
 	unsigned long nr_to_scan;
@@ -4207,16 +4208,36 @@ static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	mem_cgroup_calculate_protection(NULL, memcg);
 
 	if (mem_cgroup_below_min(memcg))
-		return;
+		return false;
 
 	need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
+
+	if (min_ttl) {
+		int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+		if (time_is_after_jiffies(birth + min_ttl))
+			return false;
+
+		/* the size is likely too small to be helpful */
+		if (!nr_to_scan && sc->priority != DEF_PRIORITY)
+			return false;
+	}
+
 	if (need_aging)
 		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
+
+	return true;
 }
 
+/* to protect the working set of the last N jiffies */
+static unsigned long lru_gen_min_ttl __read_mostly;
+
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
+	bool success = false;
+	unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
 
 	VM_WARN_ON_ONCE(!current_is_kswapd());
 
@@ -4239,12 +4260,32 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
-		age_lruvec(lruvec, sc);
+		if (age_lruvec(lruvec, sc, min_ttl))
+			success = true;
 
 		cond_resched();
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
 
 	clear_mm_walk();
+
+	/* check the order to exclude compaction-induced reclaim */
+	if (success || !min_ttl || sc->order)
+		return;
+
+	/*
+	 * The main goal is to OOM kill if every generation from all memcgs is
+	 * younger than min_ttl. However, another possibility is all memcgs are
+	 * either below min or empty.
+	 */
+	if (mutex_trylock(&oom_lock)) {
+		struct oom_control oc = {
+			.gfp_mask = sc->gfp_mask,
+		};
+
+		out_of_memory(&oc);
+
+		mutex_unlock(&oom_lock);
+	}
 }
 
 /*
@@ -5002,6 +5043,28 @@ static void lru_gen_change_state(bool enabled)
  * sysfs interface
  ******************************************************************************/
 
+static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
+}
+
+static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t len)
+{
+	unsigned int msecs;
+
+	if (kstrtouint(buf, 0, &msecs))
+		return -EINVAL;
+
+	WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
+
+	return len;
+}
+
+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
+	min_ttl_ms, 0644, show_min_ttl, store_min_ttl
+);
+
 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	unsigned int caps = 0;
@@ -5050,6 +5113,7 @@ static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
 );
 
 static struct attribute *lru_gen_attrs[] = {
+	&lru_gen_min_ttl_attr.attr,
 	&lru_gen_enabled_attr.attr,
 	NULL
 };
@@ -5065,12 +5129,16 @@ static struct attribute_group lru_gen_attr_group = {
 
 void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
+	int i;
 	int gen, type, zone;
 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
 
 	lrugen->max_seq = MIN_NR_GENS + 1;
 	lrugen->enabled = lru_gen_enabled();
 
+	for (i = 0; i <= MIN_NR_GENS + 1; i++)
+		lrugen->timestamps[i] = jiffies;
+
 	for_each_gen_type_zone(gen, type, zone)
 		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
 
-- 
2.40.0
