target/linux/generic/backport-6.1/020-v6.3-03-UPSTREAM-mm-multi-gen-LRU-remove-eviction-fairness-s.patch
From 14f9a7a15f3d1af351f30e0438fd747b7ac253b0 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:01 -0700
Subject: [PATCH 03/19] UPSTREAM: mm: multi-gen LRU: remove eviction fairness
 safeguard

Recall that the eviction consumes the oldest generation: first it
bucket-sorts folios whose gen counters were updated by the aging and
reclaims the rest; then it increments lrugen->min_seq.

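In code terms, that flow looks roughly like the sketch below. It is
illustrative only: folio_gen(), oldest_gen_list(), resort_folio() and
reclaim_folio() are hypothetical stand-ins for the real machinery, and
the actual lrugen tracks min_seq per type (anon and file) rather than
a single counter.

static void evict_oldest_gen(struct lruvec *lruvec)
{
	unsigned long oldest = lruvec->lrugen.min_seq;
	struct folio *folio, *next;

	/* bucket-sort: folios aged into a newer gen stay, the rest go */
	list_for_each_entry_safe(folio, next, oldest_gen_list(lruvec), lru) {
		if (folio_gen(folio) > oldest)
			resort_folio(lruvec, folio);
		else
			reclaim_folio(folio);
	}

	/* the oldest generation is now empty */
	WRITE_ONCE(lruvec->lrugen.min_seq, oldest + 1);
}
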
The current eviction fairness safeguard for global reclaim has a
dilemma: when there are multiple eligible memcgs, should it continue
or stop upon meeting the reclaim goal? If it continues, it overshoots
and increases direct reclaim latency; if it stops, it loses fairness
between the memcgs it has already taken memory from and those it has
yet to reclaim from.

With memcg LRU, the eviction, while ensuring eventual fairness, will
stop upon meeting its goal. Therefore the current eviction fairness
safeguard for global reclaim will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the eviction will continue, even if it is overshooting. This becomes
unconditional due to code simplification.

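In code terms, the loop in lru_gen_shrink_lruvec() reduces to the
condensed sketch below (aging and scan bookkeeping elided; the hunks
that follow show the real code). Note that get_nr_to_reclaim() returns
-1 for memcg reclaim, which its unsigned long return type turns into
ULONG_MAX, so the abort check never fires there and the eviction
continues unconditionally; for global reclaim it stops at the goal.

unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

while (true) {
	int delta = evict_folios(lruvec, sc, swappiness);

	if (!delta)
		break;		/* nothing evictable left */

	if (sc->nr_reclaimed >= nr_to_reclaim)
		break;		/* never taken for memcg reclaim */

	cond_resched();
}
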
Link: https://lkml.kernel.org/r/20221222041905.2431096-4-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Bug: 274865848
(cherry picked from commit a579086c99ed70cc4bfc104348dbe3dd8f2787e6)
Change-Id: I08ac1b3c90e29cafd0566785aaa4bcdb5db7d22c
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 81 +++++++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 58 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b02fed912f742..991961180b320 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -448,6 +448,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
 	return sc->target_mem_cgroup;
 }

+static bool global_reclaim(struct scan_control *sc)
+{
+	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
+}
+
 /**
  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
  * @sc: scan_control in question
@@ -498,6 +503,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
 	return false;
 }

+static bool global_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
 static bool writeback_throttling_sane(struct scan_control *sc)
 {
 	return true;
@@ -4993,8 +5003,7 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw
 	return scanned;
 }

-static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
-			bool *need_swapping)
+static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
 {
 	int type;
 	int scanned;
@@ -5083,9 +5092,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 		goto retry;
 	}

-	if (need_swapping && type == LRU_GEN_ANON)
-		*need_swapping = true;
-
 	return scanned;
 }

@@ -5124,67 +5130,26 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *
 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
 }

-static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
-			      struct scan_control *sc, bool need_swapping)
+static unsigned long get_nr_to_reclaim(struct scan_control *sc)
 {
-	int i;
-	DEFINE_MAX_SEQ(lruvec);
-
-	if (!current_is_kswapd()) {
-		/* age each memcg at most once to ensure fairness */
-		if (max_seq - seq > 1)
-			return true;
-
-		/* over-swapping can increase allocation latency */
-		if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
-			return true;
-
-		/* give this thread a chance to exit and free its memory */
-		if (fatal_signal_pending(current)) {
-			sc->nr_reclaimed += MIN_LRU_BATCH;
-			return true;
-		}
-
-		if (cgroup_reclaim(sc))
-			return false;
-	} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
-		return false;
-
-	/* keep scanning at low priorities to ensure fairness */
-	if (sc->priority > DEF_PRIORITY - 2)
-		return false;
-
-	/*
-	 * A minimum amount of work was done under global memory pressure. For
-	 * kswapd, it may be overshooting. For direct reclaim, the allocation
-	 * may succeed if all suitable zones are somewhat safe. In either case,
-	 * it's better to stop now, and restart later if necessary.
-	 */
-	for (i = 0; i <= sc->reclaim_idx; i++) {
-		unsigned long wmark;
-		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
-
-		if (!managed_zone(zone))
-			continue;
-
-		wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
-		if (wmark > zone_page_state(zone, NR_FREE_PAGES))
-			return false;
-	}
+	/* don't abort memcg reclaim to ensure fairness */
+	if (!global_reclaim(sc))
+		return -1;

-	sc->nr_reclaimed += MIN_LRU_BATCH;
+	/* discount the previous progress for kswapd */
+	if (current_is_kswapd())
+		return sc->nr_to_reclaim + sc->last_reclaimed;

-	return true;
+	return max(sc->nr_to_reclaim, compact_gap(sc->order));
 }

 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct blk_plug plug;
 	bool need_aging = false;
-	bool need_swapping = false;
 	unsigned long scanned = 0;
 	unsigned long reclaimed = sc->nr_reclaimed;
-	DEFINE_MAX_SEQ(lruvec);
+	unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

 	lru_add_drain();

@@ -5208,7 +5173,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 		if (!nr_to_scan)
 			goto done;

-		delta = evict_folios(lruvec, sc, swappiness, &need_swapping);
+		delta = evict_folios(lruvec, sc, swappiness);
 		if (!delta)
 			goto done;

@@ -5216,7 +5181,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 		if (scanned >= nr_to_scan)
 			break;

-		if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
+		if (sc->nr_reclaimed >= nr_to_reclaim)
 			break;

 		cond_resched();
@@ -5666,7 +5631,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
 		if (sc->nr_reclaimed >= nr_to_reclaim)
 			return 0;

-		if (!evict_folios(lruvec, sc, swappiness, NULL))
+		if (!evict_folios(lruvec, sc, swappiness))
 			return 0;

 		cond_resched();
--
2.40.1