kernel: Update MGLRU patchset
[openwrt/staging/hauke.git] target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch
From afd37e73db04c7e6b47411120ac5f6a7eca51fec Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:00 -0700
Subject: [PATCH 22/29] mm: multi-gen LRU: rename lrugen->lists[] to
 lrugen->pages[]

lru_gen_page will be chained into per-node lists by the upcoming
lrugen->list member, so the array name lrugen->lists[] would become
ambiguous; rename it to lrugen->pages[]. (An illustrative sketch of the
indexing follows the --- separator below.)

Link: https://lkml.kernel.org/r/20221222041905.2431096-3-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
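For illustration only, not part of the change: a minimal userspace model
of the (gen, type, zone) indexing behind lrugen->pages[]. MAX_NR_GENS (4)
and ANON_AND_FILE (2) match the kernel's values; MAX_NR_ZONES is an
assumption for the sketch (it is config-dependent in the kernel), and
list_head/INIT_LIST_HEAD are simplified stand-ins for <linux/list.h>.

#include <stdio.h>

#define MAX_NR_GENS	4	/* 4 in the kernel: the sliding-window maximum */
#define ANON_AND_FILE	2	/* anon and file are the two page types */
#define MAX_NR_ZONES	3	/* assumption for the sketch; config-dependent */

struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

/* stripped-down stand-in for struct lru_gen_page after this patch */
struct lru_gen_page {
	struct list_head pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
};

int main(void)
{
	struct lru_gen_page lrugen;
	int gen, type, zone;

	/* mirrors the initialization loop in lru_gen_init_lruvec() */
	for (gen = 0; gen < MAX_NR_GENS; gen++)
		for (type = 0; type < ANON_AND_FILE; type++)
			for (zone = 0; zone < MAX_NR_ZONES; zone++)
				INIT_LIST_HEAD(&lrugen.pages[gen][type][zone]);

	printf("initialized %d per-(gen,type,zone) list heads\n",
	       MAX_NR_GENS * ANON_AND_FILE * MAX_NR_ZONES);
	return 0;
}

Every page on one of these lists belongs to exactly one generation, type,
and zone; the patch changes only the array's name, not this layout.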
 include/linux/mm_inline.h |  4 ++--
 include/linux/mmzone.h    |  8 ++++----
 mm/vmscan.c               | 20 ++++++++++----------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 27c4890503c5..4adc9ba59569 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -246,9 +246,9 @@ static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bo
 	lru_gen_update_size(lruvec, page, -1, gen);
 	/* for rotate_reclaimable_page() */
 	if (reclaiming)
-		list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
+		list_add_tail(&page->lru, &lrugen->pages[gen][type][zone]);
 	else
-		list_add(&page->lru, &lrugen->lists[gen][type][zone]);
+		list_add(&page->lru, &lrugen->pages[gen][type][zone]);
 
 	return true;
 }
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5856b026c089..7b8a26aaf381 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -302,7 +302,7 @@ enum lruvec_flags {
  * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
  * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
  * corresponding generation. The gen counter in page->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->pages[]. Otherwise it stores 0.
  *
  * A page is added to the youngest generation on faulting. The aging needs to
  * check the accessed bit at least twice before handing this page over to the
@@ -314,8 +314,8 @@ enum lruvec_flags {
  * rest of generations, if they exist, are considered inactive. See
  * lru_gen_is_active().
  *
- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
- * the aging needs not to worry about it. And it's set again when a page
+ * PG_active is always cleared while a page is on one of lrugen->pages[] so
+ * that the aging needs not to worry about it. And it's set again when a page
  * considered active is isolated for non-reclaiming purposes, e.g., migration.
  * See lru_gen_add_page() and lru_gen_del_page().
  *
@@ -402,7 +402,7 @@ struct lru_gen_page {
 	/* the birth time of each generation in jiffies */
 	unsigned long timestamps[MAX_NR_GENS];
 	/* the multi-gen LRU lists, lazily sorted on eviction */
-	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+	struct list_head pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the multi-gen LRU sizes, eventually consistent */
 	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the exponential moving average of refaulted */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b1b5bd9736a..2322c913aa64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
 
 	/* prevent cold/hot inversion if force_scan is true */
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-		struct list_head *head = &lrugen->lists[old_gen][type][zone];
+		struct list_head *head = &lrugen->pages[old_gen][type][zone];
 
 		while (!list_empty(head)) {
 			struct page *page = lru_to_page(head);
@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
 
 			new_gen = page_inc_gen(lruvec, page, false);
-			list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
+			list_move_tail(&page->lru, &lrugen->pages[new_gen][type][zone]);
 
 			if (!--remaining)
 				return false;
@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
 		gen = lru_gen_from_seq(min_seq[type]);
 
 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			if (!list_empty(&lrugen->lists[gen][type][zone]))
+			if (!list_empty(&lrugen->pages[gen][type][zone]))
 				goto next;
 		}
 
@@ -4491,7 +4491,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
 
 	/* promoted */
 	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
-		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
+		list_move(&page->lru, &lrugen->pages[gen][type][zone]);
 		return true;
 	}
 
@@ -4500,7 +4500,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
 
 		gen = page_inc_gen(lruvec, page, false);
-		list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
+		list_move_tail(&page->lru, &lrugen->pages[gen][type][zone]);
 
 		WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
 			   lrugen->protected[hist][type][tier - 1] + delta);
@@ -4512,7 +4512,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
 	if (PageLocked(page) || PageWriteback(page) ||
 	    (type == LRU_GEN_FILE && PageDirty(page))) {
 		gen = page_inc_gen(lruvec, page, true);
-		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
+		list_move(&page->lru, &lrugen->pages[gen][type][zone]);
 		return true;
 	}
 
@@ -4579,7 +4579,7 @@ static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
 	for (zone = sc->reclaim_idx; zone >= 0; zone--) {
 		LIST_HEAD(moved);
 		int skipped = 0;
-		struct list_head *head = &lrugen->lists[gen][type][zone];
+		struct list_head *head = &lrugen->pages[gen][type][zone];
 
 		while (!list_empty(head)) {
 			struct page *page = lru_to_page(head);
@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
 		int gen, type, zone;
 
 		for_each_gen_type_zone(gen, type, zone) {
-			if (!list_empty(&lrugen->lists[gen][type][zone]))
+			if (!list_empty(&lrugen->pages[gen][type][zone]))
 				return false;
 		}
 	}
@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruvec *lruvec)
 	int remaining = MAX_LRU_BATCH;
 
 	for_each_gen_type_zone(gen, type, zone) {
-		struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
+		struct list_head *head = &lruvec->lrugen.pages[gen][type][zone];
 
 		while (!list_empty(head)) {
 			bool success;
@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
 		lrugen->timestamps[i] = jiffies;
 
 	for_each_gen_type_zone(gen, type, zone)
-		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
+		INIT_LIST_HEAD(&lrugen->pages[gen][type][zone]);
 
 	lruvec->mm_state.seq = MIN_NR_GENS;
 	init_waitqueue_head(&lruvec->mm_state.wait);
-- 
2.40.0
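For readers tracing the hunks above: a self-contained sketch of the
circular-list moves they rely on, mirroring how inc_min_seq() and
sort_page() shift a page onto another generation's list with
list_move_tail(). The list implementation below is a simplified stand-in
for the kernel's <linux/list.h>, written for illustration only.

#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

/* a list is empty when its head node points back at itself */
static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* unlink from the current list, then append to the tail of another */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_add_tail(entry, head);
}

int main(void)
{
	struct list_head old_gen, new_gen, page;

	INIT_LIST_HEAD(&old_gen);
	INIT_LIST_HEAD(&new_gen);
	list_add_tail(&page, &old_gen);	/* page sits on the old generation */

	/* as in inc_min_seq(): move the page to the new generation's list */
	list_move_tail(&page, &new_gen);

	printf("old_gen empty: %d, new_gen empty: %d\n",
	       list_empty(&old_gen), list_empty(&new_gen));
	return 0;
}

The emptiness test here (head->next == head) is the same check the
list_empty() calls in the hunks above perform on each per-generation head.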