From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:18:59 -0700
Subject: [PATCH 21/29] mm: multi-gen LRU: rename lru_gen_struct to
- lru_gen_page
+ lru_gen_folio
Patch series "mm: multi-gen LRU: memcg LRU", v3.
========
An memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
-since each node and memcg combination has an LRU of pages (see
+since each node and memcg combination has an LRU of folios (see
mem_cgroup_lruvec()).
Its goal is to improve the scalability of global reclaim, which is
achieved by making the memcg traversal amortized constant
complexity in contrast to the current linear complexity.
The basic structure of an memcg LRU can be understood by an analogy to
-the active/inactive LRU (of pages):
+the active/inactive LRU (of folios):
1. It has the young and the old (generations), i.e., the counterparts
to the active and the inactive;
2. The increment of max_seq triggers promotion, i.e., the counterpart
This patch (of 8):
-The new name lru_gen_page will be more distinct from the coming
+The new name lru_gen_folio will be more distinct from the coming
lru_gen_memcg.
Link: https://lkml.kernel.org/r/20221222041905.2431096-1-yuzhao@google.com
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -168,7 +168,7 @@ static inline void lru_gen_update_size(s
- int zone = page_zonenum(page);
- int delta = thp_nr_pages(page);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
enum lru_list lru = type * LRU_INACTIVE_FILE;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
-@@ -214,7 +214,7 @@ static inline bool lru_gen_add_page(stru
- int gen = page_lru_gen(page);
- int type = page_is_file_lru(page);
- int zone = page_zonenum(page);
+@@ -214,7 +214,7 @@ static inline bool lru_gen_add_folio(stru
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
- VM_WARN_ON_ONCE_PAGE(gen != -1, page);
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -394,7 +394,7 @@ enum {
- * The number of pages in each generation is eventually consistent and therefore
+ * The number of folios in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
*/
-struct lru_gen_struct {
-+struct lru_gen_page {
++struct lru_gen_folio {
/* the aging increments the youngest generation number */
unsigned long max_seq;
/* the eviction increments the oldest generation numbers */
/* the lruvec under reclaim */
struct lruvec *lruvec;
- /* unstable max_seq from lru_gen_struct */
-+ /* unstable max_seq from lru_gen_page */
++ /* unstable max_seq from lru_gen_folio */
unsigned long max_seq;
/* the next address within an mm to scan */
unsigned long next_addr;
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
- struct lru_gen_struct lrugen;
-+ struct lru_gen_page lrugen;
++ struct lru_gen_folio lrugen;
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
- /* see the comment on lru_gen_struct */
-+ /* see the comment on lru_gen_page */
++ /* see the comment on lru_gen_folio */
return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
struct ctrl_pos *pos)
{
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
pos->refaulted = lrugen->avg_refaulted[type][tier] +
{
int hist, tier;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
-@@ -3408,7 +3408,7 @@ static int page_update_gen(struct page *
- static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
+@@ -3408,7 +3408,7 @@ static int folio_update_gen(struct folio *
+ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
- int type = page_is_file_lru(page);
+ int type = folio_is_file_lru(folio);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
- unsigned long new_flags, old_flags = READ_ONCE(page->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru
static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
int gen, type, zone;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
walk->batched = 0;
int zone;
int remaining = MAX_LRU_BATCH;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
if (type == LRU_GEN_ANON && !can_swap)
int gen, type, zone;
bool success = false;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
}
- /* see the comment on lru_gen_struct */
-+ /* see the comment on lru_gen_page */
++ /* see the comment on lru_gen_folio */
if (can_swap) {
min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
int prev, next;
int type, zone;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
spin_lock_irq(&lruvec->lru_lock);
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
unsigned long young = 0;
unsigned long total = 0;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
-@@ -4466,7 +4466,7 @@ static bool sort_page(struct lruvec *lru
- int delta = thp_nr_pages(page);
- int refs = page_lru_refs(page);
+@@ -4466,7 +4466,7 @@ static bool sort_folio(struct lruvec *lru
+ int delta = folio_nr_pages(folio);
+ int refs = folio_lru_refs(folio);
int tier = lru_tier_from_refs(refs);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
- VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
+ VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
-@@ -4566,7 +4566,7 @@ static int scan_pages(struct lruvec *lru
+@@ -4566,7 +4566,7 @@ static int scan_folios(struct lruvec *lru
int scanned = 0;
int isolated = 0;
int remaining = MAX_LRU_BATCH;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
VM_WARN_ON_ONCE(!list_empty(list));
static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
{
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
if (lrugen->enabled) {
enum lru_list lru;
int type, tier;
int hist = lru_hist_from_seq(seq);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
bool full = !debugfs_real_fops(m->file)->write;
struct lruvec *lruvec = v;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
int nid = lruvec_pgdat(lruvec)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
int i;
int gen, type, zone;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
-+ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct lru_gen_folio *lrugen = &lruvec->lrugen;
lrugen->max_seq = MIN_NR_GENS + 1;
lrugen->enabled = lru_gen_enabled();
unsigned long min_seq;
struct lruvec *lruvec;
- struct lru_gen_struct *lrugen;
-+ struct lru_gen_page *lrugen;
- int type = page_is_file_lru(page);
- int delta = thp_nr_pages(page);
- int refs = page_lru_refs(page);
-@@ -252,7 +252,7 @@ static void lru_gen_refault(struct page
++ struct lru_gen_folio *lrugen;
+ int type = folio_is_file_lru(folio);
+ int delta = folio_nr_pages(folio);
+ int refs = folio_lru_refs(folio);
+@@ -252,7 +252,7 @@ static void lru_gen_refault(struct folio
unsigned long token;
unsigned long min_seq;
struct lruvec *lruvec;
- struct lru_gen_struct *lrugen;
-+ struct lru_gen_page *lrugen;
++ struct lru_gen_folio *lrugen;
struct mem_cgroup *memcg;
struct pglist_data *pgdat;
- int type = page_is_file_lru(page);
+ int type = folio_is_file_lru(folio);