--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -606,5 +606,13 @@ static __always_inline void del_page_fro
- make_pte_marker(PTE_MARKER_UFFD_WP));
+@@ -578,4 +578,12 @@ pte_install_uffd_wp_if_needed(struct vm_
#endif
}
-+
+
+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+		return false;
+
+	return true;
+}
-
++
#endif
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -1353,8 +1354,7 @@ again:
+@@ -1435,8 +1435,7 @@ again:
force_flush = 1;
set_page_dirty(page);
}
mark_page_accessed(page);
}
rss[mm_counter(page)]--;
-@@ -4795,8 +4795,8 @@ static inline void mm_account_fault(stru
+@@ -5170,8 +5169,8 @@ static inline void mm_account_fault(stru
#ifdef CONFIG_LRU_GEN
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
static void lru_gen_exit_fault(void)
--- a/mm/rmap.c
+++ b/mm/rmap.c
-@@ -794,25 +794,14 @@ static bool page_referenced_one(struct p
+@@ -823,25 +823,14 @@ static bool folio_referenced_one(struct
}
if (pvmw.pte) {
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_flush_young_notify(vma, address,
pvmw.pmd))
-@@ -846,7 +835,20 @@ static bool invalid_page_referenced_vma(
- struct page_referenced_arg *pra = arg;
+@@ -875,7 +864,20 @@ static bool invalid_folio_referenced_vma
+ struct folio_referenced_arg *pra = arg;
struct mem_cgroup *memcg = pra->memcg;
- if (!mm_match_cgroup(vma->vm_mm, memcg))
return true;
return false;
-@@ -876,6 +878,7 @@ int page_referenced(struct page *page,
+@@ -906,6 +908,7 @@ int folio_referenced(struct folio *folio
.arg = (void *)&pra,
.anon_lock = folio_lock_anon_vma_read,
.try_lock = true,
};
*vm_flags = 0;
-@@ -891,15 +894,6 @@ int page_referenced(struct page *page,
+@@ -921,15 +924,6 @@ int folio_referenced(struct folio *folio
return 1;
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long
+@@ -3766,7 +3766,10 @@ static int should_skip_vma(unsigned long
if (is_vm_hugetlb_page(vma))
return true;