generic: 6.1: refresh backport patches
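
For reference, the helper that the refreshed patch backports ("mm: add
vma_has_recency()", merged in v6.3) has roughly the following shape; this
is a sketch of the upstream v6.3 version, not the literal patch text:

    /*
     * A VMA "has recency" unless userspace hinted sequential or random
     * access: madvise(MADV_SEQUENTIAL) and madvise(MADV_RANDOM) set
     * VM_SEQ_READ and VM_RAND_READ respectively, and the callers in
     * mm/rmap.c and mm/memory.c seen below then skip recency tracking
     * for such mappings.
     */
    static inline bool vma_has_recency(struct vm_area_struct *vma)
    {
            if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
                    return false;

            return true;
    }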
[openwrt/staging/ldir.git] / target/linux/generic/backport-6.1/020-v6.3-19-mm-add-vma_has_recency.patch
index d740c5bd1d5183f0d933f6b5b94ae406d9fb3560..5335b80488004fa17e1955a252748dee4c5e1239 100644
@@ -58,11 +58,10 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 
 --- a/include/linux/mm_inline.h
 +++ b/include/linux/mm_inline.h
-@@ -333,4 +333,13 @@ static __always_inline void del_page_fro
-       update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-                       -thp_nr_pages(page));
+@@ -578,4 +578,12 @@ pte_install_uffd_wp_if_needed(struct vm_
+ #endif
  }
-+
+ 
 +static inline bool vma_has_recency(struct vm_area_struct *vma)
 +{
 +      if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
@@ -74,15 +73,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  #endif
 --- a/mm/memory.c
 +++ b/mm/memory.c
-@@ -41,6 +41,7 @@
- 
- #include <linux/kernel_stat.h>
- #include <linux/mm.h>
-+#include <linux/mm_inline.h>
- #include <linux/sched/mm.h>
- #include <linux/sched/coredump.h>
- #include <linux/sched/numa_balancing.h>
-@@ -1353,8 +1354,7 @@ again:
+@@ -1435,8 +1435,7 @@ again:
                                        force_flush = 1;
                                        set_page_dirty(page);
                                }
@@ -92,7 +83,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
                                        mark_page_accessed(page);
                        }
                        rss[mm_counter(page)]--;
-@@ -4795,8 +4795,8 @@ static inline void mm_account_fault(stru
+@@ -5170,8 +5169,8 @@ static inline void mm_account_fault(stru
  #ifdef CONFIG_LRU_GEN
  static void lru_gen_enter_fault(struct vm_area_struct *vma)
  {
@@ -105,7 +96,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  static void lru_gen_exit_fault(void)
 --- a/mm/rmap.c
 +++ b/mm/rmap.c
-@@ -794,25 +794,14 @@ static bool page_referenced_one(struct p
+@@ -823,25 +823,14 @@ static bool folio_referenced_one(struct
                }
  
                if (pvmw.pte) {
@@ -121,10 +112,10 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 -                              /*
 -                               * Don't treat a reference through
 -                               * a sequentially read mapping as such.
--                               * If the page has been used in another mapping,
+-                               * If the folio has been used in another mapping,
 -                               * we will catch it; if this other mapping is
 -                               * already gone, the unmap path will have set
--                               * PG_referenced or activated the page.
+-                               * the referenced flag or activated the folio.
 -                               */
 -                              if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 -                                      referenced++;
@@ -134,8 +125,8 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
                } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                        if (pmdp_clear_flush_young_notify(vma, address,
                                                pvmw.pmd))
-@@ -846,7 +835,20 @@ static bool invalid_page_referenced_vma(
-       struct page_referenced_arg *pra = arg;
+@@ -875,7 +864,20 @@ static bool invalid_folio_referenced_vma
+       struct folio_referenced_arg *pra = arg;
        struct mem_cgroup *memcg = pra->memcg;
  
 -      if (!mm_match_cgroup(vma->vm_mm, memcg))
@@ -156,15 +147,15 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
                return true;
  
        return false;
-@@ -876,6 +878,7 @@ int page_referenced(struct page *page,
-               .rmap_one = page_referenced_one,
+@@ -906,6 +908,7 @@ int folio_referenced(struct folio *folio
                .arg = (void *)&pra,
-               .anon_lock = page_lock_anon_vma_read,
-+              .invalid_vma = invalid_page_referenced_vma,
+               .anon_lock = folio_lock_anon_vma_read,
+               .try_lock = true,
++              .invalid_vma = invalid_folio_referenced_vma,
        };
  
        *vm_flags = 0;
-@@ -891,15 +894,6 @@ int page_referenced(struct page *page,
+@@ -921,15 +924,6 @@ int folio_referenced(struct folio *folio
                        return 1;
        }
  
@@ -174,15 +165,15 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 -       * cgroups
 -       */
 -      if (memcg) {
--              rwc.invalid_vma = invalid_page_referenced_vma;
+-              rwc.invalid_vma = invalid_folio_referenced_vma;
 -      }
 -
-       rmap_walk(page, &rwc);
+       rmap_walk(folio, &rwc);
        *vm_flags = pra.vm_flags;
  
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long
+@@ -3766,7 +3766,10 @@ static int should_skip_vma(unsigned long
        if (is_vm_hugetlb_page(vma))
                return true;