diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7ad9d2ba50d4..ffdf2ccf8269 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2396,8 +2396,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #endif
 
 	if (prot_numa) {
-		struct folio *folio;
-		bool toptier;
+
 		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
@@ -2409,19 +2408,9 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		folio = pmd_folio(*pmd);
-		toptier = node_is_toptier(folio_nid(folio));
-		/*
-		 * Skip scanning top tier node if normal numa
-		 * balancing is disabled
-		 */
-		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
-		    toptier)
+		if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
+					     vma_is_single_threaded_private(vma)))
 			goto unlock;
-
-		if (folio_use_access_time(folio))
-			folio_xchg_access_time(folio,
-					       jiffies_to_msecs(jiffies));
 	}
 	/*
	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
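
For reference: folio_can_map_prot_numa() is not defined in this hunk, so the sketch below is only a plausible reconstruction of what the helper consolidates, derived from the open-coded logic removed above. The real definition, and in particular how it consumes the single_threaded argument (this caller passes vma_is_single_threaded_private(vma)), may differ.

/*
 * Hypothetical reconstruction of folio_can_map_prot_numa(), assembled
 * from the logic this patch removes from change_huge_pmd(). Not the
 * helper's actual definition.
 */
static bool folio_can_map_prot_numa(struct folio *folio,
				    struct vm_area_struct *vma,
				    bool single_threaded)
{
	bool toptier = node_is_toptier(folio_nid(folio));

	/*
	 * Skip scanning top tier nodes when normal NUMA balancing is
	 * disabled, i.e. only the memory-tiering mode is active.
	 */
	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
		return false;

	/* Record the access time for memory-tiering hot-page detection. */
	if (folio_use_access_time(folio))
		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));

	/*
	 * How single_threaded feeds into the decision is not visible in
	 * this hunk, so it is deliberately left unused in this sketch.
	 */
	return true;
}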