mm/huge_memory: implement device-private THP splitting
Add support for splitting device-private THP folios, enabling fallback to
smaller page sizes when large page allocation or migration fails.

Key changes:

- split_huge_pmd(): Handle device-private PMD entries during splitting
- Preserve RMAP_EXCLUSIVE semantics for anonymous exclusive folios
- Skip RMP_USE_SHARED_ZEROPAGE for device-private entries as they don't
  support shared zero page semantics

Link: https://lkml.kernel.org/r/20251001065707.920170-6-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
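The new checks below rely on a predicate that covers every non-present PMD entry
which still refers to a folio. A minimal sketch of what
is_pmd_non_present_folio_entry() plausibly looks like follows; the helper is
introduced elsewhere in this series, so the definition here is an assumption,
not a quote of the actual code:

	/*
	 * Sketch only: assumed shape of the helper used by the new
	 * VM_WARN_ON() and split_huge_pmd_locked() checks.  It matches a
	 * non-present PMD that still maps a folio, i.e. either a migration
	 * entry or a device-private entry.
	 */
	static inline bool is_pmd_non_present_folio_entry(pmd_t pmd)
	{
		return is_pmd_migration_entry(pmd) ||
		       is_pmd_device_private_entry(pmd);
	}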
This commit is contained in:
parent 65edfda6f3, commit 1462872900
2 changed files with 76 additions and 12 deletions
@@ -2842,16 +2842,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *page;
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
-	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
+	bool soft_dirty, uffd_wp = false, young = false, write = false;
 	bool anon_exclusive = false, dirty = false;
 	unsigned long addr;
 	pte_t *pte;
 	int i;
+	swp_entry_t entry;

 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
+	VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd) && !pmd_trans_huge(*pmd));

 	count_vm_event(THP_SPLIT_PMD);

@@ -2899,20 +2901,51 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
 	}

-	pmd_migration = is_pmd_migration_entry(*pmd);
-	if (unlikely(pmd_migration)) {
-		swp_entry_t entry;
-
+	if (is_pmd_migration_entry(*pmd)) {
 		old_pmd = *pmd;
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_swap_entry_to_page(entry);
 		folio = page_folio(page);

 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
 		uffd_wp = pmd_swp_uffd_wp(old_pmd);

 		write = is_writable_migration_entry(entry);
 		if (PageAnon(page))
 			anon_exclusive = is_readable_exclusive_migration_entry(entry);
 		young = is_migration_entry_young(entry);
 		dirty = is_migration_entry_dirty(entry);
+	} else if (is_pmd_device_private_entry(*pmd)) {
+		old_pmd = *pmd;
+		entry = pmd_to_swp_entry(old_pmd);
+		page = pfn_swap_entry_to_page(entry);
+		folio = page_folio(page);
+
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+		uffd_wp = pmd_swp_uffd_wp(old_pmd);
+
+		write = is_writable_device_private_entry(entry);
+		anon_exclusive = PageAnonExclusive(page);
+
+		/*
+		 * Device private THP should be treated the same as regular
+		 * folios w.r.t anon exclusive handling. See the comments for
+		 * folio handling and anon_exclusive below.
+		 */
+		if (freeze && anon_exclusive &&
+		    folio_try_share_anon_rmap_pmd(folio, page))
+			freeze = false;
+		if (!freeze) {
+			rmap_t rmap_flags = RMAP_NONE;
+
+			folio_ref_add(folio, HPAGE_PMD_NR - 1);
+			if (anon_exclusive)
+				rmap_flags |= RMAP_EXCLUSIVE;
+
+			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+						 vma, haddr, rmap_flags);
+		}
 	} else {
 		/*
 		 * Up to this point the pmd is present and huge and userland has

@@ -2996,11 +3029,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 * Note that NUMA hinting access restrictions are not transferred to
 	 * avoid any possibility of altering permissions across VMAs.
 	 */
-	if (freeze || pmd_migration) {
-		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
-			pte_t entry;
-			swp_entry_t swp_entry;
+	if (freeze || is_pmd_migration_entry(old_pmd)) {
+		pte_t entry;
+		swp_entry_t swp_entry;
+
+		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
 			if (write)
 				swp_entry = make_writable_migration_entry(
 							page_to_pfn(page + i));

@@ -3019,7 +3052,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pte_swp_mksoft_dirty(entry);
 			if (uffd_wp)
 				entry = pte_swp_mkuffd_wp(entry);
 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
 			set_pte_at(mm, addr, pte + i, entry);
 		}
+	} else if (is_pmd_device_private_entry(old_pmd)) {
+		pte_t entry;
+		swp_entry_t swp_entry;
+
+		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+			/*
+			 * anon_exclusive was already propagated to the relevant
+			 * pages corresponding to the pte entries when freeze
+			 * is false.
+			 */
+			if (write)
+				swp_entry = make_writable_device_private_entry(
+							page_to_pfn(page + i));
+			else
+				swp_entry = make_readable_device_private_entry(
+							page_to_pfn(page + i));
+			/*
+			 * Young and dirty bits are not propagated via swp_entry
+			 */
+			entry = swp_entry_to_pte(swp_entry);
+			if (soft_dirty)
+				entry = pte_swp_mksoft_dirty(entry);
+			if (uffd_wp)
+				entry = pte_swp_mkuffd_wp(entry);
+			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+			set_pte_at(mm, addr, pte + i, entry);
+		}

@@ -3046,7 +3105,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 	pte_unmap(pte);

-	if (!pmd_migration)
+	if (!is_pmd_migration_entry(*pmd))
 		folio_remove_rmap_pmd(folio, page, vma);
 	if (freeze)
 		put_page(page);

@@ -3059,7 +3118,7 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze)
 {
 	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
-	if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd))
+	if (pmd_trans_huge(*pmd) || is_pmd_non_present_folio_entry(*pmd))
 		__split_huge_pmd_locked(vma, pmd, address, freeze);
 }

@@ -3238,6 +3297,9 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
 	VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
 	lockdep_assert_held(&lruvec->lru_lock);

+	if (folio_is_device_private(folio))
+		return;
+
 	if (list) {
 		/* page reclaim is reclaiming a huge page */
 		VM_WARN_ON(folio_test_lru(folio));

@@ -3842,8 +3904,9 @@ fail:
 	if (nr_shmem_dropped)
 		shmem_uncharge(mapping->host, nr_shmem_dropped);

-	if (!ret && is_anon)
+	if (!ret && is_anon && !folio_is_device_private(folio))
 		remap_flags = RMP_USE_SHARED_ZEROPAGE;

 	remap_page(folio, 1 << order, remap_flags);

 	/*

@@ -307,6 +307,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(pte_present(old_pte), page);
+	VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio);

 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
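For illustration of the end state: after the split, each PTE in the former PMD
range holds an order-0 device-private swap entry. The sketch below shows how
such an entry is decoded with existing helpers (pte_to_swp_entry(),
is_device_private_entry(), pfn_swap_entry_to_page()); the wrapper function
itself is hypothetical and not part of this patch:

	/*
	 * Illustrative only: decode a device-private PTE such as those
	 * produced by the split.  The backing struct page lives in the
	 * driver's device memory (ZONE_DEVICE).
	 */
	static struct page *demo_device_private_pte_page(pte_t pte)
	{
		swp_entry_t entry;

		if (pte_present(pte) || pte_none(pte))
			return NULL;

		entry = pte_to_swp_entry(pte);
		if (!is_device_private_entry(entry))
			return NULL;

		return pfn_swap_entry_to_page(entry);
	}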