mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
mm/memory: add tree limit to free_pgtables()
The ceiling and tree search limit need to be different arguments for the future change in the failed fork attempt. The ceiling and floor variables are not very descriptive, so change them to pg_start/pg_end. Adding a new variable for the vma_end to the function as it will differ from the pg_end in the later patches in the series. Add a kernel doc about the free_pgtables() function. Test code also updated. No functional changes intended. Link: https://lkml.kernel.org/r/20260121164946.2093480-6-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Pedro Falcato <pfalcato@suse.de> Cc: Baoquan He <bhe@redhat.com> Cc: Barry Song <baohua@kernel.org> Cc: Chris Li <chrisl@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: Jann Horn <jannh@google.com> Cc: Kairui Song <kasong@tencent.com> Cc: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nhat Pham <nphamcs@gmail.com> Cc: SeongJae Park <sj@kernel.org> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
23bd03a9a2
commit
eda8c5e776
5 changed files with 42 additions and 14 deletions
|
|
@@ -510,8 +510,10 @@ void deactivate_file_folio(struct folio *folio);
|
|||
void folio_activate(struct folio *folio);
|
||||
|
||||
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
|
||||
struct vm_area_struct *start_vma, unsigned long floor,
|
||||
unsigned long ceiling, bool mm_wr_locked);
|
||||
struct vm_area_struct *vma, unsigned long pg_start,
|
||||
unsigned long pg_end, unsigned long vma_end,
|
||||
bool mm_wr_locked);
|
||||
|
||||
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
|
||||
|
||||
struct zap_details;
|
||||
|
|
|
|||
42
mm/memory.c
42
mm/memory.c
|
|
@@ -370,23 +370,47 @@ void free_pgd_range(struct mmu_gather *tlb,
|
|||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* free_pgtables() - Free a range of page tables
|
||||
* @tlb: The mmu gather
|
||||
* @mas: The maple state
|
||||
* @vma: The first vma
|
||||
* @pg_start: The lowest page table address (floor)
|
||||
* @pg_end: The highest page table address (ceiling)
|
||||
* @vma_end: The highest vma tree search address
|
||||
* @mm_wr_locked: boolean indicating if the mm is write locked
|
||||
*
|
||||
* Note: pg_start and pg_end are provided to indicate the absolute range of the
|
||||
* page tables that should be removed. This can differ from the vma mappings on
|
||||
* some archs that may have mappings that need to be removed outside the vmas.
|
||||
* Note that the prev->vm_end and next->vm_start are often used.
|
||||
*
|
||||
* The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
|
||||
* unrelated data to the mm_struct being torn down.
|
||||
*/
|
||||
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
|
||||
struct vm_area_struct *vma, unsigned long floor,
|
||||
unsigned long ceiling, bool mm_wr_locked)
|
||||
struct vm_area_struct *vma, unsigned long pg_start,
|
||||
unsigned long pg_end, unsigned long vma_end,
|
||||
bool mm_wr_locked)
|
||||
{
|
||||
struct unlink_vma_file_batch vb;
|
||||
|
||||
/*
|
||||
* Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
|
||||
* may be 0. Underflow is expected in this case. Otherwise the
|
||||
* pagetable end is exclusive.
|
||||
* vma_end is exclusive.
|
||||
* The last vma address should never be larger than the pagetable end.
|
||||
*/
|
||||
WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
|
||||
|
||||
tlb_free_vmas(tlb);
|
||||
|
||||
do {
|
||||
unsigned long addr = vma->vm_start;
|
||||
struct vm_area_struct *next;
|
||||
|
||||
/*
|
||||
* Note: USER_PGTABLES_CEILING may be passed as ceiling and may
|
||||
* be 0. This will underflow and is okay.
|
||||
*/
|
||||
next = mas_find(mas, ceiling - 1);
|
||||
next = mas_find(mas, vma_end - 1);
|
||||
if (unlikely(xa_is_zero(next)))
|
||||
next = NULL;
|
||||
|
||||
|
|
@@ -406,7 +430,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
|
|||
*/
|
||||
while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
|
||||
vma = next;
|
||||
next = mas_find(mas, ceiling - 1);
|
||||
next = mas_find(mas, vma_end - 1);
|
||||
if (unlikely(xa_is_zero(next)))
|
||||
next = NULL;
|
||||
if (mm_wr_locked)
|
||||
|
|
@@ -417,7 +441,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
|
|||
unlink_file_vma_batch_final(&vb);
|
||||
|
||||
free_pgd_range(tlb, addr, vma->vm_end,
|
||||
floor, next ? next->vm_start : ceiling);
|
||||
pg_start, next ? next->vm_start : pg_end);
|
||||
vma = next;
|
||||
} while (vma);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1308,7 +1308,7 @@ void exit_mmap(struct mm_struct *mm)
|
|||
mt_clear_in_rcu(&mm->mm_mt);
|
||||
vma_iter_set(&vmi, vma->vm_end);
|
||||
free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
|
||||
USER_PGTABLES_CEILING, true);
|
||||
USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
|
||||
tlb_finish_mmu(&tlb);
|
||||
|
||||
/*
|
||||
|
|
|
|||
3
mm/vma.c
3
mm/vma.c
|
|
@@ -484,6 +484,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
|
|||
unmap_vmas(&tlb, mas, vma, vma_start, vma_end, vma_end);
|
||||
mas_set(mas, vma->vm_end);
|
||||
free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
/* mm_wr_locked = */ true);
|
||||
tlb_finish_mmu(&tlb);
|
||||
|
|
@@ -1275,7 +1276,7 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
|
|||
mas_set(mas_detach, 1);
|
||||
/* start and end may be different if there is no prev or next vma. */
|
||||
free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
|
||||
vms->unmap_end, mm_wr_locked);
|
||||
vms->unmap_end, vms->unmap_end, mm_wr_locked);
|
||||
tlb_finish_mmu(&tlb);
|
||||
vms->clear_ptes = false;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1139,7 +1139,8 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
|
|||
|
||||
static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
|
||||
struct vm_area_struct *vma, unsigned long floor,
|
||||
unsigned long ceiling, bool mm_wr_locked)
|
||||
unsigned long ceiling, unsigned long tree_max,
|
||||
bool mm_wr_locked)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue