mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
mm: introduce unmap_desc struct to reduce function arguments
The unmap_region code uses a number of arguments that could use better documentation. With the addition of a descriptor for unmap (called unmap_desc), the arguments can be more self-documenting and increase the descriptions within the declaration. No functional change intended Link: https://lkml.kernel.org/r/20260121164946.2093480-9-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Pedro Falcato <pfalcato@suse.de> Cc: Baoquan He <bhe@redhat.com> Cc: Barry Song <baohua@kernel.org> Cc: Chris Li <chrisl@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: Jann Horn <jannh@google.com> Cc: Kairui Song <kasong@tencent.com> Cc: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nhat Pham <nphamcs@gmail.com> Cc: SeongJae Park <sj@kernel.org> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
43873af772
commit
5b6626a76a
3 changed files with 51 additions and 23 deletions
14
mm/mmap.c
14
mm/mmap.c
|
|
@@ -1876,11 +1876,17 @@ loop_out:
|
|||
if (end) {
|
||||
vma_iter_set(&vmi, 0);
|
||||
tmp = vma_next(&vmi);
|
||||
UNMAP_STATE(unmap, &vmi, /* first = */ tmp,
|
||||
/* vma_start = */ 0, /* vma_end = */ end,
|
||||
/* prev = */ NULL, /* next = */ NULL);
|
||||
|
||||
/*
|
||||
* Don't iterate over vmas beyond the failure point for
|
||||
* both unmap_vma() and free_pgtables().
|
||||
*/
|
||||
unmap.tree_end = end;
|
||||
flush_cache_mm(mm);
|
||||
unmap_region(&vmi.mas, /* vma = */ tmp,
|
||||
/* vma_start = */ 0, /* vma_end = */ end,
|
||||
/* pg_end = */ end, /* prev = */ NULL,
|
||||
/* next = */ NULL);
|
||||
unmap_region(&unmap);
|
||||
charge = tear_down_vmas(mm, &vmi, tmp, end);
|
||||
vm_unacct_memory(charge);
|
||||
}
|
||||
|
|
|
|||
25
mm/vma.c
25
mm/vma.c
|
|
@@ -472,21 +472,19 @@ void remove_vma(struct vm_area_struct *vma)
|
|||
*
|
||||
* Called with the mm semaphore held.
|
||||
*/
|
||||
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
|
||||
unsigned long vma_start, unsigned long vma_end,
|
||||
unsigned long pg_max, struct vm_area_struct *prev,
|
||||
struct vm_area_struct *next)
|
||||
void unmap_region(struct unmap_desc *unmap)
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct mm_struct *mm = unmap->first->vm_mm;
|
||||
struct ma_state *mas = unmap->mas;
|
||||
struct mmu_gather tlb;
|
||||
|
||||
tlb_gather_mmu(&tlb, mm);
|
||||
update_hiwater_rss(mm);
|
||||
unmap_vmas(&tlb, mas, vma, vma_start, vma_end, vma_end);
|
||||
mas_set(mas, vma->vm_end);
|
||||
free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
|
||||
pg_max, next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
/* mm_wr_locked = */ true);
|
||||
unmap_vmas(&tlb, mas, unmap->first, unmap->vma_start, unmap->vma_end,
|
||||
unmap->vma_end);
|
||||
mas_set(mas, unmap->tree_reset);
|
||||
free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
|
||||
unmap->tree_end, unmap->mm_wr_locked);
|
||||
tlb_finish_mmu(&tlb);
|
||||
}
|
||||
|
||||
|
|
@@ -2463,15 +2461,14 @@ static int __mmap_new_file_vma(struct mmap_state *map,
|
|||
|
||||
error = mmap_file(vma->vm_file, vma);
|
||||
if (error) {
|
||||
UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end,
|
||||
map->prev, map->next);
|
||||
fput(vma->vm_file);
|
||||
vma->vm_file = NULL;
|
||||
|
||||
vma_iter_set(vmi, vma->vm_end);
|
||||
/* Undo any partial mapping done by a device driver. */
|
||||
unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
|
||||
map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
|
||||
map->prev, map->next);
|
||||
|
||||
unmap_region(&unmap);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
|
|
|||
35
mm/vma.h
35
mm/vma.h
|
|
@@ -155,6 +155,35 @@ struct vma_merge_struct {
|
|||
|
||||
};
|
||||
|
||||
/*
 * struct unmap_desc - describes a range of vmas to unmap together with the
 * span of page tables to free once the vmas are gone.
 */
struct unmap_desc {
	struct ma_state *mas;		/* Maple state, positioned at the first vma */
	struct vm_area_struct *first;	/* The first vma to unmap */
	unsigned long pg_start;		/* Lowest page-table address to free (floor) */
	unsigned long pg_end;		/* Highest page-table address to free (ceiling) */
	unsigned long vma_start;	/* Minimum vma address to unmap */
	unsigned long vma_end;		/* Maximum vma address to unmap */
	unsigned long tree_end;		/* Upper limit for the vma tree search */
	unsigned long tree_reset;	/* Address to reset the vma tree walk to */
	bool mm_wr_locked;		/* True if the mmap write lock is held */
};
|
||||
|
||||
/*
 * UNMAP_STATE() - Declare and initialise a struct unmap_desc named @name for
 * unmapping [@_vma_start, @_vma_end) starting at @_vma, freeing the page
 * tables between @_prev and @_next (or the address-space limits when either
 * neighbour is NULL).
 *
 * Every macro argument is parenthesised in the expansion so that callers may
 * pass arbitrary expressions safely (CERT PRE01-C).
 */
#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
	struct unmap_desc name = {					\
		.mas = &(_vmi)->mas,					\
		.first = (_vma),					\
		.pg_start = (_prev) ?					\
			((struct vm_area_struct *)(_prev))->vm_end :	\
			FIRST_USER_ADDRESS,				\
		.pg_end = (_next) ?					\
			((struct vm_area_struct *)(_next))->vm_start :	\
			USER_PGTABLES_CEILING,				\
		.vma_start = (_vma_start),				\
		.vma_end = (_vma_end),					\
		.tree_end = (_next) ?					\
			((struct vm_area_struct *)(_next))->vm_start :	\
			USER_PGTABLES_CEILING,				\
		.tree_reset = (_vma)->vm_end,				\
		.mm_wr_locked = true,					\
	}
|
||||
|
||||
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
|
||||
{
|
||||
return vmg->state == VMA_MERGE_ERROR_NOMEM;
|
||||
|
|
@@ -262,11 +291,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
|
|||
bool unlock);
|
||||
|
||||
void remove_vma(struct vm_area_struct *vma);
|
||||
|
||||
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
|
||||
unsigned long vma_start, unsigned long vma_end,
|
||||
unsigned long pg_max, struct vm_area_struct *prev,
|
||||
struct vm_area_struct *next);
|
||||
void unmap_region(struct unmap_desc *unmap);
|
||||
|
||||
/**
|
||||
* vma_modify_flags() - Perform any necessary split/merge in preparation for
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue