mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
binder: use per-vma lock in page installation
Use per-vma locking for concurrent page installations; this minimizes contention with unrelated vmas, improving performance. The mmap_lock is still acquired when needed though, e.g. before get_user_pages_remote(). Many thanks to Barry Song who posted a similar approach [1]. Link: https://lore.kernel.org/all/20240902225009.34576-1-21cnbao@gmail.com/ [1] Cc: Nhat Pham <nphamcs@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Barry Song <v-songbaohua@oppo.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Hillf Danton <hdanton@sina.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Carlos Llamas <cmllamas@google.com> Link: https://lore.kernel.org/r/20241210143114.661252-8-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
0a7bf6866d
commit
9e2aa76549
1 changed file with 50 additions and 17 deletions
|
|
@ -233,6 +233,53 @@ static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
|
|||
return smp_load_acquire(&alloc->mapped);
|
||||
}
|
||||
|
||||
/*
 * binder_page_lookup() - find an already-present page in the remote mm
 * @alloc: binder_alloc owning the remote mm and the mapped buffer space
 * @addr:  user virtual address (within the binder mapping) to look up
 *
 * Takes the remote mm's mmap_lock for reading and, only while the binder
 * mapping is still live (checked via binder_alloc_is_mapped()), pins the
 * page at @addr with get_user_pages_remote().
 *
 * Return: the page at @addr, or NULL if it is not resident or the
 * mapping has been torn down. FOLL_NOFAULT guarantees no fault-in is
 * attempted on the remote mm.
 */
static struct page *binder_page_lookup(struct binder_alloc *alloc,
				       unsigned long addr)
{
	struct mm_struct *mm = alloc->mm;
	struct page *page;
	long npages = 0;

	/*
	 * Find an existing page in the remote mm. If missing,
	 * don't attempt to fault it in; just propagate an error.
	 */
	mmap_read_lock(mm);
	if (binder_alloc_is_mapped(alloc))
		npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
					       &page, NULL);
	mmap_read_unlock(mm);

	/* npages > 0 means exactly one page was pinned into @page */
	return npages > 0 ? page : NULL;
}
|
||||
|
||||
/*
 * binder_page_insert() - insert @page into the remote mm at @addr
 * @alloc: binder_alloc owning the remote mm and the mapped buffer space
 * @addr:  user virtual address at which to install the page
 * @page:  the freshly allocated page to insert
 *
 * Fast path: take only the per-VMA lock (lock_vma_under_rcu()) to avoid
 * mmap_lock contention with unrelated VMAs. If the per-VMA lock cannot
 * be obtained (e.g. the VMA is contended or detached), fall back to the
 * full mmap_lock read path with vma_lookup().
 *
 * On both paths the binder mapping liveness is re-checked under the lock
 * via binder_alloc_is_mapped() before calling vm_insert_page().
 *
 * Return: 0 on success, -ESRCH if the VMA is gone or the binder mapping
 * was torn down, or the error from vm_insert_page() (notably -EBUSY if
 * a page is already installed at @addr).
 */
static int binder_page_insert(struct binder_alloc *alloc,
			      unsigned long addr,
			      struct page *page)
{
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	int ret = -ESRCH;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		if (binder_alloc_is_mapped(alloc))
			ret = vm_insert_page(vma, addr, page);
		vma_end_read(vma);
		return ret;
	}

	/* fall back to mmap_lock */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && binder_alloc_is_mapped(alloc))
		ret = vm_insert_page(vma, addr, page);
	mmap_read_unlock(mm);

	return ret;
}
|
||||
|
||||
static struct page *binder_page_alloc(struct binder_alloc *alloc,
|
||||
unsigned long index)
|
||||
{
|
||||
|
|
@ -268,9 +315,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
|
|||
unsigned long index,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct page *page;
|
||||
long npages;
|
||||
int ret;
|
||||
|
||||
if (!mmget_not_zero(alloc->mm))
|
||||
|
|
@ -282,16 +327,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
|
|||
goto out;
|
||||
}
|
||||
|
||||
mmap_read_lock(alloc->mm);
|
||||
vma = vma_lookup(alloc->mm, addr);
|
||||
if (!vma || !binder_alloc_is_mapped(alloc)) {
|
||||
binder_free_page(page);
|
||||
pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
|
||||
ret = -ESRCH;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
ret = vm_insert_page(vma, addr, page);
|
||||
ret = binder_page_insert(alloc, addr, page);
|
||||
switch (ret) {
|
||||
case -EBUSY:
|
||||
/*
|
||||
|
|
@ -301,9 +337,8 @@ static int binder_install_single_page(struct binder_alloc *alloc,
|
|||
*/
|
||||
ret = 0;
|
||||
binder_free_page(page);
|
||||
npages = get_user_pages_remote(alloc->mm, addr, 1,
|
||||
FOLL_NOFAULT, &page, NULL);
|
||||
if (npages <= 0) {
|
||||
page = binder_page_lookup(alloc, addr);
|
||||
if (!page) {
|
||||
pr_err("%d: failed to find page at offset %lx\n",
|
||||
alloc->pid, addr - alloc->vm_start);
|
||||
ret = -ESRCH;
|
||||
|
|
@ -321,8 +356,6 @@ static int binder_install_single_page(struct binder_alloc *alloc,
|
|||
ret = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
unlock:
|
||||
mmap_read_unlock(alloc->mm);
|
||||
out:
|
||||
mmput_async(alloc->mm);
|
||||
return ret;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue