binder: Store lru freelist in binder_alloc
Store a pointer to the free pages list that the binder allocator should use for a process inside of struct binder_alloc. This change allows binder allocator code to be tested and debugged deterministically while a system is using binder; i.e., without interfering with other binder processes and independently of the shrinker. This is necessary to convert the current binder_alloc_selftest into a kunit test that does not rely on hijacking an existing binder_proc to run.

A binder process's binder_alloc->freelist should not be changed after it is initialized. A sole exception is the process that runs the existing binder_alloc selftest. Its freelist can be temporarily replaced for the duration of the test because it runs as a single thread before any pages can be added to the global binder freelist, and the test frees every page it allocates before dropping the binder_selftest_lock.

This exception allows the existing selftest to be used to check for regressions, but it will be dropped when the binder_alloc tests are converted to kunit in a subsequent patch in this series.

Signed-off-by: Tiffany Yang <ynaffit@google.com>
Acked-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20250714185321.2417234-3-ynaffit@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 4328a52642 (parent bea3e7bfa2)
3 changed files with 67 additions and 20 deletions
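To make the mechanism concrete before reading the diff, here is a minimal userspace sketch (not kernel code) of the pattern this patch introduces: each allocator instance carries a pointer to the freelist it should use, which normally points at the shared global list, and a test can temporarily point one instance at a private list so it runs in isolation. All identifiers below (sample_alloc, sample_freelist, and so on) are invented for illustration and do not exist in the kernel.

/*
 * Illustrative sketch only: a per-instance freelist pointer that normally
 * refers to a shared global list, temporarily swapped out by a selftest.
 */
#include <stdio.h>
#include <stddef.h>

struct sample_freelist {
	size_t nr_free;			/* stand-in for a struct list_lru */
};

struct sample_alloc {
	struct sample_freelist *freelist;	/* invariant after init, except in the selftest */
};

static struct sample_freelist global_freelist;

static void sample_alloc_init(struct sample_alloc *alloc,
			      struct sample_freelist *freelist)
{
	alloc->freelist = freelist;	/* mirrors __binder_alloc_init() taking the list as a parameter */
}

static void sample_free_page(struct sample_alloc *alloc)
{
	alloc->freelist->nr_free++;	/* mirrors list_lru_add(alloc->freelist, ...) */
}

/* The selftest swaps in a private freelist, runs, then restores the original. */
static void sample_selftest(struct sample_alloc *alloc)
{
	struct sample_freelist test_freelist = { 0 };
	struct sample_freelist *prev = alloc->freelist;

	alloc->freelist = &test_freelist;
	sample_free_page(alloc);
	printf("pages on test freelist: %zu, on global freelist: %zu\n",
	       test_freelist.nr_free, global_freelist.nr_free);
	alloc->freelist = prev;
}

int main(void)
{
	struct sample_alloc alloc;

	sample_alloc_init(&alloc, &global_freelist);
	sample_selftest(&alloc);	/* the global list is left untouched */
	return 0;
}

In the real patch, struct binder_alloc gains the same kind of pointer (alloc->freelist), __binder_alloc_init() takes the list as a parameter, and binder_alloc_selftest.c swaps in binder_selftest_freelist under binder_selftest_lock, exactly as the hunks below show.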
drivers/android/binder_alloc.c

@@ -26,7 +26,7 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"

-struct list_lru binder_freelist;
+static struct list_lru binder_freelist;

 static DEFINE_MUTEX(binder_alloc_mmap_lock);

@@ -206,7 +206,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
                trace_binder_free_lru_start(alloc, index);

-               ret = list_lru_add(&binder_freelist,
+               ret = list_lru_add(alloc->freelist,
                        page_to_lru(page),
                        page_to_nid(page),
                        NULL);

@@ -405,7 +405,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
        if (page) {
                trace_binder_alloc_lru_start(alloc, index);

-               on_lru = list_lru_del(&binder_freelist,
+               on_lru = list_lru_del(alloc->freelist,
                        page_to_lru(page),
                        page_to_nid(page),
                        NULL);

@@ -1003,7 +1003,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                if (!page)
                        continue;

-               on_lru = list_lru_del(&binder_freelist,
+               on_lru = list_lru_del(alloc->freelist,
                        page_to_lru(page),
                        page_to_nid(page),
                        NULL);

@@ -1223,6 +1223,17 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 static struct shrinker *binder_shrinker;

+static void __binder_alloc_init(struct binder_alloc *alloc,
+                               struct list_lru *freelist)
+{
+       alloc->pid = current->group_leader->pid;
+       alloc->mm = current->mm;
+       mmgrab(alloc->mm);
+       mutex_init(&alloc->mutex);
+       INIT_LIST_HEAD(&alloc->buffers);
+       alloc->freelist = freelist;
+}
+
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
@@ -1232,11 +1243,7 @@ static struct shrinker *binder_shrinker;
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-       alloc->pid = current->group_leader->pid;
-       alloc->mm = current->mm;
-       mmgrab(alloc->mm);
-       mutex_init(&alloc->mutex);
-       INIT_LIST_HEAD(&alloc->buffers);
+       __binder_alloc_init(alloc, &binder_freelist);
 }

 int binder_alloc_shrinker_init(void)
drivers/android/binder_alloc.h

@@ -15,7 +15,6 @@
 #include <linux/list_lru.h>
 #include <uapi/linux/android/binder.h>

-extern struct list_lru binder_freelist;
 struct binder_transaction;

 /**
@@ -91,6 +90,7 @@ static inline struct list_head *page_to_lru(struct page *p)
  * @free_async_space: VA space available for async buffers. This is
  *                    initialized at mmap time to 1/2 the full VA space
  * @pages:            array of struct page *
+ * @freelist:         lru list to use for free pages (invariant after init)
  * @buffer_size:      size of address space specified via mmap
  * @pid:              pid for associated binder_proc (invariant after init)
  * @pages_high:       high watermark of offset in @pages
@@ -113,6 +113,7 @@ struct binder_alloc {
        struct rb_root allocated_buffers;
        size_t free_async_space;
        struct page **pages;
+       struct list_lru *freelist;
        size_t buffer_size;
        int pid;
        size_t pages_high;
drivers/android/binder_alloc_selftest.c

@@ -8,8 +8,9 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

-#include <linux/mm_types.h>
 #include <linux/err.h>
+#include <linux/list_lru.h>
+#include <linux/mm_types.h>
 #include "binder_alloc.h"

 #define BUFFER_NUM 5
@@ -18,6 +19,7 @@
 static bool binder_selftest_run = true;
 static int binder_selftest_failures;
 static DEFINE_MUTEX(binder_selftest_lock);
+static struct list_lru binder_selftest_freelist;

 /**
  * enum buf_end_align_type - Page alignment of a buffer
@@ -142,11 +144,6 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
        for (i = 0; i < BUFFER_NUM; i++)
                binder_alloc_free_buf(alloc, buffers[seq[i]]);

-       /**
-        * Error message on a free page can be false positive
-        * if binder shrinker ran during binder_alloc_free_buf
-        * calls above.
-        */
        for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
                if (list_empty(page_to_lru(alloc->pages[i]))) {
                        pr_err_size_seq(sizes, seq);
@@ -162,8 +159,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
        int i;
        unsigned long count;

-       while ((count = list_lru_count(&binder_freelist))) {
-               list_lru_walk(&binder_freelist, binder_alloc_free_page,
+       while ((count = list_lru_count(&binder_selftest_freelist))) {
+               list_lru_walk(&binder_selftest_freelist, binder_alloc_free_page,
                        NULL, count);
        }

@@ -187,7 +184,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
        /* Allocate from lru. */
        binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-       if (list_lru_count(&binder_freelist))
+       if (list_lru_count(&binder_selftest_freelist))
                pr_err("lru list should be empty but is not\n");

        binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
@@ -275,6 +272,20 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
        }
 }

+int binder_selftest_alloc_get_page_count(struct binder_alloc *alloc)
+{
+       struct page *page;
+       int allocated = 0;
+       int i;
+
+       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+               page = alloc->pages[i];
+               if (page)
+                       allocated++;
+       }
+       return allocated;
+}
+
 /**
  * binder_selftest_alloc() - Test alloc and free of buffer pages.
  * @alloc: Pointer to alloc struct.
@@ -286,6 +297,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
  */
 void binder_selftest_alloc(struct binder_alloc *alloc)
 {
+       struct list_lru *prev_freelist;
        size_t end_offset[BUFFER_NUM];

        if (!binder_selftest_run)
@@ -293,14 +305,41 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
        mutex_lock(&binder_selftest_lock);
        if (!binder_selftest_run || !alloc->mapped)
                goto done;
+
+       prev_freelist = alloc->freelist;
+
+       /*
+        * It is not safe to modify this process's alloc->freelist if it has any
+        * pages on a freelist. Since the test runs before any binder ioctls can
+        * be dealt with, none of its pages should be allocated yet.
+        */
+       if (binder_selftest_alloc_get_page_count(alloc)) {
+               pr_err("process has existing alloc state\n");
+               goto cleanup;
+       }
+
+       if (list_lru_init(&binder_selftest_freelist)) {
+               pr_err("failed to init test freelist\n");
+               goto cleanup;
+       }
+
+       alloc->freelist = &binder_selftest_freelist;
+
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);
-       binder_selftest_run = false;
        if (binder_selftest_failures > 0)
                pr_info("%d tests FAILED\n", binder_selftest_failures);
        else
                pr_info("PASSED\n");

+       if (list_lru_count(&binder_selftest_freelist))
+               pr_err("expect test freelist to be empty\n");
+
+cleanup:
+       /* Even if we didn't run the test, it's no longer thread-safe. */
+       binder_selftest_run = false;
+       alloc->freelist = prev_freelist;
+       list_lru_destroy(&binder_selftest_freelist);
 done:
        mutex_unlock(&binder_selftest_lock);
 }