mm: page_owner: use new iteration API

The page_ext_next() function assumes that page extension objects for a
page order allocation always reside in the same memory section, which may
not be true and could lead to crashes.  Use the new page_ext iteration API
instead.

Link: https://lkml.kernel.org/r/93c80b040960fa2ebab4a9729073f77a30649862.1741301089.git.luizcap@redhat.com
Fixes: cf54f310d0 ("mm/hugetlb: use __GFP_COMP for gigantic folios")
Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit was authored by Luiz Capitulino on 2025-03-06 17:44:52 -05:00 and committed by Andrew Morton.
parent 4e30b94cda
parent commit: 4e30b94cda
commit: 3a812bed3d

View file

@@ -229,17 +229,19 @@ static void dec_stack_record_count(depot_stack_handle_t handle,
handle);
}
static inline void __update_page_owner_handle(struct page_ext *page_ext,
static inline void __update_page_owner_handle(struct page *page,
depot_stack_handle_t handle,
unsigned short order,
gfp_t gfp_mask,
short last_migrate_reason, u64 ts_nsec,
pid_t pid, pid_t tgid, char *comm)
{
int i;
struct page_ext_iter iter;
struct page_ext *page_ext;
struct page_owner *page_owner;
for (i = 0; i < (1 << order); i++) {
rcu_read_lock();
for_each_page_ext(page, 1 << order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
page_owner->handle = handle;
page_owner->order = order;
@@ -252,20 +254,22 @@ static inline void __update_page_owner_handle(struct page_ext *page_ext,
sizeof(page_owner->comm));
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
page_ext = page_ext_next(page_ext);
}
rcu_read_unlock();
}
static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
static inline void __update_page_owner_free_handle(struct page *page,
depot_stack_handle_t handle,
unsigned short order,
pid_t pid, pid_t tgid,
u64 free_ts_nsec)
{
int i;
struct page_ext_iter iter;
struct page_ext *page_ext;
struct page_owner *page_owner;
for (i = 0; i < (1 << order); i++) {
rcu_read_lock();
for_each_page_ext(page, 1 << order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
/* Only __reset_page_owner() wants to clear the bit */
if (handle) {
@@ -275,8 +279,8 @@ static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
page_owner->free_ts_nsec = free_ts_nsec;
page_owner->free_pid = current->pid;
page_owner->free_tgid = current->tgid;
page_ext = page_ext_next(page_ext);
}
rcu_read_unlock();
}
void __reset_page_owner(struct page *page, unsigned short order)
@@ -293,11 +297,11 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
alloc_handle = page_owner->handle;
page_ext_put(page_ext);
handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
__update_page_owner_free_handle(page_ext, handle, order, current->pid,
__update_page_owner_free_handle(page, handle, order, current->pid,
current->tgid, free_ts_nsec);
page_ext_put(page_ext);
if (alloc_handle != early_handle)
/*
@@ -313,19 +317,13 @@ void __reset_page_owner(struct page *page, unsigned short order)
noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
struct page_ext *page_ext;
u64 ts_nsec = local_clock();
depot_stack_handle_t handle;
handle = save_stack(gfp_mask);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
__update_page_owner_handle(page, handle, order, gfp_mask, -1,
ts_nsec, current->pid, current->tgid,
current->comm);
page_ext_put(page_ext);
inc_stack_record_count(handle, gfp_mask, 1 << order);
}
@@ -344,44 +342,42 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
void __split_page_owner(struct page *page, int old_order, int new_order)
{
int i;
struct page_ext *page_ext = page_ext_get(page);
struct page_ext_iter iter;
struct page_ext *page_ext;
struct page_owner *page_owner;
if (unlikely(!page_ext))
return;
for (i = 0; i < (1 << old_order); i++) {
rcu_read_lock();
for_each_page_ext(page, 1 << old_order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
page_owner->order = new_order;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
rcu_read_unlock();
}
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
int i;
struct page_ext *old_ext;
struct page_ext *new_ext;
struct page_ext *page_ext;
struct page_ext_iter iter;
struct page_owner *old_page_owner;
struct page_owner *new_page_owner;
depot_stack_handle_t migrate_handle;
old_ext = page_ext_get(&old->page);
if (unlikely(!old_ext))
page_ext = page_ext_get(&old->page);
if (unlikely(!page_ext))
return;
new_ext = page_ext_get(&newfolio->page);
if (unlikely(!new_ext)) {
page_ext_put(old_ext);
return;
}
old_page_owner = get_page_owner(page_ext);
page_ext_put(page_ext);
page_ext = page_ext_get(&newfolio->page);
if (unlikely(!page_ext))
return;
new_page_owner = get_page_owner(page_ext);
page_ext_put(page_ext);
old_page_owner = get_page_owner(old_ext);
new_page_owner = get_page_owner(new_ext);
migrate_handle = new_page_owner->handle;
__update_page_owner_handle(new_ext, old_page_owner->handle,
__update_page_owner_handle(&newfolio->page, old_page_owner->handle,
old_page_owner->order, old_page_owner->gfp_mask,
old_page_owner->last_migrate_reason,
old_page_owner->ts_nsec, old_page_owner->pid,
@@ -391,7 +387,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
* will be freed after migration. Keep them until then as they may be
* useful.
*/
__update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
__update_page_owner_free_handle(&newfolio->page, 0, old_page_owner->order,
old_page_owner->free_pid,
old_page_owner->free_tgid,
old_page_owner->free_ts_nsec);
@@ -400,14 +396,12 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
* for the new one and the old folio otherwise there will be an imbalance
* when subtracting those pages from the stack.
*/
for (i = 0; i < (1 << new_page_owner->order); i++) {
rcu_read_lock();
for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) {
old_page_owner = get_page_owner(page_ext);
old_page_owner->handle = migrate_handle;
old_ext = page_ext_next(old_ext);
old_page_owner = get_page_owner(old_ext);
}
page_ext_put(new_ext);
page_ext_put(old_ext);
rcu_read_unlock();
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -813,7 +807,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
goto ext_put_continue;
/* Found early allocated page */
__update_page_owner_handle(page_ext, early_handle, 0, 0,
__update_page_owner_handle(page, early_handle, 0, 0,
-1, local_clock(), current->pid,
current->tgid, current->comm);
count++;