mm/zsmalloc: convert init_zspage() to use zpdesc

Replace the get_first/next_page() function series and kmap_atomic() with the
new zpdesc helpers; no functional change.

Link: https://lkml.kernel.org/r/20241216150450.1228021-9-42.hyeyoo@gmail.com
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Hyeonggon Yoo 2024-12-17 00:04:39 +09:00 committed by Andrew Morton
parent 76fb5d9981
commit acaf41841e

View file

@@ -925,16 +925,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
{
unsigned int freeobj = 1;
unsigned long off = 0;
struct page *page = get_first_page(zspage);
struct zpdesc *zpdesc = get_first_zpdesc(zspage);
while (page) {
struct page *next_page;
while (zpdesc) {
struct zpdesc *next_zpdesc;
struct link_free *link;
void *vaddr;
set_first_obj_offset(page, off);
set_first_obj_offset(zpdesc_page(zpdesc), off);
vaddr = kmap_local_page(page);
vaddr = kmap_local_zpdesc(zpdesc);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
@@ -947,8 +947,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
* page, which must point to the first object on the next
* page (if present)
*/
next_page = get_next_page(page);
if (next_page) {
next_zpdesc = get_next_zpdesc(zpdesc);
if (next_zpdesc) {
link->next = freeobj++ << OBJ_TAG_BITS;
} else {
/*
@@ -958,7 +958,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
link->next = -1UL << OBJ_TAG_BITS;
}
kunmap_local(vaddr);
page = next_page;
zpdesc = next_zpdesc;
off %= PAGE_SIZE;
}