mm/zsmalloc: convert get/set_first_obj_offset() to take zpdesc

Now that all users of get/set_first_obj_offset() are converted to use
zpdesc, convert them to take zpdesc.

Link: https://lkml.kernel.org/r/20241216150450.1228021-18-42.hyeyoo@gmail.com
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Alex Shi 2024-12-17 00:04:48 +09:00 committed by Andrew Morton
parent 74999813c0
commit fc5eec0d8c

View file

@@ -478,20 +478,20 @@ static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
 #define FIRST_OBJ_PAGE_TYPE_MASK	0xffffff
 
-static inline unsigned int get_first_obj_offset(struct page *page)
+static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc)
 {
-	VM_WARN_ON_ONCE(!PageZsmalloc(page));
-	return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
+	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
+	return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK;
 }
 
-static inline void set_first_obj_offset(struct page *page, unsigned int offset)
+static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset)
 {
 	/* With 24 bits available, we can support offsets into 16 MiB pages. */
 	BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
-	VM_WARN_ON_ONCE(!PageZsmalloc(page));
+	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
 	VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
-	page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
-	page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
+	zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK;
+	zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
 }
static inline unsigned int get_freeobj(struct zspage *zspage)
@@ -911,7 +911,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		struct link_free *link;
 		void *vaddr;
 
-		set_first_obj_offset(zpdesc_page(zpdesc), off);
+		set_first_obj_offset(zpdesc, off);
 
 		vaddr = kmap_local_zpdesc(zpdesc);
 		link = (struct link_free *)vaddr + off / sizeof(*link);
@@ -1555,7 +1555,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
 	unsigned long handle = 0;
 	void *addr = kmap_local_zpdesc(zpdesc);
 
-	offset = get_first_obj_offset(zpdesc_page(zpdesc));
+	offset = get_first_obj_offset(zpdesc);
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
@@ -1750,8 +1750,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
 
 	create_page_chain(class, zspage, zpdescs);
-	first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
-	set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
+	first_obj_offset = get_first_obj_offset(oldzpdesc);
+	set_first_obj_offset(newzpdesc, first_obj_offset);
 	if (unlikely(ZsHugePage(zspage)))
 		newzpdesc->handle = oldzpdesc->handle;
 	__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
@@ -1806,7 +1806,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* the migrate_write_lock protects zpage access via zs_map_object */
 	migrate_write_lock(zspage);
 
-	offset = get_first_obj_offset(zpdesc_page(zpdesc));
+	offset = get_first_obj_offset(zpdesc);
 	s_addr = kmap_local_zpdesc(zpdesc);
 
 	/*