mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
mm/zsmalloc: convert obj_allocated() and related helpers to use zpdesc
Convert obj_allocated() and related helpers to take zpdesc. Also make its callers cast (struct page *) to (struct zpdesc *) when calling them. The users will be converted gradually as there are many. Link: https://lkml.kernel.org/r/20241216150450.1228021-8-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi <alexs@kernel.org> Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org> Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
7d2e1a6950
commit
76fb5d9981
1 changed file with 10 additions and 10 deletions
|
|
@ -823,15 +823,15 @@ static unsigned long handle_to_obj(unsigned long handle)
|
|||
return *(unsigned long *)handle;
|
||||
}
|
||||
|
||||
static inline bool obj_allocated(struct page *page, void *obj,
|
||||
static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
|
||||
unsigned long *phandle)
|
||||
{
|
||||
unsigned long handle;
|
||||
struct zspage *zspage = get_zspage(page);
|
||||
struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
|
||||
|
||||
if (unlikely(ZsHugePage(zspage))) {
|
||||
VM_BUG_ON_PAGE(!is_first_page(page), page);
|
||||
handle = page->index;
|
||||
VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
|
||||
handle = zpdesc->handle;
|
||||
} else
|
||||
handle = *(unsigned long *)obj;
|
||||
|
||||
|
|
@ -1569,18 +1569,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
|
|||
* return handle.
|
||||
*/
|
||||
static unsigned long find_alloced_obj(struct size_class *class,
|
||||
struct page *page, int *obj_idx)
|
||||
struct zpdesc *zpdesc, int *obj_idx)
|
||||
{
|
||||
unsigned int offset;
|
||||
int index = *obj_idx;
|
||||
unsigned long handle = 0;
|
||||
void *addr = kmap_local_page(page);
|
||||
void *addr = kmap_local_zpdesc(zpdesc);
|
||||
|
||||
offset = get_first_obj_offset(page);
|
||||
offset = get_first_obj_offset(zpdesc_page(zpdesc));
|
||||
offset += class->size * index;
|
||||
|
||||
while (offset < PAGE_SIZE) {
|
||||
if (obj_allocated(page, addr + offset, &handle))
|
||||
if (obj_allocated(zpdesc, addr + offset, &handle))
|
||||
break;
|
||||
|
||||
offset += class->size;
|
||||
|
|
@ -1604,7 +1604,7 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
|
|||
struct size_class *class = pool->size_class[src_zspage->class];
|
||||
|
||||
while (1) {
|
||||
handle = find_alloced_obj(class, s_page, &obj_idx);
|
||||
handle = find_alloced_obj(class, page_zpdesc(s_page), &obj_idx);
|
||||
if (!handle) {
|
||||
s_page = get_next_page(s_page);
|
||||
if (!s_page)
|
||||
|
|
@ -1837,7 +1837,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
|
|||
|
||||
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
|
||||
addr += class->size) {
|
||||
if (obj_allocated(page, addr, &handle)) {
|
||||
if (obj_allocated(page_zpdesc(page), addr, &handle)) {
|
||||
|
||||
old_obj = handle_to_obj(handle);
|
||||
obj_to_location(old_obj, &dummy, &obj_idx);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue