mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
zsmalloc: make common caches global
Currently, zsmalloc creates kmem_cache of handles and zspages for each pool, which may be suboptimal from the memory usage point of view (extra internal fragmentation per pool). Systems that create multiple zsmalloc pools may benefit from shared common zsmalloc caches. Make handles and zspages kmem caches global. The memory savings depend on particular setup and data patterns and can be found via slabinfo. Link: https://lkml.kernel.org/r/20260117025406.799428-1-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org> Reviewed-by: Nhat Pham <nphamcs@gmail.com> Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev> Cc: Brian Geffon <bgeffon@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
ef24e0aa07
commit
3881b00a2c
1 changed file with 51 additions and 57 deletions
108
mm/zsmalloc.c
108
mm/zsmalloc.c
|
|
@ -193,12 +193,13 @@ struct link_free {
|
|||
};
|
||||
};
|
||||
|
||||
static struct kmem_cache *handle_cachep;
|
||||
static struct kmem_cache *zspage_cachep;
|
||||
|
||||
struct zs_pool {
|
||||
const char *name;
|
||||
|
||||
struct size_class *size_class[ZS_SIZE_CLASSES];
|
||||
struct kmem_cache *handle_cachep;
|
||||
struct kmem_cache *zspage_cachep;
|
||||
|
||||
atomic_long_t pages_allocated;
|
||||
|
||||
|
|
@ -371,60 +372,28 @@ static void init_deferred_free(struct zs_pool *pool) {}
|
|||
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
|
||||
#endif
|
||||
|
||||
static int create_cache(struct zs_pool *pool)
|
||||
static unsigned long cache_alloc_handle(gfp_t gfp)
|
||||
{
|
||||
char *name;
|
||||
gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);
|
||||
|
||||
name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
|
||||
if (!name)
|
||||
return -ENOMEM;
|
||||
pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
|
||||
0, 0, NULL);
|
||||
kfree(name);
|
||||
if (!pool->handle_cachep)
|
||||
return -EINVAL;
|
||||
|
||||
name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
|
||||
if (!name)
|
||||
return -ENOMEM;
|
||||
pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
|
||||
0, 0, NULL);
|
||||
kfree(name);
|
||||
if (!pool->zspage_cachep) {
|
||||
kmem_cache_destroy(pool->handle_cachep);
|
||||
pool->handle_cachep = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return (unsigned long)kmem_cache_alloc(handle_cachep, gfp);
|
||||
}
|
||||
|
||||
static void destroy_cache(struct zs_pool *pool)
|
||||
static void cache_free_handle(unsigned long handle)
|
||||
{
|
||||
kmem_cache_destroy(pool->handle_cachep);
|
||||
kmem_cache_destroy(pool->zspage_cachep);
|
||||
kmem_cache_free(handle_cachep, (void *)handle);
|
||||
}
|
||||
|
||||
/*
 * Allocate a zeroed struct zspage from the global zspage cache.
 *
 * __GFP_HIGHMEM and __GFP_MOVABLE only apply to the zspage's backing
 * pages, not to slab objects, so mask them off before the allocation.
 *
 * Returns the new zspage or NULL on failure.
 */
static struct zspage *cache_alloc_zspage(gfp_t gfp)
{
	gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);

	return kmem_cache_zalloc(zspage_cachep, gfp);
}
|
||||
|
||||
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
|
||||
static void cache_free_zspage(struct zspage *zspage)
|
||||
{
|
||||
kmem_cache_free(pool->handle_cachep, (void *)handle);
|
||||
}
|
||||
|
||||
static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
|
||||
{
|
||||
return kmem_cache_zalloc(pool->zspage_cachep,
|
||||
flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
|
||||
}
|
||||
|
||||
static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
|
||||
{
|
||||
kmem_cache_free(pool->zspage_cachep, zspage);
|
||||
kmem_cache_free(zspage_cachep, zspage);
|
||||
}
|
||||
|
||||
/* class->lock(which owns the handle) synchronizes races */
|
||||
|
|
@ -853,7 +822,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
|
|||
zpdesc = next;
|
||||
} while (zpdesc != NULL);
|
||||
|
||||
cache_free_zspage(pool, zspage);
|
||||
cache_free_zspage(zspage);
|
||||
|
||||
class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
|
||||
atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
|
||||
|
|
@ -966,7 +935,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
|
|||
{
|
||||
int i;
|
||||
struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
|
||||
struct zspage *zspage = cache_alloc_zspage(pool, gfp);
|
||||
struct zspage *zspage = cache_alloc_zspage(gfp);
|
||||
|
||||
if (!zspage)
|
||||
return NULL;
|
||||
|
|
@ -988,7 +957,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
|
|||
zpdesc_dec_zone_page_state(zpdescs[i]);
|
||||
free_zpdesc(zpdescs[i]);
|
||||
}
|
||||
cache_free_zspage(pool, zspage);
|
||||
cache_free_zspage(zspage);
|
||||
return NULL;
|
||||
}
|
||||
__zpdesc_set_zsmalloc(zpdesc);
|
||||
|
|
@ -1339,7 +1308,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
|
|||
if (unlikely(size > ZS_MAX_ALLOC_SIZE))
|
||||
return (unsigned long)ERR_PTR(-ENOSPC);
|
||||
|
||||
handle = cache_alloc_handle(pool, gfp);
|
||||
handle = cache_alloc_handle(gfp);
|
||||
if (!handle)
|
||||
return (unsigned long)ERR_PTR(-ENOMEM);
|
||||
|
||||
|
|
@ -1363,7 +1332,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
|
|||
|
||||
zspage = alloc_zspage(pool, class, gfp, nid);
|
||||
if (!zspage) {
|
||||
cache_free_handle(pool, handle);
|
||||
cache_free_handle(handle);
|
||||
return (unsigned long)ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
|
|
@ -1443,7 +1412,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
|
|||
free_zspage(pool, class, zspage);
|
||||
|
||||
spin_unlock(&class->lock);
|
||||
cache_free_handle(pool, handle);
|
||||
cache_free_handle(handle);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(zs_free);
|
||||
|
||||
|
|
@ -2105,9 +2074,6 @@ struct zs_pool *zs_create_pool(const char *name)
|
|||
if (!pool->name)
|
||||
goto err;
|
||||
|
||||
if (create_cache(pool))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* Iterate reversely, because, size of size_class that we want to use
|
||||
* for merging should be larger or equal to current size.
|
||||
|
|
@ -2229,20 +2195,47 @@ void zs_destroy_pool(struct zs_pool *pool)
|
|||
kfree(class);
|
||||
}
|
||||
|
||||
destroy_cache(pool);
|
||||
kfree(pool->name);
|
||||
kfree(pool);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(zs_destroy_pool);
|
||||
|
||||
static void zs_destroy_caches(void)
|
||||
{
|
||||
kmem_cache_destroy(handle_cachep);
|
||||
handle_cachep = NULL;
|
||||
kmem_cache_destroy(zspage_cachep);
|
||||
zspage_cachep = NULL;
|
||||
}
|
||||
|
||||
/*
 * Create the global handle and zspage caches shared by all zsmalloc
 * pools (they used to be per-pool, which cost extra internal slab
 * fragmentation per pool).
 *
 * Returns 0 on success or -ENOMEM on failure; on failure any cache
 * that was successfully created is destroyed again.
 */
static int __init zs_init_caches(void)
{
	handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					  0, 0, NULL);
	zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					  0, 0, NULL);

	if (!handle_cachep || !zspage_cachep) {
		zs_destroy_caches();
		return -ENOMEM;
	}
	return 0;
}
|
||||
|
||||
static int __init zs_init(void)
|
||||
{
|
||||
int rc __maybe_unused;
|
||||
int rc;
|
||||
|
||||
rc = zs_init_caches();
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
zs_destroy_caches();
|
||||
return rc;
|
||||
}
|
||||
#endif
|
||||
zs_stat_init();
|
||||
return 0;
|
||||
|
|
@ -2254,6 +2247,7 @@ static void __exit zs_exit(void)
|
|||
set_movable_ops(NULL, PGTY_zsmalloc);
|
||||
#endif
|
||||
zs_stat_exit();
|
||||
zs_destroy_caches();
|
||||
}
|
||||
|
||||
module_init(zs_init);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue