mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
mm, slab: clean up slab->obj_exts always
When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called; however, a slab->obj_exts
that was previously allocated remains.
It won't be cleared by unaccount_slab(), because
mem_alloc_profiling_enabled() is no longer true. This is incorrect:
slab->obj_exts should always be cleaned up in unaccount_slab() to avoid
the following error:
[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup
[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349 ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent
d2f5819b6e
commit
be8250786c
1 changed files with 8 additions and 22 deletions
30
mm/slub.c
30
mm/slub.c
|
|
@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Should be called only if mem_alloc_profiling_enabled() */
|
||||
static noinline void free_slab_obj_exts(struct slab *slab)
|
||||
static inline void free_slab_obj_exts(struct slab *slab)
|
||||
{
|
||||
struct slabobj_ext *obj_exts;
|
||||
|
||||
|
|
@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)
|
|||
slab->obj_exts = 0;
|
||||
}
|
||||
|
||||
static inline bool need_slab_obj_ext(void)
|
||||
{
|
||||
if (mem_alloc_profiling_enabled())
|
||||
return true;
|
||||
|
||||
/*
|
||||
* CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
|
||||
* inside memcg_slab_post_alloc_hook. No other users for now.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
#else /* CONFIG_SLAB_OBJ_EXT */
|
||||
|
||||
static inline void init_slab_obj_exts(struct slab *slab)
|
||||
|
|
@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
|
|||
{
|
||||
}
|
||||
|
||||
static inline bool need_slab_obj_ext(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SLAB_OBJ_EXT */
|
||||
|
||||
#ifdef CONFIG_MEM_ALLOC_PROFILING
|
||||
|
|
@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
|
|||
static inline void
|
||||
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
|
||||
{
|
||||
if (need_slab_obj_ext())
|
||||
if (mem_alloc_profiling_enabled())
|
||||
__alloc_tagging_slab_alloc_hook(s, object, flags);
|
||||
}
|
||||
|
||||
|
|
@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
|
|||
static __always_inline void unaccount_slab(struct slab *slab, int order,
|
||||
struct kmem_cache *s)
|
||||
{
|
||||
if (memcg_kmem_online() || need_slab_obj_ext())
|
||||
free_slab_obj_exts(slab);
|
||||
/*
|
||||
* The slab object extensions should now be freed regardless of
|
||||
* whether mem_alloc_profiling_enabled() or not because profiling
|
||||
* might have been disabled after slab->obj_exts got allocated.
|
||||
*/
|
||||
free_slab_obj_exts(slab);
|
||||
|
||||
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
|
||||
-(PAGE_SIZE << order));
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue