From 48647d3f9a644d1e81af6558102d43cdb260597b Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 11 Feb 2026 10:42:30 +0100 Subject: [PATCH 1/3] slab: distinguish lock and trylock for sheaf_flush_main() sheaf_flush_main() can be called from __pcs_replace_full_main() where it's fine if the trylock fails, and pcs_flush_all() where it's not expected to and for some flush callers (when destroying the cache or memory hotremove) it would be actually a problem if it failed and left the main sheaf not flushed. The flush callers can however safely use local_lock() instead of trylock. The trylock failure should not happen in practice on !PREEMPT_RT, but can happen on PREEMPT_RT. The impact is limited in practice because when a trylock fails in the kmem_cache_destroy() path, it means someone is using the cache while destroying it, which is a bug on its own. The memory hotremove path is unlikely to be employed in a production RT config, but it's possible. To fix this, split the function into sheaf_flush_main() (using local_lock()) and sheaf_try_flush_main() (using local_trylock()) where both call __sheaf_flush_main_batch() to flush a single batch of objects. This will also allow lockdep to verify our context assumptions. The problem was raised in an off-list question by Marcelo. 
Fixes: 2d517aa09bbc ("slab: add opt-in caching layer of percpu sheaves") Cc: stable@vger.kernel.org Reported-by: Marcelo Tosatti Signed-off-by: Vlastimil Babka Reviewed-by: Harry Yoo Reviewed-by: Hao Li Link: https://patch.msgid.link/20260211-b4-sheaf-flush-v1-1-4e7f492f0055@suse.cz Signed-off-by: Vlastimil Babka (SUSE) --- mm/slub.c | 47 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 0c906fefc31b..b1e9f16ba435 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2858,19 +2858,19 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); * object pointers are moved to a on-stack array under the lock. To bound the * stack usage, limit each batch to PCS_BATCH_MAX. * - * returns true if at least partially flushed + * Must be called with s->cpu_sheaves->lock locked, returns with the lock + * unlocked. + * + * Returns how many objects are remaining to be flushed */ -static bool sheaf_flush_main(struct kmem_cache *s) +static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s) { struct slub_percpu_sheaves *pcs; unsigned int batch, remaining; void *objects[PCS_BATCH_MAX]; struct slab_sheaf *sheaf; - bool ret = false; -next_batch: - if (!local_trylock(&s->cpu_sheaves->lock)) - return ret; + lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); pcs = this_cpu_ptr(s->cpu_sheaves); sheaf = pcs->main; @@ -2888,10 +2888,37 @@ next_batch: stat_add(s, SHEAF_FLUSH, batch); - ret = true; + return remaining; +} - if (remaining) - goto next_batch; +static void sheaf_flush_main(struct kmem_cache *s) +{ + unsigned int remaining; + + do { + local_lock(&s->cpu_sheaves->lock); + + remaining = __sheaf_flush_main_batch(s); + + } while (remaining); +} + +/* + * Returns true if the main sheaf was at least partially flushed. 
+ */ +static bool sheaf_try_flush_main(struct kmem_cache *s) +{ + unsigned int remaining; + bool ret = false; + + do { + if (!local_trylock(&s->cpu_sheaves->lock)) + return ret; + + ret = true; + remaining = __sheaf_flush_main_batch(s); + + } while (remaining); return ret; } @@ -5704,7 +5731,7 @@ alloc_empty: if (put_fail) stat(s, BARN_PUT_FAIL); - if (!sheaf_flush_main(s)) + if (!sheaf_try_flush_main(s)) return NULL; if (!local_trylock(&s->cpu_sheaves->lock)) From fb1091febd668398aa84c161b8d9a1834321e021 Mon Sep 17 00:00:00 2001 From: "Vlastimil Babka (SUSE)" Date: Mon, 2 Mar 2026 10:55:37 +0100 Subject: [PATCH 2/3] mm/slab: allow sheaf refill if blocking is not allowed Ming Lei reported [1] a regression in the ublk null target benchmark due to sheaves. The profile shows that the alloc_from_pcs() fastpath fails and allocations fall back to ___slab_alloc(). It also shows the allocations happen through mempool_alloc(). The strategy of mempool_alloc() is to call the underlying allocator (here slab) without __GFP_DIRECT_RECLAIM first. This does not play well with __pcs_replace_empty_main() checking for gfpflags_allow_blocking() to decide if it should refill an empty sheaf or fallback to the slowpath, so we end up falling back. We could change the mempool strategy but there might be other paths doing the same thing. So instead allow sheaf refill when blocking is not allowed, changing the condition to gfpflags_allow_spinning(). The original condition was unnecessarily restrictive. Note this doesn't fully resolve the regression [1] as another component of that are memoryless nodes, which is to be addressed separately. 
Reported-by: Ming Lei Fixes: e47c897a2949 ("slab: add sheaves to most caches") Link: https://lore.kernel.org/all/aZ0SbIqaIkwoW2mB@fedora/ [1] Link: https://patch.msgid.link/20260302095536.34062-2-vbabka@kernel.org Signed-off-by: Vlastimil Babka (SUSE) --- mm/slub.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index b1e9f16ba435..20cb4f3b636d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4567,7 +4567,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty = NULL; struct slab_sheaf *full; struct node_barn *barn; - bool can_alloc; + bool allow_spin; lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); @@ -4588,8 +4588,9 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, return NULL; } - full = barn_replace_empty_sheaf(barn, pcs->main, - gfpflags_allow_spinning(gfp)); + allow_spin = gfpflags_allow_spinning(gfp); + + full = barn_replace_empty_sheaf(barn, pcs->main, allow_spin); if (full) { stat(s, BARN_GET); @@ -4599,9 +4600,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, stat(s, BARN_GET_FAIL); - can_alloc = gfpflags_allow_blocking(gfp); - - if (can_alloc) { + if (allow_spin) { if (pcs->spare) { empty = pcs->spare; pcs->spare = NULL; @@ -4611,8 +4610,9 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, } local_unlock(&s->cpu_sheaves->lock); + pcs = NULL; - if (!can_alloc) + if (!allow_spin) return NULL; if (empty) { @@ -4632,11 +4632,8 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, if (!full) return NULL; - /* - * we can reach here only when gfpflags_allow_blocking - * so this must not be an irq - */ - local_lock(&s->cpu_sheaves->lock); + if (!local_trylock(&s->cpu_sheaves->lock)) + goto barn_put; pcs = this_cpu_ptr(s->cpu_sheaves); /* @@ -4667,6 +4664,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct 
slub_percpu_sheaves *pcs, return pcs; } +barn_put: barn_put_full_sheaf(barn, full); stat(s, BARN_PUT); From 6432f15c818cb30eec7c4ca378ecdebd9796f741 Mon Sep 17 00:00:00 2001 From: Harry Yoo Date: Tue, 3 Mar 2026 22:57:22 +0900 Subject: [PATCH 3/3] mm/slab: change stride type from unsigned short to unsigned int Commit 7a8e71bc619d ("mm/slab: use stride to access slabobj_ext") defined the type of slab->stride as unsigned short, because the author initially planned to store stride within the lower 16 bits of the page_type field, but later stored it in unused bits in the counters field instead. However, the idea of having only 2-byte stride turned out to be a serious mistake. On systems with 64k pages, order-1 pages are 128k, which is larger than USHRT_MAX. It triggers a debug warning because s->size is 128k while stride, truncated to 2 bytes, becomes zero: ------------[ cut here ]------------ Warning! stride (0) != s->size (131072) WARNING: mm/slub.c:2231 at alloc_slab_obj_exts_early.constprop.0+0x524/0x534, CPU#6: systemd-sysctl/307 Modules linked in: CPU: 6 UID: 0 PID: 307 Comm: systemd-sysctl Not tainted 7.0.0-rc1+ #6 PREEMPTLAZY Hardware name: IBM,9009-22A POWER9 (architected) 0x4e0202 0xf000005 of:IBM,FW950.E0 (VL950_179) hv:phyp pSeries NIP: c0000000008a9ac0 LR: c0000000008a9abc CTR: 0000000000000000 REGS: c0000000141f7390 TRAP: 0700 Not tainted (7.0.0-rc1+) MSR: 8000000000029033 CR: 28004400 XER: 00000005 CFAR: c000000000279318 IRQMASK: 0 GPR00: c0000000008a9abc c0000000141f7630 c00000000252a300 c00000001427b200 GPR04: 0000000000000004 0000000000000000 c000000000278fd0 0000000000000000 GPR08: fffffffffffe0000 0000000000000000 0000000000000000 0000000022004400 GPR12: c000000000f644b0 c000000017ff8f00 0000000000000000 0000000000000000 GPR16: 0000000000000000 c0000000141f7aa0 0000000000000000 c0000000141f7a88 GPR20: 0000000000000000 0000000000400cc0 ffffffffffffffff c00000001427b180 GPR24: 0000000000000004 00000000000c0cc0 c000000004e89a20 c00000005de90011 GPR28: 
0000000000010010 c00000005df00000 c000000006017f80 c00c000000177a00 NIP [c0000000008a9ac0] alloc_slab_obj_exts_early.constprop.0+0x524/0x534 LR [c0000000008a9abc] alloc_slab_obj_exts_early.constprop.0+0x520/0x534 Call Trace: [c0000000141f7630] [c0000000008a9abc] alloc_slab_obj_exts_early.constprop.0+0x520/0x534 (unreliable) [c0000000141f76c0] [c0000000008aafbc] allocate_slab+0x154/0x94c [c0000000141f7760] [c0000000008b41c0] refill_objects+0x124/0x16c [c0000000141f77c0] [c0000000008b4be0] __pcs_replace_empty_main+0x2b0/0x444 [c0000000141f7810] [c0000000008b9600] __kvmalloc_node_noprof+0x840/0x914 [c0000000141f7900] [c000000000a3dd40] seq_read_iter+0x60c/0xb00 [c0000000141f7a10] [c000000000b36b24] proc_reg_read_iter+0x154/0x1fc [c0000000141f7a50] [c0000000009cee7c] vfs_read+0x39c/0x4e4 [c0000000141f7b30] [c0000000009d0214] ksys_read+0x9c/0x180 [c0000000141f7b90] [c00000000003a8d0] system_call_exception+0x1e0/0x4b0 [c0000000141f7e50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec This leads to slab_obj_ext() returning the first slabobj_ext for all objects and confuses the reference counting of object cgroups [1] and memory (un)charging for memory cgroups [2]. Fortunately, the counters field has 32 unused bits instead of 16 on 64-bit CPUs, which is wide enough to hold any value of s->size. Change the type to unsigned int. 
Reported-by: Venkat Rao Bagalkote Closes: https://lore.kernel.org/lkml/ca241daa-e7e7-4604-a48d-de91ec9184a5@linux.ibm.com [1] Closes: https://lore.kernel.org/all/ddff7c7d-c0c3-4780-808f-9a83268bbf0c@linux.ibm.com [2] Fixes: 7a8e71bc619d ("mm/slab: use stride to access slabobj_ext") Signed-off-by: Harry Yoo Link: https://patch.msgid.link/20260303135722.2680521-1-harry.yoo@oracle.com Reviewed-by: Hao Li Tested-by: Venkat Rao Bagalkote Signed-off-by: Vlastimil Babka (SUSE) --- mm/slab.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/slab.h b/mm/slab.h index f6ef862b60ef..e9ab292acd22 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -59,7 +59,7 @@ struct freelist_counters { * to save memory. In case ->stride field is not available, * such optimizations are disabled. */ - unsigned short stride; + unsigned int stride; #endif }; }; @@ -559,20 +559,20 @@ static inline void put_slab_obj_exts(unsigned long obj_exts) } #ifdef CONFIG_64BIT -static inline void slab_set_stride(struct slab *slab, unsigned short stride) +static inline void slab_set_stride(struct slab *slab, unsigned int stride) { slab->stride = stride; } -static inline unsigned short slab_get_stride(struct slab *slab) +static inline unsigned int slab_get_stride(struct slab *slab) { return slab->stride; } #else -static inline void slab_set_stride(struct slab *slab, unsigned short stride) +static inline void slab_set_stride(struct slab *slab, unsigned int stride) { VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext)); } -static inline unsigned short slab_get_stride(struct slab *slab) +static inline unsigned int slab_get_stride(struct slab *slab) { return sizeof(struct slabobj_ext); }