slab fixes for 7.0-rc1
-----BEGIN PGP SIGNATURE-----

iQFPBAABCAA5FiEEe7vIQRWZI0iWSE3xu+CwddJFiJoFAmmhvA4bFIAAAAAABAAO
bWFudTIsMi41KzEuMTEsMiwyAAoJELvgsHXSRYiaqPEIAJAN0BlnFoaWew6YSiEO
RjVx1G14Xdp4H9Vc93cg3pXXTvBlAMgNSxEhyiXXCwFTGjnsUqowDqDU6+cJCr7k
KqVLPh1gHR4aIsXcgufOqu9W9hv/vQrd2d9/8qSD+k2jwcOEIk/oFyjRPrzxCrxg
sH4hWy+zfJW1mllMXbMRj3RVRIrvITmnLK0J0ByzuTXjrswyHhsY+6Bl2G5Q7F4V
MuHLZcUAcav/gi4r0d/RoK3m37lC7mGUBT7xsGvt/vRCTCgQvwHbKu47Wq7c+ozd
dP2Dz3qNIR/q2BdDx5ftC8bacR+QE2E3EcYhraOVjLcEsMUO3e7n83umgOixyIT+
fNU=
=FjaS
-----END PGP SIGNATURE-----

Merge tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:

 - Fix for spurious page allocation warnings on sheaf refill (Harry Yoo)

 - Fix for CONFIG_MEM_ALLOC_PROFILING_DEBUG warnings (Suren Baghdasaryan)

 - Fix for kernel-doc warning on ksize() (Sanjay Chitroda)

 - Fix to avoid setting slab->stride later than on slab allocation.
   Doesn't yet fix the reports from powerpc; debugging is making
   progress (Harry Yoo)

* tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: initialize slab->stride early to avoid memory ordering issues
  mm/slub: drop duplicate kernel-doc for ksize()
  mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
  mm/slab: pass __GFP_NOWARN to refill_sheaf() if fallback is available
commit 3feb464fb7

4 changed files with 39 additions and 30 deletions
include/linux/gfp_types.h

@@ -139,6 +139,8 @@ enum {
  * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
  *
  * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
  */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
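As a reading aid, a minimal sketch of the pairing this new documentation describes, assuming CONFIG_SLAB_OBJ_EXT and allocation profiling are enabled; the two wrapper functions are hypothetical, while kmalloc(), __GFP_NO_OBJ_EXT and mark_obj_codetag_empty() are the identifiers used in the hunks below:

static void *metadata_alloc(size_t size)
{
        /* Opt this allocation out of having a slabobj_ext extension. */
        return kmalloc(size, GFP_KERNEL | __GFP_NO_OBJ_EXT);
}

static void metadata_free(void *obj)
{
        /*
         * The object was allocated without an extension, so its alloc tag
         * is NULL; mark it CODETAG_EMPTY so the NULL is treated as expected
         * rather than warned about under CONFIG_MEM_ALLOC_PROFILING_DEBUG.
         */
        mark_obj_codetag_empty(obj);
        kfree(obj);
}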
include/linux/slab.h

@@ -517,18 +517,6 @@ void kfree_sensitive(const void *objp);
 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
 
-/**
- * ksize - Report actual allocation size of associated object
- *
- * @objp: Pointer returned from a prior kmalloc()-family allocation.
- *
- * This should not be used for writing beyond the originally requested
- * allocation size. Either use krealloc() or round up the allocation size
- * with kmalloc_size_roundup() prior to allocation. If this is used to
- * access beyond the originally requested allocation size, UBSAN_BOUNDS
- * and/or FORTIFY_SOURCE may trip, since they only know about the
- * originally allocated size via the __alloc_size attribute.
- */
 size_t ksize(const void *objp);
 
 #ifdef CONFIG_PRINTK
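The dropped block was the duplicate copy; the kernel-doc at the ksize() definition (presumably the one in mm/slub.c) remains. Its advice condenses to a short sketch, with illustrative buffer and length names:

static char *alloc_rounded(size_t len, size_t *wantp)
{
        /*
         * Round the request up before allocating, instead of writing into
         * the slack ksize() would report afterwards; UBSAN_BOUNDS and
         * FORTIFY_SOURCE only know the originally requested size.
         */
        size_t want = kmalloc_size_roundup(len);
        char *buf = kmalloc(want, GFP_KERNEL);

        if (buf)
                *wantp = want;  /* every byte of 'want' is a tracked size */
        return buf;
}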
mm/slab.h

@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-                                          void *addr, void *obj)
+                                          void *addr, const void *obj)
 {
         return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                  cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-                                        const struct slab *slab, void *obj)
+                                        const struct slab *slab, const void *obj)
 {
         if (is_kfence_address(obj))
                 return 0;
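A side note on what __obj_to_index() computes: the index is simply (obj - addr) / object_size, carried out with the precomputed multiplicative inverse in cache->reciprocal_size so the hot path issues no division. A standalone illustration using the same <linux/reciprocal_div.h> helpers, with made-up sizes:

#include <linux/reciprocal_div.h>

static unsigned int example_obj_index(void)
{
        /* Done once, at cache creation time: invert the object size. */
        struct reciprocal_value rsize = reciprocal_value(128);

        /* An object 384 bytes into its slab: 384 / 128 == 3, no div issued. */
        return reciprocal_divide(384, rsize);
}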
mm/slub.c (51 changes)
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
 {
-        struct slab *obj_exts_slab;
+        struct slab *obj_slab;
         unsigned long slab_exts;
 
-        obj_exts_slab = virt_to_slab(obj_exts);
-        slab_exts = slab_obj_exts(obj_exts_slab);
+        obj_slab = virt_to_slab(obj);
+        slab_exts = slab_obj_exts(obj_slab);
         if (slab_exts) {
                 get_slab_obj_exts(slab_exts);
-                unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
-                                                 obj_exts_slab, obj_exts);
-                struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+                unsigned int offs = obj_to_index(obj_slab->slab_cache,
+                                                 obj_slab, obj);
+                struct slabobj_ext *ext = slab_obj_ext(obj_slab,
                                                        slab_exts, offs);
 
                 if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
                                                struct slabobj_ext *vec, unsigned int objects) {}
@@ -2196,7 +2196,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 retry:
         old_exts = READ_ONCE(slab->obj_exts);
         handle_failed_objexts_alloc(old_exts, vec, objects);
-        slab_set_stride(slab, sizeof(struct slabobj_ext));
 
         if (new_slab) {
                 /*
@@ -2211,7 +2210,7 @@ retry:
                  * assign slabobj_exts in parallel. In this case the existing
                  * objcg vector should be reused.
                  */
-                mark_objexts_empty(vec);
+                mark_obj_codetag_empty(vec);
                 if (unlikely(!allow_spin))
                         kfree_nolock(vec);
                 else
@@ -2254,7 +2253,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
          * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
          * the extension for obj_exts is expected to be NULL.
          */
-        mark_objexts_empty(obj_exts);
+        mark_obj_codetag_empty(obj_exts);
         if (allow_spin)
                 kfree(obj_exts);
         else
@@ -2272,6 +2271,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
         void *addr;
         unsigned long obj_exts;
 
+        /* Initialize stride early to avoid memory ordering issues */
+        slab_set_stride(slab, sizeof(struct slabobj_ext));
+
         if (!need_slab_obj_exts(s))
                 return;
 
@@ -2288,7 +2290,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
                 obj_exts |= MEMCG_DATA_OBJEXTS;
 #endif
                 slab->obj_exts = obj_exts;
-                slab_set_stride(slab, sizeof(struct slabobj_ext));
         } else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
                 unsigned int offset = obj_exts_offset_in_object(s);
 
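These two hunks move slab_set_stride() ahead of the obj_exts publication, to the point where the slab is allocated and not yet visible to other CPUs. The hazard being closed, schematically (not actual kernel code; the reader column stands for any slab_obj_ext() user):

/*
 * writer (publishing extensions)       reader (indexing into them)
 *
 *   slab->obj_exts = obj_exts;           exts = slab_obj_exts(slab);
 *   slab_set_stride(slab, stride);       if (exts)
 *                                                ext = exts + idx * stride;
 *
 * With the stride stored after obj_exts and no barrier in between, the
 * reader can observe a populated obj_exts together with a stale stride.
 * Storing the stride before anyone can see the slab removes the ordering
 * requirement instead of adding barriers.
 */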
@@ -2312,6 +2313,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
 static inline void init_slab_obj_exts(struct slab *slab)
 {
 }
@@ -2783,6 +2788,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
 
 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
 {
+        /*
+         * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+         * corresponding extension is NULL and alloc_tag_sub() will throw a
+         * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+         * that the extension for this sheaf is expected to be NULL.
+         */
+        if (s->flags & SLAB_KMALLOC)
+                mark_obj_codetag_empty(sheaf);
+
         kfree(sheaf);
 
         stat(s, SHEAF_FREE);
@@ -2822,7 +2836,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
         if (!sheaf)
                 return NULL;
 
-        if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
+        if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
                 free_empty_sheaf(s, sheaf);
                 return NULL;
         }
@@ -4575,7 +4589,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
                 return NULL;
 
         if (empty) {
-                if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
+                if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
                         full = empty;
                 } else {
                         /*
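Both refill_sheaf() call sites gain __GFP_NOWARN for the reason stated in the merge summary: a failed sheaf refill here is not fatal, since the caller falls back to allocating objects individually, so the page allocator's failure splat would be noise. A hypothetical caller shape (the function and its signature are invented; refill_sheaf() and kmem_cache_alloc() are as in the diff):

static int fill_with_fallback(struct kmem_cache *s, struct slab_sheaf *sheaf,
                              void **objp, gfp_t gfp)
{
        /* Batched attempt: recoverable, so suppress the failure warning. */
        if (!refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN))
                return 0;

        /* Per-object fallback: allowed to warn, a real OOM should be loud. */
        *objp = kmem_cache_alloc(s, gfp);
        return *objp ? 0 : -ENOMEM;
}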
@@ -4890,9 +4904,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
                                       struct slab_sheaf *sheaf, gfp_t gfp)
 {
-        int ret = 0;
+        gfp_t gfp_nomemalloc;
+        int ret;
 
-        ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+        gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
+        if (gfp_pfmemalloc_allowed(gfp))
+                gfp_nomemalloc |= __GFP_NOWARN;
+
+        ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
 
         if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
                 return ret;
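The restructuring makes the warning policy conditional: only when gfp_pfmemalloc_allowed() promises a second, reserves-backed attempt does the first attempt fail quietly. That second attempt lies past the end of the hunk; the sketch below assumes it simply retries without __GFP_NOMEMALLOC so memalloc reserves can be used, which is an inference from the function's name and the early-return condition, not something shown in the diff:

/* Condensed two-phase shape of __prefill_sheaf_pfmemalloc() (assumed). */
static int prefill_two_phase(struct kmem_cache *s, struct slab_sheaf *sheaf,
                             gfp_t gfp)
{
        gfp_t gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
        int ret;

        /* Phase 1: stay out of the memalloc reserves; stay quiet on
         * failure only if phase 2 remains available as a fallback. */
        if (gfp_pfmemalloc_allowed(gfp))
                gfp_nomemalloc |= __GFP_NOWARN;

        ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
        if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
                return ret;

        /* Phase 2 (assumed, elided from the hunk): permit the reserves. */
        return refill_sheaf(s, sheaf, gfp);
}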