kfence: Enable context analysis
Enable context analysis for the KFENCE subsystem.

Notably, kfence_handle_page_fault() required a minor restructure, which also fixed a subtle race; arguably that function is more readable now.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-29-elver@google.com
parent 48eb4b9a3d
commit 0f5d764862

4 changed files with 25 additions and 15 deletions
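For readers unfamiliar with context analysis: the annotations this patch adds (__guarded_by(), __must_hold(), __context_unsafe()) let the compiler check locking rules at build time. Below is a minimal userspace sketch, an illustrative assumption rather than kernel code; the kernel macro names come from the diff, but the expansion to Clang's thread-safety attributes shown here, and the names lock, freelist_lock and freelist_len_inc(), are invented for the example. Build with: clang -Wthread-safety sketch.c

#include <pthread.h>

/* A lock type the analysis recognizes as a capability. */
struct __attribute__((capability("mutex"))) lock {
	pthread_mutex_t mu;
};

/* The declarations carry the locking contract the analysis enforces. */
static void lock_acquire(struct lock *l) __attribute__((acquire_capability(*l)));
static void lock_release(struct lock *l) __attribute__((release_capability(*l)));

/* The primitives themselves opt out of the analysis, as real lock code does. */
static void __attribute__((no_thread_safety_analysis)) lock_acquire(struct lock *l)
{
	pthread_mutex_lock(&l->mu);
}

static void __attribute__((no_thread_safety_analysis)) lock_release(struct lock *l)
{
	pthread_mutex_unlock(&l->mu);
}

static struct lock freelist_lock = { .mu = PTHREAD_MUTEX_INITIALIZER };

/* Analogous to __guarded_by(&kfence_freelist_lock): only touch under the lock. */
static int freelist_len __attribute__((guarded_by(freelist_lock)));

/* Analogous to __must_hold(&meta->lock): callers must already hold the lock. */
static void freelist_len_inc(void) __attribute__((requires_capability(freelist_lock)));

static void freelist_len_inc(void)
{
	freelist_len++;	/* No warning: the lock is held by contract. */
}

int main(void)
{
	lock_acquire(&freelist_lock);
	freelist_len_inc();		/* OK: freelist_lock is held here. */
	lock_release(&freelist_lock);

	/*
	 * Calling freelist_len_inc() here, without the lock, would make
	 * clang -Wthread-safety emit a warning at compile time.
	 */
	return 0;
}

In the diff below, the same idea appears as __guarded_by(&kfence_freelist_lock) on the freelist and __must_hold(&meta->lock) on helpers such as metadata_update_state(), while __context_unsafe(/* constructor */) appears to opt the pool-initialization path out of the checks. The restructure of kfence_handle_page_fault() records error_type and unprotected_page in locals and takes to_report->lock once at the single out: report path, which is both what the analysis can verify and, per the commit message, what closed a subtle race.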
mm/kfence/Makefile

@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
+CONTEXT_ANALYSIS := y
+
 obj-y := core.o report.o
 
 CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
mm/kfence/core.c

@@ -133,8 +133,8 @@ struct kfence_metadata *kfence_metadata __read_mostly;
 static struct kfence_metadata *kfence_metadata_init __read_mostly;
 
 /* Freelist with available objects. */
-static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
-static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+static struct list_head kfence_freelist __guarded_by(&kfence_freelist_lock) = LIST_HEAD_INIT(kfence_freelist);
 
 /*
  * The static key to set up a KFENCE allocation; or if static keys are not used
@@ -254,6 +254,7 @@ static bool kfence_unprotect(unsigned long addr)
 }
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+	__must_hold(&meta->lock)
 {
 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
 	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
@@ -289,6 +290,7 @@ static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
 static noinline void
 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
 		      unsigned long *stack_entries, size_t num_stack_entries)
+	__must_hold(&meta->lock)
 {
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
@@ -486,7 +488,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	alloc_covered_add(alloc_stack_hash, 1);
 
 	/* Set required slab fields. */
-	slab = virt_to_slab((void *)meta->addr);
+	slab = virt_to_slab(addr);
 	slab->slab_cache = cache;
 	slab->objects = 1;
 
@@ -515,6 +517,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
 {
 	struct kcsan_scoped_access assert_page_exclusive;
+	u32 alloc_stack_hash;
 	unsigned long flags;
 	bool init;
 
@@ -547,9 +550,10 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	/* Mark the object as freed. */
 	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 	init = slab_want_init_on_free(meta->cache);
+	alloc_stack_hash = meta->alloc_stack_hash;
 	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
-	alloc_covered_add(meta->alloc_stack_hash, -1);
+	alloc_covered_add(alloc_stack_hash, -1);
 
 	/* Check canary bytes for memory corruption. */
 	check_canary(meta);
@@ -594,6 +598,7 @@ static void rcu_guarded_free(struct rcu_head *h)
  * which partial initialization succeeded.
  */
 static unsigned long kfence_init_pool(void)
+	__context_unsafe(/* constructor */)
 {
 	unsigned long addr, start_pfn;
 	int i;
@@ -1220,6 +1225,7 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 {
 	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
 	struct kfence_metadata *to_report = NULL;
+	unsigned long unprotected_page = 0;
 	enum kfence_error_type error_type;
 	unsigned long flags;
 
@@ -1253,9 +1259,8 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 		if (!to_report)
 			goto out;
 
-		raw_spin_lock_irqsave(&to_report->lock, flags);
-		to_report->unprotected_page = addr;
 		error_type = KFENCE_ERROR_OOB;
+		unprotected_page = addr;
 
 		/*
 		 * If the object was freed before we took the look we can still
@@ -1267,7 +1272,6 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 		if (!to_report)
 			goto out;
 
-		raw_spin_lock_irqsave(&to_report->lock, flags);
 		error_type = KFENCE_ERROR_UAF;
 		/*
 		 * We may race with __kfence_alloc(), and it is possible that a
@@ -1279,6 +1283,8 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 
 out:
 	if (to_report) {
+		raw_spin_lock_irqsave(&to_report->lock, flags);
+		to_report->unprotected_page = unprotected_page;
 		kfence_report_error(addr, is_write, regs, to_report, error_type);
 		raw_spin_unlock_irqrestore(&to_report->lock, flags);
 	} else {
mm/kfence/kfence.h

@@ -34,6 +34,8 @@
 /* Maximum stack depth for reports. */
 #define KFENCE_STACK_DEPTH 64
 
+extern raw_spinlock_t kfence_freelist_lock;
+
 /* KFENCE object states. */
 enum kfence_object_state {
 	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
@@ -53,7 +55,7 @@ struct kfence_track {
 
 /* KFENCE metadata per guarded allocation. */
 struct kfence_metadata {
-	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
+	struct list_head list __guarded_by(&kfence_freelist_lock);	/* Freelist node. */
 	struct rcu_head rcu_head;	/* For delayed freeing. */
 
 	/*
@@ -91,13 +93,13 @@ struct kfence_metadata {
 	 * In case of an invalid access, the page that was unprotected; we
 	 * optimistically only store one address.
 	 */
-	unsigned long unprotected_page;
+	unsigned long unprotected_page __guarded_by(&lock);
 
 	/* Allocation and free stack information. */
-	struct kfence_track alloc_track;
-	struct kfence_track free_track;
+	struct kfence_track alloc_track __guarded_by(&lock);
+	struct kfence_track free_track __guarded_by(&lock);
 	/* For updating alloc_covered on frees. */
-	u32 alloc_stack_hash;
+	u32 alloc_stack_hash __guarded_by(&lock);
 #ifdef CONFIG_MEMCG
 	struct slabobj_ext obj_exts;
 #endif
@@ -141,6 +143,6 @@ enum kfence_error_type {
 void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
 			 const struct kfence_metadata *meta, enum kfence_error_type type);
 
-void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
+void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) __must_hold(&meta->lock);
 
 #endif /* MM_KFENCE_KFENCE_H */
mm/kfence/report.c

@@ -106,6 +106,7 @@ found:
 
 static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
 			       bool show_alloc)
+	__must_hold(&meta->lock)
 {
 	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
 	u64 ts_sec = track->ts_nsec;
@@ -207,8 +208,6 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
 	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
 		return;
 
-	if (meta)
-		lockdep_assert_held(&meta->lock);
 	/*
 	 * Because we may generate reports in printk-unfriendly parts of the
 	 * kernel, such as scheduler code, the use of printk() could deadlock.
@@ -263,6 +262,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
 	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);
 
 	if (meta) {
+		lockdep_assert_held(&meta->lock);
 		pr_err("\n");
 		kfence_print_object(NULL, meta);
 	}