kcov: Enable context analysis

Enable context analysis for the KCOV subsystem.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-30-elver@google.com
This commit is contained in:
Marco Elver 2025-12-19 16:40:18 +01:00 committed by Peter Zijlstra
parent 0f5d764862
commit 6556fde265
2 changed files with 27 additions and 11 deletions

View file

@@ -43,6 +43,8 @@ KASAN_SANITIZE_kcov.o := n
KCSAN_SANITIZE_kcov.o := n KCSAN_SANITIZE_kcov.o := n
UBSAN_SANITIZE_kcov.o := n UBSAN_SANITIZE_kcov.o := n
KMSAN_SANITIZE_kcov.o := n KMSAN_SANITIZE_kcov.o := n
CONTEXT_ANALYSIS_kcov.o := y
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
obj-y += sched/ obj-y += sched/

View file

@@ -55,13 +55,13 @@ struct kcov {
refcount_t refcount; refcount_t refcount;
/* The lock protects mode, size, area and t. */ /* The lock protects mode, size, area and t. */
spinlock_t lock; spinlock_t lock;
enum kcov_mode mode; enum kcov_mode mode __guarded_by(&lock);
/* Size of arena (in long's). */ /* Size of arena (in long's). */
unsigned int size; unsigned int size __guarded_by(&lock);
/* Coverage buffer shared with user space. */ /* Coverage buffer shared with user space. */
void *area; void *area __guarded_by(&lock);
/* Task for which we collect coverage, or NULL. */ /* Task for which we collect coverage, or NULL. */
struct task_struct *t; struct task_struct *t __guarded_by(&lock);
/* Collecting coverage from remote (background) threads. */ /* Collecting coverage from remote (background) threads. */
bool remote; bool remote;
/* Size of remote area (in long's). */ /* Size of remote area (in long's). */
@@ -391,6 +391,7 @@ void kcov_task_init(struct task_struct *t)
} }
static void kcov_reset(struct kcov *kcov) static void kcov_reset(struct kcov *kcov)
__must_hold(&kcov->lock)
{ {
kcov->t = NULL; kcov->t = NULL;
kcov->mode = KCOV_MODE_INIT; kcov->mode = KCOV_MODE_INIT;
@@ -400,6 +401,7 @@ static void kcov_reset(struct kcov *kcov)
} }
static void kcov_remote_reset(struct kcov *kcov) static void kcov_remote_reset(struct kcov *kcov)
__must_hold(&kcov->lock)
{ {
int bkt; int bkt;
struct kcov_remote *remote; struct kcov_remote *remote;
@@ -419,6 +421,7 @@ static void kcov_remote_reset(struct kcov *kcov)
} }
static void kcov_disable(struct task_struct *t, struct kcov *kcov) static void kcov_disable(struct task_struct *t, struct kcov *kcov)
__must_hold(&kcov->lock)
{ {
kcov_task_reset(t); kcov_task_reset(t);
if (kcov->remote) if (kcov->remote)
@@ -435,8 +438,11 @@ static void kcov_get(struct kcov *kcov)
static void kcov_put(struct kcov *kcov) static void kcov_put(struct kcov *kcov)
{ {
if (refcount_dec_and_test(&kcov->refcount)) { if (refcount_dec_and_test(&kcov->refcount)) {
kcov_remote_reset(kcov); /* Context-safety: no references left, object being destroyed. */
vfree(kcov->area); context_unsafe(
kcov_remote_reset(kcov);
vfree(kcov->area);
);
kfree(kcov); kfree(kcov);
} }
} }
@@ -491,6 +497,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
unsigned long size, off; unsigned long size, off;
struct page *page; struct page *page;
unsigned long flags; unsigned long flags;
void *area;
spin_lock_irqsave(&kcov->lock, flags); spin_lock_irqsave(&kcov->lock, flags);
size = kcov->size * sizeof(unsigned long); size = kcov->size * sizeof(unsigned long);
@@ -499,10 +506,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
res = -EINVAL; res = -EINVAL;
goto exit; goto exit;
} }
area = kcov->area;
spin_unlock_irqrestore(&kcov->lock, flags); spin_unlock_irqrestore(&kcov->lock, flags);
vm_flags_set(vma, VM_DONTEXPAND); vm_flags_set(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) { for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off); page = vmalloc_to_page(area + off);
res = vm_insert_page(vma, vma->vm_start + off, page); res = vm_insert_page(vma, vma->vm_start + off, page);
if (res) { if (res) {
pr_warn_once("kcov: vm_insert_page() failed\n"); pr_warn_once("kcov: vm_insert_page() failed\n");
@@ -522,10 +530,10 @@ static int kcov_open(struct inode *inode, struct file *filep)
kcov = kzalloc(sizeof(*kcov), GFP_KERNEL); kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
if (!kcov) if (!kcov)
return -ENOMEM; return -ENOMEM;
spin_lock_init(&kcov->lock);
kcov->mode = KCOV_MODE_DISABLED; kcov->mode = KCOV_MODE_DISABLED;
kcov->sequence = 1; kcov->sequence = 1;
refcount_set(&kcov->refcount, 1); refcount_set(&kcov->refcount, 1);
spin_lock_init(&kcov->lock);
filep->private_data = kcov; filep->private_data = kcov;
return nonseekable_open(inode, filep); return nonseekable_open(inode, filep);
} }
@@ -556,6 +564,7 @@ static int kcov_get_mode(unsigned long arg)
* vmalloc fault handling path is instrumented. * vmalloc fault handling path is instrumented.
*/ */
static void kcov_fault_in_area(struct kcov *kcov) static void kcov_fault_in_area(struct kcov *kcov)
__must_hold(&kcov->lock)
{ {
unsigned long stride = PAGE_SIZE / sizeof(unsigned long); unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
unsigned long *area = kcov->area; unsigned long *area = kcov->area;
@@ -584,6 +593,7 @@ static inline bool kcov_check_handle(u64 handle, bool common_valid,
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg) unsigned long arg)
__must_hold(&kcov->lock)
{ {
struct task_struct *t; struct task_struct *t;
unsigned long flags, unused; unsigned long flags, unused;
@@ -814,6 +824,7 @@ static inline bool kcov_mode_enabled(unsigned int mode)
} }
static void kcov_remote_softirq_start(struct task_struct *t) static void kcov_remote_softirq_start(struct task_struct *t)
__must_hold(&kcov_percpu_data.lock)
{ {
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data); struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
unsigned int mode; unsigned int mode;
@@ -831,6 +842,7 @@ static void kcov_remote_softirq_start(struct task_struct *t)
} }
static void kcov_remote_softirq_stop(struct task_struct *t) static void kcov_remote_softirq_stop(struct task_struct *t)
__must_hold(&kcov_percpu_data.lock)
{ {
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data); struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
@@ -896,10 +908,12 @@ void kcov_remote_start(u64 handle)
/* Put in kcov_remote_stop(). */ /* Put in kcov_remote_stop(). */
kcov_get(kcov); kcov_get(kcov);
/* /*
* Read kcov fields before unlock to prevent races with * Read kcov fields before unlocking kcov_remote_lock to prevent races
* KCOV_DISABLE / kcov_remote_reset(). * with KCOV_DISABLE and kcov_remote_reset(); cannot acquire kcov->lock
* here, because it might lead to deadlock given kcov_remote_lock is
* acquired _after_ kcov->lock elsewhere.
*/ */
mode = kcov->mode; mode = context_unsafe(kcov->mode);
sequence = kcov->sequence; sequence = kcov->sequence;
if (in_task()) { if (in_task()) {
size = kcov->remote_size; size = kcov->remote_size;