mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
kcov: Enable context analysis
Enable context analysis for the KCOV subsystem. Signed-off-by: Marco Elver <elver@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://patch.msgid.link/20251219154418.3592607-30-elver@google.com
This commit is contained in:
parent
0f5d764862
commit
6556fde265
2 changed files with 27 additions and 11 deletions
|
|
@@ -43,6 +43,8 @@ KASAN_SANITIZE_kcov.o := n
|
|||
KCSAN_SANITIZE_kcov.o := n
|
||||
UBSAN_SANITIZE_kcov.o := n
|
||||
KMSAN_SANITIZE_kcov.o := n
|
||||
|
||||
CONTEXT_ANALYSIS_kcov.o := y
|
||||
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
|
||||
|
||||
obj-y += sched/
|
||||
|
|
|
|||
|
|
@@ -55,13 +55,13 @@ struct kcov {
|
|||
refcount_t refcount;
|
||||
/* The lock protects mode, size, area and t. */
|
||||
spinlock_t lock;
|
||||
enum kcov_mode mode;
|
||||
enum kcov_mode mode __guarded_by(&lock);
|
||||
/* Size of arena (in long's). */
|
||||
unsigned int size;
|
||||
unsigned int size __guarded_by(&lock);
|
||||
/* Coverage buffer shared with user space. */
|
||||
void *area;
|
||||
void *area __guarded_by(&lock);
|
||||
/* Task for which we collect coverage, or NULL. */
|
||||
struct task_struct *t;
|
||||
struct task_struct *t __guarded_by(&lock);
|
||||
/* Collecting coverage from remote (background) threads. */
|
||||
bool remote;
|
||||
/* Size of remote area (in long's). */
|
||||
|
|
@@ -391,6 +391,7 @@ void kcov_task_init(struct task_struct *t)
|
|||
}
|
||||
|
||||
static void kcov_reset(struct kcov *kcov)
|
||||
__must_hold(&kcov->lock)
|
||||
{
|
||||
kcov->t = NULL;
|
||||
kcov->mode = KCOV_MODE_INIT;
|
||||
|
|
@@ -400,6 +401,7 @@ static void kcov_reset(struct kcov *kcov)
|
|||
}
|
||||
|
||||
static void kcov_remote_reset(struct kcov *kcov)
|
||||
__must_hold(&kcov->lock)
|
||||
{
|
||||
int bkt;
|
||||
struct kcov_remote *remote;
|
||||
|
|
@@ -419,6 +421,7 @@ static void kcov_remote_reset(struct kcov *kcov)
|
|||
}
|
||||
|
||||
static void kcov_disable(struct task_struct *t, struct kcov *kcov)
|
||||
__must_hold(&kcov->lock)
|
||||
{
|
||||
kcov_task_reset(t);
|
||||
if (kcov->remote)
|
||||
|
|
@@ -435,8 +438,11 @@ static void kcov_get(struct kcov *kcov)
|
|||
static void kcov_put(struct kcov *kcov)
|
||||
{
|
||||
if (refcount_dec_and_test(&kcov->refcount)) {
|
||||
/* Context-safety: no references left, object being destroyed. */
|
||||
context_unsafe(
|
||||
kcov_remote_reset(kcov);
|
||||
vfree(kcov->area);
|
||||
);
|
||||
kfree(kcov);
|
||||
}
|
||||
}
|
||||
|
|
@@ -491,6 +497,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
|
|||
unsigned long size, off;
|
||||
struct page *page;
|
||||
unsigned long flags;
|
||||
void *area;
|
||||
|
||||
spin_lock_irqsave(&kcov->lock, flags);
|
||||
size = kcov->size * sizeof(unsigned long);
|
||||
|
|
@@ -499,10 +506,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
|
|||
res = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
area = kcov->area;
|
||||
spin_unlock_irqrestore(&kcov->lock, flags);
|
||||
vm_flags_set(vma, VM_DONTEXPAND);
|
||||
for (off = 0; off < size; off += PAGE_SIZE) {
|
||||
page = vmalloc_to_page(kcov->area + off);
|
||||
page = vmalloc_to_page(area + off);
|
||||
res = vm_insert_page(vma, vma->vm_start + off, page);
|
||||
if (res) {
|
||||
pr_warn_once("kcov: vm_insert_page() failed\n");
|
||||
|
|
@@ -522,10 +530,10 @@ static int kcov_open(struct inode *inode, struct file *filep)
|
|||
kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
|
||||
if (!kcov)
|
||||
return -ENOMEM;
|
||||
spin_lock_init(&kcov->lock);
|
||||
kcov->mode = KCOV_MODE_DISABLED;
|
||||
kcov->sequence = 1;
|
||||
refcount_set(&kcov->refcount, 1);
|
||||
spin_lock_init(&kcov->lock);
|
||||
filep->private_data = kcov;
|
||||
return nonseekable_open(inode, filep);
|
||||
}
|
||||
|
|
@@ -556,6 +564,7 @@ static int kcov_get_mode(unsigned long arg)
|
|||
* vmalloc fault handling path is instrumented.
|
||||
*/
|
||||
static void kcov_fault_in_area(struct kcov *kcov)
|
||||
__must_hold(&kcov->lock)
|
||||
{
|
||||
unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
|
||||
unsigned long *area = kcov->area;
|
||||
|
|
@@ -584,6 +593,7 @@ static inline bool kcov_check_handle(u64 handle, bool common_valid,
|
|||
|
||||
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
__must_hold(&kcov->lock)
|
||||
{
|
||||
struct task_struct *t;
|
||||
unsigned long flags, unused;
|
||||
|
|
@@ -814,6 +824,7 @@ static inline bool kcov_mode_enabled(unsigned int mode)
|
|||
}
|
||||
|
||||
static void kcov_remote_softirq_start(struct task_struct *t)
|
||||
__must_hold(&kcov_percpu_data.lock)
|
||||
{
|
||||
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
|
||||
unsigned int mode;
|
||||
|
|
@@ -831,6 +842,7 @@ static void kcov_remote_softirq_start(struct task_struct *t)
|
|||
}
|
||||
|
||||
static void kcov_remote_softirq_stop(struct task_struct *t)
|
||||
__must_hold(&kcov_percpu_data.lock)
|
||||
{
|
||||
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
|
||||
|
||||
|
|
@@ -896,10 +908,12 @@ void kcov_remote_start(u64 handle)
|
|||
/* Put in kcov_remote_stop(). */
|
||||
kcov_get(kcov);
|
||||
/*
|
||||
* Read kcov fields before unlock to prevent races with
|
||||
* KCOV_DISABLE / kcov_remote_reset().
|
||||
* Read kcov fields before unlocking kcov_remote_lock to prevent races
|
||||
* with KCOV_DISABLE and kcov_remote_reset(); cannot acquire kcov->lock
|
||||
* here, because it might lead to deadlock given kcov_remote_lock is
|
||||
* acquired _after_ kcov->lock elsewhere.
|
||||
*/
|
||||
mode = kcov->mode;
|
||||
mode = context_unsafe(kcov->mode);
|
||||
sequence = kcov->sequence;
|
||||
if (in_task()) {
|
||||
size = kcov->remote_size;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue