mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:24:45 +01:00
tracing: Fix WARN_ON in tracing_buffers_mmap_close
When a process forks, the child process copies the parent's VMAs but the
user_mapped reference count is not incremented. As a result, when both the
parent and child processes exit, tracing_buffers_mmap_close() is called
twice. On the second call, user_mapped is already 0, causing the function to
return -ENODEV and triggering a WARN_ON.
Normally, this isn't an issue as the memory is mapped with VM_DONTCOPY set.
But this is only a hint, and the application can call
madvise(MADV_DOFORK) which clears the VM_DONTCOPY flag. When the
application does that, it can trigger this issue on fork.
Fix it by incrementing the user_mapped reference count without re-mapping
the pages in the VMA's open callback.
Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://patch.msgid.link/20260227025842.1085206-1-wangqing7171@gmail.com
Fixes: cf9f0f7c4c ("tracing: Allow user-space mapping of the ring-buffer")
Reported-by: syzbot+3b5dd2030fe08afdf65d@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=3b5dd2030fe08afdf65d
Tested-by: syzbot+3b5dd2030fe08afdf65d@syzkaller.appspotmail.com
Signed-off-by: Qing Wang <wangqing7171@gmail.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in:
parent
a5dd6f5866
commit
e39bb9e02b
3 changed files with 35 additions and 0 deletions
|
|
@@ -248,6 +248,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
 int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 		    struct vm_area_struct *vma);
+void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu);
 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
 #endif /* _LINUX_RING_BUFFER_H */
|
||||||
|
|
|
||||||
|
|
@@ -7310,6 +7310,27 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 	return err;
 }
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is called when a VMA is duplicated (e.g., on fork()) to increment
|
||||||
|
* the user_mapped counter without remapping pages.
|
||||||
|
*/
|
||||||
|
void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu)
|
||||||
|
{
|
||||||
|
struct ring_buffer_per_cpu *cpu_buffer;
|
||||||
|
|
||||||
|
if (WARN_ON(!cpumask_test_cpu(cpu, buffer->cpumask)))
|
||||||
|
return;
|
||||||
|
|
||||||
|
cpu_buffer = buffer->buffers[cpu];
|
||||||
|
|
||||||
|
guard(mutex)(&cpu_buffer->mapping_lock);
|
||||||
|
|
||||||
|
if (cpu_buffer->user_mapped)
|
||||||
|
__rb_inc_dec_mapped(cpu_buffer, true);
|
||||||
|
else
|
||||||
|
WARN(1, "Unexpected buffer stat, it should be mapped");
|
||||||
|
}
|
||||||
|
|
||||||
 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
|
||||||
|
|
|
||||||
|
|
@@ -8213,6 +8213,18 @@ static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
 static inline void put_snapshot_map(struct trace_array *tr) { }
 #endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is called when a VMA is duplicated (e.g., on fork()) to increment
|
||||||
|
* the user_mapped counter without remapping pages.
|
||||||
|
*/
|
||||||
|
static void tracing_buffers_mmap_open(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
struct ftrace_buffer_info *info = vma->vm_file->private_data;
|
||||||
|
struct trace_iterator *iter = &info->iter;
|
||||||
|
|
||||||
|
ring_buffer_map_dup(iter->array_buffer->buffer, iter->cpu_file);
|
||||||
|
}
|
||||||
|
|
||||||
 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
 {
 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
|
||||||
|
|
@@ -8232,6 +8244,7 @@ static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr)
 }

 static const struct vm_operations_struct tracing_buffers_vmops = {
+	.open		= tracing_buffers_mmap_open,
 	.close		= tracing_buffers_mmap_close,
 	.may_split	= tracing_buffers_may_split,
 };
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue