mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
um: Stop tracking stub's PID via userspace_pid[]
The PID of the stub process can be obtained from current_mm_id(). There is no need to track it via userspace_pid[]. Stop doing that to simplify the code. Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com> Link: https://patch.msgid.link/20250711065021.2535362-4-tiwei.bie@linux.dev Signed-off-by: Johannes Berg <johannes.berg@intel.com>
This commit is contained in:
parent
b3fb0eb5c2
commit
f7e9077a16
7 changed files with 8 additions and 26 deletions
|
|
@ -16,11 +16,6 @@
|
|||
#define activate_mm activate_mm
|
||||
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
|
||||
{
|
||||
/*
|
||||
* This is called by fs/exec.c and sys_unshare()
|
||||
* when the new ->mm is used for the first time.
|
||||
*/
|
||||
__switch_mm(&new->context.id);
|
||||
}
|
||||
|
||||
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
||||
|
|
@ -28,11 +23,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|||
{
|
||||
unsigned cpu = smp_processor_id();
|
||||
|
||||
if(prev != next){
|
||||
if (prev != next) {
|
||||
cpumask_clear_cpu(cpu, mm_cpumask(prev));
|
||||
cpumask_set_cpu(cpu, mm_cpumask(next));
|
||||
if(next != &init_mm)
|
||||
__switch_mm(&next->context.id);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,8 +19,6 @@ struct mm_id {
|
|||
int syscall_fd_map[STUB_MAX_FDS];
|
||||
};
|
||||
|
||||
void __switch_mm(struct mm_id *mm_idp);
|
||||
|
||||
void notify_mm_kill(int pid);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@
|
|||
#include <sysdep/ptrace.h>
|
||||
|
||||
extern int using_seccomp;
|
||||
extern int userspace_pid[];
|
||||
|
||||
extern void new_thread_handler(void);
|
||||
extern void handle_syscall(struct uml_pt_regs *regs);
|
||||
|
|
|
|||
|
|
@ -26,8 +26,6 @@ void flush_thread(void)
|
|||
|
||||
get_safe_registers(current_pt_regs()->regs.gp,
|
||||
current_pt_regs()->regs.fp);
|
||||
|
||||
__switch_mm(&current->mm->context.id);
|
||||
}
|
||||
|
||||
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
|
||||
|
|
|
|||
|
|
@ -26,8 +26,6 @@ static int __init start_kernel_proc(void *unused)
|
|||
return 0;
|
||||
}
|
||||
|
||||
extern int userspace_pid[];
|
||||
|
||||
static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
|
||||
|
||||
int __init start_uml(void)
|
||||
|
|
|
|||
|
|
@ -434,7 +434,6 @@ static int __init init_stub_exe_fd(void)
|
|||
__initcall(init_stub_exe_fd);
|
||||
|
||||
int using_seccomp;
|
||||
int userspace_pid[NR_CPUS];
|
||||
|
||||
/**
|
||||
* start_userspace() - prepare a new userspace process
|
||||
|
|
@ -553,7 +552,7 @@ extern unsigned long tt_extra_sched_jiffies;
|
|||
|
||||
void userspace(struct uml_pt_regs *regs)
|
||||
{
|
||||
int err, status, op, pid = userspace_pid[0];
|
||||
int err, status, op;
|
||||
siginfo_t si_ptrace;
|
||||
siginfo_t *si;
|
||||
int sig;
|
||||
|
|
@ -562,6 +561,8 @@ void userspace(struct uml_pt_regs *regs)
|
|||
interrupt_end();
|
||||
|
||||
while (1) {
|
||||
struct mm_id *mm_id = current_mm_id();
|
||||
|
||||
/*
|
||||
* When we are in time-travel mode, userspace can theoretically
|
||||
* do a *lot* of work without being scheduled. The problem with
|
||||
|
|
@ -590,7 +591,6 @@ void userspace(struct uml_pt_regs *regs)
|
|||
current_mm_sync();
|
||||
|
||||
if (using_seccomp) {
|
||||
struct mm_id *mm_id = current_mm_id();
|
||||
struct stub_data *proc_data = (void *) mm_id->stack;
|
||||
|
||||
err = set_stub_state(regs, proc_data, singlestepping());
|
||||
|
|
@ -644,8 +644,10 @@ void userspace(struct uml_pt_regs *regs)
|
|||
GET_FAULTINFO_FROM_MC(regs->faultinfo, mcontext);
|
||||
}
|
||||
} else {
|
||||
int pid = mm_id->pid;
|
||||
|
||||
/* Flush out any pending syscalls */
|
||||
err = syscall_stub_flush(current_mm_id());
|
||||
err = syscall_stub_flush(mm_id);
|
||||
if (err) {
|
||||
if (err == -ENOMEM)
|
||||
report_enomem();
|
||||
|
|
@ -776,7 +778,6 @@ void userspace(struct uml_pt_regs *regs)
|
|||
__func__, sig);
|
||||
fatal_sigsegv();
|
||||
}
|
||||
pid = userspace_pid[0];
|
||||
interrupt_end();
|
||||
|
||||
/* Avoid -ERESTARTSYS handling in host */
|
||||
|
|
@ -901,8 +902,3 @@ void reboot_skas(void)
|
|||
block_signals_trace();
|
||||
UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
|
||||
}
|
||||
|
||||
void __switch_mm(struct mm_id *mm_idp)
|
||||
{
|
||||
userspace_pid[0] = mm_idp->pid;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -186,7 +186,7 @@ int arch_switch_tls(struct task_struct *to)
|
|||
/*
|
||||
* We have no need whatsoever to switch TLS for kernel threads; beyond
|
||||
* that, that would also result in us calling os_set_thread_area with
|
||||
* userspace_pid[cpu] == 0, which gives an error.
|
||||
* task->mm == NULL, which would cause a crash.
|
||||
*/
|
||||
if (likely(to->mm))
|
||||
return load_TLS(O_FORCE, to);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue