From c27cea4416a396a1c5b6b3529dd925f92a69e7d3 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 29 Dec 2025 11:10:56 -0800
Subject: [PATCH 01/25] rcu: Re-implement RCU Tasks Trace in terms of SRCU-fast

This commit saves more than 500 lines of RCU code by re-implementing
RCU Tasks Trace in terms of SRCU-fast. Follow-up work will remove more
code that does not cause problems by its presence, but that is no longer
required.

This variant places smp_mb() in rcu_read_{,un}lock_trace(), in the same
places that srcu_read_{,un}lock() would put them. These smp_mb() calls
will be removed on common-case architectures in a later commit. In the
meantime, they serve to enforce ordering between the underlying
srcu_read_{,un}lock_fast() markers and the intervening critical section,
even on architectures that permit attaching tracepoints to regions of
code not watched by RCU. Such architectures defeat SRCU-fast's use of
implicit single-instruction, interrupts-disabled, and atomic-operation
RCU read-side critical sections, which have no effect when RCU is not
watching. The aforementioned later commit will insert these smp_mb()
calls only on architectures that have not used noinstr to prevent
attaching tracepoints to code where RCU is not watching.

[ paulmck: Apply kernel test robot, Boqun Feng, and Zqiang feedback. ]
[ paulmck: Split out Tiny SRCU fixes per Andrii Nakryiko feedback. ]

Signed-off-by: Paul E. McKenney
Tested-by: kernel test robot
Cc: Andrii Nakryiko
Cc: Alexei Starovoitov
Cc: Peter Zijlstra
Cc: bpf@vger.kernel.org
Reviewed-by: Joel Fernandes
Signed-off-by: Boqun Feng
---
 include/linux/rcupdate_trace.h | 107 ++++--
 include/linux/sched.h          |   1 +
 kernel/rcu/tasks.h             | 621 +--------------------------------
 3 files changed, 99 insertions(+), 630 deletions(-)

diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index e6c44eb428ab..3f46cbe67000 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -12,28 +12,28 @@
 #include
 #include
 
-extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_TASKS_TRACE_RCU
+extern struct srcu_struct rcu_tasks_trace_srcu_struct;
+#endif // #ifdef CONFIG_TASKS_TRACE_RCU
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
 static inline int rcu_read_lock_trace_held(void)
 {
-        return lock_is_held(&rcu_trace_lock_map);
+        return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
 }
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
 static inline int rcu_read_lock_trace_held(void)
 {
         return 1;
 }
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
 
 #ifdef CONFIG_TASKS_TRACE_RCU
 
-void rcu_read_unlock_trace_special(struct task_struct *t);
-
 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
  *
@@ -50,12 +50,14 @@ static inline void rcu_read_lock_trace(void)
 {
         struct task_struct *t = current;
 
-        WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
-        barrier();
-        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
-            t->trc_reader_special.b.need_mb)
-                smp_mb(); // Pairs with update-side barriers
-        rcu_lock_acquire(&rcu_trace_lock_map);
+        if (t->trc_reader_nesting++) {
+                // In case we interrupted a Tasks Trace RCU reader.
+                rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+                return;
+        }
+        barrier(); // nesting before scp to protect against interrupt handler.
+        t->trc_reader_scp = srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+        smp_mb(); // Placeholder for more selective ordering
 }
 
 /**
@@ -69,26 +71,75 @@ static inline void rcu_read_lock_trace(void)
  */
 static inline void rcu_read_unlock_trace(void)
 {
-        int nesting;
+        struct srcu_ctr __percpu *scp;
         struct task_struct *t = current;
 
-        rcu_lock_release(&rcu_trace_lock_map);
-        nesting = READ_ONCE(t->trc_reader_nesting) - 1;
-        barrier(); // Critical section before disabling.
-        // Disable IPI-based setting of .need_qs.
-        WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
-        if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
-                WRITE_ONCE(t->trc_reader_nesting, nesting);
-                return;  // We assume shallow reader nesting.
-        }
-        WARN_ON_ONCE(nesting != 0);
-        rcu_read_unlock_trace_special(t);
+        smp_mb(); // Placeholder for more selective ordering
+        scp = t->trc_reader_scp;
+        barrier(); // scp before nesting to protect against interrupt handler.
+        if (!--t->trc_reader_nesting)
+                srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+        else
+                srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
 }
 
-void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
-void synchronize_rcu_tasks_trace(void);
-void rcu_barrier_tasks_trace(void);
+/**
+ * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a trace rcu-tasks
+ * grace period elapses, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These
+ * read-side critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
+{
+        call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
+}
+
+/**
+ * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
+ *
+ * Control will return to the caller some time after a trace rcu-tasks
+ * grace period has elapsed, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These read-side
+ * critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function preambles
+ * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
+ * (yet) intended for heavy use from multiple CPUs.
+ *
+ * See the description of synchronize_rcu() for more detailed information
+ * on memory ordering guarantees.
+ */
+static inline void synchronize_rcu_tasks_trace(void)
+{
+        synchronize_srcu(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
+ *
+ * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
+ * for example, if there are no pending callbacks.
+ */
+static inline void rcu_barrier_tasks_trace(void)
+{
+        srcu_barrier(&rcu_tasks_trace_srcu_struct);
+}
+
+// Placeholders to enable stepwise transition.
+void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq); +void __init rcu_tasks_trace_suppress_unused(void); struct task_struct *get_rcu_tasks_trace_gp_kthread(void); + #else /* * The BPF JIT forms these addresses even when it doesn't call these diff --git a/include/linux/sched.h b/include/linux/sched.h index d395f2810fac..fe39d422b37d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -945,6 +945,7 @@ struct task_struct { #ifdef CONFIG_TASKS_TRACE_RCU int trc_reader_nesting; + struct srcu_ctr __percpu *trc_reader_scp; int trc_ipi_to_cpu; union rcu_special trc_reader_special; struct list_head trc_holdout_list; diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 2dc044fd126e..1fe789c99f36 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -718,7 +718,6 @@ static void __init rcu_tasks_bootup_oddness(void) #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ } - /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) { @@ -803,7 +802,7 @@ static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *t static void exit_tasks_rcu_finish_trace(struct task_struct *t); -#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) +#if defined(CONFIG_TASKS_RCU) //////////////////////////////////////////////////////////////////////// // @@ -898,7 +897,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) rtp->postgp_func(rtp); } -#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ +#endif /* #if defined(CONFIG_TASKS_RCU) */ #ifdef CONFIG_TASKS_RCU @@ -1453,94 +1452,27 @@ EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data); // // Tracing variant of Tasks RCU. This variant is designed to be used // to protect tracing hooks, including those of BPF. This variant -// therefore: -// -// 1. Has explicit read-side markers to allow finite grace periods -// in the face of in-kernel loops for PREEMPT=n builds. -// -// 2. Protects code in the idle loop, exception entry/exit, and -// CPU-hotplug code paths, similar to the capabilities of SRCU. -// -// 3. Avoids expensive read-side instructions, having overhead similar -// to that of Preemptible RCU. -// -// There are of course downsides. For example, the grace-period code -// can send IPIs to CPUs, even when those CPUs are in the idle loop or -// in nohz_full userspace. If needed, these downsides can be at least -// partially remedied. -// -// Perhaps most important, this variant of RCU does not affect the vanilla -// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace -// readers can operate from idle, offline, and exception entry/exit in no -// way allows rcu_preempt and rcu_sched readers to also do so. -// -// The implementation uses rcu_tasks_wait_gp(), which relies on function -// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() -// function sets these function pointers up so that rcu_tasks_wait_gp() -// invokes these functions in this order: -// -// rcu_tasks_trace_pregp_step(): -// Disables CPU hotplug, adds all currently executing tasks to the -// holdout list, then checks the state of all tasks that blocked -// or were preempted within their current RCU Tasks Trace read-side -// critical section, adding them to the holdout list if appropriate. -// Finally, this function re-enables CPU hotplug. -// The ->pertask_func() pointer is NULL, so there is no per-task processing. 
-// rcu_tasks_trace_postscan(): -// Invokes synchronize_rcu() to wait for late-stage exiting tasks -// to finish exiting. -// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: -// Scans the holdout list, attempting to identify a quiescent state -// for each task on the list. If there is a quiescent state, the -// corresponding task is removed from the holdout list. Once this -// list is empty, the grace period has completed. -// rcu_tasks_trace_postgp(): -// Provides the needed full memory barrier and does debug checks. -// -// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. -// -// Pre-grace-period update-side code is ordered before the grace period -// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period -// read-side code is ordered before the grace period by atomic operations -// on .b.need_qs flag of each task involved in this process, or by scheduler -// context-switch ordering (for locked-down non-running readers). - -// The lockdep state must be outside of #ifdef to be useful. -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key rcu_lock_trace_key; -struct lockdep_map rcu_trace_lock_map = - STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); -EXPORT_SYMBOL_GPL(rcu_trace_lock_map); -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +// is implemented via a straightforward mapping onto SRCU-fast. #ifdef CONFIG_TASKS_TRACE_RCU -// Record outstanding IPIs to each CPU. No point in sending two... -static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); +DEFINE_SRCU_FAST(rcu_tasks_trace_srcu_struct); +EXPORT_SYMBOL_GPL(rcu_tasks_trace_srcu_struct); -// The number of detections of task quiescent state relying on -// heavyweight readers executing explicit memory barriers. -static unsigned long n_heavy_reader_attempts; -static unsigned long n_heavy_reader_updates; -static unsigned long n_heavy_reader_ofl_updates; -static unsigned long n_trc_holdouts; - -void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); -DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, - "RCU Tasks Trace"); - -/* Load from ->trc_reader_special.b.need_qs with proper ordering. */ -static u8 rcu_ld_need_qs(struct task_struct *t) +// Placeholder to suppress build errors through transition period. +void __init rcu_tasks_trace_suppress_unused(void) { - smp_mb(); // Enforce full grace-period ordering. - return smp_load_acquire(&t->trc_reader_special.b.need_qs); -} - -/* Store to ->trc_reader_special.b.need_qs with proper ordering. */ -static void rcu_st_need_qs(struct task_struct *t, u8 v) -{ - smp_store_release(&t->trc_reader_special.b.need_qs, v); - smp_mb(); // Enforce full grace-period ordering. +#ifndef CONFIG_TINY_RCU + show_rcu_tasks_generic_gp_kthread(NULL, NULL); +#endif // #ifndef CONFIG_TINY_RCU + rcu_spawn_tasks_kthread_generic(NULL); + synchronize_rcu_tasks_generic(NULL); + call_rcu_tasks_generic(NULL, NULL, NULL); + call_rcu_tasks_iw_wakeup(NULL); + cblist_init_generic(NULL); +#ifndef CONFIG_TINY_RCU + rcu_tasks_torture_stats_print_generic(NULL, NULL, NULL, NULL); +#endif // #ifndef CONFIG_TINY_RCU } /* @@ -1555,321 +1487,12 @@ u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) } EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); -/* - * If we are the last reader, signal the grace-period kthread. - * Also remove from the per-CPU list of blocked tasks. 
- */ -void rcu_read_unlock_trace_special(struct task_struct *t) -{ - unsigned long flags; - struct rcu_tasks_percpu *rtpcp; - union rcu_special trs; - - // Open-coded full-word version of rcu_ld_need_qs(). - smp_mb(); // Enforce full grace-period ordering. - trs = smp_load_acquire(&t->trc_reader_special); - - if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) - smp_mb(); // Pairs with update-side barriers. - // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. - if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { - u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, - TRC_NEED_QS_CHECKED); - - WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); - } - if (trs.b.blocked) { - rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - list_del_init(&t->trc_blkd_node); - WRITE_ONCE(t->trc_reader_special.b.blocked, false); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - } - WRITE_ONCE(t->trc_reader_nesting, 0); -} -EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); - /* Add a newly blocked reader task to its CPU's list. */ void rcu_tasks_trace_qs_blkd(struct task_struct *t) { - unsigned long flags; - struct rcu_tasks_percpu *rtpcp; - - local_irq_save(flags); - rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); - raw_spin_lock_rcu_node(rtpcp); // irqs already disabled - t->trc_blkd_cpu = smp_processor_id(); - if (!rtpcp->rtp_blkd_tasks.next) - INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); - list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); - WRITE_ONCE(t->trc_reader_special.b.blocked, true); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); } EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); -/* Add a task to the holdout list, if it is not already on the list. */ -static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) -{ - if (list_empty(&t->trc_holdout_list)) { - get_task_struct(t); - list_add(&t->trc_holdout_list, bhp); - n_trc_holdouts++; - } -} - -/* Remove a task from the holdout list, if it is in fact present. */ -static void trc_del_holdout(struct task_struct *t) -{ - if (!list_empty(&t->trc_holdout_list)) { - list_del_init(&t->trc_holdout_list); - put_task_struct(t); - n_trc_holdouts--; - } -} - -/* IPI handler to check task state. */ -static void trc_read_check_handler(void *t_in) -{ - int nesting; - struct task_struct *t = current; - struct task_struct *texp = t_in; - - // If the task is no longer running on this CPU, leave. - if (unlikely(texp != t)) - goto reset_ipi; // Already on holdout list, so will check later. - - // If the task is not in a read-side critical section, and - // if this is the last reader, awaken the grace-period kthread. - nesting = READ_ONCE(t->trc_reader_nesting); - if (likely(!nesting)) { - rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); - goto reset_ipi; - } - // If we are racing with an rcu_read_unlock_trace(), try again later. - if (unlikely(nesting < 0)) - goto reset_ipi; - - // Get here if the task is in a read-side critical section. - // Set its state so that it will update state for the grace-period - // kthread upon exit from that critical section. - rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); - -reset_ipi: - // Allow future IPIs to be sent on CPU and for task. - // Also order this IPI handler against any later manipulations of - // the intended task. 
- smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ - smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ -} - -/* Callback function for scheduler to check locked-down task. */ -static int trc_inspect_reader(struct task_struct *t, void *bhp_in) -{ - struct list_head *bhp = bhp_in; - int cpu = task_cpu(t); - int nesting; - bool ofl = cpu_is_offline(cpu); - - if (task_curr(t) && !ofl) { - // If no chance of heavyweight readers, do it the hard way. - if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) - return -EINVAL; - - // If heavyweight readers are enabled on the remote task, - // we can inspect its state despite its currently running. - // However, we cannot safely change its state. - n_heavy_reader_attempts++; - // Check for "running" idle tasks on offline CPUs. - if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting)) - return -EINVAL; // No quiescent state, do it the hard way. - n_heavy_reader_updates++; - nesting = 0; - } else { - // The task is not running, so C-language access is safe. - nesting = t->trc_reader_nesting; - WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); - if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) - n_heavy_reader_ofl_updates++; - } - - // If not exiting a read-side critical section, mark as checked - // so that the grace-period kthread will remove it from the - // holdout list. - if (!nesting) { - rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); - return 0; // In QS, so done. - } - if (nesting < 0) - return -EINVAL; // Reader transitioning, try again later. - - // The task is in a read-side critical section, so set up its - // state so that it will update state upon exit from that critical - // section. - if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) - trc_add_holdout(t, bhp); - return 0; -} - -/* Attempt to extract the state for the specified task. */ -static void trc_wait_for_one_reader(struct task_struct *t, - struct list_head *bhp) -{ - int cpu; - - // If a previous IPI is still in flight, let it complete. - if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI - return; - - // The current task had better be in a quiescent state. - if (t == current) { - rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); - WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); - return; - } - - // Attempt to nail down the task for inspection. - get_task_struct(t); - if (!task_call_func(t, trc_inspect_reader, bhp)) { - put_task_struct(t); - return; - } - put_task_struct(t); - - // If this task is not yet on the holdout list, then we are in - // an RCU read-side critical section. Otherwise, the invocation of - // trc_add_holdout() that added it to the list did the necessary - // get_task_struct(). Either way, the task cannot be freed out - // from under this code. - - // If currently running, send an IPI, either way, add to list. - trc_add_holdout(t, bhp); - if (task_curr(t) && - time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { - // The task is currently running, so try IPIing it. - cpu = task_cpu(t); - - // If there is already an IPI outstanding, let it happen. - if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) - return; - - per_cpu(trc_ipi_to_cpu, cpu) = true; - t->trc_ipi_to_cpu = cpu; - rcu_tasks_trace.n_ipis++; - if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { - // Just in case there is some other reason for - // failure than the target CPU being offline. 
- WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", - __func__, cpu); - rcu_tasks_trace.n_ipis_fails++; - per_cpu(trc_ipi_to_cpu, cpu) = false; - t->trc_ipi_to_cpu = -1; - } - } -} - -/* - * Initialize for first-round processing for the specified task. - * Return false if task is NULL or already taken care of, true otherwise. - */ -static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) -{ - // During early boot when there is only the one boot CPU, there - // is no idle task for the other CPUs. Also, the grace-period - // kthread is always in a quiescent state. In addition, just return - // if this task is already on the list. - if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) - return false; - - rcu_st_need_qs(t, 0); - t->trc_ipi_to_cpu = -1; - return true; -} - -/* Do first-round processing for the specified task. */ -static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) -{ - if (rcu_tasks_trace_pertask_prep(t, true)) - trc_wait_for_one_reader(t, hop); -} - -/* Initialize for a new RCU-tasks-trace grace period. */ -static void rcu_tasks_trace_pregp_step(struct list_head *hop) -{ - LIST_HEAD(blkd_tasks); - int cpu; - unsigned long flags; - struct rcu_tasks_percpu *rtpcp; - struct task_struct *t; - - // There shouldn't be any old IPIs, but... - for_each_possible_cpu(cpu) - WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); - - // Disable CPU hotplug across the CPU scan for the benefit of - // any IPIs that might be needed. This also waits for all readers - // in CPU-hotplug code paths. - cpus_read_lock(); - - // These rcu_tasks_trace_pertask_prep() calls are serialized to - // allow safe access to the hop list. - for_each_online_cpu(cpu) { - rcu_read_lock(); - // Note that cpu_curr_snapshot() picks up the target - // CPU's current task while its runqueue is locked with - // an smp_mb__after_spinlock(). This ensures that either - // the grace-period kthread will see that task's read-side - // critical section or the task will see the updater's pre-GP - // accesses. The trailing smp_mb() in cpu_curr_snapshot() - // does not currently play a role other than simplify - // that function's ordering semantics. If these simplified - // ordering semantics continue to be redundant, that smp_mb() - // might be removed. - t = cpu_curr_snapshot(cpu); - if (rcu_tasks_trace_pertask_prep(t, true)) - trc_add_holdout(t, hop); - rcu_read_unlock(); - cond_resched_tasks_rcu_qs(); - } - - // Only after all running tasks have been accounted for is it - // safe to take care of the tasks that have blocked within their - // current RCU tasks trace read-side critical section. - for_each_possible_cpu(cpu) { - rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); - while (!list_empty(&blkd_tasks)) { - rcu_read_lock(); - t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); - list_del_init(&t->trc_blkd_node); - list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - rcu_tasks_trace_pertask(t, hop); - rcu_read_unlock(); - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - } - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - cond_resched_tasks_rcu_qs(); - } - - // Re-enable CPU hotplug now that the holdout list is populated. - cpus_read_unlock(); -} - -/* - * Do intermediate processing between task and holdout scans. 
- */ -static void rcu_tasks_trace_postscan(struct list_head *hop) -{ - // Wait for late-stage exiting tasks to finish exiting. - // These might have passed the call to exit_tasks_rcu_finish(). - - // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! - synchronize_rcu(); - // Any tasks that exit after this point will set - // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. -} - /* Communicate task state back to the RCU tasks trace stall warning request. */ struct trc_stall_chk_rdr { int nesting; @@ -1877,241 +1500,39 @@ struct trc_stall_chk_rdr { u8 needqs; }; -static int trc_check_slow_task(struct task_struct *t, void *arg) -{ - struct trc_stall_chk_rdr *trc_rdrp = arg; - - if (task_curr(t) && cpu_online(task_cpu(t))) - return false; // It is running, so decline to inspect it. - trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); - trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); - trc_rdrp->needqs = rcu_ld_need_qs(t); - return true; -} - -/* Show the state of a task stalling the current RCU tasks trace GP. */ -static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) -{ - int cpu; - struct trc_stall_chk_rdr trc_rdr; - bool is_idle_tsk = is_idle_task(t); - - if (*firstreport) { - pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); - *firstreport = false; - } - cpu = task_cpu(t); - if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) - pr_alert("P%d: %c%c\n", - t->pid, - ".I"[t->trc_ipi_to_cpu >= 0], - ".i"[is_idle_tsk]); - else - pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", - t->pid, - ".I"[trc_rdr.ipi_to_cpu >= 0], - ".i"[is_idle_tsk], - ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], - ".B"[!!data_race(t->trc_reader_special.b.blocked)], - trc_rdr.nesting, - " !CN"[trc_rdr.needqs & 0x3], - " ?"[trc_rdr.needqs > 0x3], - cpu, cpu_online(cpu) ? "" : "(offline)"); - sched_show_task(t); -} - -/* List stalled IPIs for RCU tasks trace. */ -static void show_stalled_ipi_trace(void) -{ - int cpu; - - for_each_possible_cpu(cpu) - if (per_cpu(trc_ipi_to_cpu, cpu)) - pr_alert("\tIPI outstanding to CPU %d\n", cpu); -} - -/* Do one scan of the holdout list. */ -static void check_all_holdout_tasks_trace(struct list_head *hop, - bool needreport, bool *firstreport) -{ - struct task_struct *g, *t; - - // Disable CPU hotplug across the holdout list scan for IPIs. - cpus_read_lock(); - - list_for_each_entry_safe(t, g, hop, trc_holdout_list) { - // If safe and needed, try to check the current task. - if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && - !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) - trc_wait_for_one_reader(t, hop); - - // If check succeeded, remove this task from the list. - if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && - rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) - trc_del_holdout(t); - else if (needreport) - show_stalled_task_trace(t, firstreport); - cond_resched_tasks_rcu_qs(); - } - - // Re-enable CPU hotplug now that the holdout list scan has completed. - cpus_read_unlock(); - - if (needreport) { - if (*firstreport) - pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); - show_stalled_ipi_trace(); - } -} - -static void rcu_tasks_trace_empty_fn(void *unused) -{ -} - -/* Wait for grace period to complete and provide ordering. */ -static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) -{ - int cpu; - - // Wait for any lingering IPI handlers to complete. Note that - // if a CPU has gone offline or transitioned to userspace in the - // meantime, all IPI handlers should have been drained beforehand. 
- // Yes, this assumes that CPUs process IPIs in order. If that ever - // changes, there will need to be a recheck and/or timed wait. - for_each_online_cpu(cpu) - if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) - smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); - - smp_mb(); // Caller's code must be ordered after wakeup. - // Pairs with pretty much every ordering primitive. -} - /* Report any needed quiescent state for this exiting task. */ static void exit_tasks_rcu_finish_trace(struct task_struct *t) { - union rcu_special trs = READ_ONCE(t->trc_reader_special); - - rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); - WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); - if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) - rcu_read_unlock_trace_special(t); - else - WRITE_ONCE(t->trc_reader_nesting, 0); } -/** - * call_rcu_tasks_trace() - Queue a callback trace task-based grace period - * @rhp: structure to be used for queueing the RCU updates. - * @func: actual callback function to be invoked after the grace period - * - * The callback function will be invoked some time after a trace rcu-tasks - * grace period elapses, in other words after all currently executing - * trace rcu-tasks read-side critical sections have completed. These - * read-side critical sections are delimited by calls to rcu_read_lock_trace() - * and rcu_read_unlock_trace(). - * - * See the description of call_rcu() for more detailed information on - * memory ordering guarantees. - */ -void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) -{ - call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); -} -EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); - -/** - * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period - * - * Control will return to the caller some time after a trace rcu-tasks - * grace period has elapsed, in other words after all currently executing - * trace rcu-tasks read-side critical sections have elapsed. These read-side - * critical sections are delimited by calls to rcu_read_lock_trace() - * and rcu_read_unlock_trace(). - * - * This is a very specialized primitive, intended only for a few uses in - * tracing and other situations requiring manipulation of function preambles - * and profiling hooks. The synchronize_rcu_tasks_trace() function is not - * (yet) intended for heavy use from multiple CPUs. - * - * See the description of synchronize_rcu() for more detailed information - * on memory ordering guarantees. - */ -void synchronize_rcu_tasks_trace(void) -{ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); - synchronize_rcu_tasks_generic(&rcu_tasks_trace); -} -EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); - -/** - * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. - * - * Although the current implementation is guaranteed to wait, it is not - * obligated to, for example, if there are no pending callbacks. 
- */ -void rcu_barrier_tasks_trace(void) -{ - rcu_barrier_tasks_generic(&rcu_tasks_trace); -} -EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); - int rcu_tasks_trace_lazy_ms = -1; module_param(rcu_tasks_trace_lazy_ms, int, 0444); static int __init rcu_spawn_tasks_trace_kthread(void) { - if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { - rcu_tasks_trace.gp_sleep = HZ / 10; - rcu_tasks_trace.init_fract = HZ / 10; - } else { - rcu_tasks_trace.gp_sleep = HZ / 200; - if (rcu_tasks_trace.gp_sleep <= 0) - rcu_tasks_trace.gp_sleep = 1; - rcu_tasks_trace.init_fract = HZ / 200; - if (rcu_tasks_trace.init_fract <= 0) - rcu_tasks_trace.init_fract = 1; - } - if (rcu_tasks_trace_lazy_ms >= 0) - rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); - rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; - rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; - rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; - rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; - rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); return 0; } #if !defined(CONFIG_TINY_RCU) void show_rcu_tasks_trace_gp_kthread(void) { - char buf[64]; - - snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu", - data_race(n_trc_holdouts), - data_race(n_heavy_reader_ofl_updates), - data_race(n_heavy_reader_updates), - data_race(n_heavy_reader_attempts)); - show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); } EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); void rcu_tasks_trace_torture_stats_print(char *tt, char *tf) { - rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, ""); } EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print); #endif // !defined(CONFIG_TINY_RCU) struct task_struct *get_rcu_tasks_trace_gp_kthread(void) { - return rcu_tasks_trace.kthread_ptr; + return NULL; } EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) { - *flags = 0; - *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq); } EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); @@ -2251,10 +1672,6 @@ void __init tasks_cblist_init_generic(void) #ifdef CONFIG_TASKS_RUDE_RCU cblist_init_generic(&rcu_tasks_rude); #endif - -#ifdef CONFIG_TASKS_TRACE_RCU - cblist_init_generic(&rcu_tasks_trace); -#endif } static int __init rcu_init_tasks_generic(void) From 46e323599911ddd3368c9993a0572c9876a82ce8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:10:57 -0800 Subject: [PATCH 02/25] context_tracking: Remove rcu_task_trace_heavyweight_{enter,exit}() Because SRCU-fast does not use IPIs for its grace periods, there is no need for real-time workloads to switch to an IPI-free mode, and there is in turn no need for either rcu_task_trace_heavyweight_enter() or rcu_task_trace_heavyweight_exit(). This commit therefore removes them. Signed-off-by: Paul E. McKenney Cc: Frederic Weisbecker Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/context_tracking.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index fb5be6e9b423..a743e7ffa6c0 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -54,24 +54,6 @@ static __always_inline void rcu_task_enter(void) #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ } -/* Turn on heavyweight RCU tasks trace readers on kernel exit. 
*/ -static __always_inline void rcu_task_trace_heavyweight_enter(void) -{ -#ifdef CONFIG_TASKS_TRACE_RCU - if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) - current->trc_reader_special.b.need_mb = true; -#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ -} - -/* Turn off heavyweight RCU tasks trace readers on kernel entry. */ -static __always_inline void rcu_task_trace_heavyweight_exit(void) -{ -#ifdef CONFIG_TASKS_TRACE_RCU - if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) - current->trc_reader_special.b.need_mb = false; -#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ -} - /* * Record entry into an extended quiescent state. This is only to be * called when not already in an extended quiescent state, that is, @@ -85,7 +67,6 @@ static noinstr void ct_kernel_exit_state(int offset) * critical sections, and we also must force ordering with the * next idle sojourn. */ - rcu_task_trace_heavyweight_enter(); // Before CT state update! // RCU is still watching. Better not be in extended quiescent state! WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !rcu_is_watching_curr_cpu()); (void)ct_state_inc(offset); @@ -108,7 +89,6 @@ static noinstr void ct_kernel_enter_state(int offset) */ seq = ct_state_inc(offset); // RCU is now watching. Better not be in an extended quiescent state! - rcu_task_trace_heavyweight_exit(); // After CT state update! WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING)); } From a73fc3dcc60b6d7a2075e2fbdca64fd53600f855 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:10:58 -0800 Subject: [PATCH 03/25] rcu: Clean up after the SRCU-fastification of RCU Tasks Trace Now that RCU Tasks Trace has been re-implemented in terms of SRCU-fast, the ->trc_ipi_to_cpu, ->trc_blkd_cpu, ->trc_blkd_node, ->trc_holdout_list, and ->trc_reader_special task_struct fields are no longer used. In addition, the rcu_tasks_trace_qs(), rcu_tasks_trace_qs_blkd(), exit_tasks_rcu_finish_trace(), and rcu_spawn_tasks_trace_kthread(), show_rcu_tasks_trace_gp_kthread(), rcu_tasks_trace_get_gp_data(), rcu_tasks_trace_torture_stats_print(), and get_rcu_tasks_trace_gp_kthread() functions and all the other functions that they invoke are no longer used. Also, the TRC_NEED_QS and TRC_NEED_QS_CHECKED CPP macros are no longer used. Neither are the rcu_tasks_trace_lazy_ms and rcu_task_ipi_delay rcupdate module parameters and the TASKS_TRACE_RCU_READ_MB Kconfig option. This commit therefore removes all of them. [ paulmck: Apply Alexei Starovoitov feedback. ] Signed-off-by: Paul E. McKenney Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- .../admin-guide/kernel-parameters.txt | 15 ---- include/linux/rcupdate.h | 31 +------- include/linux/rcupdate_trace.h | 2 - include/linux/sched.h | 5 -- init/init_task.c | 3 - kernel/fork.c | 3 - kernel/rcu/Kconfig | 18 ----- kernel/rcu/rcu.h | 9 --- kernel/rcu/rcuscale.c | 7 -- kernel/rcu/rcutorture.c | 2 - kernel/rcu/tasks.h | 79 +------------------ .../selftests/rcutorture/configs/rcu/TRACE01 | 1 - .../selftests/rcutorture/configs/rcu/TRACE02 | 1 - 13 files changed, 2 insertions(+), 174 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a8d0afde7f85..1b8e5cadbecb 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6249,13 +6249,6 @@ Kernel parameters dynamically) adjusted. 
This parameter is intended for use in testing. - rcupdate.rcu_task_ipi_delay= [KNL] - Set time in jiffies during which RCU tasks will - avoid sending IPIs, starting with the beginning - of a given grace period. Setting a large - number avoids disturbing real-time workloads, - but lengthens grace periods. - rcupdate.rcu_task_lazy_lim= [KNL] Number of callbacks on a given CPU that will cancel laziness on that CPU. Use -1 to disable @@ -6299,14 +6292,6 @@ Kernel parameters of zero will disable batching. Batching is always disabled for synchronize_rcu_tasks(). - rcupdate.rcu_tasks_trace_lazy_ms= [KNL] - Set timeout in milliseconds RCU Tasks - Trace asynchronous callback batching for - call_rcu_tasks_trace(). A negative value - will take the default. A value of zero will - disable batching. Batching is always disabled - for synchronize_rcu_tasks_trace(). - rcupdate.rcu_self_test= [KNL] Run the RCU early boot self tests diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index c5b30054cd01..bd5a420cf09a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -175,36 +175,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf); # define synchronize_rcu_tasks synchronize_rcu # endif -# ifdef CONFIG_TASKS_TRACE_RCU -// Bits for ->trc_reader_special.b.need_qs field. -#define TRC_NEED_QS 0x1 // Task needs a quiescent state. -#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. - -u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); -void rcu_tasks_trace_qs_blkd(struct task_struct *t); - -# define rcu_tasks_trace_qs(t) \ - do { \ - int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ - \ - if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \ - likely(!___rttq_nesting)) { \ - rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \ - } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ - !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ - rcu_tasks_trace_qs_blkd(t); \ - } \ - } while (0) -void rcu_tasks_trace_torture_stats_print(char *tt, char *tf); -# else -# define rcu_tasks_trace_qs(t) do { } while (0) -# endif - -#define rcu_tasks_qs(t, preempt) \ -do { \ - rcu_tasks_classic_qs((t), (preempt)); \ - rcu_tasks_trace_qs(t); \ -} while (0) +#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt)) # ifdef CONFIG_TASKS_RUDE_RCU void synchronize_rcu_tasks_rude(void); diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 3f46cbe67000..0bd47f12ecd1 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -136,9 +136,7 @@ static inline void rcu_barrier_tasks_trace(void) } // Placeholders to enable stepwise transition. 
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq); void __init rcu_tasks_trace_suppress_unused(void); -struct task_struct *get_rcu_tasks_trace_gp_kthread(void); #else /* diff --git a/include/linux/sched.h b/include/linux/sched.h index fe39d422b37d..56156643ccac 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -946,11 +946,6 @@ struct task_struct { #ifdef CONFIG_TASKS_TRACE_RCU int trc_reader_nesting; struct srcu_ctr __percpu *trc_reader_scp; - int trc_ipi_to_cpu; - union rcu_special trc_reader_special; - struct list_head trc_holdout_list; - struct list_head trc_blkd_node; - int trc_blkd_cpu; #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ struct sched_info sched_info; diff --git a/init/init_task.c b/init/init_task.c index 49b13d7c3985..db92c404d59a 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -195,9 +195,6 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = { #endif #ifdef CONFIG_TASKS_TRACE_RCU .trc_reader_nesting = 0, - .trc_reader_special.s = 0, - .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list), - .trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node), #endif #ifdef CONFIG_CPUSETS .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq, diff --git a/kernel/fork.c b/kernel/fork.c index b1f3915d5f8e..d7ed107cbb47 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1828,9 +1828,6 @@ static inline void rcu_copy_process(struct task_struct *p) #endif /* #ifdef CONFIG_TASKS_RCU */ #ifdef CONFIG_TASKS_TRACE_RCU p->trc_reader_nesting = 0; - p->trc_reader_special.s = 0; - INIT_LIST_HEAD(&p->trc_holdout_list); - INIT_LIST_HEAD(&p->trc_blkd_node); #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ } diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 4d9b21f69eaa..8d5a1ecb7d56 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -313,24 +313,6 @@ config RCU_NOCB_CPU_CB_BOOST Say Y here if you want to set RT priority for offloading kthreads. Say N here if you are building a !PREEMPT_RT kernel and are unsure. -config TASKS_TRACE_RCU_READ_MB - bool "Tasks Trace RCU readers use memory barriers in user and idle" - depends on RCU_EXPERT && TASKS_TRACE_RCU - default PREEMPT_RT || NR_CPUS < 8 - help - Use this option to further reduce the number of IPIs sent - to CPUs executing in userspace or idle during tasks trace - RCU grace periods. Given that a reasonable setting of - the rcupdate.rcu_task_ipi_delay kernel boot parameter - eliminates such IPIs for many workloads, proper setting - of this Kconfig option is important mostly for aggressive - real-time installations and for battery-powered devices, - hence the default chosen above. - - Say Y here if you hate IPIs. - Say N here if you hate read-side memory barriers. - Take the default if you are unsure. 
- config RCU_LAZY bool "RCU callback lazy invocation functionality" depends on RCU_NOCB_CPU diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 9cf01832a6c3..dc5d614b372c 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -544,10 +544,6 @@ struct task_struct *get_rcu_tasks_rude_gp_kthread(void); void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq); #endif // # ifdef CONFIG_TASKS_RUDE_RCU -#ifdef CONFIG_TASKS_TRACE_RCU -void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq); -#endif - #ifdef CONFIG_TASKS_RCU_GENERIC void tasks_cblist_init_generic(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ @@ -673,11 +669,6 @@ void show_rcu_tasks_rude_gp_kthread(void); #else static inline void show_rcu_tasks_rude_gp_kthread(void) {} #endif -#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU) -void show_rcu_tasks_trace_gp_kthread(void); -#else -static inline void show_rcu_tasks_trace_gp_kthread(void) {} -#endif #ifdef CONFIG_TINY_RCU static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; } diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 7484d8ad5767..1c50f89fbd6f 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -400,11 +400,6 @@ static void tasks_trace_scale_read_unlock(int idx) rcu_read_unlock_trace(); } -static void rcu_tasks_trace_scale_stats(void) -{ - rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG); -} - static struct rcu_scale_ops tasks_tracing_ops = { .ptype = RCU_TASKS_FLAVOR, .init = rcu_sync_scale_init, @@ -416,8 +411,6 @@ static struct rcu_scale_ops tasks_tracing_ops = { .gp_barrier = rcu_barrier_tasks_trace, .sync = synchronize_rcu_tasks_trace, .exp_sync = synchronize_rcu_tasks_trace, - .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread, - .stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats, .name = "tasks-tracing" }; diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 07e51974b06b..78a6ebe77d35 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1180,8 +1180,6 @@ static struct rcu_torture_ops tasks_tracing_ops = { .exp_sync = synchronize_rcu_tasks_trace, .call = call_rcu_tasks_trace, .cb_barrier = rcu_barrier_tasks_trace, - .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, - .get_gp_data = rcu_tasks_trace_get_gp_data, .cbflood_max = 50000, .irq_capable = 1, .slow_gps = 1, diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 1fe789c99f36..1249b47f0a8d 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -161,11 +161,6 @@ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); #endif -/* Avoid IPIing CPUs early in the grace period. */ -#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) -static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; -module_param(rcu_task_ipi_delay, int, 0644); - /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. 
*/ #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) @@ -800,8 +795,6 @@ static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *t #endif // #ifndef CONFIG_TINY_RCU -static void exit_tasks_rcu_finish_trace(struct task_struct *t); - #if defined(CONFIG_TASKS_RCU) //////////////////////////////////////////////////////////////////////// @@ -1321,13 +1314,11 @@ void exit_tasks_rcu_finish(void) raw_spin_lock_irqsave_rcu_node(rtpcp, flags); list_del_init(&t->rcu_tasks_exit_list); raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - - exit_tasks_rcu_finish_trace(t); } #else /* #ifdef CONFIG_TASKS_RCU */ void exit_tasks_rcu_start(void) { } -void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } +void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU */ #ifdef CONFIG_TASKS_RUDE_RCU @@ -1475,69 +1466,6 @@ void __init rcu_tasks_trace_suppress_unused(void) #endif // #ifndef CONFIG_TINY_RCU } -/* - * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for - * the four-byte operand-size restriction of some platforms. - * - * Returns the old value, which is often ignored. - */ -u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) -{ - return cmpxchg(&t->trc_reader_special.b.need_qs, old, new); -} -EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); - -/* Add a newly blocked reader task to its CPU's list. */ -void rcu_tasks_trace_qs_blkd(struct task_struct *t) -{ -} -EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); - -/* Communicate task state back to the RCU tasks trace stall warning request. */ -struct trc_stall_chk_rdr { - int nesting; - int ipi_to_cpu; - u8 needqs; -}; - -/* Report any needed quiescent state for this exiting task. */ -static void exit_tasks_rcu_finish_trace(struct task_struct *t) -{ -} - -int rcu_tasks_trace_lazy_ms = -1; -module_param(rcu_tasks_trace_lazy_ms, int, 0444); - -static int __init rcu_spawn_tasks_trace_kthread(void) -{ - return 0; -} - -#if !defined(CONFIG_TINY_RCU) -void show_rcu_tasks_trace_gp_kthread(void) -{ -} -EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); - -void rcu_tasks_trace_torture_stats_print(char *tt, char *tf) -{ -} -EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print); -#endif // !defined(CONFIG_TINY_RCU) - -struct task_struct *get_rcu_tasks_trace_gp_kthread(void) -{ - return NULL; -} -EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); - -void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) -{ -} -EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); - -#else /* #ifdef CONFIG_TASKS_TRACE_RCU */ -static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ #ifndef CONFIG_TINY_RCU @@ -1545,7 +1473,6 @@ void show_rcu_tasks_gp_kthreads(void) { show_rcu_tasks_classic_gp_kthread(); show_rcu_tasks_rude_gp_kthread(); - show_rcu_tasks_trace_gp_kthread(); } #endif /* #ifndef CONFIG_TINY_RCU */ @@ -1684,10 +1611,6 @@ static int __init rcu_init_tasks_generic(void) rcu_spawn_tasks_rude_kthread(); #endif -#ifdef CONFIG_TASKS_TRACE_RCU - rcu_spawn_tasks_trace_kthread(); -#endif - // Run the self-tests. 
rcu_tasks_initiate_self_tests(); diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 index 85b407467454..18efab346381 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 @@ -10,5 +10,4 @@ CONFIG_PROVE_LOCKING=n #CHECK#CONFIG_PROVE_RCU=n CONFIG_FORCE_TASKS_TRACE_RCU=y #CHECK#CONFIG_TASKS_TRACE_RCU=y -CONFIG_TASKS_TRACE_RCU_READ_MB=y CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 index 9003c56cd764..8da390e82829 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 @@ -9,6 +9,5 @@ CONFIG_PROVE_LOCKING=y #CHECK#CONFIG_PROVE_RCU=y CONFIG_FORCE_TASKS_TRACE_RCU=y #CHECK#CONFIG_TASKS_TRACE_RCU=y -CONFIG_TASKS_TRACE_RCU_READ_MB=n CONFIG_RCU_EXPERT=y CONFIG_DEBUG_OBJECTS=y From 176a6aeaf1eb97b8ddf88e324fd1cbf47d52ba28 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:10:59 -0800 Subject: [PATCH 04/25] rcu: Move rcu_tasks_trace_srcu_struct out of #ifdef CONFIG_TASKS_RCU_GENERIC Moving the rcu_tasks_trace_srcu_struct structure instance out from under the CONFIG_TASKS_RCU_GENERIC Kconfig option permits the CONFIG_TASKS_TRACE_RCU Kconfig option to stop enabling this CONFIG_TASKS_RCU_GENERIC Kconfig option. This commit also therefore makes it so. Signed-off-by: Paul E. McKenney Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/Kconfig | 2 +- kernel/rcu/tasks.h | 42 +++++++++++++----------------------------- 2 files changed, 14 insertions(+), 30 deletions(-) diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 8d5a1ecb7d56..c381a3130116 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -82,7 +82,7 @@ config NEED_SRCU_NMI_SAFE def_bool HAVE_NMI && !ARCH_HAS_NMI_SAFE_THIS_CPU_OPS && !TINY_SRCU config TASKS_RCU_GENERIC - def_bool TASKS_RCU || TASKS_RUDE_RCU || TASKS_TRACE_RCU + def_bool TASKS_RCU || TASKS_RUDE_RCU help This option enables generic infrastructure code supporting task-based RCU implementations. Not for manual selection. diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 1249b47f0a8d..76f952196a29 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1439,35 +1439,6 @@ EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data); #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ -//////////////////////////////////////////////////////////////////////// -// -// Tracing variant of Tasks RCU. This variant is designed to be used -// to protect tracing hooks, including those of BPF. This variant -// is implemented via a straightforward mapping onto SRCU-fast. - -#ifdef CONFIG_TASKS_TRACE_RCU - -DEFINE_SRCU_FAST(rcu_tasks_trace_srcu_struct); -EXPORT_SYMBOL_GPL(rcu_tasks_trace_srcu_struct); - -// Placeholder to suppress build errors through transition period. 
-void __init rcu_tasks_trace_suppress_unused(void)
-{
-#ifndef CONFIG_TINY_RCU
-        show_rcu_tasks_generic_gp_kthread(NULL, NULL);
-#endif // #ifndef CONFIG_TINY_RCU
-        rcu_spawn_tasks_kthread_generic(NULL);
-        synchronize_rcu_tasks_generic(NULL);
-        call_rcu_tasks_generic(NULL, NULL, NULL);
-        call_rcu_tasks_iw_wakeup(NULL);
-        cblist_init_generic(NULL);
-#ifndef CONFIG_TINY_RCU
-        rcu_tasks_torture_stats_print_generic(NULL, NULL, NULL, NULL);
-#endif // #ifndef CONFIG_TINY_RCU
-}
-
-#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
-
 #ifndef CONFIG_TINY_RCU
 void show_rcu_tasks_gp_kthreads(void)
 {
@@ -1621,3 +1592,16 @@ core_initcall(rcu_init_tasks_generic);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void rcu_tasks_bootup_oddness(void) {}
 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+
+////////////////////////////////////////////////////////////////////////
+//
+// Tracing variant of Tasks RCU. This variant is designed to be used
+// to protect tracing hooks, including those of BPF. This variant
+// is implemented via a straightforward mapping onto SRCU-fast.
+
+DEFINE_SRCU_FAST(rcu_tasks_trace_srcu_struct);
+EXPORT_SYMBOL_GPL(rcu_tasks_trace_srcu_struct);
+
+#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
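At this point in the series, a Tasks Trace RCU reader is simply an
SRCU-fast reader of rcu_tasks_trace_srcu_struct. For illustration only
(this sketch is not part of the series; struct foo, gp, and
foo_free_cb() are hypothetical stand-ins), a reader/updater pair using
the still-current API looks like this:

        struct foo {
                struct rcu_head rh;
                int data;
        };
        static struct foo __rcu *gp;

        // Reader: for example, a sleepable tracing hook.
        static int reader(void)
        {
                int ret = 0;
                struct foo *p;

                rcu_read_lock_trace();
                p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
                if (p)
                        ret = p->data;  // Tasks Trace readers may block here.
                rcu_read_unlock_trace();
                return ret;
        }

        static void foo_free_cb(struct rcu_head *rhp)
        {
                kfree(container_of(rhp, struct foo, rh));
        }

        // Updater: unpublish, then defer freeing until all pre-existing
        // readers finish, now via SRCU-fast grace periods under the covers.
        static void updater(struct foo *newp)
        {
                struct foo *oldp = rcu_replace_pointer(gp, newp, true);

                if (oldp)
                        call_rcu_tasks_trace(&oldp->rh, foo_free_cb);
        }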
From 1a72f4bb6f3eaa5af674cb10802f7064bf71d10a Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 29 Dec 2025 11:11:00 -0800
Subject: [PATCH 05/25] rcu: Add noinstr-fast rcu_read_{,un}lock_tasks_trace() APIs

When expressing RCU Tasks Trace in terms of SRCU-fast, it was necessary
to keep a nesting count and per-CPU srcu_ctr structure pointer in the
task_struct structure, which is slow to access. But an alternative is to
instead provide rcu_read_lock_tasks_trace() and
rcu_read_unlock_tasks_trace() functions that match the underlying
SRCU-fast semantics, avoiding the task_struct accesses. When all callers
have switched to the new API, the previous rcu_read_lock_trace() and
rcu_read_unlock_trace() APIs will be removed.

The rcu_read_{,un}lock_{,tasks_}trace() functions need to use smp_mb()
only if invoked where RCU is not watching, that is, from locations where
a call to rcu_is_watching() would return false. In architectures that
define the ARCH_WANTS_NO_INSTR Kconfig option, use of noinstr and
friends ensures that tracing happens only where RCU is watching, so
those architectures can dispense entirely with the read-side calls to
smp_mb().

Other architectures include these read-side calls by default, but in
many installations there might be either larger than average tolerance
for risk, prohibition of removing tracing on a running system, or
careful review and approval of removal of tracing. Such installations
can build their kernels with CONFIG_TASKS_TRACE_RCU_NO_MB=y to avoid
those read-side calls to smp_mb(), thus accepting responsibility for
run-time removal of tracing from code regions that RCU is not watching.
Those wishing to disable read-side memory barriers for an entire
architecture can select this TASKS_TRACE_RCU_NO_MB Kconfig option,
hence the polarity.

[ paulmck: Apply Peter Zijlstra feedback. ]

Signed-off-by: Paul E. McKenney
Cc: Andrii Nakryiko
Cc: Alexei Starovoitov
Cc: Peter Zijlstra
Cc: bpf@vger.kernel.org
Reviewed-by: Joel Fernandes
Signed-off-by: Boqun Feng
---
 include/linux/rcupdate_trace.h | 65 +++++++++++++++++++++++++++++-----
 kernel/rcu/Kconfig             | 23 ++++++++++++
 2 files changed, 80 insertions(+), 8 deletions(-)

diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 0bd47f12ecd1..f47ba9c07460 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -34,6 +34,53 @@ static inline int rcu_read_lock_trace_held(void)
 
 #ifdef CONFIG_TASKS_TRACE_RCU
 
+/**
+ * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on
+ * one task while other tasks are within RCU read-side critical sections,
+ * invocation of the corresponding RCU callback is deferred until after
+ * all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for
+ * srcu_read_lock_fast(). For a description of how implicit RCU
+ * readers provide the needed ordering for architectures defining the
+ * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
+ * code where RCU is not watching), please see the __srcu_read_lock_fast()
+ * (non-kerneldoc) header comment. Otherwise, the smp_mb() below provides
+ * the needed ordering.
+ */
+static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
+{
+        struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+
+        rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+        if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+                smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+        return ret;
+}
+
+/**
+ * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
+ * @scp: return value from corresponding rcu_read_lock_tasks_trace().
+ *
+ * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
+ * returned the value passed in via scp.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ * For memory-ordering information, please see the header comment for the
+ * rcu_read_lock_tasks_trace() function.
+ */
+static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
+{
+        if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+                smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+        __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+        srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
+}
+
 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
  *
@@ -50,14 +97,15 @@ static inline void rcu_read_lock_trace(void)
 {
         struct task_struct *t = current;
 
+        rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
         if (t->trc_reader_nesting++) {
                 // In case we interrupted a Tasks Trace RCU reader.
-                rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
                 return;
         }
         barrier(); // nesting before scp to protect against interrupt handler.
-        t->trc_reader_scp = srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
-        smp_mb(); // Placeholder for more selective ordering
+        t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+        if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+                smp_mb(); // Placeholder for more selective ordering
 }
 
 /**
@@ -74,13 +122,14 @@ static inline void rcu_read_unlock_trace(void)
         struct srcu_ctr __percpu *scp;
         struct task_struct *t = current;
 
-        smp_mb(); // Placeholder for more selective ordering
         scp = t->trc_reader_scp;
         barrier(); // scp before nesting to protect against interrupt handler.
-        if (!--t->trc_reader_nesting)
-                srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
-        else
-                srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
+        if (!--t->trc_reader_nesting) {
+                if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+                        smp_mb(); // Placeholder for more selective ordering
+                __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+        }
+        srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
 }
 
 /**
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index c381a3130116..762299291e09 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -142,6 +142,29 @@ config TASKS_TRACE_RCU
         default n
         select IRQ_WORK
 
+config TASKS_TRACE_RCU_NO_MB
+        bool "Override RCU Tasks Trace inclusion of read-side memory barriers"
+        depends on RCU_EXPERT && TASKS_TRACE_RCU
+        default ARCH_WANTS_NO_INSTR
+        help
+          This option prevents the use of read-side memory barriers in
+          rcu_read_lock_tasks_trace() and rcu_read_unlock_tasks_trace()
+          even in kernels built with CONFIG_ARCH_WANTS_NO_INSTR=n, that is,
+          in kernels that do not have noinstr set up in entry/exit code.
+          By setting this option, you are promising to carefully review
+          use of ftrace, BPF, and friends to ensure that no tracing
+          operation is attached to a function that runs in that portion
+          of the entry/exit code that RCU does not watch, that is,
+          where rcu_is_watching() returns false. Alternatively, you
+          might choose to never remove traces except by rebooting.
+
+          Those wishing to disable read-side memory barriers for an entire
+          architecture can select this Kconfig option, hence the polarity.
+
+          Say Y here if you need speed and will review use of tracing.
+          Say N here for certain esoteric testing of RCU itself.
+          Take the default if you are unsure.
+
 config RCU_STALL_COMMON
         def_bool TREE_RCU
         help
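Note that unlike rcu_read_lock_trace(), the new primitives neither touch
the task_struct structure nor nest implicitly: each lock returns an
srcu_ctr cookie that must be passed to the matching unlock, in the style
of srcu_read_lock_fast(). A minimal sketch of a caller (do_trace_thing()
is a hypothetical stand-in, not an API from this series):

        static int traced_helper(void)
        {
                struct srcu_ctr __percpu *scp;
                int ret;

                scp = rcu_read_lock_tasks_trace();
                ret = do_trace_thing();         // Read-side critical section.
                rcu_read_unlock_tasks_trace(scp);
                return ret;
        }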
- t->trc_reader_scp = srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct); - smp_mb(); // Placeholder for more selective ordering + t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct); + if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB)) + smp_mb(); // Placeholder for more selective ordering } /** @@ -74,13 +122,14 @@ static inline void rcu_read_unlock_trace(void) struct srcu_ctr __percpu *scp; struct task_struct *t = current; - smp_mb(); // Placeholder for more selective ordering scp = t->trc_reader_scp; barrier(); // scp before nesting to protect against interrupt handler. - if (!--t->trc_reader_nesting) - srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp); - else - srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map); + if (!--t->trc_reader_nesting) { + if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB)) + smp_mb(); // Placeholder for more selective ordering + __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp); + } + srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map); } /** diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index c381a3130116..762299291e09 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -142,6 +142,29 @@ config TASKS_TRACE_RCU default n select IRQ_WORK +config TASKS_TRACE_RCU_NO_MB + bool "Override RCU Tasks Trace inclusion of read-side memory barriers" + depends on RCU_EXPERT && TASKS_TRACE_RCU + default ARCH_WANTS_NO_INSTR + help + This option prevents the use of read-side memory barriers in + rcu_read_lock_tasks_trace() and rcu_read_unlock_tasks_trace() + even in kernels built with CONFIG_ARCH_WANTS_NO_INSTR=n, that is, + in kernels that do not have noinstr set up in entry/exit code. + By setting this option, you are promising to carefully review + use of ftrace, BPF, and friends to ensure that no tracing + operation is attached to a function that runs in that portion + of the entry/exit code that RCU does not watch, that is, + where rcu_is_watching() returns false. Alternatively, you + might choose to never remove traces except by rebooting. + + Those wishing to disable read-side memory barriers for an entire + architecture can select this Kconfig option, hence the polarity. + + Say Y here if you need speed and will review use of tracing. + Say N here for certain esoteric testing of RCU itself. + Take the default if you are unsure. + config RCU_STALL_COMMON def_bool TREE_RCU help From c0872be261f490d02c157051373a31f2df60f64e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:11:01 -0800 Subject: [PATCH 06/25] rcu: Update Requirements.rst for RCU Tasks Trace This commit updates the documentation to declare that RCU Tasks Trace is implemented as a thin wrapper around SRCU-fast. Signed-off-by: Paul E. McKenney Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- .../RCU/Design/Requirements/Requirements.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index ba417a08b93d..b5cdbba3ec2e 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -2780,12 +2780,12 @@ Tasks Trace RCU ~~~~~~~~~~~~~~~ Some forms of tracing need to sleep in readers, but cannot tolerate -SRCU's read-side overhead, which includes a full memory barrier in both -srcu_read_lock() and srcu_read_unlock(). 
This need is handled by a -Tasks Trace RCU that uses scheduler locking and IPIs to synchronize with -readers. Real-time systems that cannot tolerate IPIs may build their -kernels with ``CONFIG_TASKS_TRACE_RCU_READ_MB=y``, which avoids the IPIs at -the expense of adding full memory barriers to the read-side primitives. +SRCU's read-side overhead, which includes a full memory barrier in +both srcu_read_lock() and srcu_read_unlock(). This need is handled by +a Tasks Trace RCU API implemented as thin wrappers around SRCU-fast, +which avoids the read-side memory barriers, at least for architectures +that apply noinstr to kernel entry/exit code (or that build with +``CONFIG_TASKS_TRACE_RCU_NO_MB=y``). The tasks-trace-RCU API is also reasonably compact, consisting of rcu_read_lock_trace(), rcu_read_unlock_trace(), From e55c2e287174280ddef47ad58e83567a88ece39d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:11:02 -0800 Subject: [PATCH 07/25] checkpatch: Deprecate rcu_read_{,un}lock_trace() Uses of rcu_read_lock_trace() and rcu_read_unlock_trace() are better served by the new rcu_read_lock_tasks_trace() and rcu_read_unlock_tasks_trace() APIs. Therefore, mark the old APIs as deprecated. Signed-off-by: Paul E. McKenney Acked-by: Joe Perches Cc: Andy Whitcroft Cc: Dwaipayan Ray Cc: Lukas Bulwahn Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- scripts/checkpatch.pl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c0250244cf7a..362a8d1cd327 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -863,7 +863,9 @@ our %deprecated_apis = ( #These should be enough to drive away new IDR users "DEFINE_IDR" => "DEFINE_XARRAY", "idr_init" => "xa_init", - "idr_init_base" => "xa_init_flags" + "idr_init_base" => "xa_init_flags", + "rcu_read_lock_trace" => "rcu_read_lock_tasks_trace", + "rcu_read_unlock_trace" => "rcu_read_unlock_tasks_trace", ); #Create a search pattern for all these strings to speed up a loop below From a525ccd4d3e91ff123960e44a8892fb76c8217ea Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:11:03 -0800 Subject: [PATCH 08/25] srcu: Create an rcu_tasks_trace_expedite_current() function This commit creates an rcu_tasks_trace_expedite_current() function that expedites the current (and possibly the next) RCU Tasks Trace grace period. If the current RCU Tasks Trace grace period is already waiting, that wait will complete before the expediting takes effect. If there is no RCU Tasks Trace grace period in flight, this function might well create one. Signed-off-by: Paul E. McKenney Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- include/linux/rcupdate_trace.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index f47ba9c07460..cee89e51e45c 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -184,6 +184,20 @@ static inline void rcu_barrier_tasks_trace(void) srcu_barrier(&rcu_tasks_trace_srcu_struct); } +/** + * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period + * + * Cause the current Tasks Trace RCU grace period to become expedited. + * The grace period following the current one might also be expedited.
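// A minimal usage sketch, assuming a hypothetical caller that has just
// queued a Tasks Trace RCU callback and wants it invoked sooner:
//
//	call_rcu_tasks_trace(&p->rhp, my_free_cb);	/* hypothetical names */
//	rcu_tasks_trace_expedite_current();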
+ * If there is no current grace period, one might be created. If the + * current grace period is currently sleeping, that sleep will complete + * before expediting will take effect. + */ +static inline void rcu_tasks_trace_expedite_current(void) +{ + srcu_expedite_current(&rcu_tasks_trace_srcu_struct); +} + // Placeholders to enable stepwise transition. void __init rcu_tasks_trace_suppress_unused(void); From 760f05bc830d86667c07af6c80dc58d599061a67 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:11:04 -0800 Subject: [PATCH 09/25] rcutorture: Test rcu_tasks_trace_expedite_current() This commit adds a ->exp_current member to the tasks_tracing_ops structure to test the rcu_tasks_trace_expedite_current() function. [ paulmck: Apply kernel test robot feedback. ] Signed-off-by: Paul E. McKenney Cc: Andrii Nakryiko Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Reviewed-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/rcutorture.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 78a6ebe77d35..d00b043823ae 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1178,6 +1178,7 @@ static struct rcu_torture_ops tasks_tracing_ops = { .deferred_free = rcu_tasks_tracing_torture_deferred_free, .sync = synchronize_rcu_tasks_trace, .exp_sync = synchronize_rcu_tasks_trace, + .exp_current = rcu_tasks_trace_expedite_current, .call = call_rcu_tasks_trace, .cb_barrier = rcu_barrier_tasks_trace, .cbflood_max = 50000, From e8a534a6718c3ac99823a61230139555672112a1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:53 -0800 Subject: [PATCH 10/25] rcutorture: Add context checks to rcu_torture_timer() This commit adds irq, NMI, and softirq context checks to the rcu_torture_timer() function. Just because you are paranoid does not mean that they are not out to get you... ;-) Signed-off-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcutorture.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 07e51974b06b..d24b5c199e3b 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2455,6 +2455,9 @@ static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); */ static void rcu_torture_timer(struct timer_list *unused) { + WARN_ON_ONCE(!in_serving_softirq()); + WARN_ON_ONCE(in_hardirq()); + WARN_ON_ONCE(in_nmi()); atomic_long_inc(&n_rcu_torture_timers); (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); From 3ce40539cc0055b2df49303979c5b6a4a8321be4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:54 -0800 Subject: [PATCH 11/25] torture: Parallelize kvm-series.sh guest-OS execution Currently, kvm-series.sh builds and runs serially, which makes for long execution times. This commit changes its logic to build all of the needed kernels serially, but then run the corresponding guest OSes concurrently in batches using the entire machine. On large systems, this results in order-of-magnitude speedups of the guest-OS execution portion of the runtime. Signed-off-by: Paul E. 
McKenney Signed-off-by: Boqun Feng --- .../selftests/rcutorture/bin/kvm-series.sh | 174 +++++++++++++++--- 1 file changed, 153 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-series.sh b/tools/testing/selftests/rcutorture/bin/kvm-series.sh index 2ff905a1853b..6729687861f2 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-series.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-series.sh @@ -15,7 +15,7 @@ # This script is intended to replace kvm-check-branches.sh by providing # ease of use and faster execution. -T="`mktemp -d ${TMPDIR-/tmp}/kvm-series.sh.XXXXXX`" +T="`mktemp -d ${TMPDIR-/tmp}/kvm-series.sh.XXXXXX`"; export T trap 'rm -rf $T' 0 scriptname=$0 @@ -53,40 +53,62 @@ shift RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE PATH=${RCUTORTURE}/bin:$PATH; export PATH +RES="${RCUTORTURE}/res"; export RES . functions.sh ret=0 -nfail=0 +nbuildfail=0 +nrunfail=0 nsuccess=0 -faillist= +ncpus=0 +buildfaillist= +runfaillist= successlist= cursha1="`git rev-parse --abbrev-ref HEAD`" ds="`date +%Y.%m.%d-%H.%M.%S`-series" +DS="${RES}/${ds}"; export DS startdate="`date`" starttime="`get_starttime`" echo " --- " $scriptname $args | tee -a $T/log echo " --- Results directory: " $ds | tee -a $T/log +# Do all builds. Iterate through commits within a given scenario +# because builds normally go faster from one commit to the next within a +# given scenario. In contrast, switching scenarios on each rebuild will +# often force a full rebuild due to Kconfig differences, for example, +# turning preemption on and off. Defer actual runs in order to run +# lots of them concurrently on large systems. +touch $T/torunlist for config in ${config_list} do sha_n=0 for sha in ${sha1_list} do sha1=${sha_n}.${sha} # Enable "sort -k1nr" to list commits in order. + echo echo Starting ${config}/${sha1} at `date` | tee -a $T/log - git checkout "${sha}" - time tools/testing/selftests/rcutorture/bin/kvm.sh --configs "$config" --datestamp "$ds/${config}/${sha1}" --duration 1 "$@" + git checkout --detach "${sha}" + tools/testing/selftests/rcutorture/bin/kvm.sh --configs "$config" --datestamp "$ds/${config}/${sha1}" --duration 1 --build-only --trust-make "$@" curret=$? if test "${curret}" -ne 0 then - nfail=$((nfail+1)) - faillist="$faillist ${config}/${sha1}(${curret})" + nbuildfail=$((nbuildfail+1)) + buildfaillist="$buildfaillist ${config}/${sha1}(${curret})" else - nsuccess=$((nsuccess+1)) - successlist="$successlist ${config}/${sha1}" - # Successful run, so remove large files. - rm -f ${RCUTORTURE}/$ds/${config}/${sha1}/{vmlinux,bzImage,System.map,Module.symvers} + batchncpus="`grep -v "^# cpus=" "${DS}/${config}/${sha1}/batches" | awk '{ sum += $3 } END { print sum }'`" + echo run_one_qemu ${sha_n} ${config}/${sha1} ${batchncpus} >> $T/torunlist + if test "${ncpus}" -eq 0 + then + ncpus="`grep "^# cpus=" "${DS}/${config}/${sha1}/batches" | sed -e 's/^# cpus=//'`" + case "${ncpus}" in + ^[0-9]*$) + ;; + *) + ncpus=0 + ;; + esac + fi fi if test "${ret}" -eq 0 then @@ -95,22 +117,132 @@ do sha_n=$((sha_n+1)) done done + +# If the user did not specify the number of CPUs, use them all. +if test "${ncpus}" -eq 0 +then + ncpus="`identify_qemu_vcpus`" +fi + +cpusused=0 +touch $T/successlistfile +touch $T/faillistfile + +# do_run_one_qemu ds resultsdir qemu_curout +# +# Start the specified qemu run and record its success or failure. 
+do_run_one_qemu () { + local ret + local ds="$1" + local resultsdir="$2" + local qemu_curout="$3" + + tools/testing/selftests/rcutorture/bin/kvm-again.sh "${DS}/${resultsdir}" --link inplace-force > ${qemu_curout} 2>&1 + ret=$? + if test "${ret}" -eq 0 + then + echo ${resultsdir} >> $T/successlistfile + # Successful run, so remove large files. + rm -f ${DS}/${resultsdir}/{vmlinux,bzImage,System.map,Module.symvers} + else + echo "${resultsdir}(${ret})" >> $T/faillistfile + fi +} + +# cleanup_qemu_batch batchncpus +# +# Update success and failure lists, files, and counts at the end of +# a batch. +cleanup_qemu_batch () { + local batchncpus="$1" + + echo Waiting, cpusused=${cpusused}, ncpus=${ncpus} `date` | tee -a $T/log + wait + cpusused="${batchncpus}" + nsuccessbatch="`wc -l $T/successlistfile | awk '{ print $1 }'`" + nsuccess=$((nsuccess+nsuccessbatch)) + successlist="$successlist `cat $T/successlistfile`" + rm $T/successlistfile + touch $T/successlistfile + nfailbatch="`wc -l $T/faillistfile | awk '{ print $1 }'`" + nrunfail=$((nrunfail+nfailbatch)) + runfaillist="$runfaillist `cat $T/faillistfile`" + rm $T/faillistfile + touch $T/faillistfile +} + +# run_one_qemu sha_n config/sha1 batchncpus +# +# Launch into the background the sha_n-th qemu job whose results directory +# is config/sha1 and which uses batchncpus CPUs. Once we reach a job that +# would overflow the number of available CPUs, wait for the previous jobs +# to complete and record their results. +run_one_qemu () { + local sha_n="$1" + local config_sha1="$2" + local batchncpus="$3" + local qemu_curout + + cpusused=$((cpusused+batchncpus)) + if test "${cpusused}" -gt $ncpus + then + cleanup_qemu_batch "${batchncpus}" + fi + echo Starting ${config_sha1} using ${batchncpus} CPUs `date` + qemu_curout="${DS}/${config_sha1}/qemu-series" + do_run_one_qemu "$ds" "${config_sha1}" ${qemu_curout} & +} + +# Re-ordering the runs will mess up the affinity chosen at build time +# (among other things, over-using CPU 0), so suppress it. +TORTURE_NO_AFFINITY="no-affinity"; export TORTURE_NO_AFFINITY + +# Run the kernels (if any) that built correctly. +echo | tee -a $T/log # Put a blank line between build and run messages. +. $T/torunlist +cleanup_qemu_batch "${batchncpus}" + +# Get back to initial checkout/SHA-1. git checkout "${cursha1}" -echo ${nsuccess} SUCCESSES: | tee -a $T/log -echo ${successlist} | fmt | tee -a $T/log -echo | tee -a $T/log -echo ${nfail} FAILURES: | tee -a $T/log -echo ${faillist} | fmt | tee -a $T/log -if test -n "${faillist}" +# Throw away leading and trailing space characters for fmt. +successlist="`echo ${successlist} | sed -e 's/^ *//' -e 's/ *$//'`" +buildfaillist="`echo ${buildfaillist} | sed -e 's/^ *//' -e 's/ *$//'`" +runfaillist="`echo ${runfaillist} | sed -e 's/^ *//' -e 's/ *$//'`" + +# Print lists of successes, build failures, and run failures, if any. +if test "${nsuccess}" -gt 0 then echo | tee -a $T/log - echo Failures across commits: | tee -a $T/log - echo ${faillist} | tr ' ' '\012' | sed -e 's,^[^/]*/,,' -e 's/([0-9]*)//' | + echo ${nsuccess} SUCCESSES: | tee -a $T/log + echo ${successlist} | fmt | tee -a $T/log +fi +if test "${nbuildfail}" -gt 0 +then + echo | tee -a $T/log + echo ${nbuildfail} BUILD FAILURES: | tee -a $T/log + echo ${buildfaillist} | fmt | tee -a $T/log +fi +if test "${nrunfail}" -gt 0 +then + echo | tee -a $T/log + echo ${nrunfail} RUN FAILURES: | tee -a $T/log + echo ${runfaillist} | fmt | tee -a $T/log +fi + +# If there were build or runtime failures, map them to commits. 
+if test "${nbuildfail}" -gt 0 || test "${nrunfail}" -gt 0 +then + echo | tee -a $T/log + echo Build failures across commits: | tee -a $T/log + echo ${buildfaillist} | tr ' ' '\012' | sed -e 's,^[^/]*/,,' -e 's/([0-9]*)//' | sort | uniq -c | sort -k2n | tee -a $T/log fi + +# Print run summary. +echo | tee -a $T/log echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log -echo Summary: Successes: ${nsuccess} Failures: ${nfail} | tee -a $T/log -cp $T/log tools/testing/selftests/rcutorture/res/${ds} +echo Summary: Successes: ${nsuccess} " "Build Failures: ${nbuildfail} " "Runtime Failures: ${nrunfail}| tee -a $T/log +cp $T/log ${DS} exit "${ret}" From 672621773f7df8cda2ff5635edac4aa5339d097f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:55 -0800 Subject: [PATCH 12/25] torture: Make kvm-series.sh give build numbers and totals The kvm-series.sh script can easily be convinced to do on the order of 1,000 builds, so some sort of progress indicator would be helpful. This commit therefore updates the "Starting" output lines to read as in the following example, adding the ("2 of 4"): Starting TREE01/1.7e0ad1b49057 (2 of 4) at Sat Nov 8 10:08:21 PM PST 2025 Signed-off-by: Paul E. McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/bin/kvm-series.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-series.sh b/tools/testing/selftests/rcutorture/bin/kvm-series.sh index 6729687861f2..a00d2e96f6cc 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-series.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-series.sh @@ -32,6 +32,7 @@ then echo "$0: Repetition ('*') not allowed in config list." exit 1 fi +config_list_len="`echo ${config_list} | wc -w | awk '{ print $1; }'`" commit_list="${2}" if test -z "${commit_list}" @@ -47,6 +48,7 @@ then exit 2 fi sha1_list=`cat $T/commits` +sha1_list_len="`echo ${sha1_list} | wc -w | awk '{ print $1; }'`" shift shift @@ -80,6 +82,8 @@ echo " --- Results directory: " $ds | tee -a $T/log # turning preemption on and off. Defer actual runs in order to run # lots of them concurrently on large systems. touch $T/torunlist +n2build="$((config_list_len*sha1_list_len))" +nbuilt=0 for config in ${config_list} do sha_n=0 @@ -87,7 +91,7 @@ do do sha1=${sha_n}.${sha} # Enable "sort -k1nr" to list commits in order. echo - echo Starting ${config}/${sha1} at `date` | tee -a $T/log + echo Starting ${config}/${sha1} "($((nbuilt+1)) of ${n2build})" at `date` | tee -a $T/log git checkout --detach "${sha}" tools/testing/selftests/rcutorture/bin/kvm.sh --configs "$config" --datestamp "$ds/${config}/${sha1}" --duration 1 --build-only --trust-make "$@" curret=$? @@ -115,6 +119,7 @@ do ret=${curret} fi sha_n=$((sha_n+1)) + nbuilt=$((nbuilt+1)) done done From 3d69b6beb8ba932911096138394b5cd3e21b3b92 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:56 -0800 Subject: [PATCH 13/25] torture: Make kvm-series.sh give run numbers and totals The kvm-series.sh script can easily be convinced to run on the order of 1,000 guest OSes, so some sort of progress indicator would be helpful. This commit therefore updates the "Starting" output lines to read as in the following example, adding the ("3 of 4"): Starting TREE02/1.7e0ad1b49057 using 8 CPUs (4 of 4) Sat Nov 8 10:51:06 PM PST 2025 Signed-off-by: Paul E. 
McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/bin/kvm-series.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-series.sh b/tools/testing/selftests/rcutorture/bin/kvm-series.sh index a00d2e96f6cc..c4ee5f910931 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-series.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-series.sh @@ -132,6 +132,8 @@ fi cpusused=0 touch $T/successlistfile touch $T/faillistfile +n2run="`wc -l $T/torunlist | awk '{ print $1; }'`" +nrun=0 # do_run_one_qemu ds resultsdir qemu_curout # @@ -193,9 +195,10 @@ run_one_qemu () { then cleanup_qemu_batch "${batchncpus}" fi - echo Starting ${config_sha1} using ${batchncpus} CPUs `date` + echo Starting ${config_sha1} using ${batchncpus} CPUs "($((nrun+1)) of ${n2run})" `date` qemu_curout="${DS}/${config_sha1}/qemu-series" do_run_one_qemu "$ds" "${config_sha1}" ${qemu_curout} & + nrun="$((nrun+1))" } # Re-ordering the runs will mess up the affinity chosen at build time From dcd6067322ba6750995fbc5486d7c9ada88489ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:57 -0800 Subject: [PATCH 14/25] torture: Make config2csv.sh properly handle comments in .boot files As in strip the "#" and everything after it and *then* tokenize. Signed-off-by: Paul E. McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/bin/config2csv.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/config2csv.sh b/tools/testing/selftests/rcutorture/bin/config2csv.sh index 0cf55f1bf654..aeab4d6f11ad 100755 --- a/tools/testing/selftests/rcutorture/bin/config2csv.sh +++ b/tools/testing/selftests/rcutorture/bin/config2csv.sh @@ -42,7 +42,7 @@ do grep -v '^#' < $i | grep -v '^ *$' > $T/p if test -r $i.boot then - tr -s ' ' '\012' < $i.boot | grep -v '^#' >> $T/p + sed -e 's/#.*$//' < $i.boot | tr -s ' ' '\012' >> $T/p fi sed -e 's/^[^=]*$/&=?/' < $T/p | sed -e 's/^\([^=]*\)=\(.*\)$/\tp["\1:'"$i"'"] = "\2";\n\tc["\1"] = 1;/' >> $T/p.awk From c89474b9b2ab8ab2c0d2cddadbed781c0f5e8f0c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:13:58 -0800 Subject: [PATCH 15/25] torture: Include commit description in testid.txt Currently, the testid.txt file in the top-level directory of the rcutorture results contains the output of "git rev-parse HEAD", which just gives the full SHA-1 of the current commit. This is followed by the output of "git status", which is further followed by the output of "git diff". This works, but is less than helpful to human readers scanning a list of commits. This commit therefore instead uses "git show --oneline --no-patch HEAD", which provides not only a short SHA-1 but also the names of any branches and the commit's title. Signed-off-by: Paul E.
McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/bin/mktestid.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/mktestid.sh b/tools/testing/selftests/rcutorture/bin/mktestid.sh index 16f9907a4dae..24f6261dab6a 100755 --- a/tools/testing/selftests/rcutorture/bin/mktestid.sh +++ b/tools/testing/selftests/rcutorture/bin/mktestid.sh @@ -18,7 +18,7 @@ fi echo Build directory: `pwd` > ${resdir}/testid.txt if test -d .git then - echo Current commit: `git rev-parse HEAD` >> ${resdir}/testid.txt + echo Current commit: `git show --oneline --no-patch HEAD` >> ${resdir}/testid.txt echo >> ${resdir}/testid.txt echo ' ---' Output of "'"git status"'": >> ${resdir}/testid.txt git status >> ${resdir}/testid.txt From a590a79d19046d3e0f2089f83046f3b87f880359 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Thu, 1 Jan 2026 11:34:16 -0500 Subject: [PATCH 16/25] rcutorture: Prevent concurrent kvm.sh runs on same source tree Add flock-based locking to kvm.sh to prevent multiple instances from running concurrently on the same source tree. This prevents build failures caused by one instance's "make clean" deleting generated files while another instance is still building. The lock file is placed in the rcutorture directory and added to .gitignore. Signed-off-by: Joel Fernandes Tested-by: Paul E. McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/.gitignore | 1 + tools/testing/selftests/rcutorture/bin/kvm.sh | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore index f6cbce77460b..b8fd42547a6e 100644 --- a/tools/testing/selftests/rcutorture/.gitignore +++ b/tools/testing/selftests/rcutorture/.gitignore @@ -3,3 +3,4 @@ initrd b[0-9]* res *.swp +.kvm.sh.lock diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index fff15821c44c..d1fbd092e22a 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -275,6 +275,23 @@ do shift done +# Prevent concurrent kvm.sh runs on the same source tree. The flock +# is automatically released when the script exits, even if killed. +TORTURE_LOCK="$RCUTORTURE/.kvm.sh.lock" +if test -z "$dryrun" +then + # Create a file descriptor and flock it, so that when kvm.sh (and its + # children) exit, the flock is released by the kernel automatically. + exec 9>"$TORTURE_LOCK" + if ! flock -n 9 + then + echo "ERROR: Another kvm.sh instance is already running on this tree." + echo " Lock file: $TORTURE_LOCK" + echo " To run kvm.sh, kill all existing kvm.sh runs first." + exit 1 + fi +fi + if test -n "$dryrun" || test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh then : From cf587c6ff2d09866eb53a4620bc1aa561fb0c000 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Thu, 1 Jan 2026 11:34:17 -0500 Subject: [PATCH 17/25] rcutorture: Add --kill-previous option to terminate previous kvm.sh runs When kvm.sh is killed, its child processes (make, gcc, qemu, etc.) may continue running. This prevents new kvm.sh instances from starting even though the parent is gone. Add a --kill-previous option that uses fuser(1) to terminate all processes holding the flock file before attempting to acquire it. This provides a clean way to recover from stale or zombie kvm.sh runs that may have left behind large numbers of qemu and compiler processes that would otherwise disturb subsequent runs.
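Both of these locking changes rely on a property of flock(2): the lock belongs to the open file description, so the kernel drops it automatically when the last file descriptor referring to it goes away, including when the holder is killed. A minimal C sketch of the same pattern (the lock-file path is made up for illustration, and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>

int main(void)
{
	/* Open or create the lock file, as kvm.sh's "exec 9>" does. */
	int fd = open("/tmp/example.kvm.lock", O_WRONLY | O_CREAT, 0644);

	if (fd < 0 || flock(fd, LOCK_EX | LOCK_NB) != 0) {
		fprintf(stderr, "another instance already holds the lock\n");
		exit(1);
	}
	/* Long-running work goes here; the flock vanishes on exit. */
	return 0;
}

LOCK_NB makes acquisition non-blocking, matching the script's "flock -n 9".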
Signed-off-by: Joel Fernandes Tested-by: Paul E. McKenney Signed-off-by: Boqun Feng --- tools/testing/selftests/rcutorture/bin/kvm.sh | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index d1fbd092e22a..65b04b832733 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -80,6 +80,7 @@ usage () { echo " --kasan" echo " --kconfig Kconfig-options" echo " --kcsan" + echo " --kill-previous" echo " --kmake-arg kernel-make-arguments" echo " --mac nn:nn:nn:nn:nn:nn" echo " --memory megabytes|nnnG" @@ -206,6 +207,9 @@ do --kcsan) TORTURE_KCONFIG_KCSAN_ARG="$debuginfo CONFIG_KCSAN=y CONFIG_KCSAN_STRICT=y CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG ;; + --kill-previous) + TORTURE_KILL_PREVIOUS=1 + ;; --kmake-arg|--kmake-args) checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$' TORTURE_KMAKE_ARG="`echo "$TORTURE_KMAKE_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`" @@ -278,6 +282,25 @@ done # Prevent concurrent kvm.sh runs on the same source tree. The flock # is automatically released when the script exits, even if killed. TORTURE_LOCK="$RCUTORTURE/.kvm.sh.lock" + +# Terminate any processes holding the lock file, if requested. +if test -n "$TORTURE_KILL_PREVIOUS" +then + if test -e "$TORTURE_LOCK" + then + echo "Killing processes holding $TORTURE_LOCK..." + if fuser -k "$TORTURE_LOCK" >/dev/null 2>&1 + then + sleep 2 + echo "Previous kvm.sh processes killed." + else + echo "No processes were holding the lock." + fi + else + echo "No lock file exists, nothing to kill." + fi +fi + if test -z "$dryrun" then # Create a file descriptor and flock it, so that when kvm.sh (and its @@ -287,7 +310,7 @@ then then echo "ERROR: Another kvm.sh instance is already running on this tree." echo " Lock file: $TORTURE_LOCK" - echo " To run kvm.sh, kill all existing kvm.sh runs first." + echo " To run kvm.sh, kill all existing kvm.sh runs first (--kill-previous)." exit 1 fi fi From 255019537cfd63d6adc16a55bcbfd79530d5937e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:16:15 -0800 Subject: [PATCH 18/25] rcu: Make expedited RCU CPU stall warnings detect stall-end races If an expedited RCU CPU stall ends just at the stall-warning timeout, the current code will print an expedited stall-warning message, but one that doesn't identify any CPUs or tasks causing the stall. This is most likely to happen for short-timeout stalls, for example, the 20-millisecond timeouts that are sometimes used for small embedded devices. Needless to say, these semi-empty stall-warning messages can be rather confusing. One option would be to suppress the stall-warning message entirely in this case, but the near-miss information can be quite valuable. This commit therefore detects this race condition and emits an "INFO: Expedited stall ended before state dump start" message to clarify matters. [boqun: Apply feedback from Borislav] Reported-by: Borislav Petkov Signed-off-by: Paul E.
McKenney Acked-by: Borislav Petkov (AMD) Signed-off-by: Boqun Feng --- kernel/rcu/tree_exp.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 96c49c56fc14..82cada459e5d 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -589,7 +589,12 @@ static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigne pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n", j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask), ".T"[!!data_race(rnp_root->exp_tasks)]); - if (ndetected) { + if (!ndetected) { + // This is invoked from the grace-period worker, so + // a new grace period cannot have started. And if this + // worker were stalled, we would not get here. ;-) + pr_err("INFO: Expedited stall ended before state dump start\n"); + } else { pr_err("blocking rcu_node structures (internal RCU debug):"); rcu_for_each_node_breadth_first(rnp) { if (rnp == rnp_root) From 37d9b475077b5096d41ebdc416a9019bd4fcfdb9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Dec 2025 11:16:16 -0800 Subject: [PATCH 19/25] rcutorture: Correctly compute probability to invoke ->exp_current() Lack of parentheses causes the ->exp_current() function, for example, srcu_expedite_current(), to be called only once in four billion times instead of the intended once in 256 times. This commit therefore adds the needed parentheses. Reported-by: Chris Mason Reported-by: Joel Fernandes Fixes: 950063c6e897 ("rcutorture: Test srcu_expedite_current()") Signed-off-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcutorture.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 07e51974b06b..83934402a287 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1750,7 +1750,7 @@ rcu_torture_writer(void *arg) ulo[i] = cur_ops->get_comp_state(); gp_snap = cur_ops->start_gp_poll(); rcu_torture_writer_state = RTWS_POLL_WAIT; - if (cur_ops->exp_current && !torture_random(&rand) % 0xff) + if (cur_ops->exp_current && !(torture_random(&rand) & 0xff)) cur_ops->exp_current(); while (!cur_ops->poll_gp_state(gp_snap)) { gp_snap1 = cur_ops->get_gp_state(); @@ -1772,7 +1772,7 @@ rcu_torture_writer(void *arg) cur_ops->get_comp_state_full(&rgo[i]); cur_ops->start_gp_poll_full(&gp_snap_full); rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; - if (cur_ops->exp_current && !torture_random(&rand) % 0xff) + if (cur_ops->exp_current && !(torture_random(&rand) & 0xff)) cur_ops->exp_current(); while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { cur_ops->get_gp_state_full(&gp_snap1_full); From d41e37f26b3157b3f1d10223863519a943aa239b Mon Sep 17 00:00:00 2001 From: Yao Kai Date: Thu, 1 Jan 2026 11:34:10 -0500 Subject: [PATCH 20/25] rcu: Fix rcu_read_unlock() deadloop due to softirq Commit 5f5fa7ea89dc ("rcu: Don't use negative nesting depth in __rcu_read_unlock()") removes the recursion-protection code from __rcu_read_unlock(). 
Therefore, we could invoke the deadloop in raise_softirq_irqoff() with ftrace enabled as follows: WARNING: CPU: 0 PID: 0 at kernel/trace/trace.c:3021 __ftrace_trace_stack.constprop.0+0x172/0x180 Modules linked in: my_irq_work(O) CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Tainted: G O 6.18.0-rc7-dirty #23 PREEMPT(full) Tainted: [O]=OOT_MODULE Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 RIP: 0010:__ftrace_trace_stack.constprop.0+0x172/0x180 RSP: 0018:ffffc900000034a8 EFLAGS: 00010002 RAX: 0000000000000000 RBX: 0000000000000004 RCX: 0000000000000000 RDX: 0000000000000003 RSI: ffffffff826d7b87 RDI: ffffffff826e9329 RBP: 0000000000090009 R08: 0000000000000005 R09: ffffffff82afbc4c R10: 0000000000000008 R11: 0000000000011d7a R12: 0000000000000000 R13: ffff888003874100 R14: 0000000000000003 R15: ffff8880038c1054 FS: 0000000000000000(0000) GS:ffff8880fa8ea000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055b31fa7f540 CR3: 00000000078f4005 CR4: 0000000000770ef0 PKRU: 55555554 Call Trace: trace_buffer_unlock_commit_regs+0x6d/0x220 trace_event_buffer_commit+0x5c/0x260 trace_event_raw_event_softirq+0x47/0x80 raise_softirq_irqoff+0x6e/0xa0 rcu_read_unlock_special+0xb1/0x160 unwind_next_frame+0x203/0x9b0 __unwind_start+0x15d/0x1c0 arch_stack_walk+0x62/0xf0 stack_trace_save+0x48/0x70 __ftrace_trace_stack.constprop.0+0x144/0x180 trace_buffer_unlock_commit_regs+0x6d/0x220 trace_event_buffer_commit+0x5c/0x260 trace_event_raw_event_softirq+0x47/0x80 raise_softirq_irqoff+0x6e/0xa0 rcu_read_unlock_special+0xb1/0x160 unwind_next_frame+0x203/0x9b0 __unwind_start+0x15d/0x1c0 arch_stack_walk+0x62/0xf0 stack_trace_save+0x48/0x70 __ftrace_trace_stack.constprop.0+0x144/0x180 trace_buffer_unlock_commit_regs+0x6d/0x220 trace_event_buffer_commit+0x5c/0x260 trace_event_raw_event_softirq+0x47/0x80 raise_softirq_irqoff+0x6e/0xa0 rcu_read_unlock_special+0xb1/0x160 unwind_next_frame+0x203/0x9b0 __unwind_start+0x15d/0x1c0 arch_stack_walk+0x62/0xf0 stack_trace_save+0x48/0x70 __ftrace_trace_stack.constprop.0+0x144/0x180 trace_buffer_unlock_commit_regs+0x6d/0x220 trace_event_buffer_commit+0x5c/0x260 trace_event_raw_event_softirq+0x47/0x80 raise_softirq_irqoff+0x6e/0xa0 rcu_read_unlock_special+0xb1/0x160 __is_insn_slot_addr+0x54/0x70 kernel_text_address+0x48/0xc0 __kernel_text_address+0xd/0x40 unwind_get_return_address+0x1e/0x40 arch_stack_walk+0x9c/0xf0 stack_trace_save+0x48/0x70 __ftrace_trace_stack.constprop.0+0x144/0x180 trace_buffer_unlock_commit_regs+0x6d/0x220 trace_event_buffer_commit+0x5c/0x260 trace_event_raw_event_softirq+0x47/0x80 __raise_softirq_irqoff+0x61/0x80 __flush_smp_call_function_queue+0x115/0x420 __sysvec_call_function_single+0x17/0xb0 sysvec_call_function_single+0x8c/0xc0 Commit b41642c87716 ("rcu: Fix rcu_read_unlock() deadloop due to IRQ work") fixed the infinite loop in rcu_read_unlock_special() for IRQ work by setting a flag before calling irq_work_queue_on(). We fix this issue by setting the same flag before calling raise_softirq_irqoff() and rename the flag to defer_qs_pending for more common. Fixes: 5f5fa7ea89dc ("rcu: Don't use negative nesting depth in __rcu_read_unlock()") Reported-by: Tengda Wu Signed-off-by: Yao Kai Reviewed-by: Joel Fernandes Tested-by: Paul E. 
McKenney Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index b8bbe7960cda..2265b9c2906e 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -203,7 +203,7 @@ struct rcu_data { /* during and after the last grace */ /* period it is aware of. */ struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */ - int defer_qs_iw_pending; /* Scheduler attention pending? */ + int defer_qs_pending; /* irqwork or softirq pending? */ struct work_struct strict_work; /* Schedule readers for strict GPs. */ /* 2) batch handling */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index dbe2d02be824..95ad967adcf3 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -487,8 +487,8 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) union rcu_special special; rdp = this_cpu_ptr(&rcu_data); - if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING) - rdp->defer_qs_iw_pending = DEFER_QS_IDLE; + if (rdp->defer_qs_pending == DEFER_QS_PENDING) + rdp->defer_qs_pending = DEFER_QS_IDLE; /* * If RCU core is waiting for this CPU to exit its critical section, @@ -645,7 +645,7 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp) * 5. Deferred QS reporting does not happen. */ if (rcu_preempt_depth() > 0) - WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE); + WRITE_ONCE(rdp->defer_qs_pending, DEFER_QS_IDLE); } /* @@ -747,7 +747,10 @@ static void rcu_read_unlock_special(struct task_struct *t) // Using softirq, safe to awaken, and either the // wakeup is free or there is either an expedited // GP in flight or a potential need to deboost. - raise_softirq_irqoff(RCU_SOFTIRQ); + if (rdp->defer_qs_pending != DEFER_QS_PENDING) { + rdp->defer_qs_pending = DEFER_QS_PENDING; + raise_softirq_irqoff(RCU_SOFTIRQ); + } } else { // Enabling BH or preempt does reschedule, so... // Also if no expediting and no possible deboosting, @@ -755,11 +758,11 @@ static void rcu_read_unlock_special(struct task_struct *t) // tick enabled. set_need_resched_current(); if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && - needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING && + needs_exp && rdp->defer_qs_pending != DEFER_QS_PENDING && cpu_online(rdp->cpu)) { // Get scheduler to re-evaluate and call hooks. // If !IRQ_WORK, FQS scan will eventually IPI. - rdp->defer_qs_iw_pending = DEFER_QS_PENDING; + rdp->defer_qs_pending = DEFER_QS_PENDING; irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); } } From cee2557ae3b19e0cdfa09695a4d6ba420cc1fd41 Mon Sep 17 00:00:00 2001 From: Zqiang Date: Thu, 1 Jan 2026 11:34:11 -0500 Subject: [PATCH 21/25] srcu: Use suitable gfp_flags for the init_srcu_struct_nodes() When a srcu_struct is initialized via init_srcu_struct*(), its ->srcu_sup and ->sda fields are allocated with GFP_KERNEL. Similarly, when SRCU_SIZING_INIT is in effect, the srcu_sup's ->node array can also be allocated with GFP_KERNEL in the non-static case, so there is no need to unconditionally use GFP_ATOMIC. Signed-off-by: Zqiang Reviewed-by: Joel Fernandes Tested-by: Paul E.
McKenney Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/srcutree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index ea3f128de06f..c469c708fdd6 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -262,7 +262,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL; ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns(); if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) { - if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) + if (!init_srcu_struct_nodes(ssp, is_static ? GFP_ATOMIC : GFP_KERNEL)) goto err_free_sda; WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG); } From bc3705e20988778791a4a5e9e2700fbc22cc942d Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Thu, 1 Jan 2026 11:34:15 -0500 Subject: [PATCH 22/25] rcu: Reduce synchronize_rcu() latency by reporting GP kthread's CPU QS early The RCU grace period mechanism uses a two-phase FQS (Force Quiescent State) design where the first FQS saves dyntick-idle snapshots and the second FQS compares them. This results in long and unnecessary latency for synchronize_rcu() on idle systems (two FQS waits of ~3ms each with 1000HZ) whenever one FQS wait sufficed. Some investigations showed that the GP kthread's CPU is the holdout CPU a lot of times after the first FQS as - it cannot be detected as "idle" because it's actively running the FQS scan in the GP kthread. Therefore, at the end of rcu_gp_init(), immediately report a quiescent state for the GP kthread's CPU using rcu_qs() + rcu_report_qs_rdp(). The GP kthread cannot be in an RCU read-side critical section while running GP initialization, so this is safe and results in significant latency improvements. 
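The numbers below were gathered by timing back-to-back synchronize_rcu() calls. A minimal sketch of such a measurement loop, as it might appear in a throwaway benchmarking module (the module and function names here are hypothetical, not part of this series):

#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void bench_synchronize_rcu(int iters)
{
	int i;

	for (i = 0; i < iters; i++) {
		ktime_t t0 = ktime_get();

		synchronize_rcu();	/* Wait for a full grace period. */
		pr_info("synchronize_rcu: %lld ns\n",
			ktime_to_ns(ktime_sub(ktime_get(), t0)));
	}
}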
The following tests were performed:

(1) synchronize_rcu() benchmarking 100 synchronize_rcu() calls with 32 CPUs, 10 runs each (default fqs jiffies settings):

Baseline (without fix):

| Run | Mean      | Min      | Max       |
|-----|-----------|----------|-----------|
| 1   | 10.088 ms | 9.989 ms | 18.848 ms |
| 2   | 10.064 ms | 9.982 ms | 16.470 ms |
| 3   | 10.051 ms | 9.988 ms | 15.113 ms |
| 4   | 10.125 ms | 9.929 ms | 22.411 ms |
| 5   | 8.695 ms  | 5.996 ms | 15.471 ms |
| 6   | 10.157 ms | 9.977 ms | 25.723 ms |
| 7   | 10.102 ms | 9.990 ms | 20.224 ms |
| 8   | 8.050 ms  | 5.985 ms | 10.007 ms |
| 9   | 10.059 ms | 9.978 ms | 15.934 ms |
| 10  | 10.077 ms | 9.984 ms | 17.703 ms |

With fix:

| Run | Mean     | Min      | Max       |
|-----|----------|----------|-----------|
| 1   | 6.027 ms | 5.915 ms | 8.589 ms  |
| 2   | 6.032 ms | 5.984 ms | 9.241 ms  |
| 3   | 6.010 ms | 5.986 ms | 7.004 ms  |
| 4   | 6.076 ms | 5.993 ms | 10.001 ms |
| 5   | 6.084 ms | 5.893 ms | 10.250 ms |
| 6   | 6.034 ms | 5.908 ms | 9.456 ms  |
| 7   | 6.051 ms | 5.993 ms | 10.000 ms |
| 8   | 6.057 ms | 5.941 ms | 10.001 ms |
| 9   | 6.016 ms | 5.927 ms | 7.540 ms  |
| 10  | 6.036 ms | 5.993 ms | 9.579 ms  |

Summary:
- Mean latency: 9.75 ms -> 6.04 ms (38% improvement)
- Max latency: 25.72 ms -> 10.25 ms (60% improvement)

(2) Bridge setup/teardown latency (Uladzislau Rezki) x86_64 with 64 CPUs, 100 iterations of bridge add/configure/delete:

real time:
1 - default: 24.221s
2 - this patch: 20.754s (14% faster)
3 - this patch + wake_from_gp: 15.895s (34% faster)
4 - wake_from_gp only: 18.947s (22% faster)

Per-synchronize_rcu() latency (in usec):

        1        2        3      4
median: 37249.5  31540.5  15765  22480
min:    7881     7918     9803   7857
max:    63651    55639    31861  32040

This patch combined with rcu_normal_wake_from_gp reduces bridge setup/teardown time from 24 seconds to 16 seconds.

(3) CPU overhead verification (Uladzislau Rezki) System CPU time across 5 runs showed no measurable increase:
default: 1.698s - 1.937s
this patch: 1.667s - 1.930s
Conclusion: variations are within noise, no CPU overhead regression.

(4) rcutorture Tested TREE and SRCU configurations - no regressions.

Reviewed-by: "Paul E. McKenney" Tested-by: Uladzislau Rezki (Sony) Tested-by: Paul E. McKenney Tested-by: Samir M Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 293bbd9ac3f4..2c1c9759e278 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -160,6 +160,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, unsigned long gps, unsigned long flags); static void invoke_rcu_core(void); static void rcu_report_exp_rdp(struct rcu_data *rdp); +static void rcu_report_qs_rdp(struct rcu_data *rdp); static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp); static bool rcu_rdp_is_offloaded(struct rcu_data *rdp); static bool rcu_rdp_cpu_online(struct rcu_data *rdp); @@ -1983,6 +1984,17 @@ static noinline_for_stack bool rcu_gp_init(void) if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) on_each_cpu(rcu_strict_gp_boundary, NULL, 0); + /* + * Immediately report QS for the GP kthread's CPU. The GP kthread + * cannot be in an RCU read-side critical section while running + * the FQS scan. This eliminates the need for a second FQS wait + * when all CPUs are idle.
+ */ + preempt_disable(); + rcu_qs(); + rcu_report_qs_rdp(this_cpu_ptr(&rcu_data)); + preempt_enable(); + return true; } From d92eca60fea944b2e9272603308a0fde8b6ae447 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Fri, 23 Jan 2026 09:30:12 -0500 Subject: [PATCH 23/25] rcu/nocb: Remove unnecessary WakeOvfIsDeferred wake path The WakeOvfIsDeferred code path in __call_rcu_nocb_wake() attempts to wake rcuog when the callback count exceeds qhimark and callbacks aren't done with their GP (newly queued or awaiting GP). However, a lot of testing proves this wake is always redundant or useless. In the flooding case, rcuog is always waiting for a GP to finish. So waking up the rcuog thread is pointless. The timer wakeup adds overhead, rcuog simply wakes up and goes back to sleep achieving nothing. This path also adds a full memory barrier, and additional timer expiry modifications unnecessarily. The root cause is that WakeOvfIsDeferred fires when !rcu_segcblist_ready_cbs() (GP not complete), but waking rcuog cannot accelerate GP completion. This commit therefore removes this path. Tested with rcutorture scenarios: TREE01, TREE05, TREE08 (all NOCB configurations) - all pass. Also stress tested using a kernel module that floods call_rcu() to trigger the overload conditions and made the observations confirming the findings. Reviewed-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 3 +-- kernel/rcu/tree_nocb.h | 49 ++++++++++++++---------------------------- 3 files changed, 18 insertions(+), 36 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 293bbd9ac3f4..2921ffb19939 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3769,7 +3769,7 @@ static void rcu_barrier_entrain(struct rcu_data *rdp) } rcu_nocb_unlock(rdp); if (wake_nocb) - wake_nocb_gp(rdp, false); + wake_nocb_gp(rdp); smp_store_release(&rdp->barrier_seq_snap, gseq); } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index b8bbe7960cda..9b956293a731 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -301,7 +301,6 @@ struct rcu_data { #define RCU_NOCB_WAKE_BYPASS 1 #define RCU_NOCB_WAKE_LAZY 2 #define RCU_NOCB_WAKE 3 -#define RCU_NOCB_WAKE_FORCE 4 #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500)) /* For jiffies_till_first_fqs and */ @@ -500,7 +499,7 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp); static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); static void rcu_init_one_nocb(struct rcu_node *rnp); -static bool wake_nocb_gp(struct rcu_data *rdp, bool force); +static bool wake_nocb_gp(struct rcu_data *rdp); static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy); static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index e6cd56603cad..f525e4f7985b 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -192,7 +192,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp) static bool __wake_nocb_gp(struct rcu_data *rdp_gp, struct rcu_data *rdp, - bool force, unsigned long flags) + unsigned long flags) __releases(rdp_gp->nocb_gp_lock) { bool needwake = false; @@ -209,7 +209,7 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, timer_delete(&rdp_gp->nocb_timer); } - if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { + if (READ_ONCE(rdp_gp->nocb_gp_sleep)) { 
WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); needwake = true; } @@ -225,13 +225,13 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, /* * Kick the GP kthread for this NOCB group. */ -static bool wake_nocb_gp(struct rcu_data *rdp, bool force) +static bool wake_nocb_gp(struct rcu_data *rdp) { unsigned long flags; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); - return __wake_nocb_gp(rdp_gp, rdp, force, flags); + return __wake_nocb_gp(rdp_gp, rdp, flags); } #ifdef CONFIG_RCU_LAZY @@ -518,10 +518,8 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, } /* - * Awaken the no-CBs grace-period kthread if needed, either due to it - * legitimately being asleep or due to overload conditions. - * - * If warranted, also wake up the kthread servicing this CPUs queues. + * Awaken the no-CBs grace-period kthread if needed due to it legitimately + * being asleep. */ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, unsigned long flags) @@ -533,7 +531,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, long lazy_len; long len; struct task_struct *t; - struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; // If we are being polled or there is no kthread, just leave. t = READ_ONCE(rdp->nocb_gp_kthread); @@ -549,22 +546,22 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, lazy_len = READ_ONCE(rdp->lazy_len); if (was_alldone) { rdp->qlen_last_fqs_check = len; + rcu_nocb_unlock(rdp); // Only lazy CBs in bypass list if (lazy_len && bypass_len == lazy_len) { - rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY, TPS("WakeLazy")); } else if (!irqs_disabled_flags(flags)) { /* ... if queue was empty ... */ - rcu_nocb_unlock(rdp); - wake_nocb_gp(rdp, false); + wake_nocb_gp(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { - rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred")); } + + return; } else if (len > rdp->qlen_last_fqs_check + qhimark) { /* ... or if many callbacks queued. */ rdp->qlen_last_fqs_check = len; @@ -575,21 +572,10 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rcu_advance_cbs_nowake(rdp->mynode, rdp); rdp->nocb_gp_adv_time = j; } - smp_mb(); /* Enqueue before timer_pending(). 
*/ - if ((rdp->nocb_cb_sleep || - !rcu_segcblist_ready_cbs(&rdp->cblist)) && - !timer_pending(&rdp_gp->nocb_timer)) { - rcu_nocb_unlock(rdp); - wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, - TPS("WakeOvfIsDeferred")); - } else { - rcu_nocb_unlock(rdp); - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); - } - } else { - rcu_nocb_unlock(rdp); - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } + + rcu_nocb_unlock(rdp); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, @@ -966,7 +952,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp, unsigned long flags) __releases(rdp_gp->nocb_gp_lock) { - int ndw; int ret; if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) { @@ -974,8 +959,7 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp, return false; } - ndw = rdp_gp->nocb_defer_wakeup; - ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); + ret = __wake_nocb_gp(rdp_gp, rdp, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); return ret; @@ -991,7 +975,6 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t) trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags); - smp_mb__after_spinlock(); /* Timer expire before wakeup. */ do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags); } @@ -1272,7 +1255,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) } rcu_nocb_try_flush_bypass(rdp, jiffies); rcu_nocb_unlock_irqrestore(rdp, flags); - wake_nocb_gp(rdp, false); + wake_nocb_gp(rdp); sc->nr_to_scan -= _count; count += _count; if (sc->nr_to_scan <= 0) @@ -1657,7 +1640,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp) { } -static bool wake_nocb_gp(struct rcu_data *rdp, bool force) +static bool wake_nocb_gp(struct rcu_data *rdp) { return false; } From b11c1efa7ffedbb3e880d31370d2cb37394ef9f4 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Fri, 23 Jan 2026 09:30:22 -0500 Subject: [PATCH 24/25] rcu/nocb: Remove dead callback overload handling During callback overload (exceeding qhimark), the NOCB code attempts opportunistic advancement via rcu_advance_cbs_nowake(). Analysis shows this code path is practically unreachable and serves no useful purpose. Testing with 300,000 callback floods showed: - 30 overload conditions triggered - 0 advancements actually occurred While a theoretical window exists where this code could execute (e.g., vCPU preemption between gp_seq update and rcu_nocb_gp_cleanup()), even if it did, the advancement would be redundant. The rcuog kthread must still run to wake the rcuoc callback thread - we would just be duplicating work that rcuog will perform when it finally gets to run. Since this path provides no meaningful benefit and extensive testing confirms it is never useful, remove it entirely. Reviewed-by: Frederic Weisbecker Reviewed-by: Paul E. 
McKenney Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/tree_nocb.h | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index f525e4f7985b..64a8ff350f92 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -526,8 +526,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, __releases(rdp->nocb_lock) { long bypass_len; - unsigned long cur_gp_seq; - unsigned long j; long lazy_len; long len; struct task_struct *t; @@ -562,16 +560,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, } return; - } else if (len > rdp->qlen_last_fqs_check + qhimark) { - /* ... or if many callbacks queued. */ - rdp->qlen_last_fqs_check = len; - j = jiffies; - if (j != rdp->nocb_gp_adv_time && - rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && - rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { - rcu_advance_cbs_nowake(rdp->mynode, rdp); - rdp->nocb_gp_adv_time = j; - } } rcu_nocb_unlock(rdp); From cc74050f13e5f15de7835b96d633484dd6776f53 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Fri, 23 Jan 2026 09:30:31 -0500 Subject: [PATCH 25/25] rcu/nocb: Extract nocb_defer_wakeup_cancel() helper The pattern of checking nocb_defer_wakeup and deleting the timer is duplicated in __wake_nocb_gp() and nocb_gp_wait(). Extract this into a common helper function nocb_defer_wakeup_cancel(). This removes code duplication and makes it easier to maintain. Reviewed-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Joel Fernandes Signed-off-by: Boqun Feng --- kernel/rcu/tree_nocb.h | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 64a8ff350f92..b3337c7231cc 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -190,6 +190,15 @@ static void rcu_init_one_nocb(struct rcu_node *rnp) init_swait_queue_head(&rnp->nocb_gp_wq[1]); } +/* Clear any pending deferred wakeup timer (nocb_gp_lock must be held). */ +static void nocb_defer_wakeup_cancel(struct rcu_data *rdp_gp) +{ + if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { + WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); + timer_delete(&rdp_gp->nocb_timer); + } +} + static bool __wake_nocb_gp(struct rcu_data *rdp_gp, struct rcu_data *rdp, unsigned long flags) @@ -204,10 +213,7 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, return false; } - if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { - WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); - timer_delete(&rdp_gp->nocb_timer); - } + nocb_defer_wakeup_cancel(rdp_gp); if (READ_ONCE(rdp_gp->nocb_gp_sleep)) { WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); @@ -788,10 +794,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) if (rdp_toggling) my_rdp->nocb_toggling_rdp = NULL; - if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { - WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); - timer_delete(&my_rdp->nocb_timer); - } + nocb_defer_wakeup_cancel(my_rdp); WRITE_ONCE(my_rdp->nocb_gp_sleep, true); raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); } else {
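The new helper documents its "nocb_gp_lock must be held" precondition in a comment; a lockdep-checked variant is also possible, sketched here under the assumption that all callers hold nocb_gp_lock (the assertion is an illustration, not part of the patch):

static void nocb_defer_wakeup_cancel(struct rcu_data *rdp_gp)
{
	lockdep_assert_held(&rdp_gp->nocb_gp_lock);	/* illustrative addition */
	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		timer_delete(&rdp_gp->nocb_timer);
	}
}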