sched/isolation: Convert housekeeping cpumasks to rcu pointers

HK_TYPE_DOMAIN's cpumask will soon be made modifiable by cpuset.
A mechanism is then needed to synchronize those updates with the
housekeeping cpumask readers.

Turn the housekeeping cpumasks into RCU pointers. Once a housekeeping
cpumask is modified, the update side will wait for an RCU grace
period and propagate the change to the interested subsystems when deemed
necessary.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Marco Crivellari <marco.crivellari@suse.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
This commit is contained in:
Frederic Weisbecker 2025-05-27 15:36:03 +02:00
parent a7e546354d
commit 27c3a5967f
2 changed files with 38 additions and 23 deletions

View file

@ -21,7 +21,7 @@ DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
EXPORT_SYMBOL_GPL(housekeeping_overridden);
struct housekeeping {
cpumask_var_t cpumasks[HK_TYPE_MAX];
struct cpumask __rcu *cpumasks[HK_TYPE_MAX];
unsigned long flags;
};
@ -33,17 +33,28 @@ bool housekeeping_enabled(enum hk_type type)
}
EXPORT_SYMBOL_GPL(housekeeping_enabled);
/*
 * housekeeping_cpumask() - return the set of CPUs allowed to run
 * housekeeping work of @type.
 *
 * When isolation was requested on the command line for @type
 * (housekeeping.flags has the matching bit set), return the per-type
 * RCU-managed cpumask; otherwise every possible CPU may do the work
 * and cpu_possible_mask is returned.
 *
 * NOTE(review): rcu_dereference_check(..., 1) unconditionally satisfies
 * the lockdep condition, so no RCU read-side protection is enforced
 * here — presumably callers must hold rcu_read_lock() (or otherwise pin
 * the mask) once these masks become modifiable; confirm at call sites.
 */
const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden)) {
if (housekeeping.flags & BIT(type)) {
return rcu_dereference_check(housekeeping.cpumasks[type], 1);
}
}
return cpu_possible_mask;
}
EXPORT_SYMBOL_GPL(housekeeping_cpumask);
int housekeeping_any_cpu(enum hk_type type)
{
int cpu;
if (static_branch_unlikely(&housekeeping_overridden)) {
if (housekeeping.flags & BIT(type)) {
cpu = sched_numa_find_closest(housekeeping.cpumasks[type], smp_processor_id());
cpu = sched_numa_find_closest(housekeeping_cpumask(type), smp_processor_id());
if (cpu < nr_cpu_ids)
return cpu;
cpu = cpumask_any_and_distribute(housekeeping.cpumasks[type], cpu_online_mask);
cpu = cpumask_any_and_distribute(housekeeping_cpumask(type), cpu_online_mask);
if (likely(cpu < nr_cpu_ids))
return cpu;
/*
@ -59,28 +70,18 @@ int housekeeping_any_cpu(enum hk_type type)
}
EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
return housekeeping.cpumasks[type];
return cpu_possible_mask;
}
EXPORT_SYMBOL_GPL(housekeeping_cpumask);
void housekeeping_affine(struct task_struct *t, enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
set_cpus_allowed_ptr(t, housekeeping.cpumasks[type]);
set_cpus_allowed_ptr(t, housekeeping_cpumask(type));
}
EXPORT_SYMBOL_GPL(housekeeping_affine);
bool housekeeping_test_cpu(int cpu, enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]);
if (static_branch_unlikely(&housekeeping_overridden) && housekeeping.flags & BIT(type))
return cpumask_test_cpu(cpu, housekeeping_cpumask(type));
return true;
}
EXPORT_SYMBOL_GPL(housekeeping_test_cpu);
@ -96,20 +97,33 @@ void __init housekeeping_init(void)
if (housekeeping.flags & HK_FLAG_KERNEL_NOISE)
sched_tick_offload_init();
/*
* Realloc with a proper allocator so that any cpumask update
* can indifferently free the old version with kfree().
*/
for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) {
struct cpumask *omask, *nmask = kmalloc(cpumask_size(), GFP_KERNEL);
if (WARN_ON_ONCE(!nmask))
return;
omask = rcu_dereference(housekeeping.cpumasks[type]);
/* We need at least one CPU to handle housekeeping work */
WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type]));
WARN_ON_ONCE(cpumask_empty(omask));
cpumask_copy(nmask, omask);
RCU_INIT_POINTER(housekeeping.cpumasks[type], nmask);
memblock_free(omask, cpumask_size());
}
}
static void __init housekeeping_setup_type(enum hk_type type,
cpumask_var_t housekeeping_staging)
{
struct cpumask *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
alloc_bootmem_cpumask_var(&housekeeping.cpumasks[type]);
cpumask_copy(housekeeping.cpumasks[type],
housekeeping_staging);
cpumask_copy(mask, housekeeping_staging);
RCU_INIT_POINTER(housekeeping.cpumasks[type], mask);
}
static int __init housekeeping_setup(char *str, unsigned long flags)
@ -162,7 +176,7 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
for_each_set_bit(type, &iter_flags, HK_TYPE_MAX) {
if (!cpumask_equal(housekeeping_staging,
housekeeping.cpumasks[type])) {
housekeeping_cpumask(type))) {
pr_warn("Housekeeping: nohz_full= must match isolcpus=\n");
goto free_housekeeping_staging;
}
@ -183,7 +197,7 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
iter_flags = flags & (HK_FLAG_KERNEL_NOISE | HK_FLAG_DOMAIN);
first_cpu = (type == HK_TYPE_MAX || !iter_flags) ? 0 :
cpumask_first_and_and(cpu_present_mask,
housekeeping_staging, housekeeping.cpumasks[type]);
housekeeping_staging, housekeeping_cpumask(type));
if (first_cpu >= min(nr_cpu_ids, setup_max_cpus)) {
pr_warn("Housekeeping: must include one present CPU "
"neither in nohz_full= nor in isolcpus=domain, "

View file

@ -42,6 +42,7 @@
#include <linux/ktime_api.h>
#include <linux/lockdep_api.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>