sched: Add cpus_share_resources API
Add the cpus_share_resources() API. This is preparation for the optimization of select_idle_cpu() on platforms with a cluster scheduler level.

On a machine with clusters, cpus_share_resources() will test whether two CPUs are within the same cluster. On a non-cluster machine it will behave the same as cpus_share_cache(). So we use "resources" here for cache resources.

Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-and-reviewed-by: Chen Yu <yu.c.chen@intel.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20231019033323.54147-2-yangyicong@huawei.com
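For orientation, the sketch below shows the kind of caller this API prepares for: a CPU-selection path that prefers an idle CPU sharing resources (the same cluster, or the same LLC on non-cluster machines) with a target CPU. It is a minimal hypothetical example, not part of this commit; pick_resource_sharing_cpu() and scan_mask are made-up names, and it assumes scheduler-internal context where for_each_cpu() and available_idle_cpu() are visible. Only cpus_share_resources() itself comes from the patch.

/* Hypothetical caller, for illustration only -- not part of this commit. */
static int pick_resource_sharing_cpu(const struct cpumask *scan_mask, int target)
{
	int cpu;

	/* Prefer an idle CPU in the same cluster (or LLC) as target. */
	for_each_cpu(cpu, scan_mask) {
		if (cpus_share_resources(cpu, target) && available_idle_cpu(cpu))
			return cpu;
	}

	/* No resource-sharing idle CPU found. */
	return -1;
}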
This commit is contained in:
parent 5ebde09d91
commit b95303e0ae

5 changed files with 40 additions and 1 deletion
include/linux/sched/sd_flags.h
@@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
  */
 SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 
+/*
+ * Domain members share CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
 /*
  * Domain members share CPU package resources (i.e. caches)
  *
include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
@@ -179,6 +179,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
 bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_resources(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
@@ -232,6 +233,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
+static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
kernel/sched/core.c
@@ -3939,6 +3939,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
+/*
+ * Whether CPUs share cache resources, which means the LLC on non-cluster
+ * machines and the LLC tag or L2 on machines with clusters.
+ */
+bool cpus_share_resources(int this_cpu, int that_cpu)
+{
+	if (this_cpu == that_cpu)
+		return true;
+
+	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
+}
+
 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
 	/*
kernel/sched/sched.h
@@ -1853,6 +1853,7 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(int, sd_share_id);
 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
kernel/sched/topology.c
@@ -668,6 +668,7 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(int, sd_share_id);
 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -693,6 +694,17 @@ static void update_top_cache_domain(int cpu)
 	per_cpu(sd_llc_id, cpu) = id;
 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
+	sd = lowest_flag_domain(cpu, SD_CLUSTER);
+	if (sd)
+		id = cpumask_first(sched_domain_span(sd));
+
+	/*
+	 * This assignment should be placed after sd_llc_id, as we want
+	 * this id to equal the cluster id on cluster machines but the
+	 * LLC id on non-cluster machines.
+	 */
+	per_cpu(sd_share_id, cpu) = id;
+
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
 
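To make the resulting IDs concrete, consider a hypothetical topology (not taken from the patch) where CPUs 0-3 and CPUs 4-7 form two clusters under a single 8-CPU LLC: sd_llc_id is 0 for all eight CPUs, while sd_share_id becomes 0 for CPUs 0-3 and 4 for CPUs 4-7, so cpus_share_resources(1, 5) returns false even though cpus_share_cache(1, 5) returns true. On a machine without an SD_CLUSTER level, lowest_flag_domain() returns NULL, id keeps the LLC value, and the two helpers agree.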
@@ -1550,6 +1562,7 @@ static struct cpumask ***sched_domains_numa_masks;
  */
 #define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
+	 SD_CLUSTER		|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING)