mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:24:31 +01:00
fs/resctrl: Pass struct rdtgroup instead of individual members
Reading monitoring data for a monitoring group requires both the RMID and CLOSID. The RMID and CLOSID are members of struct rdtgroup but passed separately to several functions involved in retrieving event data. When "mbm_event" counter assignment mode is enabled, a counter ID is required to read event data. The counter ID is obtained through mbm_cntr_get(), which expects a struct rdtgroup pointer. Provide a pointer to the struct rdtgroup as parameter to functions involved in retrieving event data to simplify access to RMID, CLOSID, and counter ID. Suggested-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: Babu Moger <babu.moger@amd.com> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Reviewed-by: Reinette Chatre <reinette.chatre@intel.com> Link: https://lore.kernel.org/cover.1757108044.git.babu.moger@amd.com
This commit is contained in:
parent
aab2c5088c
commit
bc53eea6c2
1 changed files with 18 additions and 15 deletions
|
|
@@ -413,9 +413,11 @@ static void mbm_cntr_free(struct rdt_mon_domain *d, int cntr_id)
|
|||
memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
|
||||
}
|
||||
|
||||
static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
|
||||
static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
u32 closid = rdtgrp->closid;
|
||||
u32 rmid = rdtgrp->mon.rmid;
|
||||
struct rdt_mon_domain *d;
|
||||
struct mbm_state *m;
|
||||
int err, ret;
|
||||
|
|
@@ -475,8 +477,8 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
|
|||
/*
|
||||
* mbm_bw_count() - Update bw count from values previously read by
|
||||
* __mon_event_count().
|
||||
* @closid: The closid used to identify the cached mbm_state.
|
||||
* @rmid: The rmid used to identify the cached mbm_state.
|
||||
* @rdtgrp: resctrl group associated with the CLOSID and RMID to identify
|
||||
* the cached mbm_state.
|
||||
* @rr: The struct rmid_read populated by __mon_event_count().
|
||||
*
|
||||
* Supporting function to calculate the memory bandwidth
|
||||
|
|
@@ -484,9 +486,11 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
|
|||
* __mon_event_count() is compared with the chunks value from the previous
|
||||
* invocation. This must be called once per second to maintain values in MBps.
|
||||
*/
|
||||
static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
|
||||
static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
|
||||
{
|
||||
u64 cur_bw, bytes, cur_bytes;
|
||||
u32 closid = rdtgrp->closid;
|
||||
u32 rmid = rdtgrp->mon.rmid;
|
||||
struct mbm_state *m;
|
||||
|
||||
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
|
||||
|
|
@@ -515,7 +519,7 @@ void mon_event_count(void *info)
|
|||
|
||||
rdtgrp = rr->rgrp;
|
||||
|
||||
ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
|
||||
ret = __mon_event_count(rdtgrp, rr);
|
||||
|
||||
/*
|
||||
* For Ctrl groups read data from child monitor groups and
|
||||
|
|
@@ -526,8 +530,7 @@ void mon_event_count(void *info)
|
|||
|
||||
if (rdtgrp->type == RDTCTRL_GROUP) {
|
||||
list_for_each_entry(entry, head, mon.crdtgrp_list) {
|
||||
if (__mon_event_count(entry->closid, entry->mon.rmid,
|
||||
rr) == 0)
|
||||
if (__mon_event_count(entry, rr) == 0)
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
|
@@ -658,7 +661,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
|
|||
}
|
||||
|
||||
static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
|
||||
u32 closid, u32 rmid, enum resctrl_event_id evtid)
|
||||
struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
|
||||
{
|
||||
struct rmid_read rr = {0};
|
||||
|
||||
|
|
@@ -672,30 +675,30 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
|
|||
return;
|
||||
}
|
||||
|
||||
__mon_event_count(closid, rmid, &rr);
|
||||
__mon_event_count(rdtgrp, &rr);
|
||||
|
||||
/*
|
||||
* If the software controller is enabled, compute the
|
||||
* bandwidth for this event id.
|
||||
*/
|
||||
if (is_mba_sc(NULL))
|
||||
mbm_bw_count(closid, rmid, &rr);
|
||||
mbm_bw_count(rdtgrp, &rr);
|
||||
|
||||
resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
|
||||
}
|
||||
|
||||
static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
|
||||
u32 closid, u32 rmid)
|
||||
struct rdtgroup *rdtgrp)
|
||||
{
|
||||
/*
|
||||
* This is protected from concurrent reads from user as both
|
||||
* the user and overflow handler hold the global mutex.
|
||||
*/
|
||||
if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
|
||||
mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
|
||||
mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
|
||||
|
||||
if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
|
||||
mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
|
||||
mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@@ -768,11 +771,11 @@ void mbm_handle_overflow(struct work_struct *work)
|
|||
d = container_of(work, struct rdt_mon_domain, mbm_over.work);
|
||||
|
||||
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
|
||||
mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
|
||||
mbm_update(r, d, prgrp);
|
||||
|
||||
head = &prgrp->mon.crdtgrp_list;
|
||||
list_for_each_entry(crgrp, head, mon.crdtgrp_list)
|
||||
mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
|
||||
mbm_update(r, d, crgrp);
|
||||
|
||||
if (is_mba_sc(NULL))
|
||||
update_mba_bw(prgrp, d);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue