mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:24:31 +01:00
fs/resctrl: Support counter read/reset with mbm_event assignment mode
When "mbm_event" counter assignment mode is enabled, the architecture requires a counter ID to read the event data. Introduce an is_mbm_cntr field in struct rmid_read to indicate whether counter assignment mode is in use. Update the logic to call resctrl_arch_cntr_read() and resctrl_arch_reset_cntr() when the assignment mode is active. Report 'Unassigned' in case the user attempts to read an event without assigning a hardware counter. Suggested-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: Babu Moger <babu.moger@amd.com> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Reviewed-by: Reinette Chatre <reinette.chatre@intel.com> Link: https://lore.kernel.org/cover.1757108044.git.babu.moger@amd.com
This commit is contained in:
parent
2a65b72c16
commit
159f36cd4d
4 changed files with 62 additions and 16 deletions
|
|
@ -434,6 +434,12 @@ When monitoring is enabled all MON groups will also contain:
|
|||
for the L3 cache they occupy). These are named "mon_sub_L3_YY"
|
||||
where "YY" is the node number.
|
||||
|
||||
When the 'mbm_event' counter assignment mode is enabled, reading
|
||||
an MBM event of a MON group returns 'Unassigned' if no hardware
|
||||
counter is assigned to it. For CTRL_MON groups, 'Unassigned' is
|
||||
returned if the MBM event does not have an assigned counter in the
|
||||
CTRL_MON group or in any of its associated MON groups.
|
||||
|
||||
"mon_hw_id":
|
||||
Available only with debug option. The identifier used by hardware
|
||||
for the monitor group. On x86 this is the RMID.
|
||||
|
|
|
|||
|
|
@ -563,10 +563,15 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
|
|||
rr->r = r;
|
||||
rr->d = d;
|
||||
rr->first = first;
|
||||
rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
|
||||
if (IS_ERR(rr->arch_mon_ctx)) {
|
||||
rr->err = -EINVAL;
|
||||
return;
|
||||
if (resctrl_arch_mbm_cntr_assign_enabled(r) &&
|
||||
resctrl_is_mbm_event(evtid)) {
|
||||
rr->is_mbm_cntr = true;
|
||||
} else {
|
||||
rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
|
||||
if (IS_ERR(rr->arch_mon_ctx)) {
|
||||
rr->err = -EINVAL;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);
|
||||
|
|
@ -582,7 +587,8 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
|
|||
else
|
||||
smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
|
||||
|
||||
resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
|
||||
if (rr->arch_mon_ctx)
|
||||
resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
|
||||
}
|
||||
|
||||
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
|
||||
|
|
@ -653,10 +659,16 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
|
|||
|
||||
checkresult:
|
||||
|
||||
/*
|
||||
* -ENOENT is a special case, set only when "mbm_event" counter assignment
|
||||
* mode is enabled and no counter has been assigned.
|
||||
*/
|
||||
if (rr.err == -EIO)
|
||||
seq_puts(m, "Error\n");
|
||||
else if (rr.err == -EINVAL)
|
||||
seq_puts(m, "Unavailable\n");
|
||||
else if (rr.err == -ENOENT)
|
||||
seq_puts(m, "Unassigned\n");
|
||||
else
|
||||
seq_printf(m, "%llu\n", rr.val);
|
||||
|
||||
|
|
|
|||
|
|
@ -111,6 +111,8 @@ struct mon_data {
|
|||
* @evtid: Which monitor event to read.
|
||||
* @first: Initialize MBM counter when true.
|
||||
* @ci: Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
|
||||
* @is_mbm_cntr: true if "mbm_event" counter assignment mode is enabled and it
|
||||
* is an MBM event.
|
||||
* @err: Error encountered when reading counter.
|
||||
* @val: Returned value of event counter. If @rgrp is a parent resource group,
|
||||
* @val includes the sum of event counts from its child resource groups.
|
||||
|
|
@ -125,6 +127,7 @@ struct rmid_read {
|
|||
enum resctrl_event_id evtid;
|
||||
bool first;
|
||||
struct cacheinfo *ci;
|
||||
bool is_mbm_cntr;
|
||||
int err;
|
||||
u64 val;
|
||||
void *arch_mon_ctx;
|
||||
|
|
|
|||
|
|
@ -419,12 +419,24 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
|
|||
u32 closid = rdtgrp->closid;
|
||||
u32 rmid = rdtgrp->mon.rmid;
|
||||
struct rdt_mon_domain *d;
|
||||
int cntr_id = -ENOENT;
|
||||
struct mbm_state *m;
|
||||
int err, ret;
|
||||
u64 tval = 0;
|
||||
|
||||
if (rr->is_mbm_cntr) {
|
||||
cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
|
||||
if (cntr_id < 0) {
|
||||
rr->err = -ENOENT;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (rr->first) {
|
||||
resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
|
||||
if (rr->is_mbm_cntr)
|
||||
resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
|
||||
else
|
||||
resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
|
||||
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
|
||||
if (m)
|
||||
memset(m, 0, sizeof(struct mbm_state));
|
||||
|
|
@ -435,8 +447,12 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
|
|||
/* Reading a single domain, must be on a CPU in that domain. */
|
||||
if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
|
||||
return -EINVAL;
|
||||
rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
|
||||
rr->evtid, &tval, rr->arch_mon_ctx);
|
||||
if (rr->is_mbm_cntr)
|
||||
rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
|
||||
rr->evtid, &tval);
|
||||
else
|
||||
rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
|
||||
rr->evtid, &tval, rr->arch_mon_ctx);
|
||||
if (rr->err)
|
||||
return rr->err;
|
||||
|
||||
|
|
@ -460,8 +476,12 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
|
|||
list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
|
||||
if (d->ci_id != rr->ci->id)
|
||||
continue;
|
||||
err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
|
||||
rr->evtid, &tval, rr->arch_mon_ctx);
|
||||
if (rr->is_mbm_cntr)
|
||||
err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
|
||||
rr->evtid, &tval);
|
||||
else
|
||||
err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
|
||||
rr->evtid, &tval, rr->arch_mon_ctx);
|
||||
if (!err) {
|
||||
rr->val += tval;
|
||||
ret = 0;
|
||||
|
|
@ -668,11 +688,15 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
|
|||
rr.r = r;
|
||||
rr.d = d;
|
||||
rr.evtid = evtid;
|
||||
rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
|
||||
if (IS_ERR(rr.arch_mon_ctx)) {
|
||||
pr_warn_ratelimited("Failed to allocate monitor context: %ld",
|
||||
PTR_ERR(rr.arch_mon_ctx));
|
||||
return;
|
||||
if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
|
||||
rr.is_mbm_cntr = true;
|
||||
} else {
|
||||
rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
|
||||
if (IS_ERR(rr.arch_mon_ctx)) {
|
||||
pr_warn_ratelimited("Failed to allocate monitor context: %ld",
|
||||
PTR_ERR(rr.arch_mon_ctx));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
__mon_event_count(rdtgrp, &rr);
|
||||
|
|
@ -684,7 +708,8 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
|
|||
if (is_mba_sc(NULL))
|
||||
mbm_bw_count(rdtgrp, &rr);
|
||||
|
||||
resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
|
||||
if (rr.arch_mon_ctx)
|
||||
resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
|
||||
}
|
||||
|
||||
static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue