powerpc/kvm-hv-pmu: Add perf-events for Hostwide counters

Update 'kvm-hv-pmu.c' to add five new perf-events mapped to the five
Hostwide counters. Since these newly introduced perf events are at system
wide scope and can be read from any L1-Lpar CPU, 'kvmppc_pmu' scope and
capabilities are updated appropriately.

Also introduce two new helpers. First is kvmppc_update_l0_stats() that uses
the infrastructure introduced in previous patches to issue the
H_GUEST_GET_STATE hcall to L0-PowerVM to fetch the guest-state-buffer holding
the latest values of these counters, which is then parsed and the 'l0_stats'
variable updated.

Second helper is kvmppc_pmu_event_update() which is called from the
'kvmppc_pmu' callbacks and uses kvmppc_update_l0_stats() to update
'l0_stats' and then update the 'struct perf_event's event-counter.

Some minor updates to kvmppc_pmu_{add, del, read}() to remove some debug
scaffolding code.

Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Reviewed-by: Athira Rajeev <atrajeev@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250416162740.93143-7-vaibhav@linux.ibm.com
This commit is contained in:
Vaibhav Jain 2025-04-16 21:57:36 +05:30 committed by Madhavan Srinivasan
parent f0c9c49c50
commit 02a1324b08

View file

@ -30,6 +30,11 @@
#include "asm/guest-state-buffer.h"
/*
 * Perf-event ids for the five Hostwide (L0-host) counters.
 * Each id selects the matching field of 'l0_stats' in
 * kvmppc_pmu_event_update(); values double as the 'config' encoding
 * exposed via the PMU's sysfs 'events' attributes.
 */
enum kvmppc_pmu_eventid {
KVMPPC_EVENT_HOST_HEAP,			/* maps to l0_stats.guest_heap */
KVMPPC_EVENT_HOST_HEAP_MAX,		/* maps to l0_stats.guest_heap_max */
KVMPPC_EVENT_HOST_PGTABLE,		/* maps to l0_stats.guest_pgtable_size */
KVMPPC_EVENT_HOST_PGTABLE_MAX,		/* maps to l0_stats.guest_pgtable_size_max */
KVMPPC_EVENT_HOST_PGTABLE_RECLAIM,	/* maps to l0_stats.guest_pgtable_reclaim */
KVMPPC_EVENT_MAX,			/* number of events; not a valid config */
};
@ -61,8 +66,14 @@ static DEFINE_SPINLOCK(lock_l0_stats);
/* GSB related structs needed to talk to L0 */
static struct kvmppc_gs_msg *gsm_l0_stats;
static struct kvmppc_gs_buff *gsb_l0_stats;
static struct kvmppc_gs_parser gsp_l0_stats;
/*
 * sysfs 'events' attributes, one per Hostwide counter. Each entry binds
 * the user-visible event name to its 'config' value (the enum id above).
 * NULL-terminated as required by the attribute-group API.
 */
static struct attribute *kvmppc_pmu_events_attr[] = {
KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP),
KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX),
KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE),
KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX),
KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM),
NULL,
};
@ -71,7 +82,7 @@ static const struct attribute_group kvmppc_pmu_events_group = {
.attrs = kvmppc_pmu_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0");
PMU_FORMAT_ATTR(event, "config:0-5");
static struct attribute *kvmppc_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
@ -88,6 +99,79 @@ static const struct attribute_group *kvmppc_pmu_attr_groups[] = {
NULL,
};
/*
 * Fetch the latest Hostwide counter values from the L0-host via the
 * H_GUEST_GET_STATE hcall and refresh 'l0_stats' from them.
 *
 * Returns 0 on success or a negative errno from the hcall / parser.
 * Caller must hold 'lock_l0_stats'.
 */
static int kvmppc_update_l0_stats(void)
{
	int ret;

	/* HOST_WIDE flag makes L0 ignore the guestid/vcpuid arguments */
	ret = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE);
	if (ret)
		return ret;

	/* Parse the guest-state-buffer returned by the hcall */
	ret = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats);
	if (ret)
		return ret;

	/* Clear stale values, then populate 'l0_stats' from the buffer */
	memset(&l0_stats, 0, sizeof(l0_stats));
	return kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats);
}
/*
 * Refresh 'l0_stats' from the L0-host and fold the delta of the counter
 * selected by event->attr.config into the perf_event's count.
 *
 * Returns 0 on success, -ENOENT for an unknown config, or the error
 * propagated from kvmppc_update_l0_stats().
 */
static int kvmppc_pmu_event_update(struct perf_event *event)
{
	unsigned int config = event->attr.config;
	unsigned long flags;
	u64 prev, new_val;
	int ret;

	/* Serialize readers/writers of the shared 'l0_stats' */
	spin_lock_irqsave(&lock_l0_stats, flags);

	ret = kvmppc_update_l0_stats();
	if (!ret) {
		switch (config) {
		case KVMPPC_EVENT_HOST_HEAP:
			new_val = l0_stats.guest_heap;
			break;
		case KVMPPC_EVENT_HOST_HEAP_MAX:
			new_val = l0_stats.guest_heap_max;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE:
			new_val = l0_stats.guest_pgtable_size;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE_MAX:
			new_val = l0_stats.guest_pgtable_size_max;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM:
			new_val = l0_stats.guest_pgtable_reclaim;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}

	spin_unlock_irqrestore(&lock_l0_stats, flags);

	/* If no error then fold the increase since last read into the count */
	if (!ret) {
		prev = local64_xchg(&event->hw.prev_count, new_val);
		if (new_val > prev)
			local64_add(new_val - prev, &event->count);
	}

	return ret;
}
static int kvmppc_pmu_event_init(struct perf_event *event)
{
unsigned int config = event->attr.config;
@ -110,15 +194,19 @@ static int kvmppc_pmu_event_init(struct perf_event *event)
/* PMU ->del callback: capture a final counter value before removal */
static void kvmppc_pmu_del(struct perf_event *event, int flags)
{
	kvmppc_pmu_event_update(event);
}
/*
 * PMU ->add callback: snapshot the counter now only when perf asks the
 * event to start immediately (PERF_EF_START).
 */
static int kvmppc_pmu_add(struct perf_event *event, int flags)
{
	if (!(flags & PERF_EF_START))
		return 0;

	return kvmppc_pmu_event_update(event);
}
/* PMU ->read callback: refresh the event count from the L0-host stats */
static void kvmppc_pmu_read(struct perf_event *event)
{
	kvmppc_pmu_event_update(event);
}
/* Return the size of the needed guest state buffer */
@ -302,6 +390,8 @@ static struct pmu kvmppc_pmu = {
.read = kvmppc_pmu_read,
.attr_groups = kvmppc_pmu_attr_groups,
.type = -1,
.scope = PERF_PMU_SCOPE_SYS_WIDE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static int __init kvmppc_register_pmu(void)