mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
Misc perf fixes:
- Require group events for branch counter groups and
PEBS counter snapshotting groups to be x86 events.
- Fix the handling of counter-snapshotting of non-precise
events, where counter values may move backwards a bit,
temporarily, confusing the code.
- Restrict perf/KVM PEBS to guest-owned events.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-----BEGIN PGP SIGNATURE-----
iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmgXFBERHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1h6/Q/9Ew1anraqM4kV21I9P3SsXX2HqMePd1WZ
o2n3CwJMtS38FDd4ouHUf5ByIaDLGfb5klMgdxHoTEwoZCXyAq1w04iHQFMn0b3m
34FX7TBYqmg+hAhkXV2VSJzrgeSCWxxJskjarxHXv6Ahlgdkc+Xpqb2pzLKiS1Mp
JUf/yQKIlp1U89vJWPpCtVGAaKdc3e+R8gl39xHIvwYlfUz60c6vUTDtKquTdADg
FWtjxPJGazOlNUD7zygR2vZ9Uy50mesTw6ArKUW7LvKpVmjVICBbT0CHu9PekFLc
mUs0qIYDYk3Qd5/eaNb5UCfQEjWY3Cni+OXnn4dL4Q/ftYzVEn0EMbR8GMh2ZdD0
rs7gPm/OgGjS4Fw+T2uw45iMxTryQxHmbDYj4zEtDKzRlcyMGLwzo191xwM+bjD6
Rp0anF53srh4QLdDQLR5JvMdP+EuFBycMwhok3GkRCc2BClyn/weHzzJ6YEE/lyj
0CJg4wCjYPULFR0jUEFtWDZdrHoC2KmsnzkuBAEvg6hNInbLNcLJx+9KBb9yib01
Ruz3auLw05TbPrmeA9QHHba+NUcy/OyRLD5gxfI21GRw/LRf1mP8Sg9Ub+WZuFVf
0u/+7SaQ3l5z2wqT0IyN8g4tJ6OseHM16/hbHPKf60b2z/GrhxCZrUh6AcdgkgIi
EzJybNXxmag=
=F7wJ
-----END PGP SIGNATURE-----
Merge tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc perf fixes from Ingo Molnar:
- Require group events for branch counter groups and
PEBS counter snapshotting groups to be x86 events.
- Fix the handling of counter-snapshotting of non-precise
events, where counter values may move backwards a bit,
temporarily, confusing the code.
- Restrict perf/KVM PEBS to guest-owned events.
* tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting
perf/x86/intel: Check the X86 leader for pebs_counter_event_group
perf/x86/intel: Only check the group flag for X86 leader
This commit is contained in:
commit
3f3041b9e4
4 changed files with 30 additions and 6 deletions
|
|
@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
|
|||
}
|
||||
}
|
||||
|
||||
static inline int is_x86_event(struct perf_event *event)
|
||||
int is_x86_event(struct perf_event *event)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
|
|
|||
|
|
@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
|
|||
arr[pebs_enable] = (struct perf_guest_switch_msr){
|
||||
.msr = MSR_IA32_PEBS_ENABLE,
|
||||
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
|
||||
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
|
||||
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
|
||||
};
|
||||
|
||||
if (arr[pebs_enable].host) {
|
||||
|
|
|
|||
|
|
@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
|
|||
*/
|
||||
intel_pmu_save_and_restart_reload(event, count);
|
||||
}
|
||||
} else
|
||||
intel_pmu_save_and_restart(event);
|
||||
} else {
|
||||
/*
|
||||
* For a non-precise event, it's possible the
|
||||
* counters-snapshotting records a positive value for the
|
||||
* overflowed event. Then the HW auto-reload mechanism
|
||||
* reset the counter to 0 immediately, because the
|
||||
* pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD
|
||||
* is not set. The counter backwards may be observed in a
|
||||
* PMI handler.
|
||||
*
|
||||
* Since the event value has been updated when processing the
|
||||
* counters-snapshotting record, only needs to set the new
|
||||
* period for the counter.
|
||||
*/
|
||||
if (is_pebs_counter_event_group(event))
|
||||
static_call(x86_pmu_set_period)(event);
|
||||
else
|
||||
intel_pmu_save_and_restart(event);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
|
|
|
|||
|
|
@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
|
|||
return is_metric_event(event) || is_slots_event(event);
|
||||
}
|
||||
|
||||
int is_x86_event(struct perf_event *event);
|
||||
|
||||
static inline bool check_leader_group(struct perf_event *leader, int flags)
|
||||
{
|
||||
return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
|
||||
}
|
||||
|
||||
static inline bool is_branch_counters_group(struct perf_event *event)
|
||||
{
|
||||
return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
|
||||
return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
|
||||
}
|
||||
|
||||
static inline bool is_pebs_counter_event_group(struct perf_event *event)
|
||||
{
|
||||
return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
|
||||
return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
|
||||
}
|
||||
|
||||
struct amd_nb {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue