KVM: arm64: Eagerly save VMCR on exit
We currently save/restore the VMCR register in a pretty lazy way (on
load/put, consistently with what we do with the APRs). However, we are
going to need the group-enable bits that are backed by VMCR on each
entry (so that we can avoid injecting interrupts for disabled groups).

Move the synchronisation from put to sync, which results in some minor
churn in the nVHE hypercalls to simplify things.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-21-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
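For context, a minimal sketch of how the group-enable bits could be read from a VMCR value captured at exit, assuming the GICv3 ICH_VMCR_EL2 layout (bit 0 enables virtual Group 0, bit 1 enables virtual Group 1). The helper name and mask macros below are made up for illustration and are not part of this patch:

/* Illustrative only: not part of this commit. */
#define VMCR_VENG0	(1U << 0)	/* virtual Group 0 enable */
#define VMCR_VENG1	(1U << 1)	/* virtual Group 1 enable */

/* Would the guest take an interrupt for this group right now? */
static inline bool vgic_vmcr_group_enabled(u32 vmcr, bool group1)
{
	return vmcr & (group1 ? VMCR_VENG1 : VMCR_VENG0);
}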
parent dd598fc113
commit cf72ee6371
8 changed files with 13 additions and 22 deletions
@@ -79,7 +79,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,

@@ -82,7 +82,7 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

@@ -659,8 +659,7 @@ nommu:
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (is_protected_kvm_enabled()) {
-		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
-			     &vcpu->arch.vgic_cpu.vgic_v3);
+		kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
 		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
 	}

@@ -157,6 +157,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
 
 	host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+	host_cpu_if->vgic_vmcr = hyp_cpu_if->vgic_vmcr;
 	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
 		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }

@@ -464,11 +465,11 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
 	__vgic_v3_init_lrs();
 }
 
-static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
 }
 
 static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)

@@ -616,7 +617,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+	HANDLE_FUNC(__vgic_v3_save_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
 	HANDLE_FUNC(__pkvm_reserve_vm),
 	HANDLE_FUNC(__pkvm_unreserve_vm),

@@ -235,6 +235,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 		}
 	}
 
+	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+
 	if (cpu_if->vgic_hcr & ICH_HCR_EL2_LRENPIE) {
 		u64 val = read_gicreg(ICH_HCR_EL2);
 		cpu_if->vgic_hcr &= ~ICH_HCR_EL2_EOIcount;

@@ -332,10 +334,6 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 
-	if (!cpu_if->vgic_sre) {
-		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-	}
-
 	/* Only restore SRE if the host implements the GICv2 interface */
 	if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
 		val = read_gicreg(ICC_SRE_EL2);

@@ -357,7 +355,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 	write_gicreg(0, ICH_HCR_EL2);
 }
 
-static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;

@@ -518,13 +516,6 @@ static void __vgic_v3_write_vmcr(u32 vmcr)
 	write_gicreg(vmcr, ICH_VMCR_EL2);
 }
 
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
-{
-	__vgic_v3_save_aprs(cpu_if);
-	if (cpu_if->vgic_sre)
-		cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
-}
-
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	__vgic_v3_compat_mode_enable();

@@ -451,6 +451,7 @@ void vgic_v2_save_state(struct kvm_vcpu *vcpu)
 	if (!base)
 		return;
 
+	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 	if (used_lrs)
 		save_lrs(vcpu, base);

@@ -495,6 +496,5 @@ void vgic_v2_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
-	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }

@@ -340,7 +340,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 	u64 val;
 	int i;
 
-	__vgic_v3_save_vmcr_aprs(s_cpu_if);
+	__vgic_v3_save_aprs(s_cpu_if);
 	__vgic_v3_deactivate_traps(s_cpu_if);
 	__vgic_v3_save_state(s_cpu_if);

@@ -815,7 +815,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 	}
 
 	if (likely(!is_protected_kvm_enabled()))
-		kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+		kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
 	WARN_ON(vgic_v4_put(vcpu));
 
 	if (has_vhe())