mirror of
https://github.com/torvalds/linux.git
synced 2026-03-07 23:04:33 +01:00
Arm:
* Make sure we don't leak any S1POE state from guest to guest when
the feature is supported on the HW, but not enabled on the host
* Propagate the ID registers from the host into non-protected VMs
managed by pKVM, ensuring that the guest sees the intended feature set
* Drop double kern_hyp_va() from unpin_host_sve_state(), which could
bite us if we were to change kern_hyp_va() to not being idempotent
* Don't leak stage-2 mappings in protected mode
* Correctly align the faulting address when dealing with single page
stage-2 mappings for PAGE_SIZE > 4kB
* Fix detection of virtualisation-capable GICv5 IRS, due to the
maintainer being obviously fat fingered... [his words, not mine]
* Remove duplication of code retrieving the ASID for the purpose of
S1 PT handling
* Fix slightly abusive const-ification in vgic_set_kvm_info()
Generic:
* Remove internal Kconfigs that are now set on all architectures.
* Remove per-architecture code to enable KVM_CAP_SYNC_MMU, all
architectures finally enable it in Linux 7.0.
-----BEGIN PGP SIGNATURE-----
iQFIBAABCgAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmmkSMUUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroO2tggAgOHvH9lwgHxFADAOKXPEsv+Tw6cB
zuUBPo6e5XQQx6BJQVo8ctveCBbA0ybRJXbju6lgS/Bg9qQYKjZbNrYdKwBwhw3b
JBUFTIJgl7lLU5gEANNtvgiiNUc2z1GfG8NQ5ovMJ0/MDEEI0YZjkGoe8ZyJ54vg
qrNHBROKvt54wVHXFGhZ1z8u4RqJ/WCmy21wYbwiMgQGXq9ugRRfhnu3iGNO8BcK
bSSh4cP7qusO5Nj0m/XYD/68VwvyogaEkh4sNS1VH0FOoONRWs80Q6iT2HjCGgv6
mAzCx/yB9ziSQRis3oYYEBH23HZxRVDVBeSBlPcbxE3VM7SGzp7O8L692g==
=+mC+
-----END PGP SIGNATURE-----
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
"Arm:
- Make sure we don't leak any S1POE state from guest to guest when
the feature is supported on the HW, but not enabled on the host
- Propagate the ID registers from the host into non-protected VMs
managed by pKVM, ensuring that the guest sees the intended feature
set
- Drop double kern_hyp_va() from unpin_host_sve_state(), which could
bite us if we were to change kern_hyp_va() to not being idempotent
- Don't leak stage-2 mappings in protected mode
- Correctly align the faulting address when dealing with single page
stage-2 mappings for PAGE_SIZE > 4kB
- Fix detection of virtualisation-capable GICv5 IRS, due to the
maintainer being obviously fat fingered... [his words, not mine]
- Remove duplication of code retrieving the ASID for the purpose of
S1 PT handling
- Fix slightly abusive const-ification in vgic_set_kvm_info()
Generic:
- Remove internal Kconfigs that are now set on all architectures
- Remove per-architecture code to enable KVM_CAP_SYNC_MMU, all
architectures finally enable it in Linux 7.0"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: always define KVM_CAP_SYNC_MMU
KVM: remove CONFIG_KVM_GENERIC_MMU_NOTIFIER
KVM: arm64: Deduplicate ASID retrieval code
irqchip/gic-v5: Fix inversion of IRS_IDR0.virt flag
KVM: arm64: Revert accidental drop of kvm_uninit_stage2_mmu() for non-NV VMs
KVM: arm64: Fix protected mode handling of pages larger than 4kB
KVM: arm64: vgic: Handle const qualifier from gic_kvm_info allocation type
KVM: arm64: Remove redundant kern_hyp_va() in unpin_host_sve_state()
KVM: arm64: Fix ID register initialization for non-protected pKVM guests
KVM: arm64: Optimise away S1POE handling when not supported by host
KVM: arm64: Hide S1POE from guests when not supported by the host
This commit is contained in:
commit
949d0a46ad
26 changed files with 87 additions and 128 deletions
|
|
@@ -1396,7 +1396,10 @@ or its flags may be modified, but it may not be resized.
|
|||
Memory for the region is taken starting at the address denoted by the
|
||||
field userspace_addr, which must point at user addressable memory for
|
||||
the entire memory slot size. Any object may back this memory, including
|
||||
anonymous memory, ordinary files, and hugetlbfs.
|
||||
anonymous memory, ordinary files, and hugetlbfs. Changes in the backing
|
||||
of the memory region are automatically reflected into the guest.
|
||||
For example, an mmap() that affects the region will be made visible
|
||||
immediately. Another example is madvise(MADV_DROP).
|
||||
|
||||
On architectures that support a form of address tagging, userspace_addr must
|
||||
be an untagged address.
|
||||
|
|
@@ -1412,11 +1415,6 @@ use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
|
|||
to make a new slot read-only. In this case, writes to this memory will be
|
||||
posted to userspace as KVM_EXIT_MMIO exits.
|
||||
|
||||
When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
|
||||
the memory region are automatically reflected into the guest. For example, an
|
||||
mmap() that affects the region will be made visible immediately. Another
|
||||
example is madvise(MADV_DROP).
|
||||
|
||||
For TDX guest, deleting/moving memory region loses guest memory contents.
|
||||
Read only region isn't supported. Only as-id 0 is supported.
|
||||
|
||||
|
|
|
|||
|
|
@@ -1616,7 +1616,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
|
|||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
|
||||
#define kvm_has_s1poe(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
(system_supports_poe() && \
|
||||
kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
|
||||
#define kvm_has_ras(k) \
|
||||
(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
|
||||
|
|
|
|||
|
|
@@ -397,6 +397,8 @@ int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
|
|||
int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
|
||||
void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
|
||||
|
||||
u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
|
||||
|
||||
#define vncr_fixmap(c) \
|
||||
({ \
|
||||
u32 __c = (c); \
|
||||
|
|
|
|||
|
|
@@ -21,7 +21,6 @@ menuconfig KVM
|
|||
bool "Kernel-based Virtual Machine (KVM) support"
|
||||
select KVM_COMMON
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
select KVM_MMIO
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
|
|
|
|||
|
|
@@ -358,7 +358,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
break;
|
||||
case KVM_CAP_IOEVENTFD:
|
||||
case KVM_CAP_USER_MEMORY:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
|
||||
case KVM_CAP_ONE_REG:
|
||||
case KVM_CAP_ARM_PSCI:
|
||||
|
|
|
|||
|
|
@@ -540,31 +540,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
|
|||
wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
|
||||
|
||||
wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
|
||||
if (wr->nG) {
|
||||
u64 asid_ttbr, tcr;
|
||||
|
||||
switch (wi->regime) {
|
||||
case TR_EL10:
|
||||
tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
|
||||
asid_ttbr = ((tcr & TCR_A1) ?
|
||||
vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
|
||||
vcpu_read_sys_reg(vcpu, TTBR0_EL1));
|
||||
break;
|
||||
case TR_EL20:
|
||||
tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
|
||||
asid_ttbr = ((tcr & TCR_A1) ?
|
||||
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
|
||||
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
|
||||
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
|
||||
!(tcr & TCR_ASID16))
|
||||
wr->asid &= GENMASK(7, 0);
|
||||
}
|
||||
if (wr->nG)
|
||||
wr->asid = get_asid_by_regime(vcpu, wi->regime);
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@@ -342,6 +342,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
|
|||
/* No restrictions for non-protected VMs. */
|
||||
if (!kvm_vm_is_protected(kvm)) {
|
||||
hyp_vm->kvm.arch.flags = host_arch_flags;
|
||||
hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
|
||||
|
||||
bitmap_copy(kvm->arch.vcpu_features,
|
||||
host_kvm->arch.vcpu_features,
|
||||
|
|
@@ -391,7 +392,7 @@ static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
|
|||
if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
|
||||
return;
|
||||
|
||||
sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
|
||||
sve_state = hyp_vcpu->vcpu.arch.sve_state;
|
||||
hyp_unpin_shared_mem(sve_state,
|
||||
sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
|
||||
}
|
||||
|
|
@@ -471,6 +472,35 @@ err:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
|
||||
const struct kvm *host_kvm = hyp_vm->host_kvm;
|
||||
struct kvm *kvm = &hyp_vm->kvm;
|
||||
|
||||
if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
|
||||
return -EINVAL;
|
||||
|
||||
if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
|
||||
return 0;
|
||||
|
||||
memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
|
||||
kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
|
||||
else
|
||||
ret = vm_copy_id_regs(hyp_vcpu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
|
||||
struct pkvm_hyp_vm *hyp_vm,
|
||||
struct kvm_vcpu *host_vcpu)
|
||||
|
|
@@ -490,8 +520,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
|
|||
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
|
||||
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
|
||||
|
||||
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
|
||||
kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
|
||||
ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
ret = pkvm_vcpu_init_traps(hyp_vcpu);
|
||||
if (ret)
|
||||
|
|
|
|||
|
|
@@ -1754,14 +1754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
}
|
||||
|
||||
/*
|
||||
* Both the canonical IPA and fault IPA must be hugepage-aligned to
|
||||
* ensure we find the right PFN and lay down the mapping in the right
|
||||
* place.
|
||||
* Both the canonical IPA and fault IPA must be aligned to the
|
||||
* mapping size to ensure we find the right PFN and lay down the
|
||||
* mapping in the right place.
|
||||
*/
|
||||
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
|
||||
fault_ipa &= ~(vma_pagesize - 1);
|
||||
ipa &= ~(vma_pagesize - 1);
|
||||
}
|
||||
fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
|
||||
ipa = ALIGN_DOWN(ipa, vma_pagesize);
|
||||
|
||||
gfn = ipa >> PAGE_SHIFT;
|
||||
mte_allowed = kvm_vma_mte_allowed(vma);
|
||||
|
|
|
|||
|
|
@@ -854,6 +854,33 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
|
|||
return kvm_inject_nested_sync(vcpu, esr_el2);
|
||||
}
|
||||
|
||||
u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
|
||||
{
|
||||
enum vcpu_sysreg ttbr_elx;
|
||||
u64 tcr;
|
||||
u16 asid;
|
||||
|
||||
switch (regime) {
|
||||
case TR_EL10:
|
||||
tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
|
||||
ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
|
||||
break;
|
||||
case TR_EL20:
|
||||
tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
|
||||
ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
|
||||
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
|
||||
!(tcr & TCR_ASID16))
|
||||
asid &= GENMASK(7, 0);
|
||||
|
||||
return asid;
|
||||
}
|
||||
|
||||
static void invalidate_vncr(struct vncr_tlb *vt)
|
||||
{
|
||||
vt->valid = false;
|
||||
|
|
@@ -1154,9 +1181,6 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
|||
{
|
||||
int i;
|
||||
|
||||
if (!kvm->arch.nested_mmus_size)
|
||||
return;
|
||||
|
||||
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
|
||||
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
|
||||
|
||||
|
|
@@ -1336,20 +1360,8 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
|
|||
if (read_vncr_el2(vcpu) != vt->gva)
|
||||
return false;
|
||||
|
||||
if (vt->wr.nG) {
|
||||
u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
|
||||
u64 ttbr = ((tcr & TCR_A1) ?
|
||||
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
|
||||
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
|
||||
u16 asid;
|
||||
|
||||
asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
|
||||
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
|
||||
!(tcr & TCR_ASID16))
|
||||
asid &= GENMASK(7, 0);
|
||||
|
||||
return asid == vt->wr.asid;
|
||||
}
|
||||
if (vt->wr.nG)
|
||||
return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
@@ -1452,21 +1464,8 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
|
|||
if (read_vncr_el2(vcpu) != vt->gva)
|
||||
return;
|
||||
|
||||
if (vt->wr.nG) {
|
||||
u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
|
||||
u64 ttbr = ((tcr & TCR_A1) ?
|
||||
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
|
||||
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
|
||||
u16 asid;
|
||||
|
||||
asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
|
||||
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
|
||||
!(tcr & TCR_ASID16))
|
||||
asid &= GENMASK(7, 0);
|
||||
|
||||
if (asid != vt->wr.asid)
|
||||
return;
|
||||
}
|
||||
if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
|
||||
return;
|
||||
|
||||
vt->cpu = smp_processor_id();
|
||||
|
||||
|
|
|
|||
|
|
@@ -1816,6 +1816,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
|
|||
ID_AA64MMFR3_EL1_SCTLRX |
|
||||
ID_AA64MMFR3_EL1_S1POE |
|
||||
ID_AA64MMFR3_EL1_S1PIE;
|
||||
|
||||
if (!system_supports_poe())
|
||||
val &= ~ID_AA64MMFR3_EL1_S1POE;
|
||||
break;
|
||||
case SYS_ID_MMFR4_EL1:
|
||||
val &= ~ID_MMFR4_EL1_CCIDX;
|
||||
|
|
|
|||
|
|
@@ -28,7 +28,6 @@ config KVM
|
|||
select KVM_COMMON
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_MMIO
|
||||
select VIRT_XFER_TO_GUEST_WORK
|
||||
select SCHED_INFO
|
||||
|
|
|
|||
|
|
@@ -118,7 +118,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
case KVM_CAP_ONE_REG:
|
||||
case KVM_CAP_ENABLE_CAP:
|
||||
case KVM_CAP_READONLY_MEM:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_IMMEDIATE_EXIT:
|
||||
case KVM_CAP_IOEVENTFD:
|
||||
case KVM_CAP_MP_STATE:
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,6 @@ config KVM
|
|||
select KVM_COMMON
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
select KVM_MMIO
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select HAVE_KVM_READONLY_MEM
|
||||
help
|
||||
|
|
|
|||
|
|
@@ -1035,7 +1035,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
case KVM_CAP_ONE_REG:
|
||||
case KVM_CAP_ENABLE_CAP:
|
||||
case KVM_CAP_READONLY_MEM:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_IMMEDIATE_EXIT:
|
||||
r = 1;
|
||||
break;
|
||||
|
|
|
|||
|
|
@@ -38,7 +38,6 @@ config KVM_BOOK3S_64_HANDLER
|
|||
config KVM_BOOK3S_PR_POSSIBLE
|
||||
bool
|
||||
select KVM_MMIO
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
|
||||
config KVM_BOOK3S_HV_POSSIBLE
|
||||
bool
|
||||
|
|
@@ -81,7 +80,6 @@ config KVM_BOOK3S_64_HV
|
|||
tristate "KVM for POWER7 and later using hypervisor mode in host"
|
||||
depends on KVM_BOOK3S_64 && PPC_POWERNV
|
||||
select KVM_BOOK3S_HV_POSSIBLE
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_BOOK3S_HV_PMU
|
||||
select CMA
|
||||
help
|
||||
|
|
@@ -203,7 +201,6 @@ config KVM_E500V2
|
|||
depends on !CONTEXT_TRACKING_USER
|
||||
select KVM
|
||||
select KVM_MMIO
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
help
|
||||
Support running unmodified E500 guest kernels in virtual machines on
|
||||
E500v2 host processors.
|
||||
|
|
@@ -220,7 +217,6 @@ config KVM_E500MC
|
|||
select KVM
|
||||
select KVM_MMIO
|
||||
select KVM_BOOKE_HV
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
help
|
||||
Support running unmodified E500MC/E5500/E6500 guest kernels in
|
||||
virtual machines on E500MC/E5500/E6500 host processors.
|
||||
|
|
|
|||
|
|
@@ -623,12 +623,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
|
||||
!kvmppc_hv_ops->enable_nested(NULL));
|
||||
break;
|
||||
#endif
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
|
||||
r = 1;
|
||||
break;
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
case KVM_CAP_PPC_HTAB_FD:
|
||||
r = hv_enabled;
|
||||
break;
|
||||
|
|
|
|||
|
|
@@ -30,7 +30,6 @@ config KVM
|
|||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select KVM_MMIO
|
||||
select VIRT_XFER_TO_GUEST_WORK
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select SCHED_INFO
|
||||
select GUEST_PERF_EVENTS if PERF_EVENTS
|
||||
help
|
||||
|
|
|
|||
|
|
@@ -181,7 +181,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
break;
|
||||
case KVM_CAP_IOEVENTFD:
|
||||
case KVM_CAP_USER_MEMORY:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
|
||||
case KVM_CAP_ONE_REG:
|
||||
case KVM_CAP_READONLY_MEM:
|
||||
|
|
|
|||
|
|
@@ -28,9 +28,7 @@ config KVM
|
|||
select HAVE_KVM_INVALID_WAKEUPS
|
||||
select HAVE_KVM_NO_POLL
|
||||
select KVM_VFIO
|
||||
select MMU_NOTIFIER
|
||||
select VIRT_XFER_TO_GUEST_WORK
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_MMU_LOCKLESS_AGING
|
||||
help
|
||||
Support hosting paravirtualized guest machines using the SIE
|
||||
|
|
|
|||
|
|
@@ -601,7 +601,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
switch (ext) {
|
||||
case KVM_CAP_S390_PSW:
|
||||
case KVM_CAP_S390_GMAP:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
#ifdef CONFIG_KVM_S390_UCONTROL
|
||||
case KVM_CAP_S390_UCONTROL:
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -20,7 +20,6 @@ if VIRTUALIZATION
|
|||
config KVM_X86
|
||||
def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n)
|
||||
select KVM_COMMON
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_ELIDE_TLB_FLUSH_IF_YOUNG
|
||||
select KVM_MMU_LOCKLESS_AGING
|
||||
select HAVE_KVM_IRQCHIP
|
||||
|
|
|
|||
|
|
@@ -4805,7 +4805,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
#endif
|
||||
case KVM_CAP_NOP_IO_DELAY:
|
||||
case KVM_CAP_MP_STATE:
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_USER_NMI:
|
||||
case KVM_CAP_IRQ_INJECT_STATUS:
|
||||
case KVM_CAP_IOEVENTFD:
|
||||
|
|
|
|||
|
|
@@ -699,7 +699,7 @@ static int __init gicv5_irs_init(struct gicv5_irs_chip_data *irs_data)
|
|||
*/
|
||||
if (list_empty(&irs_nodes)) {
|
||||
idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR0);
|
||||
gicv5_global_data.virt_capable = !FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);
|
||||
gicv5_global_data.virt_capable = !!FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);
|
||||
|
||||
idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1);
|
||||
irs_setup_pri_bits(idr);
|
||||
|
|
|
|||
|
|
@@ -253,7 +253,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
|
|||
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
union kvm_mmu_notifier_arg {
|
||||
unsigned long attributes;
|
||||
};
|
||||
|
|
@@ -275,7 +274,6 @@ struct kvm_gfn_range {
|
|||
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
|
||||
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
|
||||
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
|
||||
#endif
|
||||
|
||||
enum {
|
||||
OUTSIDE_GUEST_MODE,
|
||||
|
|
@@ -849,13 +847,12 @@ struct kvm {
|
|||
struct hlist_head irq_ack_notifier_list;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
struct mmu_notifier mmu_notifier;
|
||||
unsigned long mmu_invalidate_seq;
|
||||
long mmu_invalidate_in_progress;
|
||||
gfn_t mmu_invalidate_range_start;
|
||||
gfn_t mmu_invalidate_range_end;
|
||||
#endif
|
||||
|
||||
struct list_head devices;
|
||||
u64 manual_dirty_log_protect;
|
||||
struct dentry *debugfs_dentry;
|
||||
|
|
@@ -2118,7 +2115,6 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
|
|||
extern const struct kvm_stats_header kvm_vcpu_stats_header;
|
||||
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
|
||||
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
|
||||
{
|
||||
if (unlikely(kvm->mmu_invalidate_in_progress))
|
||||
|
|
@@ -2196,7 +2192,6 @@ static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
|
|||
|
||||
return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
|
||||
|
||||
|
|
|
|||
|
|
@@ -5,6 +5,7 @@ config KVM_COMMON
|
|||
bool
|
||||
select EVENTFD
|
||||
select INTERVAL_TREE
|
||||
select MMU_NOTIFIER
|
||||
select PREEMPT_NOTIFIERS
|
||||
|
||||
config HAVE_KVM_PFNCACHE
|
||||
|
|
@@ -93,24 +94,16 @@ config HAVE_KVM_PM_NOTIFIER
|
|||
config KVM_GENERIC_HARDWARE_ENABLING
|
||||
bool
|
||||
|
||||
config KVM_GENERIC_MMU_NOTIFIER
|
||||
select MMU_NOTIFIER
|
||||
bool
|
||||
|
||||
config KVM_ELIDE_TLB_FLUSH_IF_YOUNG
|
||||
depends on KVM_GENERIC_MMU_NOTIFIER
|
||||
bool
|
||||
|
||||
config KVM_MMU_LOCKLESS_AGING
|
||||
depends on KVM_GENERIC_MMU_NOTIFIER
|
||||
bool
|
||||
|
||||
config KVM_GENERIC_MEMORY_ATTRIBUTES
|
||||
depends on KVM_GENERIC_MMU_NOTIFIER
|
||||
bool
|
||||
|
||||
config KVM_GUEST_MEMFD
|
||||
depends on KVM_GENERIC_MMU_NOTIFIER
|
||||
select XARRAY_MULTI
|
||||
bool
|
||||
|
||||
|
|
|
|||
|
|
@@ -502,7 +502,6 @@ void kvm_destroy_vcpus(struct kvm *kvm)
|
|||
}
|
||||
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
|
||||
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
|
||||
{
|
||||
return container_of(mn, struct kvm, mmu_notifier);
|
||||
|
|
@@ -901,15 +900,6 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
|
|||
return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
|
||||
|
||||
static int kvm_init_mmu_notifier(struct kvm *kvm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
|
||||
static int kvm_pm_notifier_call(struct notifier_block *bl,
|
||||
unsigned long state,
|
||||
|
|
@@ -1226,10 +1216,8 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
|
|||
out_err_no_debugfs:
|
||||
kvm_coalesced_mmio_free(kvm);
|
||||
out_no_coalesced_mmio:
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
if (kvm->mmu_notifier.ops)
|
||||
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
|
||||
#endif
|
||||
out_err_no_mmu_notifier:
|
||||
kvm_disable_virtualization();
|
||||
out_err_no_disable:
|
||||
|
|
@@ -1292,7 +1280,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
|
|||
kvm->buses[i] = NULL;
|
||||
}
|
||||
kvm_coalesced_mmio_free(kvm);
|
||||
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
|
||||
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
|
||||
/*
|
||||
* At this point, pending calls to invalidate_range_start()
|
||||
|
|
@@ -1311,9 +1298,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
|
|||
kvm->mn_active_invalidate_count = 0;
|
||||
else
|
||||
WARN_ON(kvm->mmu_invalidate_in_progress);
|
||||
#else
|
||||
kvm_flush_shadow_all(kvm);
|
||||
#endif
|
||||
kvm_arch_destroy_vm(kvm);
|
||||
kvm_destroy_devices(kvm);
|
||||
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
|
||||
|
|
@@ -4886,6 +4870,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
|
|||
static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
|
||||
{
|
||||
switch (arg) {
|
||||
case KVM_CAP_SYNC_MMU:
|
||||
case KVM_CAP_USER_MEMORY:
|
||||
case KVM_CAP_USER_MEMORY2:
|
||||
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue