KVM/riscv changes for 6.20

- Fixes for issues discovered by KVM API fuzzing in
   kvm_riscv_aia_imsic_has_attr(), kvm_riscv_aia_imsic_rw_attr(),
   and kvm_riscv_vcpu_aia_imsic_update()
 - Allow Zalasr, Zilsd and Zclsd extensions for Guest/VM
 - Add riscv vm satp modes in KVM selftests
 - Transparent huge page support for G-stage
 - Adjust the number of available guest irq files based on
   MMIO register sizes in DeviceTree or ACPI
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmmF8FsACgkQrUjsVaLH
 LAfDYxAAh3jlLkHGlPiWtKcZ/cS+uvpA5hE52h+UmCUOU7mRuvnoA+zS3HcW8lQo
 qyZt/NNE4qZ7vNhcDp+BTPIGAv06lwCbsPaBkGMA94jrBHXko6GBb5qkiIqi+L0M
 nkUABfM5l3Rsleo8JJEGEn5Egr7waNQBr8TynF6yChAJlnbuEVskaxzwzl+s7COV
 wHrU4OfkXBDCLwyuP65oJbBpP+P2ylJV25gl6E0oGv2CIcMpgJIMibbTewqzVFuR
 Z79/GhRC64ds7+vlHhOuajehbMcBSAnkGZGC6IMOp63gyswtXZvXfI+x3uv+i1KS
 D5gdO7sT6WBl/Y8IDQTTv4Tuk5I9I6luClVzJtfxaIp9I5wNPx9FS4qKipUxbu+e
 EFWs/mC+6U7MRm49n8FwXfoDwiFYm2XA6VB2FZdAwePxJKsqON5UKI3TDNTxNuh7
 rbUOFOUn3azyHgHD/WuVXRnFK4VUs0YVFgW/cx4hUWLafVkiWW/5ve5vsx1jmiBG
 EFN/db6unjUXa/ZIC3y/hJ1UhTBVdSKupbawWmksHav8ugE69o7GF8r5J7/RQtTj
 6MHTNFwvatjaWVzCCjYQ+hV/qGD2SMB0D7rReV28D44KFQCrCgTmkpJoZKu+Uq2B
 sjI1XW8kH/n3OX/Sllj3ZO+VOfeXWlBC6yW5ARhnsEvoc4bHWpk=
 =PSMs
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-6.20-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.20

- Fixes for issues discovered by KVM API fuzzing in
  kvm_riscv_aia_imsic_has_attr(), kvm_riscv_aia_imsic_rw_attr(),
  and kvm_riscv_vcpu_aia_imsic_update()
- Allow Zalasr, Zilsd and Zclsd extensions for Guest/VM
- Add riscv vm satp modes in KVM selftests
- Transparent huge page support for G-stage
- Adjust the number of available guest irq files based on
  MMIO register sizes in DeviceTree or ACPI
This commit is contained in:
Paolo Bonzini 2026-02-09 19:05:42 +01:00
commit 54f15ebfc6
15 changed files with 330 additions and 22 deletions

View file

@ -192,6 +192,9 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZFBFMIN,
KVM_RISCV_ISA_EXT_ZVFBFMIN,
KVM_RISCV_ISA_EXT_ZVFBFWMA,
KVM_RISCV_ISA_EXT_ZCLSD,
KVM_RISCV_ISA_EXT_ZILSD,
KVM_RISCV_ISA_EXT_ZALASR,
KVM_RISCV_ISA_EXT_MAX,
};

View file

@ -630,7 +630,7 @@ int kvm_riscv_aia_init(void)
*/
if (gc)
kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
BIT(gc->guest_index_bits) - 1);
gc->nr_guest_files);
else
kvm_riscv_aia_nr_hgei = 0;

View file

@ -797,6 +797,10 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
return 1;
/* IMSIC vCPU state may not be initialized yet */
if (!imsic)
return 1;
/* Read old IMSIC VS-file details */
read_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
@ -952,8 +956,10 @@ int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
if (!vcpu)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
imsic = vcpu->arch.aia_context.imsic_state;
if (!imsic)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
read_lock_irqsave(&imsic->vsfile_lock, flags);
@ -993,8 +999,11 @@ int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
if (!vcpu)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
imsic = vcpu->arch.aia_context.imsic_state;
if (!imsic)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
return imsic_mrif_isel_check(imsic->nr_eix, isel);
}

View file

@ -305,6 +305,142 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return pte_young(ptep_get(ptep));
}
/*
 * Return true if the fault at @hva may be mapped with a PMD-sized g-stage
 * huge page: the memslot's HVA and GPA must share the same offset within a
 * PMD, and @hva must fall inside the PMD-aligned interior of the memslot
 * (so a block mapping never spills past the slot's bounds).
 */
static bool fault_supports_gstage_huge_mapping(struct kvm_memory_slot *memslot,
unsigned long hva)
{
hva_t uaddr_start, uaddr_end;
gpa_t gpa_start;
size_t size;
/* Userspace address range [uaddr_start, uaddr_end) backing the memslot. */
size = memslot->npages * PAGE_SIZE;
uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
/*
 * Pages belonging to memslots that don't have the same alignment
 * within a PMD for userspace and GPA cannot be mapped with g-stage
 * PMD entries, because we'll end up mapping the wrong pages.
 *
 * Consider a layout like the following:
 *
 * memslot->userspace_addr:
 * +-----+--------------------+--------------------+---+
 * |abcde|fgh vs-stage block | vs-stage block tv|xyz|
 * +-----+--------------------+--------------------+---+
 *
 * memslot->base_gfn << PAGE_SHIFT:
 * +---+--------------------+--------------------+-----+
 * |abc|def g-stage block | g-stage block |tvxyz|
 * +---+--------------------+--------------------+-----+
 *
 * If we create those g-stage blocks, we'll end up with this incorrect
 * mapping:
 * d -> f
 * e -> g
 * f -> h
 */
if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
return false;
/*
 * Next, let's make sure we're not trying to map anything not covered
 * by the memslot. This means we have to prohibit block size mappings
 * for the beginning and end of a non-block aligned and non-block sized
 * memory slot (illustrated by the head and tail parts of the
 * userspace view above containing pages 'abcde' and 'xyz',
 * respectively).
 *
 * Note that it doesn't matter if we do the check using the
 * userspace_addr or the base_gfn, as both are equally aligned (per
 * the check above) and equally sized.
 */
return (hva >= ALIGN(uaddr_start, PMD_SIZE)) && (hva < ALIGN_DOWN(uaddr_end, PMD_SIZE));
}
/*
 * Walk the host (primary MMU) page tables locklessly to find the size of
 * the mapping backing @hva. Returns PUD_SIZE or PMD_SIZE for huge leaf
 * entries, PAGE_SIZE otherwise (including when no mapping is present).
 *
 * NOTE: each table entry is read exactly once and the walk continues on the
 * local copy; the ordering of these loads is load-bearing — do not reorder.
 */
static int get_hva_mapping_size(struct kvm *kvm,
unsigned long hva)
{
int size = PAGE_SIZE;
unsigned long flags;
pgd_t pgd;
p4d_t p4d;
pud_t pud;
pmd_t pmd;
/*
 * Disable IRQs to prevent concurrent tear down of host page tables,
 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
 * the original page table.
 */
local_irq_save(flags);
/*
 * Read each entry once. As above, a non-leaf entry can be promoted to
 * a huge page _during_ this walk. Re-reading the entry could send the
 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
 * value) and then p*d_offset() walks into the target huge page instead
 * of the old page table (sees the new value).
 */
pgd = pgdp_get(pgd_offset(kvm->mm, hva));
if (pgd_none(pgd))
goto out;
p4d = p4dp_get(p4d_offset(&pgd, hva));
if (p4d_none(p4d) || !p4d_present(p4d))
goto out;
pud = pudp_get(pud_offset(&p4d, hva));
if (pud_none(pud) || !pud_present(pud))
goto out;
if (pud_leaf(pud)) {
size = PUD_SIZE;
goto out;
}
pmd = pmdp_get(pmd_offset(&pud, hva));
if (pmd_none(pmd) || !pmd_present(pmd))
goto out;
if (pmd_leaf(pmd))
size = PMD_SIZE;
out:
local_irq_restore(flags);
return size;
}
/*
 * If the fault at @hva is backed by a host THP and the memslot allows a
 * PMD-sized g-stage block mapping, align *@gpa and *@hfnp down to the PMD
 * boundary and return PMD_SIZE. Otherwise leave them untouched and return
 * the page-granular mapping size (PAGE_SIZE, or the size reported by
 * get_hva_mapping_size() when it is below PMD_SIZE).
 */
static unsigned long transparent_hugepage_adjust(struct kvm *kvm,
struct kvm_memory_slot *memslot,
unsigned long hva,
kvm_pfn_t *hfnp, gpa_t *gpa)
{
kvm_pfn_t hfn = *hfnp;
/*
 * Make sure the adjustment is done only for THP pages. Also make
 * sure that the HVA and GPA are sufficiently aligned and that the
 * block map is contained within the memslot.
 */
if (fault_supports_gstage_huge_mapping(memslot, hva)) {
int sz;
sz = get_hva_mapping_size(kvm, hva);
if (sz < PMD_SIZE)
return sz;
/* Round GPA and host PFN down to the start of the PMD block. */
*gpa &= PMD_MASK;
hfn &= ~(PTRS_PER_PMD - 1);
*hfnp = hfn;
return PMD_SIZE;
}
return PAGE_SIZE;
}
int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write,
struct kvm_gstage_mapping *out_map)
@ -398,6 +534,10 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
/* Check if we are backed by a THP and thus use block mapping if possible */
if (vma_pagesize == PAGE_SIZE)
vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);
if (writable) {
mark_page_dirty_in_slot(kvm, memslot, gfn);
ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,

View file

@ -50,6 +50,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZAAMO),
KVM_ISA_EXT_ARR(ZABHA),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZALASR),
KVM_ISA_EXT_ARR(ZALRSC),
KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
@ -63,6 +64,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZCB),
KVM_ISA_EXT_ARR(ZCD),
KVM_ISA_EXT_ARR(ZCF),
KVM_ISA_EXT_ARR(ZCLSD),
KVM_ISA_EXT_ARR(ZCMOP),
KVM_ISA_EXT_ARR(ZFA),
KVM_ISA_EXT_ARR(ZFBFMIN),
@ -79,6 +81,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
KVM_ISA_EXT_ARR(ZILSD),
KVM_ISA_EXT_ARR(ZIMOP),
KVM_ISA_EXT_ARR(ZKND),
KVM_ISA_EXT_ARR(ZKNE),
@ -187,6 +190,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZAAMO:
case KVM_RISCV_ISA_EXT_ZABHA:
case KVM_RISCV_ISA_EXT_ZACAS:
case KVM_RISCV_ISA_EXT_ZALASR:
case KVM_RISCV_ISA_EXT_ZALRSC:
case KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_RISCV_ISA_EXT_ZBA:

View file

@ -494,12 +494,9 @@ int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low
}
ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
if (ret) {
if (ret)
ret = SBI_ERR_INVALID_ADDRESS;
goto free_mem;
}
ret = 0;
free_mem:
kfree(einfo);
out:

View file

@ -47,6 +47,7 @@ pud_t *pud_offset(p4d_t *p4d, unsigned long address)
return (pud_t *)p4d;
}
EXPORT_SYMBOL_GPL(pud_offset);
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
@ -55,6 +56,7 @@ p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
return (p4d_t *)pgd;
}
EXPORT_SYMBOL_GPL(p4d_offset);
#endif
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

View file

@ -784,7 +784,7 @@ static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
{
u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
u32 i, j, index, nr_parent_irqs, nr_mmios, nr_guest_files, nr_handlers = 0;
struct imsic_global_config *global;
struct imsic_local_config *local;
void __iomem **mmios_va = NULL;
@ -878,6 +878,7 @@ int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
}
/* Configure handlers for target CPUs */
global->nr_guest_files = BIT(global->guest_index_bits) - 1;
for (i = 0; i < nr_parent_irqs; i++) {
rc = imsic_get_parent_hartid(fwnode, i, &hartid);
if (rc) {
@ -918,6 +919,15 @@ int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
local->msi_pa = mmios[index].start + reloff;
local->msi_va = mmios_va[index] + reloff;
/*
* KVM uses global->nr_guest_files to determine the available guest
* interrupt files on each CPU. Take the minimum number of guest
* interrupt files across all CPUs to avoid KVM incorrectly allocating
* a nonexistent or unmapped guest interrupt file on some CPUs.
*/
nr_guest_files = (resource_size(&mmios[index]) - reloff) / IMSIC_MMIO_PAGE_SZ - 1;
global->nr_guest_files = min(global->nr_guest_files, nr_guest_files);
nr_handlers++;
}

View file

@ -68,6 +68,9 @@ struct imsic_global_config {
/* Number of guest interrupt identities */
u32 nr_guest_ids;
/* Number of guest interrupt files per core */
u32 nr_guest_files;
/* Per-CPU IMSIC addresses */
struct imsic_local_config __percpu *local;
};

View file

@ -198,6 +198,17 @@ enum vm_guest_mode {
VM_MODE_P36V48_64K,
VM_MODE_P47V47_16K,
VM_MODE_P36V47_16K,
VM_MODE_P56V57_4K, /* For riscv64 */
VM_MODE_P56V48_4K,
VM_MODE_P56V39_4K,
VM_MODE_P50V57_4K,
VM_MODE_P50V48_4K,
VM_MODE_P50V39_4K,
VM_MODE_P41V57_4K,
VM_MODE_P41V48_4K,
VM_MODE_P41V39_4K,
NUM_VM_MODES,
};
@ -222,10 +233,10 @@ kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
shape; \
})
#if defined(__aarch64__)
extern enum vm_guest_mode vm_mode_default;
#if defined(__aarch64__)
#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
@ -248,7 +259,7 @@ extern enum vm_guest_mode vm_mode_default;
#error "RISC-V 32-bit kvm selftests not supported"
#endif
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

View file

@ -192,4 +192,6 @@ static inline void local_irq_disable(void)
csr_clear(CSR_SSTATUS, SR_SIE);
}
unsigned long riscv64_get_satp_mode(void);
#endif /* SELFTEST_KVM_PROCESSOR_H */

View file

@ -4,7 +4,7 @@
*/
#include "guest_modes.h"
#ifdef __aarch64__
#if defined(__aarch64__) || defined(__riscv)
#include "processor.h"
enum vm_guest_mode vm_mode_default;
#endif
@ -13,9 +13,11 @@ struct guest_mode guest_modes[NUM_VM_MODES];
void guest_modes_append_default(void)
{
#ifndef __aarch64__
#if !defined(__aarch64__) && !defined(__riscv)
guest_mode_append(VM_MODE_DEFAULT, true);
#else
#endif
#ifdef __aarch64__
{
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
uint32_t ipa4k, ipa16k, ipa64k;
@ -74,11 +76,36 @@ void guest_modes_append_default(void)
#ifdef __riscv
{
unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS);
unsigned long satp_mode = riscv64_get_satp_mode() << SATP_MODE_SHIFT;
int i;
if (sz >= 52)
guest_mode_append(VM_MODE_P52V48_4K, true);
if (sz >= 48)
guest_mode_append(VM_MODE_P48V48_4K, true);
switch (sz) {
case 59:
guest_mode_append(VM_MODE_P56V57_4K, satp_mode >= SATP_MODE_57);
guest_mode_append(VM_MODE_P56V48_4K, satp_mode >= SATP_MODE_48);
guest_mode_append(VM_MODE_P56V39_4K, satp_mode >= SATP_MODE_39);
break;
case 50:
guest_mode_append(VM_MODE_P50V57_4K, satp_mode >= SATP_MODE_57);
guest_mode_append(VM_MODE_P50V48_4K, satp_mode >= SATP_MODE_48);
guest_mode_append(VM_MODE_P50V39_4K, satp_mode >= SATP_MODE_39);
break;
case 41:
guest_mode_append(VM_MODE_P41V57_4K, satp_mode >= SATP_MODE_57);
guest_mode_append(VM_MODE_P41V48_4K, satp_mode >= SATP_MODE_48);
guest_mode_append(VM_MODE_P41V39_4K, satp_mode >= SATP_MODE_39);
break;
default:
break;
}
/* set the first supported mode as default */
vm_mode_default = NUM_VM_MODES;
for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) {
if (guest_modes[i].supported && guest_modes[i].enabled)
vm_mode_default = i;
}
TEST_ASSERT(vm_mode_default != NUM_VM_MODES, "No supported mode!");
}
#endif
}

View file

@ -209,6 +209,15 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
[VM_MODE_P47V47_16K] = "PA-bits:47, VA-bits:47, 16K pages",
[VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
[VM_MODE_P56V57_4K] = "PA-bits:56, VA-bits:57, 4K pages",
[VM_MODE_P56V48_4K] = "PA-bits:56, VA-bits:48, 4K pages",
[VM_MODE_P56V39_4K] = "PA-bits:56, VA-bits:39, 4K pages",
[VM_MODE_P50V57_4K] = "PA-bits:50, VA-bits:57, 4K pages",
[VM_MODE_P50V48_4K] = "PA-bits:50, VA-bits:48, 4K pages",
[VM_MODE_P50V39_4K] = "PA-bits:50, VA-bits:39, 4K pages",
[VM_MODE_P41V57_4K] = "PA-bits:41, VA-bits:57, 4K pages",
[VM_MODE_P41V48_4K] = "PA-bits:41, VA-bits:48, 4K pages",
[VM_MODE_P41V39_4K] = "PA-bits:41, VA-bits:39, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@ -236,6 +245,15 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
[VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
[VM_MODE_P47V47_16K] = { 47, 47, 0x4000, 14 },
[VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
[VM_MODE_P56V57_4K] = { 56, 57, 0x1000, 12 },
[VM_MODE_P56V48_4K] = { 56, 48, 0x1000, 12 },
[VM_MODE_P56V39_4K] = { 56, 39, 0x1000, 12 },
[VM_MODE_P50V57_4K] = { 50, 57, 0x1000, 12 },
[VM_MODE_P50V48_4K] = { 50, 48, 0x1000, 12 },
[VM_MODE_P50V39_4K] = { 50, 39, 0x1000, 12 },
[VM_MODE_P41V57_4K] = { 41, 57, 0x1000, 12 },
[VM_MODE_P41V48_4K] = { 41, 48, 0x1000, 12 },
[VM_MODE_P41V39_4K] = { 41, 39, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@ -338,6 +356,21 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
case VM_MODE_P44V64_4K:
vm->mmu.pgtable_levels = 5;
break;
case VM_MODE_P56V57_4K:
case VM_MODE_P50V57_4K:
case VM_MODE_P41V57_4K:
vm->mmu.pgtable_levels = 5;
break;
case VM_MODE_P56V48_4K:
case VM_MODE_P50V48_4K:
case VM_MODE_P41V48_4K:
vm->mmu.pgtable_levels = 4;
break;
case VM_MODE_P56V39_4K:
case VM_MODE_P50V39_4K:
case VM_MODE_P41V39_4K:
vm->mmu.pgtable_levels = 3;
break;
default:
TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
}

View file

@ -8,6 +8,7 @@
#include <linux/compiler.h>
#include <assert.h>
#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"
@ -193,22 +194,41 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
struct kvm_vm *vm = vcpu->vm;
unsigned long satp;
unsigned long satp_mode;
unsigned long max_satp_mode;
/*
* The RISC-V Sv48 MMU mode supports 56-bit physical address
* for 48-bit virtual address with 4KB last level page size.
*/
switch (vm->mode) {
case VM_MODE_P52V48_4K:
case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
case VM_MODE_P56V57_4K:
case VM_MODE_P50V57_4K:
case VM_MODE_P41V57_4K:
satp_mode = SATP_MODE_57;
break;
case VM_MODE_P56V48_4K:
case VM_MODE_P50V48_4K:
case VM_MODE_P41V48_4K:
satp_mode = SATP_MODE_48;
break;
case VM_MODE_P56V39_4K:
case VM_MODE_P50V39_4K:
case VM_MODE_P41V39_4K:
satp_mode = SATP_MODE_39;
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
max_satp_mode = vcpu_get_reg(vcpu, RISCV_CONFIG_REG(satp_mode));
if ((satp_mode >> SATP_MODE_SHIFT) > max_satp_mode)
TEST_FAIL("Unable to set satp mode 0x%lx, max mode 0x%lx\n",
satp_mode >> SATP_MODE_SHIFT, max_satp_mode);
satp = (vm->mmu.pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
satp |= SATP_MODE_48;
satp |= satp_mode;
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
}
@ -511,3 +531,38 @@ unsigned long get_host_sbi_spec_version(void)
return ret.value;
}
/* Per-arch selftest init hook: populate the riscv64 guest-mode table. */
void kvm_selftest_arch_init(void)
{
/*
 * riscv64 doesn't have a true default mode, so start by detecting the
 * supported vm mode.
 */
guest_modes_append_default();
}
/*
 * Query the host KVM's maximum supported satp mode by creating a throwaway
 * VM + vCPU and reading the RISCV_CONFIG_REG(satp_mode) one-reg.
 *
 * NOTE(review): the returned value appears to be the raw satp mode number
 * (e.g. 8/9/10 for Sv39/Sv48/Sv57), not shifted into the satp MODE field —
 * callers shift it by SATP_MODE_SHIFT themselves; confirm against callers.
 */
unsigned long riscv64_get_satp_mode(void)
{
int kvm_fd, vm_fd, vcpu_fd, err;
uint64_t val;
struct kvm_one_reg reg = {
.id = RISCV_CONFIG_REG(satp_mode),
.addr = (uint64_t)&val,
};
/* Temporary fds purely for the KVM_GET_ONE_REG query below. */
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, NULL);
TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
/* Release in reverse creation order. */
close(vcpu_fd);
close(vm_fd);
close(kvm_fd);
return val;
}

View file

@ -65,6 +65,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAAMO:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZABHA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZALASR:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZALRSC:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
@ -78,6 +79,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCD:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCF:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCLSD:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCMOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFBFMIN:
@ -94,6 +96,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZILSD:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIMOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
@ -525,6 +528,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZAAMO),
KVM_ISA_EXT_ARR(ZABHA),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZALASR),
KVM_ISA_EXT_ARR(ZALRSC),
KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
@ -538,6 +542,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZCB),
KVM_ISA_EXT_ARR(ZCD),
KVM_ISA_EXT_ARR(ZCF),
KVM_ISA_EXT_ARR(ZCLSD),
KVM_ISA_EXT_ARR(ZCMOP),
KVM_ISA_EXT_ARR(ZFA),
KVM_ISA_EXT_ARR(ZFBFMIN),
@ -554,6 +559,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
KVM_ISA_EXT_ARR(ZILSD),
KVM_ISA_EXT_ARR(ZIMOP),
KVM_ISA_EXT_ARR(ZKND),
KVM_ISA_EXT_ARR(ZKNE),
@ -1166,6 +1172,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(svvptc, SVVPTC);
KVM_ISA_EXT_SIMPLE_CONFIG(zaamo, ZAAMO);
KVM_ISA_EXT_SIMPLE_CONFIG(zabha, ZABHA);
KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
KVM_ISA_EXT_SIMPLE_CONFIG(zalasr, ZALASR);
KVM_ISA_EXT_SIMPLE_CONFIG(zalrsc, ZALRSC);
KVM_ISA_EXT_SIMPLE_CONFIG(zawrs, ZAWRS);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
@ -1179,6 +1186,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA);
KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB);
KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD);
KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF);
KVM_ISA_EXT_SIMPLE_CONFIG(zclsd, ZCLSD);
KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
KVM_ISA_EXT_SIMPLE_CONFIG(zfbfmin, ZFBFMIN);
@ -1195,6 +1203,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
KVM_ISA_EXT_SIMPLE_CONFIG(zilsd, ZILSD);
KVM_ISA_EXT_SIMPLE_CONFIG(zimop, ZIMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
@ -1247,6 +1256,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zabha,
&config_zacas,
&config_zalrsc,
&config_zalasr,
&config_zawrs,
&config_zba,
&config_zbb,
@ -1259,6 +1269,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zcb,
&config_zcd,
&config_zcf,
&config_zclsd,
&config_zcmop,
&config_zfa,
&config_zfbfmin,
@ -1275,6 +1286,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zihintntl,
&config_zihintpause,
&config_zihpm,
&config_zilsd,
&config_zimop,
&config_zknd,
&config_zkne,