LoongArch KVM changes for v6.20

1. Add more CPUCFG mask bits.
 2. Improve feature detection.
 3. Add FPU/LBT delay load support.
 4. Set default return value in KVM IO bus ops.
 5. Add paravirt preempt feature support.
 6. Add KVM steal time test case for tools/selftests.
 -----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmmFcN4WHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImeqcAD/9OZOg0J14+UXZ2qF0cGvSWKSCD
 I6TRjy2OlVbcUCt7N/M7dppOuaDfv1ilIexulvubglUIvRMJXNvOAjqTU7I4+MOF
 3jjUTklnF9gtMjmWjatWwjo8KHim93zc99FDgy7rRNZRAhosO3BFWJ+b5hEk5RMY
 jOCGXiAMob3+w26KKDC/FK6xSpVt+rcCRNymc9T8/kLYY2fv+cWbXwmk0U4ry6yG
 xGhvzIcsNnjH15rNB9zbleNrw28uxEJ3V/M/F8C5SbF0V71B2XWyRUi5X75ExjzT
 gKzYEwoPhcCBLRd/SMk7RCMk/aGS6sFLGbDLShuG9MRtmJAGk4b92wfIXVVRBiAt
 TzO0xcQdQvVFZnaKHe/r7x7+roA+790oZbJlpVJVpVgV5obiKM9OCLNtCnWD/n5B
 FDV2Xjyfdmk6Br+MSpb7iq+3AKUDAVDEpRLEZkt5nCeVX1IX0y1KdtWb2MxVVULm
 VXncgVLiG4RaQRNk1Gzqhgxml/BfN8im2ytK6I7qnUTAmm/GuqRq5fKLJH0hgASr
 /kHsPcTam6JSKk/YJO2TXw21O1mZE/RwtTRW4bplq5d/X17cqUpqBmf3TelN1uvI
 alx6YkF8lBJ6nd7YHVLypvsEMyPJNNihyjC264E5IcaeRCMjo52Lq/6rkManHZCW
 z+qO3gLbJB3TbkLCXA==
 =CUZl
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-kvm-6.20' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD

LoongArch KVM changes for v6.20

1. Add more CPUCFG mask bits.
2. Improve feature detection.
3. Add FPU/LBT delay load support.
4. Set default return value in KVM IO bus ops.
5. Add paravirt preempt feature support.
6. Add KVM steal time test case for tools/selftests.
This commit is contained in:
Paolo Bonzini 2026-02-09 18:17:01 +01:00
commit c14f646638
17 changed files with 319 additions and 116 deletions

View file

@@ -37,6 +37,7 @@
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_REQ_AUX_LOAD KVM_ARCH_REQ(3)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -164,6 +165,7 @@ enum emulation_result {
#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
BIT(KVM_FEATURE_PREEMPT) | \
BIT(KVM_FEATURE_STEAL_TIME) | \
BIT(KVM_FEATURE_USER_HCALL) | \
BIT(KVM_FEATURE_VIRT_EXTIOI))
@@ -200,6 +202,7 @@ struct kvm_vcpu_arch {
/* Which auxiliary state is loaded (KVM_LARCH_*) */
unsigned int aux_inuse;
unsigned int aux_ldtype;
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
@@ -252,6 +255,7 @@ struct kvm_vcpu_arch {
u64 guest_addr;
u64 last_steal;
struct gfn_to_hva_cache cache;
u8 preempted;
} st;
};
@@ -265,6 +269,11 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned
csr->csrs[reg] = val;
}
static inline bool kvm_guest_has_msgint(struct kvm_vcpu_arch *arch)
{
return arch->cpucfg[1] & CPUCFG1_MSGINT;
}
static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
return arch->cpucfg[2] & CPUCFG2_FP;

View file

@@ -37,8 +37,10 @@ struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
__u32 pad[12];
__u8 preempted;
__u8 pad[47];
};
#define KVM_VCPU_PREEMPTED (1 << 0)
/*
* Hypercall interface for KVM hypervisor

View file

@@ -690,6 +690,7 @@
#define LOONGARCH_CSR_ISR3 0xa3
#define LOONGARCH_CSR_IRR 0xa4
#define LOONGARCH_CSR_IPR 0xa5
#define LOONGARCH_CSR_PRID 0xc0

View file

@@ -34,6 +34,10 @@ __retry:
return true;
}
#define vcpu_is_preempted vcpu_is_preempted
bool vcpu_is_preempted(int cpu);
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>

View file

@@ -105,6 +105,7 @@ struct kvm_fpu {
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
#define KVM_LOONGARCH_VM_FEAT_PTW 8
#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10
/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0

View file

@@ -15,6 +15,7 @@
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
#define KVM_FEATURE_IPI 1
#define KVM_FEATURE_STEAL_TIME 2
#define KVM_FEATURE_PREEMPT 3
/* BIT 24 - 31 are features configurable by user space vmm */
#define KVM_FEATURE_VIRT_EXTIOI 24
#define KVM_FEATURE_USER_HCALL 25

View file

@@ -12,6 +12,7 @@ static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
static u64 native_steal_clock(int cpu)
@@ -267,6 +268,18 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
return 0;
}
bool vcpu_is_preempted(int cpu)
{
struct kvm_steal_time *src;
if (!static_branch_unlikely(&virt_preempt_key))
return false;
src = &per_cpu(steal_time, cpu);
return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
EXPORT_SYMBOL(vcpu_is_preempted);
#endif
static void pv_cpu_reboot(void *unused)
@@ -308,6 +321,9 @@ int __init pv_time_init(void)
pr_err("Failed to install cpu hotplug callbacks\n");
return r;
}
if (kvm_para_has_feature(KVM_FEATURE_PREEMPT))
static_branch_enable(&virt_preempt_key);
#endif
static_call_update(pv_steal_clock, paravt_steal_clock);
@@ -318,7 +334,10 @@ int __init pv_time_init(void)
static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif
pr_info("Using paravirt steal-time\n");
if (static_key_enabled(&virt_preempt_key))
pr_info("Using paravirt steal-time with preempt enabled\n");
else
pr_info("Using paravirt steal-time with preempt disabled\n");
return 0;
}

View file

@@ -754,7 +754,8 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
return RESUME_HOST;
}
kvm_own_fpu(vcpu);
vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
return RESUME_GUEST;
}
@@ -792,8 +793,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
*/
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
if (!kvm_guest_has_lsx(&vcpu->arch))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
else {
vcpu->arch.aux_ldtype = KVM_LARCH_LSX;
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
}
return RESUME_GUEST;
}
@@ -808,16 +813,24 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
*/
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
if (!kvm_guest_has_lasx(&vcpu->arch))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
else {
vcpu->arch.aux_ldtype = KVM_LARCH_LASX;
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
}
return RESUME_GUEST;
}
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lbt(vcpu))
if (!kvm_guest_has_lbt(&vcpu->arch))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
else {
vcpu->arch.aux_ldtype = KVM_LARCH_LBT;
kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
}
return RESUME_GUEST;
}

View file

@@ -119,7 +119,7 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, unsigned long *val)
{
int index, ret = 0;
int index;
u64 data = 0;
gpa_t offset;
@@ -150,40 +150,36 @@ static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eioint
data = s->coremap[index];
break;
default:
ret = -EINVAL;
break;
}
*val = data;
return ret;
return 0;
}
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
int ret = -EINVAL;
unsigned long flags, data, offset;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
return -EINVAL;
return 0;
}
if (addr & (len - 1)) {
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
return 0;
}
offset = addr & 0x7;
addr -= offset;
vcpu->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
loongarch_eiointc_read(vcpu, eiointc, addr, &data);
spin_unlock_irqrestore(&eiointc->lock, flags);
if (ret)
return ret;
data = data >> (offset * 8);
switch (len) {
@@ -208,7 +204,7 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
gpa_t addr, u64 value, u64 field_mask)
{
int index, irq, ret = 0;
int index, irq;
u8 cpu;
u64 data, old, mask;
gpa_t offset;
@@ -287,29 +283,27 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
break;
}
return ret;
return 0;
}
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret = -EINVAL;
unsigned long flags, value;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
return -EINVAL;
return 0;
}
if (addr & (len - 1)) {
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
return 0;
}
vcpu->stat.eiointc_write_exits++;
@@ -317,24 +311,24 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
switch (len) {
case 1:
value = *(unsigned char *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
break;
case 2:
value = *(unsigned short *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
break;
case 4:
value = *(unsigned int *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
break;
default:
value = *(unsigned long *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
break;
}
spin_unlock_irqrestore(&eiointc->lock, flags);
return ret;
return 0;
}
static const struct kvm_io_device_ops kvm_eiointc_ops = {
@@ -352,7 +346,7 @@ static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
if (!eiointc) {
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
return -EINVAL;
return 0;
}
addr -= EIOINTC_VIRT_BASE;
@@ -376,28 +370,25 @@ static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret = 0;
unsigned long flags;
u32 value = *(u32 *)val;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
return -EINVAL;
return 0;
}
addr -= EIOINTC_VIRT_BASE;
spin_lock_irqsave(&eiointc->lock, flags);
switch (addr) {
case EIOINTC_VIRT_FEATURES:
ret = -EPERM;
break;
case EIOINTC_VIRT_CONFIG:
/*
* eiointc features can only be set at disabled status
*/
if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
ret = -EPERM;
break;
}
eiointc->status = value & eiointc->features;
@@ -407,7 +398,7 @@ static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
}
spin_unlock_irqrestore(&eiointc->lock, flags);
return ret;
return 0;
}
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {

View file

@@ -111,7 +111,7 @@ static int mail_send(struct kvm *kvm, uint64_t data)
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return -EINVAL;
return 0;
}
mailbox = ((data & 0xffffffff) >> 2) & 0x7;
offset = IOCSR_IPI_BUF_20 + mailbox * 4;
@@ -145,7 +145,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (unlikely(ret)) {
kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
return ret;
return 0;
}
/* Construct the mask by scanning the bit 27-30 */
for (i = 0; i < 4; i++) {
@@ -162,7 +162,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
if (unlikely(ret))
kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
return ret;
return 0;
}
static int any_send(struct kvm *kvm, uint64_t data)
@@ -174,7 +174,7 @@ static int any_send(struct kvm *kvm, uint64_t data)
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return -EINVAL;
return 0;
}
offset = data & 0xffff;
@@ -183,7 +183,6 @@ static int any_send(struct kvm *kvm, uint64_t data)
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
int ret = 0;
uint32_t offset;
uint64_t res = 0;
@@ -202,33 +201,27 @@ static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void
spin_unlock(&vcpu->arch.ipi_state.lock);
break;
case IOCSR_IPI_SET:
res = 0;
break;
case IOCSR_IPI_CLEAR:
res = 0;
break;
case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
if (offset + len > IOCSR_IPI_BUF_38 + 8) {
kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
__func__, offset, len);
ret = -EINVAL;
break;
}
res = read_mailbox(vcpu, offset, len);
break;
default:
kvm_err("%s: unknown addr: %llx\n", __func__, addr);
ret = -EINVAL;
break;
}
*(uint64_t *)val = res;
return ret;
return 0;
}
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
int ret = 0;
uint64_t data;
uint32_t offset;
@@ -239,7 +232,6 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons
switch (offset) {
case IOCSR_IPI_STATUS:
ret = -EINVAL;
break;
case IOCSR_IPI_EN:
spin_lock(&vcpu->arch.ipi_state.lock);
@@ -257,7 +249,6 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons
if (offset + len > IOCSR_IPI_BUF_38 + 8) {
kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
__func__, offset, len);
ret = -EINVAL;
break;
}
write_mailbox(vcpu, offset, data, len);
@@ -266,18 +257,17 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons
ipi_send(vcpu->kvm, data);
break;
case IOCSR_MAIL_SEND:
ret = mail_send(vcpu->kvm, data);
mail_send(vcpu->kvm, data);
break;
case IOCSR_ANY_SEND:
ret = any_send(vcpu->kvm, data);
any_send(vcpu->kvm, data);
break;
default:
kvm_err("%s: unknown addr: %llx\n", __func__, addr);
ret = -EINVAL;
break;
}
return ret;
return 0;
}
static int kvm_ipi_read(struct kvm_vcpu *vcpu,

View file

@@ -74,7 +74,7 @@ void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
{
int ret = 0, offset;
int offset;
u64 data = 0;
void *ptemp;
@@ -121,34 +121,32 @@ static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int l
data = s->isr;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock(&s->lock);
if (ret == 0) {
offset = (addr - s->pch_pic_base) & 7;
data = data >> (offset * 8);
memcpy(val, &data, len);
}
offset = (addr - s->pch_pic_base) & 7;
data = data >> (offset * 8);
memcpy(val, &data, len);
return ret;
return 0;
}
static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
int ret;
int ret = 0;
struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
if (!s) {
kvm_err("%s: pch pic irqchip not valid!\n", __func__);
return -EINVAL;
return ret;
}
if (addr & (len - 1)) {
kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
return ret;
}
/* statistics of pch pic reading */
@@ -161,7 +159,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
int len, const void *val)
{
int ret = 0, offset;
int offset;
u64 old, data, mask;
void *ptemp;
@@ -226,29 +224,28 @@ static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
break;
default:
ret = -EINVAL;
break;
}
spin_unlock(&s->lock);
return ret;
return 0;
}
static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret;
int ret = 0;
struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
if (!s) {
kvm_err("%s: pch pic irqchip not valid!\n", __func__);
return -EINVAL;
return ret;
}
if (addr & (len - 1)) {
kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
return ret;
}
/* statistics of pch pic writing */

View file

@@ -32,7 +32,7 @@ static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
if (cpu_has_msgint && (priority == INT_AVEC)) {
if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) {
set_gcsr_estat(irq);
return 1;
}
@@ -64,7 +64,7 @@ static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
if (cpu_has_msgint && (priority == INT_AVEC)) {
if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) {
clear_gcsr_estat(irq);
return 1;
}

View file

@@ -192,6 +192,14 @@ static void kvm_init_gcsr_flag(void)
set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
if (cpu_has_msgint) {
set_gcsr_hw_flag(LOONGARCH_CSR_IPR);
set_gcsr_hw_flag(LOONGARCH_CSR_ISR0);
set_gcsr_hw_flag(LOONGARCH_CSR_ISR1);
set_gcsr_hw_flag(LOONGARCH_CSR_ISR2);
set_gcsr_hw_flag(LOONGARCH_CSR_ISR3);
}
}
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)

View file

@@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
}
st = (struct kvm_steal_time __user *)ghc->hva;
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
unsafe_put_user(0, &st->preempted, out);
vcpu->arch.st.preempted = 0;
}
unsafe_get_user(version, &st->version, out);
if (version & 1)
version += 1; /* first time write, random junk */
@@ -232,6 +237,27 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
vcpu->arch.flush_gpa = INVALID_GPA;
}
if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
switch (vcpu->arch.aux_ldtype) {
case KVM_LARCH_FPU:
kvm_own_fpu(vcpu);
break;
case KVM_LARCH_LSX:
kvm_own_lsx(vcpu);
break;
case KVM_LARCH_LASX:
kvm_own_lasx(vcpu);
break;
case KVM_LARCH_LBT:
kvm_own_lbt(vcpu);
break;
default:
break;
}
vcpu->arch.aux_ldtype = 0;
}
}
/*
@@ -652,6 +678,8 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
unsigned int config;
if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
return -EINVAL;
@@ -684,9 +712,17 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
if (cpu_has_ptw)
*v |= CPUCFG2_PTW;
config = read_cpucfg(LOONGARCH_CPUCFG2);
*v |= config & (CPUCFG2_FRECIPE | CPUCFG2_DIV32 | CPUCFG2_LAM_BH);
*v |= config & (CPUCFG2_LAMCAS | CPUCFG2_LLACQ_SCREL | CPUCFG2_SCQ);
return 0;
case LOONGARCH_CPUCFG3:
*v = GENMASK(16, 0);
*v = GENMASK(23, 0);
/* VM does not support memory order and SFB setting */
config = read_cpucfg(LOONGARCH_CPUCFG3);
*v &= config & ~(CPUCFG3_SFB);
*v &= config & ~(CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP | CPUCFG3_SLDORDER_CAP);
return 0;
case LOONGARCH_CPUCFG4:
case LOONGARCH_CPUCFG5:
@@ -717,6 +753,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
static int kvm_check_cpucfg(int id, u64 val)
{
int ret;
u32 host;
u64 mask = 0;
ret = _kvm_get_cpucfg_mask(id, &mask);
@@ -746,9 +783,16 @@ static int kvm_check_cpucfg(int id, u64 val)
/* LASX architecturally implies LSX and FP but val does not satisfy that */
return -EINVAL;
return 0;
case LOONGARCH_CPUCFG3:
host = read_cpucfg(LOONGARCH_CPUCFG3);
if ((val & CPUCFG3_RVAMAX) > (host & CPUCFG3_RVAMAX))
return -EINVAL;
if ((val & CPUCFG3_SPW_LVL) > (host & CPUCFG3_SPW_LVL))
return -EINVAL;
return 0;
case LOONGARCH_CPUCFG6:
if (val & CPUCFG6_PMP) {
u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
host = read_cpucfg(LOONGARCH_CPUCFG6);
if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
return -EINVAL;
if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
@@ -1286,16 +1330,11 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
if (!kvm_guest_has_lbt(&vcpu->arch))
return -EINVAL;
preempt_disable();
if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
set_csr_euen(CSR_EUEN_LBTEN);
_restore_lbt(&vcpu->arch.lbt);
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
}
preempt_enable();
return 0;
}
@@ -1338,8 +1377,6 @@ static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
/*
* Enable FPU for guest
* Set FR and FRE according to guest context
@@ -1350,19 +1387,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
kvm_restore_fpu(&vcpu->arch.fpu);
vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
preempt_enable();
}
#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
return -EINVAL;
preempt_disable();
/* Enable LSX for guest */
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
@@ -1384,7 +1414,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
preempt_enable();
return 0;
}
@@ -1394,11 +1423,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
return -EINVAL;
preempt_disable();
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
@@ -1420,7 +1444,6 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
preempt_enable();
return 0;
}
@@ -1661,7 +1684,9 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
if (cpu_has_msgint) {
if (kvm_guest_has_msgint(&vcpu->arch)) {
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_IPR);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
@@ -1756,7 +1781,9 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
if (cpu_has_msgint) {
if (kvm_guest_has_msgint(&vcpu->arch)) {
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_IPR);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
@@ -1773,11 +1800,57 @@ out:
return 0;
}
static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
{
gpa_t gpa;
struct gfn_to_hva_cache *ghc;
struct kvm_memslots *slots;
struct kvm_steal_time __user *st;
gpa = vcpu->arch.st.guest_addr;
if (!(gpa & KVM_STEAL_PHYS_VALID))
return;
/* vCPU may be preempted for many times */
if (vcpu->arch.st.preempted)
return;
/* This happens on process exit */
if (unlikely(current->mm != vcpu->kvm->mm))
return;
gpa &= KVM_STEAL_PHYS_MASK;
ghc = &vcpu->arch.st.cache;
slots = kvm_memslots(vcpu->kvm);
if (slots->generation != ghc->generation || gpa != ghc->gpa) {
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
ghc->gpa = INVALID_GPA;
return;
}
}
st = (struct kvm_steal_time __user *)ghc->hva;
unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
out:
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
int cpu;
int cpu, idx;
unsigned long flags;
if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
/*
* Take the srcu lock as memslots will be accessed to check
* the gfn cache generation against the memslots generation.
*/
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_set_pv_preempted(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
local_irq_save(flags);
cpu = smp_processor_id();
vcpu->arch.last_sched_cpu = cpu;

View file

@@ -29,6 +29,21 @@ static void kvm_vm_init_features(struct kvm *kvm)
{
unsigned long val;
if (cpu_has_lsx)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LSX);
if (cpu_has_lasx)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LASX);
if (cpu_has_lbt_x86)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_X86BT);
if (cpu_has_lbt_arm)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_ARMBT);
if (cpu_has_lbt_mips)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MIPSBT);
if (cpu_has_ptw)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PTW);
if (cpu_has_msgint)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MSGINT);
val = read_csr_gcfg();
if (val & CSR_GCFG_GPMP)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
@@ -37,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm)
kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
if (kvm_pvtime_supported()) {
kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
}
}
@@ -131,35 +148,15 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
{
switch (attr->attr) {
case KVM_LOONGARCH_VM_FEAT_LSX:
if (cpu_has_lsx)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_LASX:
if (cpu_has_lasx)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_X86BT:
if (cpu_has_lbt_x86)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_ARMBT:
if (cpu_has_lbt_arm)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_MIPSBT:
if (cpu_has_lbt_mips)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PTW:
if (cpu_has_ptw)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_MSGINT:
if (cpu_has_msgint)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PMU:
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
if (kvm_vm_support(&kvm->arch, attr->attr))
return 0;

View file

@@ -228,6 +228,7 @@ TEST_GEN_PROGS_loongarch += kvm_page_table_test
TEST_GEN_PROGS_loongarch += memslot_modification_stress_test
TEST_GEN_PROGS_loongarch += memslot_perf_test
TEST_GEN_PROGS_loongarch += set_memory_region_test
TEST_GEN_PROGS_loongarch += steal_time
SPLIT_TESTS += arch_timer
SPLIT_TESTS += get-reg-list

View file

@@ -301,6 +301,102 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
pr_info("\n");
}
#elif defined(__loongarch__)
/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
__u8 preempted;
__u8 pad[47];
};
static void check_status(struct kvm_steal_time *st)
{
GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
static void guest_code(int cpu)
{
uint32_t version;
struct kvm_steal_time *st = st_gva[cpu];
memset(st, 0, sizeof(*st));
GUEST_SYNC(0);
check_status(st);
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
version = READ_ONCE(st->version);
check_status(st);
GUEST_SYNC(1);
check_status(st);
GUEST_ASSERT(version < READ_ONCE(st->version));
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
check_status(st);
GUEST_DONE();
}
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
int err;
uint64_t val;
struct kvm_device_attr attr = {
.group = KVM_LOONGARCH_VCPU_CPUCFG,
.attr = CPUCFG_KVM_FEATURE,
.addr = (uint64_t)&val,
};
err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
if (err)
return false;
err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr);
if (err)
return false;
return val & BIT(KVM_FEATURE_STEAL_TIME);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
int err;
uint64_t st_gpa;
struct kvm_vm *vm = vcpu->vm;
struct kvm_device_attr attr = {
.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
.addr = (uint64_t)&st_gpa,
};
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vm, st_gva[i]);
err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
TEST_ASSERT(err == 0, "No PV stealtime Feature");
st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID;
err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr);
TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA");
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
ksft_print_msg("VCPU%d:\n", vcpu_idx);
ksft_print_msg(" steal: %lld\n", st->steal);
ksft_print_msg(" flags: %d\n", st->flags);
ksft_print_msg(" version: %d\n", st->version);
ksft_print_msg(" preempted: %d\n", st->preempted);
}
#endif
static void *do_steal_time(void *arg)