mirror of https://github.com/torvalds/linux.git
x86/msr: Change the function type of native_read_msr_safe()
Modify the function type of native_read_msr_safe() to:
int native_read_msr_safe(u32 msr, u64 *val)
This change makes the function return an error code instead of the
MSR value, aligning it with the type of native_write_msr_safe().
Consequently, their callers can check the results in the same way.
While at it, convert the leftover "unsigned int" MSR data types to u32.
Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-16-xin@zytor.com
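For illustration, a minimal before/after sketch of a caller (hypothetical MSR constant and locals, not taken from the patch): the error code moves from an out-parameter to the return value, so safe reads can now be checked exactly like native_write_msr_safe() writes.

	/* Before: value returned, error reported through a pointer. */
	int err;
	u64 val = native_read_msr_safe(MSR_EXAMPLE, &err);
	if (err)
		return err;

	/* After: error returned, value reported through a pointer. */
	u64 val;
	int err = native_read_msr_safe(MSR_EXAMPLE, &val);
	if (err)
		return err;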
parent 444b46a128
commit 502ad6e5a6
6 changed files with 45 additions and 47 deletions
arch/x86/include/asm/msr.h

@@ -113,18 +113,22 @@ static inline u64 native_read_msr(u32 msr)
 	return val;
 }
 
-static inline u64 native_read_msr_safe(u32 msr, int *err)
+static inline int native_read_msr_safe(u32 msr, u64 *p)
 {
+	int err;
 	EAX_EDX_DECLARE_ARGS(val, low, high);
 
 	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
 		     "2:\n\t"
 		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
-		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : [err] "=r" (err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr));
 	if (tracepoint_enabled(read_msr))
-		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
-	return EAX_EDX_VAL(val, low, high);
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
+
+	*p = EAX_EDX_VAL(val, low, high);
+
+	return err;
 }
 
 /* Can be uninlined because referenced by paravirt */

@@ -204,8 +208,8 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr, low, high)				\
 ({								\
-	int __err;						\
-	u64 __val = native_read_msr_safe((msr), &__err);	\
+	u64 __val;						\
+	int __err = native_read_msr_safe((msr), &__val);	\
 	(*low) = (u32)__val;					\
 	(*high) = (u32)(__val >> 32);				\
 	__err;							\

@@ -213,10 +217,7 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
 
 static inline int rdmsrq_safe(u32 msr, u64 *p)
 {
-	int err;
-
-	*p = native_read_msr_safe(msr, &err);
-	return err;
+	return native_read_msr_safe(msr, p);
 }
 
 static __always_inline u64 rdpmc(int counter)
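With the return-code convention, rdmsrq_safe() reduces to a plain tail call, and a caller can test the read directly. A short usage sketch under the new interface (illustrative caller, not part of the patch):

	u64 cap;

	/* rdmsrq_safe() now just forwards native_read_msr_safe()'s result. */
	if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap))
		pr_warn("MCG_CAP read faulted\n");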
arch/x86/include/asm/paravirt.h

@@ -175,7 +175,7 @@ static inline void __write_cr4(unsigned long x)
 	PVOP_VCALL1(cpu.write_cr4, x);
 }
 
-static inline u64 paravirt_read_msr(unsigned msr)
+static inline u64 paravirt_read_msr(u32 msr)
 {
 	return PVOP_CALL1(u64, cpu.read_msr, msr);
 }

@@ -185,9 +185,9 @@ static inline void paravirt_write_msr(u32 msr, u64 val)
 	PVOP_VCALL2(cpu.write_msr, msr, val);
 }
 
-static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
+static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
 {
-	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
+	return PVOP_CALL2(int, cpu.read_msr_safe, msr, val);
 }
 
 static inline int paravirt_write_msr_safe(u32 msr, u64 val)

@@ -225,19 +225,16 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr, a, b)				\
 ({							\
-	int _err;					\
-	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
+	u64 _l;						\
+	int _err = paravirt_read_msr_safe((msr), &_l);	\
 	(*a) = (u32)_l;					\
-	(*b) = _l >> 32;				\
+	(*b) = (u32)(_l >> 32);				\
 	_err;						\
 })
 
-static inline int rdmsrq_safe(unsigned msr, u64 *p)
+static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
 {
-	int err;
-
-	*p = paravirt_read_msr_safe(msr, &err);
-	return err;
+	return paravirt_read_msr_safe(msr, p);
 }
 
 static __always_inline u64 rdpmc(int counter)
arch/x86/include/asm/paravirt_types.h

@@ -91,14 +91,14 @@ struct pv_cpu_ops {
 		      unsigned int *ecx, unsigned int *edx);
 
 	/* Unsafe MSR operations.  These will warn or panic on failure. */
-	u64 (*read_msr)(unsigned int msr);
+	u64 (*read_msr)(u32 msr);
 	void (*write_msr)(u32 msr, u64 val);
 
 	/*
 	 * Safe MSR operations.
-	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+	 * Returns 0 or -EIO.
 	 */
-	u64 (*read_msr_safe)(unsigned int msr, int *err);
+	int (*read_msr_safe)(u32 msr, u64 *val);
 	int (*write_msr_safe)(u32 msr, u64 val);
 
 	u64 (*read_pmc)(int counter);
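Conceptually, the paravirt wrappers above are a typed dispatch through this ops table; a simplified model (ignoring the PVOP_CALL2 patching machinery, which rewrites the call site at runtime rather than doing an indirect call):

	/* Simplified: what paravirt_read_msr_safe() amounts to. */
	static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
	{
		return pv_ops.cpu.read_msr_safe(msr, val);
	}

On bare metal the slot holds native_read_msr_safe(); under Xen PV it holds xen_read_msr_safe(), which is why both implementations must agree on the new int (u32, u64 *) type.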
arch/x86/kvm/svm/svm.c

@@ -476,15 +476,13 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu)
 
 static void svm_init_erratum_383(void)
 {
-	int err;
 	u64 val;
 
 	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
-	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
-	if (err)
+	if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
 		return;
 
 	val |= (1ULL << 47);

@@ -649,13 +647,12 @@ static int svm_enable_virtualization_cpu(void)
 	 * erratum is present everywhere).
 	 */
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
-		uint64_t len, status = 0;
+		u64 len, status = 0;
 		int err;
 
-		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+		err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
 		if (!err)
-			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
-						      &err);
+			err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
 
 		if (err)
 			osvw_status = osvw_len = 0;

@@ -2146,14 +2143,13 @@ static int ac_interception(struct kvm_vcpu *vcpu)
 
 static bool is_erratum_383(void)
 {
-	int err, i;
+	int i;
 	u64 value;
 
 	if (!erratum_383_found)
 		return false;
 
-	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
-	if (err)
+	if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
 		return false;
 
 	/* Bit 62 may or may not be set for this mce */

@@ -2166,8 +2162,7 @@ static bool is_erratum_383(void)
 	for (i = 0; i < 6; ++i)
 		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
 
-	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
-	if (!err) {
+	if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
 		value &= ~(1ULL << 2);
 		native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
 	}
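The KVM hunks show the main payoff: when the error code is the return value, the read can fuse into the branch that consumes it, dropping a separate err local. The generic shape of the transformation (MSR_EXAMPLE is a placeholder, not from the patch):

	/* Before: two statements plus an error local. */
	val = native_read_msr_safe(MSR_EXAMPLE, &err);
	if (err)
		return;

	/* After: read and check in a single condition. */
	if (native_read_msr_safe(MSR_EXAMPLE, &val))
		return;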
arch/x86/xen/enlighten_pv.c

@@ -1087,7 +1087,7 @@ static void xen_write_cr4(unsigned long cr4)
 	native_write_cr4(cr4);
 }
 
-static u64 xen_do_read_msr(unsigned int msr, int *err)
+static u64 xen_do_read_msr(u32 msr, int *err)
 {
 	u64 val = 0;	/* Avoid uninitialized value for safe variant. */
 

@@ -1095,7 +1095,7 @@ static u64 xen_do_read_msr(u32 msr, int *err)
 		return val;
 
 	if (err)
-		val = native_read_msr_safe(msr, err);
+		*err = native_read_msr_safe(msr, &val);
 	else
 		val = native_read_msr(msr);
 

@@ -1160,9 +1160,12 @@ static void xen_do_write_msr(u32 msr, u64 val, int *err)
 	}
 }
 
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
+static int xen_read_msr_safe(u32 msr, u64 *val)
 {
-	return xen_do_read_msr(msr, err);
+	int err;
+
+	*val = xen_do_read_msr(msr, &err);
+	return err;
 }
 
 static int xen_write_msr_safe(u32 msr, u64 val)

@@ -1174,7 +1177,7 @@ static int xen_write_msr_safe(u32 msr, u64 val)
 	return err;
 }
 
-static u64 xen_read_msr(unsigned int msr)
+static u64 xen_read_msr(u32 msr)
 {
 	int err;
 
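Note that xen_do_read_msr() deliberately keeps the old int *err style: it backs both the safe and the unsafe Xen read paths, with a NULL err selecting the plain native_read_msr() fallback. Condensed from the hunks above (elisions marked, not a verbatim excerpt):

	static u64 xen_do_read_msr(u32 msr, int *err)
	{
		u64 val = 0;	/* Avoid uninitialized value for safe variant. */

		/* ... Xen-specific MSR emulation elided ... */

		if (err)
			*err = native_read_msr_safe(msr, &val);
		else
			val = native_read_msr(msr);

		return val;
	}

The new xen_read_msr_safe() then merely adapts this out-parameter style to the int-returning pv_ops interface.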
arch/x86/xen/pmu.c

@@ -319,11 +319,12 @@ static u64 xen_amd_read_pmc(int counter)
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
 	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
+		u32 msr;
+		u64 val;
 
 		msr = amd_counters_base + (counter * amd_msr_step);
-		return native_read_msr_safe(msr, &err);
+		native_read_msr_safe(msr, &val);
+		return val;
 	}
 
 	ctxt = &xenpmu_data->pmu.c.amd;

@@ -340,15 +341,16 @@ static u64 xen_intel_read_pmc(int counter)
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
 	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
+		u32 msr;
+		u64 val;
 
 		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
 			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
 		else
 			msr = MSR_IA32_PERFCTR0 + counter;
 
-		return native_read_msr_safe(msr, &err);
+		native_read_msr_safe(msr, &val);
+		return val;
 	}
 
 	ctxt = &xenpmu_data->pmu.c.intel;