mirror of https://github.com/torvalds/linux.git (synced 2026-03-08 03:04:51 +01:00)
x86/pvlocks: Move paravirt spinlock functions into own header
Instead of having the pv spinlock function definitions in paravirt.h, move
them into the new header paravirt-spinlock.h.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://patch.msgid.link/20260105110520.21356-22-jgross@suse.com
This commit is contained in:
parent 392afe8316
commit b0b449e6fe

12 changed files with 198 additions and 200 deletions
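The change is mechanical: the pv_lock_ops instance moves out of the pv_ops template into a standalone pv_ops_lock structure declared by the new header, so every backend assignment switches from pv_ops.lock.<field> to pv_ops_lock.<field>. A minimal sketch of the registration pattern after this commit, distilled from the Hyper-V/KVM/Xen hunks below (my_qlock_wait(), my_qlock_kick() and my_spinlock_init() are placeholder names, not kernel symbols):

#include <asm/paravirt-spinlock.h>

static void my_qlock_wait(u8 *ptr, u8 val) { /* park the vCPU until kicked */ }
static void my_qlock_kick(int cpu) { /* wake the vCPU spinning on the lock */ }

static void __init my_spinlock_init(void)
{
	__pv_init_lock_hash();
	/* Before this commit these were pv_ops.lock.* assignments. */
	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops_lock.wait = my_qlock_wait;
	pv_ops_lock.kick = my_qlock_kick;
}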

arch/x86/hyperv/hv_spinlock.c
@@ -78,11 +78,11 @@ void __init hv_init_spinlocks(void)
 	pr_info("PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_ops.lock.wait = hv_qlock_wait;
-	pv_ops.lock.kick = hv_qlock_kick;
-	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops_lock.wait = hv_qlock_wait;
+	pv_ops_lock.kick = hv_qlock_kick;
+	pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
 }
 
 static __init int hv_parse_nopvspin(char *arg)

arch/x86/include/asm/paravirt-base.h
@@ -26,4 +26,10 @@ u64 _paravirt_ident_64(u64);
 #endif
 #define paravirt_nop ((void *)nop_func)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void paravirt_set_cap(void);
+#else
+static inline void paravirt_set_cap(void) { }
+#endif
+
 #endif /* _ASM_X86_PARAVIRT_BASE_H */

145	arch/x86/include/asm/paravirt-spinlock.h	Normal file
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
+#define _ASM_X86_PARAVIRT_SPINLOCK_H
+
+#include <asm/paravirt_types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#endif
+
+struct qspinlock;
+
+struct pv_lock_ops {
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	struct paravirt_callee_save queued_spin_unlock;
+
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+
+	struct paravirt_callee_save vcpu_is_preempted;
+} __no_randomize_layout;
+
+extern struct pv_lock_ops pv_ops_lock;
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							  u32 val)
+{
+	PVOP_VCALL2(pv_ops_lock, queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
+			  "movb $0, (%%" _ASM_ARG1 ");",
+			  ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+	return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
+				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
+				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	kcsan_release();
+	pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	PVOP_VCALL2(pv_ops_lock, wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	PVOP_VCALL1(pv_ops_lock, kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+/*
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+	int val;
+
+	if (!static_branch_likely(&virt_spin_lock_key))
+		return false;
+
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+__retry:
+	val = atomic_read(&lock->val);
+
+	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+		cpu_relax();
+		goto __retry;
+	}
+
+	return true;
+}
+
+#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
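
Lock-site code is unaffected by the move: the qspinlock wrappers keep their names and now simply dispatch through pv_ops_lock. A hypothetical guest-side caller (my_should_yield() is illustrative only, not a kernel function):

#include <asm/qspinlock.h>	/* pulls in paravirt-spinlock.h under CONFIG_PARAVIRT */

static bool my_should_yield(long owner_cpu)
{
	/*
	 * Resolves via pv_ops_lock.vcpu_is_preempted; the alternatives
	 * machinery patches this to "xor %eax, %eax" (always false) when
	 * X86_FEATURE_VCPUPREEMPT is not set.
	 */
	return vcpu_is_preempted(owner_cpu);
}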

arch/x86/include/asm/paravirt.h
@@ -19,15 +19,6 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {

@@ -522,46 +513,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 {
 	pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
 #endif
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
-							 u32 val)
-{
-	PVOP_VCALL2(pv_ops, lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
-	PVOP_ALT_VCALLEE1(pv_ops, lock.queued_spin_unlock, lock,
-			  "movb $0, (%%" _ASM_ARG1 ");",
-			  ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
-	PVOP_VCALL2(pv_ops, lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
-	PVOP_VCALL1(pv_ops, lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
-	return PVOP_ALT_CALLEE1(bool, pv_ops, lock.vcpu_is_preempted, cpu,
-				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
-				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
-
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
 #ifdef CONFIG_PARAVIRT_XXL
 static __always_inline unsigned long arch_local_save_flags(void)
 {
 	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",

@@ -588,8 +540,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #endif
 
-void native_pv_lock_init(void) __init;
-
 #else /* __ASSEMBLER__ */
 
 #ifdef CONFIG_X86_64

@@ -613,12 +563,6 @@ void native_pv_lock_init(void) __init;
 #endif /* __ASSEMBLER__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLER__

@@ -634,10 +578,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 }
 #endif
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_PARAVIRT_H */

arch/x86/include/asm/paravirt_types.h
@@ -184,22 +184,6 @@ struct pv_mmu_ops {
 #endif
 } __no_randomize_layout;
 
-#ifdef CONFIG_SMP
-#include <asm/spinlock_types.h>
-#endif
-
-struct qspinlock;
-
-struct pv_lock_ops {
-	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
-	struct paravirt_callee_save queued_spin_unlock;
-
-	void (*wait)(u8 *ptr, u8 val);
-	void (*kick)(int cpu);
-
-	struct paravirt_callee_save vcpu_is_preempted;
-} __no_randomize_layout;
-
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */

@@ -207,7 +191,6 @@ struct paravirt_patch_template {
 	struct pv_cpu_ops cpu;
 	struct pv_irq_ops irq;
 	struct pv_mmu_ops mmu;
-	struct pv_lock_ops lock;
 } __no_randomize_layout;
 
 extern struct paravirt_patch_template pv_ops;

arch/x86/include/asm/qspinlock.h
@@ -7,6 +7,9 @@
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
 #include <asm/rmwcc.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt-spinlock.h>
+#endif
 
 #define _Q_PENDING_LOOPS (1 << 9)
 

@@ -27,90 +30,10 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
 	return val;
 }
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-extern bool nopvspin;
-
-#define queued_spin_unlock queued_spin_unlock
-/**
- * queued_spin_unlock - release a queued spinlock
- * @lock : Pointer to queued spinlock structure
- *
- * A smp_store_release() on the least-significant byte.
- */
-static inline void native_queued_spin_unlock(struct qspinlock *lock)
-{
-	smp_store_release(&lock->locked, 0);
-}
-
-static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
-{
-	pv_queued_spin_lock_slowpath(lock, val);
-}
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-	kcsan_release();
-	pv_queued_spin_unlock(lock);
-}
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(long cpu)
-{
-	return pv_vcpu_is_preempted(cpu);
-}
+#ifndef CONFIG_PARAVIRT
+static inline void native_pv_lock_init(void) { }
 #endif
 
-#ifdef CONFIG_PARAVIRT
-/*
- * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
- *
- * Native (and PV wanting native due to vCPU pinning) should keep this key
- * disabled. Native does not touch the key.
- *
- * When in a guest then native_pv_lock_init() enables the key first and
- * KVM/XEN might conditionally disable it later in the boot process again.
- */
-DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-/*
- * Shortcut for the queued_spin_lock_slowpath() function that allows
- * virt to hijack it.
- *
- * Returns:
- *   true - lock has been negotiated, all done;
- *   false - queued_spin_lock_slowpath() will do its thing.
- */
-#define virt_spin_lock virt_spin_lock
-static inline bool virt_spin_lock(struct qspinlock *lock)
-{
-	int val;
-
-	if (!static_branch_likely(&virt_spin_lock_key))
-		return false;
-
-	/*
-	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
-	 * back to a Test-and-Set spinlock, because fair locks have
-	 * horrible lock 'holder' preemption issues.
-	 */
-
-__retry:
-	val = atomic_read(&lock->val);
-
-	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
-		cpu_relax();
-		goto __retry;
-	}
-
-	return true;
-}
-
-#endif /* CONFIG_PARAVIRT */
-
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */

arch/x86/kernel/Makefile
@@ -126,7 +126,7 @@ obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
 
 obj-$(CONFIG_KVM_GUEST)			+= kvm.o kvmclock.o
 obj-$(CONFIG_PARAVIRT)			+= paravirt.o
-obj-$(CONFIG_PARAVIRT_SPINLOCKS)	+= paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)		+= pvclock.o
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE)	+= pmem.o
 

arch/x86/kernel/kvm.c
@@ -829,8 +829,10 @@ static void __init kvm_guest_init(void)
 		has_steal_clock = 1;
 		static_call_update(pv_steal_clock, kvm_steal_clock);
 
-		pv_ops.lock.vcpu_is_preempted =
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+		pv_ops_lock.vcpu_is_preempted =
 			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+#endif
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))

@@ -1126,11 +1128,11 @@ void __init kvm_spinlock_init(void)
 	pr_info("PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.lock.queued_spin_unlock =
+	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops_lock.queued_spin_unlock =
 		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_ops.lock.wait = kvm_wait;
-	pv_ops.lock.kick = kvm_kick_cpu;
+	pv_ops_lock.wait = kvm_wait;
+	pv_ops_lock.kick = kvm_kick_cpu;
 
 	/*
 	 * When PV spinlock is enabled which is preferred over

arch/x86/kernel/paravirt-spinlocks.c
@@ -3,12 +3,22 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
+#include <linux/static_call.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
 
 #include <asm/paravirt.h>
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#ifdef CONFIG_SMP
+void __init native_pv_lock_init(void)
+{
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
+}
+#endif
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);

@@ -17,7 +27,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
 {
-	return pv_ops.lock.queued_spin_unlock.func ==
+	return pv_ops_lock.queued_spin_unlock.func ==
 		__raw_callee_save___native_queued_spin_unlock;
 }
 

@@ -29,7 +39,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_ops.lock.vcpu_is_preempted.func ==
+	return pv_ops_lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
 

@@ -41,3 +51,13 @@ void __init paravirt_set_cap(void)
 	if (!pv_is_native_vcpu_is_preempted())
 		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
 }
+
+struct pv_lock_ops pv_ops_lock = {
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+	.wait = paravirt_nop,
+	.kick = paravirt_nop,
+	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+};
+EXPORT_SYMBOL(pv_ops_lock);
+#endif

arch/x86/kernel/paravirt.c
@@ -57,14 +57,6 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif
 
-DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-void __init native_pv_lock_init(void)
-{
-	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_enable(&virt_spin_lock_key);
-}
-
 static noinstr void pv_native_safe_halt(void)
 {
 	native_safe_halt();

@@ -221,19 +213,6 @@ struct paravirt_patch_template pv_ops = {
 
 	.mmu.set_fixmap = native_set_fixmap,
 #endif /* CONFIG_PARAVIRT_XXL */
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	/* Lock ops. */
-#ifdef CONFIG_SMP
-	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-	.lock.queued_spin_unlock =
-			PV_CALLEE_SAVE(__native_queued_spin_unlock),
-	.lock.wait = paravirt_nop,
-	.lock.kick = paravirt_nop,
-	.lock.vcpu_is_preempted =
-			PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-#endif
 };
 
 #ifdef CONFIG_PARAVIRT_XXL

arch/x86/xen/spinlock.c
@@ -134,10 +134,10 @@ void __init xen_init_spinlocks(void)
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.lock.queued_spin_unlock =
+	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops_lock.queued_spin_unlock =
 		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_ops.lock.wait = xen_qlock_wait;
-	pv_ops.lock.kick = xen_qlock_kick;
-	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+	pv_ops_lock.wait = xen_qlock_wait;
+	pv_ops_lock.kick = xen_qlock_kick;
+	pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }

tools/objtool/check.c
@@ -527,6 +527,7 @@ static struct {
 	int idx_off;
 } pv_ops_tables[] = {
 	{ .name = "pv_ops", },
+	{ .name = "pv_ops_lock", },
 	{ .name = NULL, .idx_off = -1 }
 };
 
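
The objtool entry is needed because pv_ops_lock is now a second top-level paravirt ops table: objtool matches relocation targets against each name in pv_ops_tables[] when tracking indirect paravirt calls. A condensed, hypothetical sketch of that kind of name matching (lookup_pv_table_name() is illustrative; the real objtool lookup also records idx_off and is structured differently):

static const char *lookup_pv_table_name(const char *symname)
{
	/* pv_ops_tables[] is terminated by a NULL .name entry. */
	for (int i = 0; pv_ops_tables[i].name; i++)
		if (!strcmp(symname, pv_ops_tables[i].name))
			return pv_ops_tables[i].name;
	return NULL;
}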