mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
KVM: arm64: Calculate hyp VA size only once
Calculate the hypervisor's VA size only once to maintain consistency between the memory layout and MMU initialization logic. Previously the two would be inconsistent when the kernel is configured for less than IDMAP_VA_BITS of VA space. Signed-off-by: Petteri Kangaslampi <pekangas@google.com> Tested-by: Vincent Donnefort <vdonnefort@google.com> Link: https://patch.msgid.link/20260113194409.2970324-2-pekangas@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
parent
b1a9a9b961
commit
8e8eb10c10
4 changed files with 36 additions and 32 deletions
|
|
@@ -103,6 +103,7 @@ alternative_cb_end
|
|||
void kvm_update_va_mask(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr, int nr_inst);
|
||||
void kvm_compute_layout(void);
|
||||
u32 kvm_hyp_va_bits(void);
|
||||
void kvm_apply_hyp_relocations(void);
|
||||
|
||||
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
|
||||
|
|
@@ -185,7 +186,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
|
|||
|
||||
phys_addr_t kvm_mmu_get_httbr(void);
|
||||
phys_addr_t kvm_get_idmap_vector(void);
|
||||
int __init kvm_mmu_init(u32 *hyp_va_bits);
|
||||
int __init kvm_mmu_init(u32 hyp_va_bits);
|
||||
|
||||
static inline void *__kvm_vector_slot2addr(void *base,
|
||||
enum arm64_hyp_spectre_vector slot)
|
||||
|
|
|
|||
|
|
@@ -2568,7 +2568,7 @@ static void pkvm_hyp_init_ptrauth(void)
|
|||
/* Inits Hyp-mode on all online CPUs */
|
||||
static int __init init_hyp_mode(void)
|
||||
{
|
||||
u32 hyp_va_bits;
|
||||
u32 hyp_va_bits = kvm_hyp_va_bits();
|
||||
int cpu;
|
||||
int err = -ENOMEM;
|
||||
|
||||
|
|
@@ -2582,7 +2582,7 @@ static int __init init_hyp_mode(void)
|
|||
/*
|
||||
* Allocate Hyp PGD and setup Hyp identity mapping
|
||||
*/
|
||||
err = kvm_mmu_init(&hyp_va_bits);
|
||||
err = kvm_mmu_init(hyp_va_bits);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
|
|
|
|||
|
|
@@ -2284,11 +2284,9 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
|
|||
.virt_to_phys = kvm_host_pa,
|
||||
};
|
||||
|
||||
int __init kvm_mmu_init(u32 *hyp_va_bits)
|
||||
int __init kvm_mmu_init(u32 hyp_va_bits)
|
||||
{
|
||||
int err;
|
||||
u32 idmap_bits;
|
||||
u32 kernel_bits;
|
||||
|
||||
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
|
||||
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
|
||||
|
|
@@ -2302,25 +2300,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
|
|||
*/
|
||||
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
|
||||
|
||||
/*
|
||||
* The ID map is always configured for 48 bits of translation, which
|
||||
* may be fewer than the number of VA bits used by the regular kernel
|
||||
* stage 1, when VA_BITS=52.
|
||||
*
|
||||
* At EL2, there is only one TTBR register, and we can't switch between
|
||||
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
|
||||
* line: we need to use the extended range with *both* our translation
|
||||
* tables.
|
||||
*
|
||||
* So use the maximum of the idmap VA bits and the regular kernel stage
|
||||
* 1 VA bits to assure that the hypervisor can both ID map its code page
|
||||
* and map any kernel memory.
|
||||
*/
|
||||
idmap_bits = IDMAP_VA_BITS;
|
||||
kernel_bits = vabits_actual;
|
||||
*hyp_va_bits = max(idmap_bits, kernel_bits);
|
||||
|
||||
kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
|
||||
kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
|
||||
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
|
||||
kvm_debug("HYP VA range: %lx:%lx\n",
|
||||
kern_hyp_va(PAGE_OFFSET),
|
||||
|
|
@@ -2345,7 +2325,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
|
|||
goto out;
|
||||
}
|
||||
|
||||
err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
|
||||
err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
|
||||
if (err)
|
||||
goto out_free_pgtable;
|
||||
|
||||
|
|
@@ -2354,7 +2334,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
|
|||
goto out_destroy_pgtable;
|
||||
|
||||
io_map_base = hyp_idmap_start;
|
||||
__hyp_va_bits = *hyp_va_bits;
|
||||
__hyp_va_bits = hyp_va_bits;
|
||||
return 0;
|
||||
|
||||
out_destroy_pgtable:
|
||||
|
|
|
|||
|
|
@@ -46,9 +46,31 @@ static void init_hyp_physvirt_offset(void)
|
|||
hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the actual VA size used by the hypervisor
|
||||
*/
|
||||
__init u32 kvm_hyp_va_bits(void)
|
||||
{
|
||||
/*
|
||||
* The ID map is always configured for 48 bits of translation, which may
|
||||
* be different from the number of VA bits used by the regular kernel
|
||||
* stage 1.
|
||||
*
|
||||
* At EL2, there is only one TTBR register, and we can't switch between
|
||||
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
|
||||
* line: we need to use the extended range with *both* our translation
|
||||
* tables.
|
||||
*
|
||||
* So use the maximum of the idmap VA bits and the regular kernel stage
|
||||
* 1 VA bits as the hypervisor VA size to assure that the hypervisor can
|
||||
* both ID map its code page and map any kernel memory.
|
||||
*/
|
||||
return max(IDMAP_VA_BITS, vabits_actual);
|
||||
}
|
||||
|
||||
/*
|
||||
* We want to generate a hyp VA with the following format (with V ==
|
||||
* vabits_actual):
|
||||
* hypervisor VA bits):
|
||||
*
|
||||
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
|
||||
* ---------------------------------------------------------
|
||||
|
|
@@ -61,10 +83,11 @@ __init void kvm_compute_layout(void)
|
|||
{
|
||||
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
|
||||
u64 hyp_va_msb;
|
||||
u32 hyp_va_bits = kvm_hyp_va_bits();
|
||||
|
||||
/* Where is my RAM region? */
|
||||
hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
|
||||
hyp_va_msb ^= BIT(vabits_actual - 1);
|
||||
hyp_va_msb = idmap_addr & BIT(hyp_va_bits - 1);
|
||||
hyp_va_msb ^= BIT(hyp_va_bits - 1);
|
||||
|
||||
tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
|
||||
(u64)(high_memory - 1));
|
||||
|
|
@@ -72,9 +95,9 @@ __init void kvm_compute_layout(void)
|
|||
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
|
||||
tag_val = hyp_va_msb;
|
||||
|
||||
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
|
||||
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (hyp_va_bits - 1)) {
|
||||
/* We have some free bits to insert a random tag. */
|
||||
tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
|
||||
tag_val |= get_random_long() & GENMASK_ULL(hyp_va_bits - 2, tag_lsb);
|
||||
}
|
||||
tag_val >>= tag_lsb;
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue