mirror of
https://github.com/torvalds/linux.git
synced 2026-03-14 04:26:15 +01:00
iommu/arm-smmu-v3: Set MEV bit in nested STE for DoS mitigations
There is a DoS concern with the hardware event queue shared among devices passed through to VMs: too many translation failures belonging to VMs could overflow the shared hardware event queue if those VMs or their VMMs do not handle/recover the devices properly. The MEV bit in the STE allows configuring the SMMU HW to merge similar event records, though there is no guarantee. Set it in a nested STE for DoS mitigation. In the future, we might want to enable the MEV for non-nested cases too, such as domain->type == IOMMU_DOMAIN_UNMANAGED or even IOMMU_DOMAIN_DMA. Link: https://patch.msgid.link/r/8ed12feef67fc65273d0f5925f401a81f56acebe.1741719725.git.nicolinc@nvidia.com Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Pranjal Shrivastava <praan@google.com> Acked-by: Will Deacon <will@kernel.org> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
e7d3fa3d29
commit
da0c56520e
3 changed files with 5 additions and 2 deletions
|
|
@@ -43,6 +43,8 @@ static void arm_smmu_make_nested_cd_table_ste(
|
|||
target->data[0] |= nested_domain->ste[0] &
|
||||
~cpu_to_le64(STRTAB_STE_0_CFG);
|
||||
target->data[1] |= nested_domain->ste[1];
|
||||
/* Merge events for DoS mitigations on eventq */
|
||||
target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@@ -1052,7 +1052,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
|
|||
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
|
||||
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
|
||||
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
|
||||
STRTAB_STE_1_EATS);
|
||||
STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
|
||||
used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
|
||||
|
||||
/*
|
||||
|
|
@@ -1068,7 +1068,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
|
|||
if (cfg & BIT(1)) {
|
||||
used_bits[1] |=
|
||||
cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
|
||||
STRTAB_STE_1_SHCFG);
|
||||
STRTAB_STE_1_SHCFG | STRTAB_STE_1_MEV);
|
||||
used_bits[2] |=
|
||||
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
|
||||
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
|
||||
|
|
|
|||
|
|
@@ -266,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
|
|||
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
|
||||
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
|
||||
|
||||
#define STRTAB_STE_1_MEV (1UL << 19)
|
||||
#define STRTAB_STE_1_S2FWB (1UL << 25)
|
||||
#define STRTAB_STE_1_S1STALLD (1UL << 27)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue