mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:24:45 +01:00
arm64: Add support for FEAT_{LS64, LS64_V}
Armv8.7 introduces single-copy atomic 64-byte loads and stores
instructions and its variants named under FEAT_{LS64, LS64_V}.
These features are identified by ID_AA64ISAR1_EL1.LS64 and the
use of such instructions in userspace (EL0) can be trapped.
As st64bv (FEAT_LS64_V) and st64bv0 (FEAT_LS64_ACCDATA) cannot be told
apart, only FEAT_LS64 and FEAT_LS64_ACCDATA (which will be supported in a
later patch) will be exported to userspace; FEAT_LS64_V will be enabled
only in the kernel.
In order to support the use of corresponding instructions in userspace:
- Make ID_AA64ISAR1_EL1.LS64 visible to userspace
- Add identifying and enabling in the cpufeature list
- Expose support for these features to userspace through HWCAP3
  and cpuinfo
ld64b/st64b (FEAT_LS64) and st64bv (FEAT_LS64_V) are intended for
special memory (device memory), so they require support from the CPU, the
system and the target memory location (a device that supports these instructions).
HWCAP3_LS64 implies support from the CPU and the system (since there is
no identification method for the system, SoC vendors should advertise
support in the CPU only if the system also supports these instructions).
Otherwise, for ld64b/st64b the atomicity may not be guaranteed or a
DABT will be generated, so users (probably userspace driver developers)
should make sure the target memory (device) also has the support.
For st64bv, 0xffffffffffffffff will be returned as the status result for
unsupported memory, so users should check for it.
Document the restrictions along with HWCAP3_LS64.
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Oliver Upton <oupton@kernel.org>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
151b92c92a
commit
58ce78667a
7 changed files with 52 additions and 0 deletions
|
|
@ -556,6 +556,18 @@ Before jumping into the kernel, the following conditions must be met:
|
|||
|
||||
- MDCR_EL3.TPM (bit 6) must be initialized to 0b0
|
||||
|
||||
For CPUs with support for 64-byte loads and stores without status (FEAT_LS64):
|
||||
|
||||
- If the kernel is entered at EL1 and EL2 is present:
|
||||
|
||||
- HCRX_EL2.EnALS (bit 1) must be initialised to 0b1.
|
||||
|
||||
For CPUs with support for 64-byte stores with status (FEAT_LS64_V):
|
||||
|
||||
- If the kernel is entered at EL1 and EL2 is present:
|
||||
|
||||
- HCRX_EL2.EnASR (bit 2) must be initialised to 0b1.
|
||||
|
||||
The requirements described above for CPU mode, caches, MMUs, architected
|
||||
timers, coherency and system registers apply to all CPUs. All CPUs must
|
||||
enter the kernel in the same exception level. Where the values documented
|
||||
|
|
|
|||
|
|
@ -444,6 +444,13 @@ HWCAP3_MTE_STORE_ONLY
|
|||
HWCAP3_LSFE
|
||||
Functionality implied by ID_AA64ISAR3_EL1.LSFE == 0b0001
|
||||
|
||||
HWCAP3_LS64
|
||||
Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0001. Note that
|
||||
the function of instruction ld64b/st64b requires support by CPU, system
|
||||
and target (device) memory location and HWCAP3_LS64 implies the support
|
||||
of CPU. User should only use ld64b/st64b on supported target (device)
|
||||
memory location, otherwise fallback to the non-atomic alternatives.
|
||||
|
||||
|
||||
4. Unused AT_HWCAP bits
|
||||
-----------------------
|
||||
|
|
|
|||
|
|
@ -179,6 +179,7 @@
|
|||
#define KERNEL_HWCAP_MTE_FAR __khwcap3_feature(MTE_FAR)
|
||||
#define KERNEL_HWCAP_MTE_STORE_ONLY __khwcap3_feature(MTE_STORE_ONLY)
|
||||
#define KERNEL_HWCAP_LSFE __khwcap3_feature(LSFE)
|
||||
#define KERNEL_HWCAP_LS64 __khwcap3_feature(LS64)
|
||||
|
||||
/*
|
||||
* This yields a mask that user programs can use to figure out what
|
||||
|
|
|
|||
|
|
@ -146,5 +146,6 @@
|
|||
#define HWCAP3_MTE_FAR (1UL << 0)
|
||||
#define HWCAP3_MTE_STORE_ONLY (1UL << 1)
|
||||
#define HWCAP3_LSFE (1UL << 2)
|
||||
#define HWCAP3_LS64 (1UL << 3)
|
||||
|
||||
#endif /* _UAPI__ASM_HWCAP_H */
|
||||
|
|
|
|||
|
|
@ -240,6 +240,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
|
|||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LS64_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
|
||||
|
|
@ -2258,6 +2259,16 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
|
|||
}
|
||||
#endif /* CONFIG_ARM64_E0PD */
|
||||
|
||||
/* Enable EL0 use of LD64B/ST64B (FEAT_LS64) by setting SCTLR_EL1.EnALS. */
static void cpu_enable_ls64(struct arm64_cpu_capabilities const *cap)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnALS, SCTLR_EL1_EnALS);
}
|
||||
|
||||
static void cpu_enable_ls64_v(struct arm64_cpu_capabilities const *cap)
|
||||
{
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnASR, 0);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_PSEUDO_NMI
|
||||
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
|
||||
int scope)
|
||||
|
|
@ -3142,6 +3153,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|||
.matches = has_cpuid_feature,
|
||||
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, XNX, IMP)
|
||||
},
|
||||
{
|
||||
.desc = "LS64",
|
||||
.capability = ARM64_HAS_LS64,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
.cpu_enable = cpu_enable_ls64,
|
||||
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64)
|
||||
},
|
||||
{
|
||||
.desc = "LS64_V",
|
||||
.capability = ARM64_HAS_LS64_V,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
.cpu_enable = cpu_enable_ls64_v,
|
||||
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
|
|
@ -3261,6 +3288,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
|||
HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
|
||||
HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
|
||||
HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
|
||||
HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64, CAP_HWCAP, KERNEL_HWCAP_LS64),
|
||||
HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT),
|
||||
HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX),
|
||||
HWCAP_CAP(ID_AA64ISAR3_EL1, LSFE, IMP, CAP_HWCAP, KERNEL_HWCAP_LSFE),
|
||||
|
|
|
|||
|
|
@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
|
|||
[KERNEL_HWCAP_PACA] = "paca",
|
||||
[KERNEL_HWCAP_PACG] = "pacg",
|
||||
[KERNEL_HWCAP_GCS] = "gcs",
|
||||
[KERNEL_HWCAP_LS64] = "ls64",
|
||||
[KERNEL_HWCAP_DCPODP] = "dcpodp",
|
||||
[KERNEL_HWCAP_SVE2] = "sve2",
|
||||
[KERNEL_HWCAP_SVEAES] = "sveaes",
|
||||
|
|
|
|||
|
|
@ -46,6 +46,8 @@ HAS_HCX
|
|||
HAS_LDAPR
|
||||
HAS_LPA2
|
||||
HAS_LSE_ATOMICS
|
||||
HAS_LS64
|
||||
HAS_LS64_V
|
||||
HAS_MOPS
|
||||
HAS_NESTED_VIRT
|
||||
HAS_BBML2_NOABORT
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue