mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
Patch series "introduce kasan.write_only option in hw-tags", v8. Hardware tag-based KASAN is implemented using the Memory Tagging Extension (MTE) feature. MTE is built on top of the ARMv8.0 virtual address tagging TBI (Top Byte Ignore) feature and allows software to access a 4-bit allocation tag for each 16-byte granule in the physical address space. A logical tag is derived from bits 59-56 of the virtual address used for the memory access. A CPU with MTE enabled will compare the logical tag against the allocation tag and potentially raise a tag check fault on mismatch, subject to system register configuration. Since ARMv8.9, FEAT_MTE_STORE_ONLY can be used to restrict raising of tag check faults to store operations only. Using this feature (FEAT_MTE_STORE_ONLY), introduce a KASAN write-only mode which restricts KASAN checks to write (store) operations only. This mode omits KASAN checks for read (fetch/load) operations. Therefore, it might be used not only for debugging purposes but also in normal environments. This patch (of 2): Since Armv8.9, the FEAT_MTE_STORE_ONLY feature is introduced to restrict raising of tag check faults to store operations only. Introduce a KASAN write-only mode based on this feature. KASAN write-only mode restricts KASAN checks to write operations only and omits the checks for fetch/read operations when accessing memory. So it might be used not only in debugging environments but also in normal environments to check memory safety. This feature can be controlled with the "kasan.write_only" argument. When "kasan.write_only=on", KASAN checks write operations only; otherwise KASAN checks all operations. This changes the MTE_STORE_ONLY feature to a BOOT_CPU_FEATURE like ARM64_MTE_ASYMM so that it is initialised in kasan_init_hw_tags() together with the other setup. 
Link: https://lkml.kernel.org/r/20250916222755.466009-1-yeoreum.yun@arm.com Link: https://lkml.kernel.org/r/20250916222755.466009-2-yeoreum.yun@arm.com Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Breno Leitao <leitao@debian.org> Cc: David Hildenbrand <david@redhat.com> Cc: Dmitriy Vyukov <dvyukov@google.com> Cc: D Scott Phillips <scott@os.amperecomputing.com> Cc: Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io> Cc: James Morse <james.morse@arm.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Kalesh Singh <kaleshsingh@google.com> Cc: levi.yun <yeoreum.yun@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Mark Brown <broonie@kernel.org> Cc: Oliver Upton <oliver.upton@linux.dev> Cc: Pankaj Gupta <pankaj.gupta@amd.com> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Yang Shi <yang@os.amperecomputing.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
264 lines
5.7 KiB
C
264 lines
5.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Copyright (C) 2020 ARM Ltd.
|
|
*/
|
|
#ifndef __ASM_MTE_KASAN_H
|
|
#define __ASM_MTE_KASAN_H
|
|
|
|
#include <asm/compiler.h>
|
|
#include <asm/cputype.h>
|
|
#include <asm/mte-def.h>
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <linux/types.h>
|
|
|
|
#ifdef CONFIG_KASAN_HW_TAGS
|
|
|
|
/* Whether the MTE asynchronous mode is enabled. */
|
|
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
|
|
|
|
static inline bool system_uses_mte_async_or_asymm_mode(void)
|
|
{
|
|
return static_branch_unlikely(&mte_async_or_asymm_mode);
|
|
}
|
|
|
|
#else /* CONFIG_KASAN_HW_TAGS */
|
|
|
|
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
	/* Without CONFIG_KASAN_HW_TAGS the async/asymm MTE modes are never enabled. */
	return false;
}
|
|
|
|
#endif /* CONFIG_KASAN_HW_TAGS */
|
|
|
|
#ifdef CONFIG_ARM64_MTE
|
|
|
|
/*
|
|
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
|
|
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
|
|
* used.
|
|
* The kernel accesses TTBR0 usually with LDTR/STTR instructions
|
|
* when UAO is available, so these would act as EL0 accesses using
|
|
* TCF0.
|
|
* However futex.h code uses exclusives which would be executed as
|
|
* EL1, this can potentially cause a tag check fault even if the
|
|
* user disables TCF0.
|
|
*
|
|
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
|
|
* and reset it in uaccess_disable().
|
|
*
|
|
* The Tag check override (TCO) bit disables temporarily the tag checking
|
|
* preventing the issue.
|
|
*/
|
|
static inline void mte_disable_tco(void)
{
	/*
	 * Clear PSTATE.TCO so tag check faults are reported again.
	 * Patched to a NOP unless the CPU has ARM64_MTE and the kernel
	 * is built with CONFIG_KASAN_HW_TAGS.
	 */
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
|
|
|
|
static inline void mte_enable_tco(void)
{
	/*
	 * Set PSTATE.TCO to temporarily mask tag check faults (see the
	 * comment above about futex.h exclusives). Patched to a NOP
	 * unless ARM64_MTE and CONFIG_KASAN_HW_TAGS are both in play.
	 */
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
|
|
|
|
/*
|
|
* These functions disable tag checking only if in MTE async mode
|
|
* since the sync mode generates exceptions synchronously and the
|
|
* nofault or load_unaligned_zeropad can handle them.
|
|
*/
|
|
static inline void __mte_disable_tco_async(void)
{
	/*
	 * Only the async/asymm modes need TCO toggling here; sync mode
	 * raises its faults synchronously and the nofault /
	 * load_unaligned_zeropad paths can handle those directly.
	 */
	if (!system_uses_mte_async_or_asymm_mode())
		return;

	mte_disable_tco();
}
|
|
|
|
static inline void __mte_enable_tco_async(void)
{
	/*
	 * Re-arm TCO only in async/asymm mode, mirroring
	 * __mte_disable_tco_async(); sync mode never toggled it.
	 */
	if (!system_uses_mte_async_or_asymm_mode())
		return;

	mte_enable_tco();
}
|
|
|
|
/*
|
|
* These functions are meant to be only used from KASAN runtime through
|
|
* the arch_*() interface defined in asm/memory.h.
|
|
* These functions don't include system_supports_mte() checks,
|
|
* as KASAN only calls them when MTE is supported and enabled.
|
|
*/
|
|
|
|
static inline u8 mte_get_ptr_tag(void *ptr)
|
|
{
|
|
/* Note: The format of KASAN tags is 0xF<x> */
|
|
u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
|
|
|
|
return tag;
|
|
}
|
|
|
|
/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	/*
	 * LDG loads the allocation tag of the granule containing 'addr'
	 * into the tag bits of 'addr' itself; the tag is then extracted
	 * from the updated pointer below.
	 */
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
	    : "+r" (addr));

	return mte_get_ptr_tag(addr);
}
|
|
|
|
/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	/*
	 * IRG inserts a random allocation tag into the destination
	 * register; only the tag bits of 'addr' are consumed below, so
	 * the (uninitialised) address bits are irrelevant.
	 */
	asm(__MTE_PREAMBLE "irg %0, %0"
	    : "=r" (addr));

	return mte_get_ptr_tag(addr);
}
|
|
|
|
/* Store the allocation tag for one granule and return p advanced by 16. */
static inline u64 __stg_post(u64 p)
{
	/* STG with post-index writeback: tags the granule at p, then p += 16. */
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}
|
|
|
|
/* Like __stg_post(), but STZG also zeroes the granule's data. */
static inline u64 __stzg_post(u64 p)
{
	/* STZG with post-index writeback: tag + zero the granule, then p += 16. */
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}
|
|
|
|
/* Set the allocation tags for one whole DCZID block at p (DC GVA). */
static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}
|
|
|
|
/* Like __dc_gva(), but DC GZVA also zeroes the block's data. */
static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}
|
|
|
|
/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be MTE_GRANULE_SIZE aligned.
 * When @init is true the data is zeroed as well (STZG / DC GZVA variants).
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	/* DZP bit: when set, the DC G(Z)VA path is not used (STG/STZG only). */
	dczid_dzp = (dczid >> 4) & 1;

	/* Work on the address with the requested tag set in its top bits. */
	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)				\
	do {								\
		if (!dczid_dzp && size >= 2 * dczid_bs) {		\
			do {						\
				curr = stg_post(curr);			\
			} while (curr < end1);				\
									\
			do {						\
				dc_gva(curr);				\
				curr += dczid_bs;			\
			} while (curr < end2);				\
		}							\
									\
		while (curr < end3)					\
			curr = stg_post(curr);				\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
|
|
|
|
void mte_enable_kernel_sync(void);
|
|
void mte_enable_kernel_async(void);
|
|
void mte_enable_kernel_asymm(void);
|
|
int mte_enable_kernel_store_only(void);
|
|
|
|
#else /* CONFIG_ARM64_MTE */
|
|
|
|
static inline void mte_disable_tco(void)
{
	/* No-op: MTE not configured, there is no PSTATE.TCO to manage. */
}
|
|
|
|
static inline void mte_enable_tco(void)
{
	/* No-op: MTE not configured, there is no PSTATE.TCO to manage. */
}
|
|
|
|
static inline void __mte_disable_tco_async(void)
{
	/* No-op: no MTE, no async tag-check mode. */
}
|
|
|
|
static inline void __mte_enable_tco_async(void)
{
	/* No-op: no MTE, no async tag-check mode. */
}
|
|
|
|
static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Without MTE every pointer carries the "match-all" tag 0xFF. */
	return 0xFF;
}
|
|
|
|
static inline u8 mte_get_mem_tag(void *addr)
{
	/* Without MTE memory has no allocation tags; report "match-all". */
	return 0xFF;
}
|
|
|
|
static inline u8 mte_get_random_tag(void)
{
	/* Without MTE there is nothing to randomise; report "match-all". */
	return 0xFF;
}
|
|
|
|
static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
{
	/* No-op: without MTE there are no allocation tags to set. */
}
|
|
|
|
static inline void mte_enable_kernel_sync(void)
{
	/* No-op: MTE not configured. */
}
|
|
|
|
static inline void mte_enable_kernel_async(void)
{
	/* No-op: MTE not configured. */
}
|
|
|
|
static inline void mte_enable_kernel_asymm(void)
{
	/* No-op: MTE not configured. */
}
|
|
|
|
static inline int mte_enable_kernel_store_only(void)
{
	/* Store-only tag checking needs CONFIG_ARM64_MTE; report failure. */
	return -EINVAL;
}
|
|
|
|
#endif /* CONFIG_ARM64_MTE */
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* __ASM_MTE_KASAN_H */
|