mm: introduce CONFIG_ARCH_HAS_LAZY_MMU_MODE

Architectures currently opt in for implementing lazy_mmu helpers by
defining __HAVE_ARCH_ENTER_LAZY_MMU_MODE.

In preparation for introducing a generic lazy_mmu layer that will require
storage in task_struct, let's switch to a cleaner approach: instead of
defining a macro, select a CONFIG option.

This patch introduces CONFIG_ARCH_HAS_LAZY_MMU_MODE and has each arch
select it when it implements lazy_mmu helpers. 
__HAVE_ARCH_ENTER_LAZY_MMU_MODE is removed and <linux/pgtable.h> relies on
the new CONFIG instead.

On x86, lazy_mmu helpers are only implemented if PARAVIRT_XXL is selected.
This creates some complications in arch/x86/boot/, because a few files
manually undefine PARAVIRT* options.  As a result <asm/paravirt.h> does
not define the lazy_mmu helpers, but this breaks the build as
<linux/pgtable.h> only defines them if !CONFIG_ARCH_HAS_LAZY_MMU_MODE. 
There does not seem to be a clean way out of this - let's just undefine
that new CONFIG too.

Link: https://lkml.kernel.org/r/20251215150323.2218608-7-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Acked-by: Andreas Larsson <andreas@gaisler.com>	[sparc]
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Kevin Brodsky 2025-12-15 15:03:15 +00:00 committed by Andrew Morton
parent f2be745071
commit 7303ecbfe4
12 changed files with 14 additions and 7 deletions

View file

@@ -35,6 +35,7 @@ config ARM64
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if KERNEL_MODE_NEON
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_LAZY_MMU_MODE
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS

View file

@@ -80,7 +80,6 @@ static inline void queue_pte_barriers(void)
}
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
/*

View file

@@ -24,8 +24,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
struct ppc64_tlb_batch *batch;

View file

@@ -93,6 +93,7 @@ config PPC_BOOK3S_64
select IRQ_WORK
select PPC_64S_HASH_MMU if !PPC_RADIX_MMU
select KASAN_VMALLOC if KASAN
select ARCH_HAS_LAZY_MMU_MODE
config PPC_BOOK3E_64
bool "Embedded processors"

View file

@@ -112,6 +112,7 @@ config SPARC64
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select ARCH_SUPPORTS_SCHED_SMT if SMP
select ARCH_SUPPORTS_SCHED_MC if SMP
select ARCH_HAS_LAZY_MMU_MODE
config ARCH_PROC_KCORE_TEXT
def_bool y

View file

@@ -39,8 +39,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_flush_lazy_mmu_mode(void);

View file

@@ -808,6 +808,7 @@ config PARAVIRT
config PARAVIRT_XXL
bool
depends on X86_64
select ARCH_HAS_LAZY_MMU_MODE
config PARAVIRT_DEBUG
bool "paravirt-ops debugging"

View file

@@ -11,6 +11,7 @@
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_ARCH_HAS_LAZY_MMU_MODE
#undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC

View file

@@ -24,6 +24,7 @@
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_ARCH_HAS_LAZY_MMU_MODE
/*
* This code runs before CPU feature bits are set. By default, the

View file

@@ -526,7 +526,6 @@ static inline void arch_end_context_switch(struct task_struct *next)
PVOP_VCALL1(cpu.end_context_switch, next);
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
PVOP_VCALL0(mmu.lazy_mode.enter);

View file

@@ -235,7 +235,7 @@ static inline int pmd_dirty(pmd_t pmd)
*
* Nesting is not permitted and the mode cannot be used in interrupt context.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#ifndef CONFIG_ARCH_HAS_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void) {}
static inline void arch_leave_lazy_mmu_mode(void) {}
static inline void arch_flush_lazy_mmu_mode(void) {}

View file

@@ -1468,6 +1468,13 @@ config PT_RECLAIM
config FIND_NORMAL_PAGE
def_bool n
config ARCH_HAS_LAZY_MMU_MODE
bool
help
The architecture uses the lazy MMU mode. This allows changes to
MMU-related architectural state to be deferred until the mode is
exited. See <linux/pgtable.h> for details.
source "mm/damon/Kconfig"
endmenu