mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
To initialize node, zone and memory map data structures every architecture calls free_area_init() during setup_arch() and passes it an array of zone limits. Beside code duplication it creates "interesting" ordering cases between allocation and initialization of hugetlb and the memory map. Some architectures allocate hugetlb pages very early in setup_arch() in certain cases, some only create hugetlb CMA areas in setup_arch() and sometimes hugetlb allocations happen in mm_core_init(). With arch_zone_limits_init() helper available now on all architectures it is no longer necessary to call free_area_init() from architecture setup code. Rather core MM initialization can call arch_zone_limits_init() in a single place. This allows unifying the ordering of hugetlb vs memory map allocation and initialization. Remove the call to free_area_init() from architecture specific code and place it in a new mm_core_init_early() function that is called immediately after setup_arch(). After this refactoring it is possible to consolidate hugetlb allocations and eliminate differences in ordering of hugetlb and memory map initialization among different architectures. As the first step of this consolidation move hugetlb_bootmem_alloc() to mm_core_init_early(). Link: https://lkml.kernel.org/r/20260111082105.290734-24-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alex Shi <alexs@kernel.org> Cc: Andreas Larsson <andreas@gaisler.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: David Hildenbrand <david@kernel.org> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Klara Modin <klarasmodin@gmail.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Muchun Song <muchun.song@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Pratyush Yadav <pratyush@kernel.org> Cc: Richard Weinberger <richard@nod.at> Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
180 lines
4.9 KiB
C
180 lines
4.9 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/memblock.h>
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
#include <linux/initrd.h>
|
|
#endif
|
|
#include <linux/of_fdt.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/module.h>
|
|
#include <linux/highmem.h>
|
|
#include <asm/page.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/setup.h>
|
|
#include <asm/arcregs.h>
|
|
|
|
/* Kernel's top-level page directory, page-aligned for the MMU */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);

/* Shared all-zero page, mapped read-only wherever a zero page is needed */
char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
EXPORT_SYMBOL(empty_zero_page);

/* Low mem always starts at the compile-time RAM base */
static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
/* Size of low mem: set from DT, may be overridden by "mem=" cmdline */
static unsigned long low_mem_sz;

#ifdef CONFIG_HIGHMEM
/* PFN span of the (discontiguous) highmem bank, set in setup_arch_memory() */
static unsigned long min_high_pfn, max_high_pfn;
/* Second DT memory bank, recorded by early_init_dt_add_memory_arch() */
static phys_addr_t high_mem_start;
static phys_addr_t high_mem_sz;
/* First pfn of the combined mem_map (lower of low/high mem bases) */
unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
|
|
|
|
/*
 * Return the low mem size in bytes, as discovered from the DT memory
 * node (or overridden via "mem=" on the command line).
 */
long __init arc_get_mem_sz(void)
{
	return low_mem_sz;
}
|
|
|
|
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
|
|
static int __init setup_mem_sz(char *str)
|
|
{
|
|
low_mem_sz = memparse(str, NULL) & PAGE_MASK;
|
|
|
|
/* early console might not be setup yet - it will show up later */
|
|
pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
|
|
|
|
return 0;
|
|
}
|
|
early_param("mem", setup_mem_sz);
|
|
|
|
/*
 * Callback invoked by the early flattened-DT scan for each region of the
 * /memory node.
 *
 * The first region becomes low mem (NUMA node 0) and must start exactly
 * at CONFIG_LINUX_RAM_BASE. A subsequent region is treated as highmem
 * (node 1) when CONFIG_HIGHMEM is enabled, otherwise it is ignored.
 */
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	int in_use = 0;

	if (!low_mem_sz) {
		/* First bank: base must match the compile-time RAM base */
		if (base != low_mem_start)
			panic("CONFIG_LINUX_RAM_BASE != DT memory { }");

		low_mem_sz = size;
		in_use = 1;
		memblock_add_node(base, size, 0, MEMBLOCK_NONE);
	} else {
#ifdef CONFIG_HIGHMEM
		high_mem_start = base;
		high_mem_sz = size;
		in_use = 1;
		memblock_add_node(base, size, 1, MEMBLOCK_NONE);
		/*
		 * Keep highmem out of the bootmem allocator for now;
		 * it is handed back via memblock_phys_free() in
		 * arch_mm_preinit().
		 */
		memblock_reserve(base, size);
#endif
	}

	pr_info("Memory @ %llx [%lldM] %s\n",
		base, TO_MB(size), !in_use ? "Not used":"");
}
|
|
|
|
/*
 * Report the highest PFN of each populated zone to core MM, which calls
 * this (instead of arch code calling free_area_init() directly) when
 * building node/zone/memory map data structures.
 */
void __init arch_zone_limits_init(unsigned long *max_zone_pfn)
{
	/*----------------- node/zones setup --------------------------*/
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_HIGHMEM
	/*
	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
	 * For HIGHMEM without PAE max_high_pfn should be less than
	 * min_low_pfn to guarantee that these two regions don't overlap.
	 * For PAE case highmem is greater than lowmem, so it is natural
	 * to use max_high_pfn.
	 *
	 * In both cases, holes should be handled by pfn_valid().
	 */
	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
#endif
}
|
|
|
|
/*
|
|
* First memory setup routine called from setup_arch()
|
|
* 1. setup swapper's mm @init_mm
|
|
* 2. Count the pages we have and setup bootmem allocator
|
|
* 3. zone setup
|
|
*/
|
|
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* first page of system - kernel .vector starts here */
	min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash
	 */

	/* Keep the kernel image itself out of the early allocator */
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	/* Protect the initrd image and publish its virtual address span */
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);
		initrd_start = (unsigned long)__va(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();

#ifdef CONFIG_HIGHMEM
	/*
	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
	 * than addresses in normal aka low memory (0x8000_0000 based).
	 * Even with PAE, the huge peripheral space hole would waste a lot of
	 * mem with single contiguous mem_map[].
	 * Thus when HIGHMEM on ARC is enabled the memory map corresponding
	 * to the hole is freed and ARC specific version of pfn_valid()
	 * handles the hole in the memory map.
	 */

	min_high_pfn = PFN_DOWN(high_mem_start);
	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);

	/* First pfn of the discontiguous map: lower of the two bank bases */
	arch_pfn_offset = min(min_low_pfn, min_high_pfn);

	kmap_init();
#endif /* CONFIG_HIGHMEM */
}
|
|
|
|
/*
 * Arch hook from core MM initialization.
 * Hands the highmem range that early_init_dt_add_memory_arch() kept
 * memblock-reserved back to the allocator, and build-time checks that
 * each page-table level fits in a single page.
 */
void __init arch_mm_preinit(void)
{
#ifdef CONFIG_HIGHMEM
	/* Release the highmem bank reserved during early DT parsing */
	memblock_phys_free(high_mem_start, high_mem_sz);
#endif

	BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
}
|
|
|
|
#ifdef CONFIG_HIGHMEM
/*
 * With discontiguous low/high mem banks the mem_map has a hole, so the
 * generic contiguous-range check doesn't apply: a pfn is valid iff it
 * falls within either bank's [min, max] span.
 */
int pfn_valid(unsigned long pfn)
{
	if (pfn >= min_low_pfn && pfn <= max_low_pfn)
		return 1;

	return pfn >= min_high_pfn && pfn <= max_high_pfn;
}
EXPORT_SYMBOL(pfn_valid);
#endif
|