mm: fix minor spelling mistakes in comments

Correct several typos in comments across files in mm/.

[akpm@linux-foundation.org: also fix comment grammar, per SeongJae]
Link: https://lkml.kernel.org/r/20251218150906.25042-1-klourencodev@gmail.com
Signed-off-by: Kevin Lourenco <klourencodev@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Kevin Lourenco 2025-12-18 16:09:06 +01:00 committed by Andrew Morton
parent 5ec9bb6de4
commit 62451ae347
27 changed files with 45 additions and 45 deletions

@@ -171,7 +171,7 @@ static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
/*
* OK, we tried to call the file hook for mmap(), but an error
- * arose. The mapping is in an inconsistent state and we most not invoke
+ * arose. The mapping is in an inconsistent state and we must not invoke
* any further hooks on it.
*/
vma->vm_ops = &vma_dummy_vm_ops;

@@ -1867,7 +1867,7 @@ static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
* madvise_should_skip() - Return if the request is invalid or nothing.
* @start: Start address of madvise-requested address range.
* @len_in: Length of madvise-requested address range.
- * @behavior: Requested madvise behavor.
+ * @behavior: Requested madvise behavior.
* @err: Pointer to store an error code from the check.
*
* If the specified behaviour is invalid or nothing would occur, we skip the

@@ -773,7 +773,7 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
unsigned long start_pfn, end_pfn, mem_size_mb;
int nid, i;
- /* calculate lose page */
+ /* calculate lost page */
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
if (!numa_valid_node(nid))
nr_pages += end_pfn - start_pfn;
@@ -2414,7 +2414,7 @@ EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
/**
* reserve_mem_release_by_name - Release reserved memory region with a given name
- * @name: The name that is attatched to a reserved memory region
+ * @name: The name that is attached to a reserved memory region
*
* Forcibly release the pages in the reserved memory region so that those memory
* can be used as free memory. After released the reserved region size becomes 0.

@@ -4976,7 +4976,7 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
memcg = folio_memcg(old);
/*
* Note that it is normal to see !memcg for a hugetlb folio.
- * For e.g, itt could have been allocated when memory_hugetlb_accounting
+ * For e.g, it could have been allocated when memory_hugetlb_accounting
* was not selected.
*/
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);

@@ -864,7 +864,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
*
* MF_RECOVERED - The m-f() handler marks the page as PG_hwpoisoned'ed.
* The page has been completely isolated, that is, unmapped, taken out of
- * the buddy system, or hole-punnched out of the file mapping.
+ * the buddy system, or hole-punched out of the file mapping.
*/
static const char *action_name[] = {
[MF_IGNORED] = "Ignored",

@@ -648,7 +648,7 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype)
if (node_memory_types[node].memtype == memtype || !memtype)
node_memory_types[node].map_count--;
/*
- * If we umapped all the attached devices to this node,
+ * If we unmapped all the attached devices to this node,
* clear the node memory type.
*/
if (!node_memory_types[node].map_count) {

@@ -5935,7 +5935,7 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
else
*last_cpupid = folio_last_cpupid(folio);
- /* Record the current PID acceesing VMA */
+ /* Record the current PID accessing VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
@@ -6254,7 +6254,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
* Use the maywrite version to indicate that vmf->pte may be
* modified, but since we will use pte_same() to detect the
* change of the !pte_none() entry, there is no need to recheck
- * the pmdval. Here we chooes to pass a dummy variable instead
+ * the pmdval. Here we choose to pass a dummy variable instead
* of NULL, which helps new user think about why this place is
* special.
*/

@@ -926,7 +926,7 @@ static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn
*
* MOVABLE : KERNEL_EARLY
*
- * Whereby KERNEL_EARLY is memory in one of the kernel zones, available sinze
+ * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
* boot. We base our calculation on KERNEL_EARLY internally, because:
*
* a) Hotplugged memory in one of the kernel zones can sometimes still get
@@ -1258,7 +1258,7 @@ static pg_data_t *hotadd_init_pgdat(int nid)
* NODE_DATA is preallocated (free_area_init) but its internal
* state is not allocated completely. Add missing pieces.
* Completely offline nodes stay around and they just need
- * reintialization.
+ * reinitialization.
*/
pgdat = NODE_DATA(nid);

@@ -1419,10 +1419,10 @@ EXPORT_SYMBOL(migrate_device_range);
/**
* migrate_device_pfns() - migrate device private pfns to normal memory.
- * @src_pfns: pre-popluated array of source device private pfns to migrate.
+ * @src_pfns: pre-populated array of source device private pfns to migrate.
* @npages: number of pages to migrate.
*
- * Similar to migrate_device_range() but supports non-contiguous pre-popluated
+ * Similar to migrate_device_range() but supports non-contiguous pre-populated
* array of device pages to migrate.
*/
int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)

@@ -187,7 +187,7 @@ void mm_compute_batch(int overcommit_policy)
/*
* For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
* (total memory/#cpus), and lift it to 25% for other policies
- * to easy the possible lock contention for percpu_counter
+ * to ease the possible lock contention for percpu_counter
* vm_committed_as, while the max limit is INT_MAX
*/
if (overcommit_policy == OVERCOMMIT_NEVER)
@@ -2045,7 +2045,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
* Initialize and free pages.
*
* At this point reserved pages and struct pages that correspond to holes in
- * memblock.memory are already intialized so every free range has a valid
+ * memblock.memory are already initialized so every free range has a valid
* memory map around it.
* This ensures that access of pages that are ahead of the range being
* initialized (computing buddy page in __free_one_page()) always reads a valid

@@ -678,7 +678,7 @@ static bool can_realign_addr(struct pagetable_move_control *pmc,
/*
* We don't want to have to go hunting for VMAs from the end of the old
* VMA to the next page table boundary, also we want to make sure the
- * operation is wortwhile.
+ * operation is worthwhile.
*
* So ensure that we only perform this realignment if the end of the
* range being copied reaches or crosses the page table boundary.
@@ -926,7 +926,7 @@ static bool vrm_overlaps(struct vma_remap_struct *vrm)
/*
* Will a new address definitely be assigned? This either if the user specifies
* it via MREMAP_FIXED, or if MREMAP_DONTUNMAP is used, indicating we will
- * always detemrine a target address.
+ * always determine a target address.
*/
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
@@ -1806,7 +1806,7 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
/*
* move_vma() need us to stay 4 maps below the threshold, otherwise
* it will bail out at the very beginning.
- * That is a problem if we have already unmaped the regions here
+ * That is a problem if we have already unmapped the regions here
* (new_addr, and old_addr), because userspace will not know the
* state of the vma's after it gets -ENOMEM.
* So, to avoid such scenario we can pre-compute if the whole

@@ -21,7 +21,7 @@
* It disallows unmapped regions from start to end whether they exist at the
* start, in the middle, or at the end of the range, or any combination thereof.
*
- * This is because after sealng a range, there's nothing to stop memory mapping
+ * This is because after sealing a range, there's nothing to stop memory mapping
* of ranges in the remaining gaps later, meaning that the user might then
* wrongly consider the entirety of the mseal()'d range to be sealed when it
* in fact isn't.
@@ -124,7 +124,7 @@ static int mseal_apply(struct mm_struct *mm,
* -EINVAL:
* invalid input flags.
* start address is not page aligned.
- * Address arange (start + len) overflow.
+ * Address range (start + len) overflow.
* -ENOMEM:
* addr is not a valid address (not allocated).
* end (start + len) is not a valid address.

@@ -467,7 +467,7 @@ int __init numa_memblks_init(int (*init_func)(void),
* We reset memblock back to the top-down direction
* here because if we configured ACPI_NUMA, we have
* parsed SRAT in init_func(). It is ok to have the
- * reset here even if we did't configure ACPI_NUMA
+ * reset here even if we didn't configure ACPI_NUMA
* or acpi numa init fails and fallbacks to dummy
* numa init.
*/

@@ -1853,7 +1853,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
/*
* As memory initialization might be integrated into KASAN,
- * KASAN unpoisoning and memory initializion code must be
+ * KASAN unpoisoning and memory initialization code must be
* kept together to avoid discrepancies in behavior.
*/
@@ -7653,7 +7653,7 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned
* unsafe in NMI. If spin_trylock() is called from hard IRQ the current
* task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
* mark the task as the owner of another rt_spin_lock which will
- * confuse PI logic, so return immediately if called form hard IRQ or
+ * confuse PI logic, so return immediately if called from hard IRQ or
* NMI.
*
* Note, irqs_disabled() case is ok. This function can be called

@@ -450,14 +450,14 @@ void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug)
VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/*
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
if (data_race(sis->flags & SWP_FS_OPS))
swap_writepage_fs(folio, swap_plug);
/*
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
* is safe.
*/

@@ -301,7 +301,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* pageblock. When not all pageblocks within a page are isolated at the same
* time, free page accounting can go wrong. For example, in the case of
* MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
- * pagelbocks.
+ * pageblocks.
* [ MAX_PAGE_ORDER ]
* [ pageblock0 | pageblock1 ]
* When either pageblock is isolated, if it is a free page, the page is not

@@ -123,7 +123,7 @@ page_reporting_drain(struct page_reporting_dev_info *prdev,
continue;
/*
- * If page was not comingled with another page we can
+ * If page was not commingled with another page we can
* consider the result to be "reported" since the page
* hasn't been modified, otherwise we will need to
* report on the new larger page when we make our way

@@ -513,7 +513,7 @@ void folio_add_lru(struct folio *folio)
EXPORT_SYMBOL(folio_add_lru);
/**
- * folio_add_lru_vma() - Add a folio to the appropate LRU list for this VMA.
+ * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
* @folio: The folio to be added to the LRU.
* @vma: VMA in which the folio is mapped.
*

@@ -236,7 +236,7 @@ static inline bool folio_matches_swap_entry(const struct folio *folio,
/*
* All swap cache helpers below require the caller to ensure the swap entries
- * used are valid and stablize the device by any of the following ways:
+ * used are valid and stabilize the device by any of the following ways:
* - Hold a reference by get_swap_device(): this ensures a single entry is
* valid and increases the swap device's refcount.
* - Locking a folio in the swap cache: this ensures the folio's swap entries

@@ -82,7 +82,7 @@ void show_swap_cache_info(void)
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* Return: Returns the found folio on success, NULL otherwise. The caller
- * must lock nd check if the folio still matches the swap entry before
+ * must lock and check if the folio still matches the swap entry before
* use (e.g., folio_matches_swap_entry).
*/
struct folio *swap_cache_get_folio(swp_entry_t entry)

@@ -2018,7 +2018,7 @@ swp_entry_t get_swap_page_of_type(int type)
if (get_swap_device_info(si)) {
if (si->flags & SWP_WRITEOK) {
/*
- * Grab the local lock to be complaint
+ * Grab the local lock to be compliant
* with swap table allocation.
*/
local_lock(&percpu_swap_cluster.lock);

@@ -1274,7 +1274,7 @@ retry:
* Use the maywrite version to indicate that dst_pte will be modified,
* since dst_pte needs to be none, the subsequent pte_same() check
* cannot prevent the dst_pte page from being freed concurrently, so we
- * also need to abtain dst_pmdval and recheck pmd_same() later.
+ * also need to obtain dst_pmdval and recheck pmd_same() later.
*/
dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dst_pmdval,
&dst_ptl);
@@ -1330,7 +1330,7 @@ retry:
goto out;
}
- /* If PTE changed after we locked the folio them start over */
+ /* If PTE changed after we locked the folio then start over */
if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
ret = -EAGAIN;
goto out;

@@ -2951,10 +2951,10 @@ retry:
return -ENOMEM;
/*
- * Adjust for the gap first so it doesn't interfere with the
- * later alignment. The first step is the minimum needed to
- * fulill the start gap, the next steps is the minimum to align
- * that. It is the minimum needed to fulill both.
+ * Adjust for the gap first so it doesn't interfere with the later
+ * alignment. The first step is the minimum needed to fulfill the start
+ * gap, the next step is the minimum to align that. It is the minimum
+ * needed to fulfill both.
*/
gap = vma_iter_addr(&vmi) + info->start_gap;
gap += (info->align_offset - gap) & info->align_mask;

@@ -267,7 +267,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct vm_area_struct *next);
/**
- * vma_modify_flags() - Peform any necessary split/merge in preparation for
+ * vma_modify_flags() - Perform any necessary split/merge in preparation for
* setting VMA flags to *@vm_flags in the range @start to @end contained within
* @vma.
* @vmi: Valid VMA iterator positioned at @vma.
@@ -295,7 +295,7 @@ __must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
vm_flags_t *vm_flags_ptr);
/**
- * vma_modify_name() - Peform any necessary split/merge in preparation for
+ * vma_modify_name() - Perform any necessary split/merge in preparation for
* setting anonymous VMA name to @new_name in the range @start to @end contained
* within @vma.
* @vmi: Valid VMA iterator positioned at @vma.
@@ -319,7 +319,7 @@ __must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
struct anon_vma_name *new_name);
/**
- * vma_modify_policy() - Peform any necessary split/merge in preparation for
+ * vma_modify_policy() - Perform any necessary split/merge in preparation for
* setting NUMA policy to @new_pol in the range @start to @end contained
* within @vma.
* @vmi: Valid VMA iterator positioned at @vma.
@@ -343,7 +343,7 @@ __must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
struct mempolicy *new_pol);
/**
- * vma_modify_flags_uffd() - Peform any necessary split/merge in preparation for
+ * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation for
* setting VMA flags to @vm_flags and UFFD context to @new_ctx in the range
* @start to @end contained within @vma.
* @vmi: Valid VMA iterator positioned at @vma.

@@ -1063,7 +1063,7 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
/*
* We can "enter_fs" for swap-cache with only __GFP_IO
* providing this isn't SWP_FS_OPS.
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/

@@ -1626,7 +1626,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
}
}
- /* Print out the free pages at each order for each migatetype */
+ /* Print out the free pages at each order for each migratetype */
static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
int order;

@@ -105,7 +105,7 @@
/*
* On systems with 4K page size, this gives 255 size classes! There is a
- * trader-off here:
+ * trade-off here:
* - Large number of size classes is potentially wasteful as free page are
* spread across these classes
* - Small number of size classes causes large internal fragmentation