Alpha systems can suffer sporadic user-space crashes and heap corruption when memory compaction is enabled. Symptoms include SIGSEGV, glibc allocator failures (e.g. "unaligned tcache chunk"), and compiler internal errors. The failures disappear when compaction is disabled or when global TLB invalidation is used.

The root cause is insufficient TLB shootdown during page migration. Alpha relies on ASN-based MM context rollover for instruction cache coherency, but this alone is not sufficient to prevent stale data or instruction translations from surviving migration.

Fix this by introducing a migration-specific helper that combines:

- MM context invalidation (ASN rollover),
- immediate per-CPU TLB invalidation (TBI),
- synchronous cross-CPU shootdown when required.

The helper is used only by migration/compaction paths, to avoid changing global TLB semantics. Additionally, update flush_tlb_other() and pte_clear() to use READ_ONCE()/WRITE_ONCE() for correct SMP memory ordering.

This fixes observed crashes on both UP and SMP Alpha systems.

Reviewed-by: Ivan Kokshaysky <ink@unseen.parts>
Tested-by: Matoro Mahri <matoro_mailinglist_kernel@matoro.tk>
Tested-by: Michael Cree <mcree@orcon.net.nz>
Signed-off-by: Magnus Lindholm <linmag7@gmail.com>
Link: https://lore.kernel.org/r/20260102173603.18247-2-linmag7@gmail.com
Signed-off-by: Magnus Lindholm <linmag7@gmail.com>
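The READ_ONCE()/WRITE_ONCE() part of the change lands outside the file shown below; as a reading aid, here is a minimal sketch of that pattern applied to flush_tlb_other(), based on its existing definition in arch/alpha/include/asm/tlbflush.h (a sketch of the described change, not the verbatim diff):

static inline void flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];

	/*
	 * mm->context[] is read and written concurrently by other CPUs
	 * during shootdown; the annotations keep the compiler from
	 * tearing, caching, or reordering these accesses.
	 */
	if (READ_ONCE(*mmc))
		WRITE_ONCE(*mmc, 0);
}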
// SPDX-License-Identifier: GPL-2.0
/*
 * Alpha TLB shootdown helpers
 *
 * Copyright (C) 2025 Magnus Lindholm <linmag7@gmail.com>
 *
 * Alpha-specific TLB flush helpers that cannot be expressed purely
 * as inline functions.
 *
 * These helpers provide combined MM context handling (ASN rollover)
 * and immediate TLB invalidation for page migration and memory
 * compaction paths, where lazy shootdowns are insufficient.
 */

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/pal.h>
#include <asm/mmu_context.h>

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
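/*
 * tbi_type selects the TBI PALcode operation (see asm/tlbflush.h):
 *   tbi(2, addr) - tbisd: invalidate the data translation for one VA
 *   tbi(3, addr) - tbis:  invalidate both the instruction and the data
 *                         translation for one VA
 * Exec mappings use type 3 below so that a stale ITB entry cannot
 * survive migration.
 */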
/*
 * Migration/compaction helper: combine mm context (ASN) handling with an
 * immediate per-page TLB invalidate and (for exec) an instruction barrier.
 *
 * This mirrors the SMP combined IPI handler semantics, but runs locally on UP.
 */
#ifndef CONFIG_SMP
void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2;

	/*
	 * First do the mm-context side:
	 * if we're currently running this mm, reload a fresh context ASN;
	 * otherwise, mark the context invalid.
	 *
	 * On UP, this is mostly about matching the SMP semantics and ensuring
	 * exec/i-cache tagging assumptions hold when compaction migrates pages.
	 */
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);

	/*
	 * Then do the immediate translation kill for this VA.
	 * For exec mappings, order instruction fetch after invalidation.
	 */
	tbi(tbi_type, addr);
}
#else
struct tlb_mm_and_addr {
	struct mm_struct *mm;
	unsigned long addr;
	int tbi_type;	/* 2 = DTB, 3 = ITB+DTB */
};

static void ipi_flush_mm_and_page(void *x)
{
	struct tlb_mm_and_addr *d = x;

	/* Part 1: mm context side (Alpha uses ASN/context as a key mechanism). */
	if (d->mm == current->active_mm && !asn_locked())
		__load_new_mm_context(d->mm);
	else
		flush_tlb_other(d->mm);

	/* Part 2: immediate per-VA invalidation on this CPU. */
	tbi(d->tbi_type, d->addr);
}

void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct tlb_mm_and_addr d = {
		.mm = mm,
		.addr = addr,
		.tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2,
	};

	/*
	 * One synchronous rendezvous: every CPU runs ipi_flush_mm_and_page().
	 * This is the "combined" version of flush_tlb_mm + per-page invalidate.
	 */
	preempt_disable();
	on_each_cpu(ipi_flush_mm_and_page, &d, 1);

	/*
	 * Mimic flush_tlb_mm()'s mm_users <= 1 optimization: with no other
	 * users, any context another CPU still holds for this mm is stale,
	 * so mark it invalid.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		int cpu, this_cpu = smp_processor_id();

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!cpu_online(cpu) || cpu == this_cpu)
				continue;
			if (READ_ONCE(mm->context[cpu]))
				WRITE_ONCE(mm->context[cpu], 0);
		}
	}
	preempt_enable();
}
#endif
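For illustration, a hypothetical call site (the function and variable names here are assumptions for the sketch, not part of the patch), showing how a migration path would pair PTE replacement with the combined shootdown:

/*
 * Hypothetical sketch: replace a migrated page's PTE and ensure no CPU
 * retains a stale translation for the old physical page.
 */
static void example_replace_migrated_pte(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t newpte)
{
	pte_clear(vma->vm_mm, addr, ptep);	/* drop the old mapping */
	migrate_flush_tlb_page(vma, addr);	/* ASN rollover + immediate TBI */
	set_pte_at(vma->vm_mm, addr, ptep, newpte); /* install the new mapping */
}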