LoongArch changes for v7.0

1, Select HAVE_CMPXCHG_{LOCAL,DOUBLE};
 2, Add 128-bit atomic cmpxchg support;
 3, Add HOTPLUG_SMT implementation;
 4, Wire up memfd_secret system call;
 5, Fix boot errors and unwind errors for KASAN;
 6, Use BPF prog pack allocator and add BPF arena support;
 7, Update dts files to add nand controllers;
 8, Some bug fixes and other small changes.
 -----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmmMTnsWHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImeo0AEACFniyK/cbaBchYAONJb5TxXcW6
 7pvFEAbNrTzvQ8TTGpt+EBsOZlqE+y/afB/NlR06Aow8ifvUnOxJu9Ur1afo2r6A
 syB3Y7OsuUd8nxsATgrfJrNZnqq30dCJWxnBlP+YCCHQ2FFjLHIGcheRNM7rTrzd
 LvGCnBwHSKmKv5wGxsDJufYxbHgeb4YvrwZiNJC0ELRM9VqMSCogkIlayJrfC26S
 Or89+6i2XLC3K+Rrd1MgPp2HX6W9utzhB7kSmro0piUyX6F5UtL1YGHC9t1hamIZ
 yuTStXOZA2bYQPwEmXNNVucX8UfmPOeUQgl0P0n8XG09RGq0uNKFhfkSy9d+lxUl
 2jftUZGujgV3/RsehrsKcto1ZBwwd2FyKL7uLWucuop+XJvrqIus/hsR+M2FI9IY
 6sngOJZkKWfxMECTL7+FAMOGuxnghRk0VBZRJ8PqHTU/9YkKLQf0iyYqmvl+wOgu
 ByJmEapmVdrdGG78zUHsMDAqUFo518ixABhExWuqwEE2/zSj2jQIliIAcHRSJkvT
 ZOW1CZBX54AuFfRvjelYucSz1Q89lHC7U9WjYkte8Rv4tyPOYnTUmg3ouBPm5W2+
 MuPVt1Y4rJN8RnD+1sSHJa4laMo7gZN2Cr4LsELc0mURxOfRK/hU1bjA9JM4mv2X
 2L69IvQDbaG2H61qBQ==
 =WKfm
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:
 - Select HAVE_CMPXCHG_{LOCAL,DOUBLE}
 - Add 128-bit atomic cmpxchg support
 - Add HOTPLUG_SMT implementation
 - Wire up memfd_secret system call
 - Fix boot errors and unwind errors for KASAN
 - Use BPF prog pack allocator and add BPF arena support
 - Update dts files to add nand controllers
 - Some bug fixes and other small changes

* tag 'loongarch-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: dts: loongson-2k1000: Add nand controller support
  LoongArch: dts: loongson-2k0500: Add nand controller support
  LoongArch: BPF: Implement bpf_addr_space_cast instruction
  LoongArch: BPF: Implement PROBE_MEM32 pseudo instructions
  LoongArch: BPF: Use BPF prog pack allocator
  LoongArch: Use IS_ERR_PCPU() macro for KGDB
  LoongArch: Rework KASAN initialization for PTW-enabled systems
  LoongArch: Disable instrumentation for setup_ptwalker()
  LoongArch: Remove some extern variables in source files
  LoongArch: Guard percpu handler under !CONFIG_PREEMPT_RT
  LoongArch: Handle percpu handler address for ORC unwinder
  LoongArch: Use %px to print unmodified unwinding address
  LoongArch: Prefer top-down allocation after arch_mem_init()
  LoongArch: Add HOTPLUG_SMT implementation
  LoongArch: Make cpumask_of_node() robust against NUMA_NO_NODE
  LoongArch: Wire up memfd_secret system call
  LoongArch: Replace seq_printf() with seq_puts() for simple strings
  LoongArch: Add 128-bit atomic cmpxchg support
  LoongArch: Add detection for SC.Q support
  LoongArch: Select HAVE_CMPXCHG_LOCAL in Kconfig
This commit is contained in:
Linus Torvalds 2026-02-14 12:47:15 -08:00
commit 64275e9fda
26 changed files with 450 additions and 153 deletions

View file

@ -4661,7 +4661,7 @@ Kernel parameters
nosmt [KNL,MIPS,PPC,EARLY] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
[KNL,X86,PPC,S390] Disable symmetric multithreading (SMT).
[KNL,LOONGARCH,X86,PPC,S390] Disable symmetric multithreading (SMT).
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.

View file

@ -114,6 +114,7 @@ config LOONGARCH
select GENERIC_TIME_VSYSCALL
select GPIOLIB
select HAS_IOPORT
select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_JUMP_LABEL
@ -130,6 +131,8 @@ config LOONGARCH
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
@ -183,6 +186,7 @@ config LOONGARCH
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select HOTPLUG_SMT if HOTPLUG_CPU
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA

View file

@ -41,6 +41,25 @@
};
};
&apbdma0 {
status = "okay";
};
&nand {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
label = "ls2k0500-nand";
nand-use-soft-ecc-engine;
nand-ecc-algo = "bch";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
};
};
&apbdma3 {
status = "okay";
};

View file

@ -84,7 +84,7 @@
clock-names = "ref_100m";
};
dma-controller@1fe10c00 {
apbdma0: dma-controller@1fe10c00 {
compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
reg = <0 0x1fe10c00 0 0x8>;
interrupt-parent = <&eiointc>;
@ -172,6 +172,16 @@
interrupts = <3>;
};
nand: nand-controller@1ff58000 {
compatible = "loongson,ls2k0500-nand-controller";
reg = <0 0x1ff58000 0 0x24>,
<0 0x1ff58040 0 0x4>;
reg-names = "nand", "nand-dma";
dmas = <&apbdma0 0>;
dma-names = "rxtx";
status = "disabled";
};
pwm@1ff5c000 {
compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
reg = <0x0 0x1ff5c000 0x0 0x10>;

View file

@ -48,6 +48,28 @@
};
};
&apbdma0 {
status = "okay";
};
&nand {
status = "okay";
pinctrl-0 = <&nand_pins_default>;
pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
label = "ls2k1000-nand";
nand-use-soft-ecc-engine;
nand-ecc-algo = "bch";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
};
};
&apbdma1 {
status = "okay";
};

View file

@ -248,7 +248,7 @@
#thermal-sensor-cells = <1>;
};
dma-controller@1fe00c00 {
apbdma0: dma-controller@1fe00c00 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c00 0x0 0x8>;
interrupt-parent = <&liointc1>;
@ -364,6 +364,17 @@
status = "disabled";
};
nand: nand-controller@1fe26000 {
compatible = "loongson,ls2k1000-nand-controller";
reg = <0 0x1fe26000 0 0x24>,
<0 0x1fe26040 0 0x4>,
<0 0x1fe00438 0 0x8>;
reg-names = "nand", "nand-dma", "dma-config";
dmas = <&apbdma0 0>;
dma-names = "rxtx";
status = "disabled";
};
pmc: power-management@1fe27000 {
compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x1fe27000 0x0 0x58>;

View file

@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#define __xchg_amo_asm(amswap_db, m, val) \
({ \
@ -236,6 +237,59 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch_cmpxchg((ptr), (o), (n)); \
})
union __u128_halves {
u128 full;
struct {
u64 low;
u64 high;
};
};
#define system_has_cmpxchg128() cpu_opt(LOONGARCH_CPU_SCQ)
#define __arch_cmpxchg128(ptr, old, new, llsc_mb) \
({ \
union __u128_halves __old, __new, __ret; \
volatile u64 *__ptr = (volatile u64 *)(ptr); \
\
__old.full = (old); \
__new.full = (new); \
\
__asm__ __volatile__( \
"1: ll.d %0, %3 # 128-bit cmpxchg low \n" \
llsc_mb \
" ld.d %1, %4 # 128-bit cmpxchg high \n" \
" move $t0, %0 \n" \
" move $t1, %1 \n" \
" bne %0, %z5, 2f \n" \
" bne %1, %z6, 2f \n" \
" move $t0, %z7 \n" \
" move $t1, %z8 \n" \
"2: sc.q $t0, $t1, %2 \n" \
" beqz $t0, 1b \n" \
llsc_mb \
: "=&r" (__ret.low), "=&r" (__ret.high) \
: "r" (__ptr), \
"ZC" (__ptr[0]), "m" (__ptr[1]), \
"Jr" (__old.low), "Jr" (__old.high), \
"Jr" (__new.low), "Jr" (__new.high) \
: "t0", "t1", "memory"); \
\
__ret.full; \
})
#define arch_cmpxchg128(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 16); \
__arch_cmpxchg128(ptr, o, n, __WEAK_LLSC_MB); \
})
#define arch_cmpxchg128_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 16); \
__arch_cmpxchg128(ptr, o, n, ""); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

View file

@ -35,6 +35,7 @@
*/
#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG)
#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM)
#define cpu_has_scq cpu_opt(LOONGARCH_CPU_SCQ)
#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL)
#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU)
#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX)

View file

@ -95,39 +95,41 @@ static inline char *id_to_core_name(unsigned int id)
*/
#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */
#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */
#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */
#define CPU_FEATURE_FPU 3 /* CPU has FPU */
#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */
#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */
#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */
#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */
#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */
#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */
#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */
#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
#define CPU_FEATURE_PMP 21 /* CPU has perfermance counter */
#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
#define CPU_FEATURE_MSGINT 29 /* CPU has MSG interrupt */
#define CPU_FEATURE_AVECINT 30 /* CPU has AVEC interrupt */
#define CPU_FEATURE_REDIRECTINT 31 /* CPU has interrupt remapping */
#define CPU_FEATURE_SCQ 2 /* CPU has SC.Q instruction */
#define CPU_FEATURE_UAL 3 /* CPU supports unaligned access */
#define CPU_FEATURE_FPU 4 /* CPU has FPU */
#define CPU_FEATURE_LSX 5 /* CPU has LSX (128-bit SIMD) */
#define CPU_FEATURE_LASX 6 /* CPU has LASX (256-bit SIMD) */
#define CPU_FEATURE_CRC32 7 /* CPU has CRC32 instructions */
#define CPU_FEATURE_COMPLEX 8 /* CPU has Complex instructions */
#define CPU_FEATURE_CRYPTO 9 /* CPU has Crypto instructions */
#define CPU_FEATURE_LVZ 10 /* CPU has Virtualization extension */
#define CPU_FEATURE_LBT_X86 11 /* CPU has X86 Binary Translation */
#define CPU_FEATURE_LBT_ARM 12 /* CPU has ARM Binary Translation */
#define CPU_FEATURE_LBT_MIPS 13 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 14 /* CPU has TLB */
#define CPU_FEATURE_CSR 15 /* CPU has CSR */
#define CPU_FEATURE_IOCSR 16 /* CPU has IOCSR */
#define CPU_FEATURE_WATCH 17 /* CPU has watchpoint registers */
#define CPU_FEATURE_VINT 18 /* CPU has vectored interrupts */
#define CPU_FEATURE_CSRIPI 19 /* CPU has CSR-IPI */
#define CPU_FEATURE_EXTIOI 20 /* CPU has EXT-IOI */
#define CPU_FEATURE_PREFETCH 21 /* CPU has prefetch instructions */
#define CPU_FEATURE_PMP 22 /* CPU has perfermance counter */
#define CPU_FEATURE_SCALEFREQ 23 /* CPU supports cpufreq scaling */
#define CPU_FEATURE_FLATMODE 24 /* CPU has flat mode */
#define CPU_FEATURE_EIODECODE 25 /* CPU has EXTIOI interrupt pin decode mode */
#define CPU_FEATURE_GUESTID 26 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 27 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 28 /* CPU has hardware page table walker */
#define CPU_FEATURE_LSPW 29 /* CPU has LSPW (lddir/ldpte instructions) */
#define CPU_FEATURE_MSGINT 30 /* CPU has MSG interrupt */
#define CPU_FEATURE_AVECINT 31 /* CPU has AVEC interrupt */
#define CPU_FEATURE_REDIRECTINT 32 /* CPU has interrupt remapping */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
#define LOONGARCH_CPU_SCQ BIT_ULL(CPU_FEATURE_SCQ)
#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL)
#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU)
#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX)

View file

@ -7,6 +7,7 @@
#define _LOONGARCH_SETUP_H
#include <linux/types.h>
#include <linux/threads.h>
#include <asm/sections.h>
#include <uapi/asm/setup.h>
@ -14,6 +15,8 @@
extern unsigned long eentry;
extern unsigned long tlbrentry;
extern unsigned long pcpu_handlers[NR_CPUS];
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
extern char init_command_line[COMMAND_LINE_SIZE];
extern void tlb_init(int cpu);
extern void cpu_cache_init(void);

View file

@ -12,7 +12,7 @@
extern cpumask_t cpus_on_node[];
#define cpumask_of_node(node) (&cpus_on_node[node])
#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? cpu_all_mask : &cpus_on_node[node])
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);

View file

@ -10,5 +10,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_MEMFD_SECRET
#define NR_syscalls (__NR_syscalls)

View file

@ -18,5 +18,6 @@
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
#define HWCAP_LOONGARCH_LSPW (1 << 14)
#define HWCAP_LOONGARCH_SCQ (1 << 15)
#endif /* _UAPI_ASM_HWCAP_H */

View file

@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
# No special ABIs on loongarch so far
syscall_abis_32 +=
syscall_abis_64 +=
syscall_abis_32 += memfd_secret
syscall_abis_64 += memfd_secret

View file

@ -177,6 +177,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_LAM;
elf_hwcap |= HWCAP_LOONGARCH_LAM;
}
if (config & CPUCFG2_SCQ) {
c->options |= LOONGARCH_CPU_SCQ;
elf_hwcap |= HWCAP_LOONGARCH_SCQ;
}
if (config & CPUCFG2_FP) {
c->options |= LOONGARCH_CPU_FPU;
elf_hwcap |= HWCAP_LOONGARCH_FPU;

View file

@ -697,7 +697,7 @@ void kgdb_arch_late(void)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
if (IS_ERR_PCPU(breakinfo[i].pev)) {
pr_err("kgdb: Could not allocate hw breakpoints.\n");
breakinfo[i].pev = NULL;
return;

View file

@ -50,32 +50,49 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
seq_puts(m, "ISA\t\t\t:");
if (isa & LOONGARCH_CPU_ISA_LA32R)
seq_printf(m, " loongarch32r");
seq_puts(m, " loongarch32r");
if (isa & LOONGARCH_CPU_ISA_LA32S)
seq_printf(m, " loongarch32s");
seq_puts(m, " loongarch32s");
if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
seq_puts(m, " loongarch64");
seq_puts(m, "\n");
seq_printf(m, "Features\t\t:");
if (cpu_has_cpucfg) seq_printf(m, " cpucfg");
if (cpu_has_lam) seq_printf(m, " lam");
if (cpu_has_ual) seq_printf(m, " ual");
if (cpu_has_fpu) seq_printf(m, " fpu");
if (cpu_has_lsx) seq_printf(m, " lsx");
if (cpu_has_lasx) seq_printf(m, " lasx");
if (cpu_has_crc32) seq_printf(m, " crc32");
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
seq_puts(m, "Features\t\t:");
if (cpu_has_cpucfg)
seq_puts(m, " cpucfg");
if (cpu_has_lam)
seq_puts(m, " lam");
if (cpu_has_scq)
seq_puts(m, " scq");
if (cpu_has_ual)
seq_puts(m, " ual");
if (cpu_has_fpu)
seq_puts(m, " fpu");
if (cpu_has_lsx)
seq_puts(m, " lsx");
if (cpu_has_lasx)
seq_puts(m, " lasx");
if (cpu_has_crc32)
seq_puts(m, " crc32");
if (cpu_has_complex)
seq_puts(m, " complex");
if (cpu_has_crypto)
seq_puts(m, " crypto");
if (cpu_has_ptw)
seq_puts(m, " ptw");
if (cpu_has_lspw)
seq_puts(m, " lspw");
if (cpu_has_lvz)
seq_puts(m, " lvz");
if (cpu_has_lbt_x86)
seq_puts(m, " lbt_x86");
if (cpu_has_lbt_arm)
seq_puts(m, " lbt_arm");
if (cpu_has_lbt_mips)
seq_puts(m, " lbt_mips");
seq_puts(m, "\n");
seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
@ -83,7 +100,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
seq_printf(m, "\n\n");
seq_puts(m, "\n\n");
return 0;
}

View file

@ -413,6 +413,7 @@ static void __init arch_mem_init(char **cmdline_p)
PFN_UP(__pa_symbol(&__nosave_end)));
memblock_dump_all();
memblock_set_bottom_up(false);
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

View file

@ -365,16 +365,29 @@ void __init loongson_smp_setup(void)
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
int i = 0;
int threads_per_core = 0;
parse_acpi_topology();
cpu_data[0].global_id = cpu_logical_map(0);
if (!pptt_enabled)
threads_per_core = 1;
else {
for_each_possible_cpu(i) {
if (cpu_to_node(i) != 0)
continue;
if (cpus_are_siblings(0, i))
threads_per_core++;
}
}
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
set_cpu_present(i, true);
csr_mail_send(0, __cpu_logical_map[i], 0);
}
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
cpu_smt_set_num_threads(threads_per_core, threads_per_core);
}
/*

View file

@ -350,7 +350,21 @@ EXPORT_SYMBOL_GPL(unwind_start);
static inline unsigned long bt_address(unsigned long ra)
{
extern unsigned long eentry;
#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
int cpu;
int vec_sz = sizeof(exception_handlers);
for_each_possible_cpu(cpu) {
if (!pcpu_handlers[cpu])
continue;
if (ra >= pcpu_handlers[cpu] &&
ra < pcpu_handlers[cpu] + vec_sz) {
ra = ra + eentry - pcpu_handlers[cpu];
break;
}
}
#endif
if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
unsigned long func;
@ -494,7 +508,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->pc = bt_address(pc);
if (!state->pc) {
pr_err("cannot find unwind pc at %p\n", (void *)pc);
pr_err("cannot find unwind pc at %px\n", (void *)pc);
goto err;
}

View file

@ -23,10 +23,6 @@ extern const int unwind_hint_lasx;
extern const int unwind_hint_lbt;
extern const int unwind_hint_ri;
extern const int unwind_hint_watch;
extern unsigned long eentry;
#ifdef CONFIG_NUMA
extern unsigned long pcpu_handlers[NR_CPUS];
#endif
static inline bool scan_handlers(unsigned long entry_offset)
{
@ -65,7 +61,7 @@ static inline bool scan_handlers(unsigned long entry_offset)
static inline bool fix_exception(unsigned long pc)
{
#ifdef CONFIG_NUMA
#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
int cpu;
for_each_possible_cpu(cpu) {

View file

@ -40,39 +40,43 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
static void *mem_to_shadow(const void *addr)
{
unsigned long offset = 0;
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
if (maddr >= FIXADDR_START)
return (void *)(kasan_early_shadow_page);
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKPRANGE_WC_SEG:
offset = XKPRANGE_WC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
void *kasan_mem_to_shadow(const void *addr)
{
if (!kasan_enabled()) {
if (kasan_enabled())
return mem_to_shadow(addr);
else
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
if (maddr >= FIXADDR_START)
return (void *)(kasan_early_shadow_page);
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKPRANGE_WC_SEG:
offset = XKPRANGE_WC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
}
const void *kasan_shadow_to_mem(const void *shadow_addr)
@ -293,11 +297,8 @@ void __init kasan_init(void)
/* Maps everything to a single page of zeroes */
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)KFENCE_AREA_END));
/* Enable KASAN here before kasan_mem_to_shadow(). */
kasan_init_generic();
kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
mem_to_shadow((void *)KFENCE_AREA_END));
/* Populate the linear mapping */
for_each_mem_range(i, &pa_start, &pa_end) {
@ -307,13 +308,13 @@ void __init kasan_init(void)
if (start >= end)
break;
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
kasan_map_populate((unsigned long)mem_to_shadow(start),
(unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
}
/* Populate modules mapping */
kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
(unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
/*
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
* should make sure that it maps the zero page read-only.
@ -328,4 +329,5 @@ void __init kasan_init(void)
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
kasan_init_generic();
}

View file

@ -202,7 +202,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep
local_irq_restore(flags);
}
static void setup_ptwalker(void)
static void __no_sanitize_address setup_ptwalker(void)
{
unsigned long pwctl0, pwctl1;
unsigned long pgd_i = 0, pgd_w = 0;
@ -262,7 +262,6 @@ static void output_pgtable_bits_defines(void)
#ifdef CONFIG_NUMA
unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
static void setup_tlb_handler(int cpu)
{

View file

@ -17,6 +17,7 @@
#define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
#define REG_TCC LOONGARCH_GPR_A6
#define REG_ARENA LOONGARCH_GPR_S6 /* For storing arena_vm_start */
#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
static const int regmap[] = {
@ -136,6 +137,9 @@ static void build_prologue(struct jit_ctx *ctx)
/* To store tcc and tcc_ptr */
stack_adjust += sizeof(long) * 2;
if (ctx->arena_vm_start)
stack_adjust += 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
@ -178,6 +182,11 @@ static void build_prologue(struct jit_ctx *ctx)
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
if (ctx->arena_vm_start) {
store_offset -= sizeof(long);
emit_insn(ctx, std, REG_ARENA, LOONGARCH_GPR_SP, store_offset);
}
prepare_bpf_tail_call_cnt(ctx, &store_offset);
emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
@ -186,6 +195,9 @@ static void build_prologue(struct jit_ctx *ctx)
emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
ctx->stack_size = stack_adjust;
if (ctx->arena_vm_start)
move_imm(ctx, REG_ARENA, ctx->arena_vm_start, false);
}
static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
@ -217,6 +229,11 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
if (ctx->arena_vm_start) {
load_offset -= sizeof(long);
emit_insn(ctx, ldd, REG_ARENA, LOONGARCH_GPR_SP, load_offset);
}
/*
* When push into the stack, follow the order of tcc then tcc_ptr.
* When pop from the stack, first pop tcc_ptr then followed by tcc.
@ -442,6 +459,7 @@ static bool is_signed_bpf_cond(u8 cond)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define REG_DONT_CLEAR_MARKER 0
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@ -449,7 +467,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
regs->regs[dst_reg] = 0;
if (dst_reg != REG_DONT_CLEAR_MARKER)
regs->regs[dst_reg] = 0;
regs->csr_era = (unsigned long)&ex->fixup - offset;
return true;
@ -461,28 +480,33 @@ static int add_exception_handler(const struct bpf_insn *insn,
int dst_reg)
{
unsigned long pc;
off_t offset;
off_t ins_offset, fixup_offset;
struct exception_table_entry *ex;
if (!ctx->image || !ctx->prog->aux->extable)
if (!ctx->image || !ctx->ro_image || !ctx->prog->aux->extable)
return 0;
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
BPF_MODE(insn->code) != BPF_PROBE_MEM32)
return 0;
if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->num_exentries];
pc = (unsigned long)&ctx->image[ctx->idx - 1];
pc = (unsigned long)&ctx->ro_image[ctx->idx - 1];
offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
/*
* This is the relative offset of the instruction that may fault from
* the exception table itself. This will be written to the exception
* table and if this instruction faults, the destination register will
* be set to '0' and the execution will jump to the next instruction.
*/
ins_offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
* negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
@ -490,13 +514,23 @@ static int add_exception_handler(const struct bpf_insn *insn,
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
*
* The fixup_offset is set to the next instruction from the instruction
* that may fault. The execution will jump to this after handling the fault.
*/
offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
fixup_offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
/*
* The offsets above have been calculated using the RO buffer but we
* need to use the R/W buffer for writes. Switch ex to rw buffer for writing.
*/
ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image);
ex->insn = ins_offset;
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ctx->num_exentries++;
@ -514,8 +548,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
const u8 cond = BPF_OP(code);
const u8 t1 = LOONGARCH_GPR_T1;
const u8 t2 = LOONGARCH_GPR_T2;
const u8 src = regmap[insn->src_reg];
const u8 dst = regmap[insn->dst_reg];
const u8 t3 = LOONGARCH_GPR_T3;
u8 src = regmap[insn->src_reg];
u8 dst = regmap[insn->dst_reg];
const s16 off = insn->off;
const s32 imm = insn->imm;
const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
@ -524,6 +559,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
if (insn_is_cast_user(insn)) {
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, true);
move_imm(ctx, dst, (ctx->user_vm_start >> 32) << 32, false);
emit_insn(ctx, beq, t1, LOONGARCH_GPR_ZERO, 1);
emit_insn(ctx, or, t1, dst, t1);
move_reg(ctx, dst, t1);
break;
}
switch (off) {
case 0:
move_reg(ctx, dst, src);
@ -1021,8 +1065,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
/* LDX | PROBE_MEM32: dst = *(unsigned size *)(src + REG_ARENA + off) */
case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
sign_extend = BPF_MODE(code) == BPF_MEMSX ||
BPF_MODE(code) == BPF_PROBE_MEMSX;
if (BPF_MODE(code) == BPF_PROBE_MEM32) {
emit_insn(ctx, addd, t2, src, REG_ARENA);
src = t2;
}
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_signed_imm12(off)) {
@ -1082,6 +1137,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
/* ST | PROBE_MEM32: *(size *)(dst + REG_ARENA + off) = imm */
case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(code) == BPF_PROBE_MEM32) {
emit_insn(ctx, addd, t3, dst, REG_ARENA);
dst = t3;
}
switch (BPF_SIZE(code)) {
case BPF_B:
move_imm(ctx, t1, imm, is32);
@ -1124,6 +1189,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
}
break;
}
ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER);
if (ret)
return ret;
break;
/* *(size *)(dst + off) = src */
@ -1131,6 +1200,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
/* STX | PROBE_MEM32: *(size *)(dst + REG_ARENA + off) = src */
case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(code) == BPF_PROBE_MEM32) {
emit_insn(ctx, addd, t2, dst, REG_ARENA);
dst = t2;
}
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_signed_imm12(off)) {
@ -1169,6 +1248,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
}
break;
}
ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER);
if (ret)
return ret;
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
@ -1829,11 +1912,12 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
bool tmp_blinded = false, extra_pass = false;
u8 *image_ptr;
u8 *image_ptr, *ro_image_ptr;
int image_size, prog_size, extable_size;
struct jit_ctx ctx;
struct jit_data *jit_data;
struct bpf_binary_header *header;
struct bpf_binary_header *ro_header;
struct bpf_prog *tmp, *orig_prog = prog;
/*
@ -1868,8 +1952,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
image_ptr = jit_data->image;
ro_header = jit_data->ro_header;
ro_image_ptr = (void *)ctx.ro_image;
header = jit_data->header;
image_ptr = (void *)header + ((void *)ro_image_ptr - (void *)ro_header);
extra_pass = true;
prog_size = sizeof(u32) * ctx.idx;
goto skip_init_ctx;
@ -1877,6 +1963,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
if (ctx.offset == NULL) {
@@ -1903,17 +1991,25 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog_size = sizeof(u32) * ctx.idx;
image_size = prog_size + extable_size;
/* Now we know the size of the structure to make */
header = bpf_jit_binary_alloc(image_size, &image_ptr,
sizeof(u32), jit_fill_hole);
if (header == NULL) {
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr, sizeof(u32),
&header, &image_ptr, jit_fill_hole);
if (!ro_header) {
prog = orig_prog;
goto out_offset;
}
/* 2. Now, the actual pass to generate final JIT code */
/*
* Use the image (RW) for writing the JITed instructions. But also save
* the ro_image (RX) for calculating the offsets in the image. The RW
* image will be later copied to the RX image from where the program will
* run. The bpf_jit_binary_pack_finalize() will do this copy in the final
* step.
*/
ctx.image = (union loongarch_instruction *)image_ptr;
ctx.ro_image = (union loongarch_instruction *)ro_image_ptr;
if (extable_size)
prog->aux->extable = (void *)image_ptr + prog_size;
prog->aux->extable = (void *)ro_image_ptr + prog_size;
skip_init_ctx:
ctx.idx = 0;
@@ -1921,48 +2017,47 @@ skip_init_ctx:
build_prologue(&ctx);
if (build_body(&ctx, extra_pass)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
goto out_free;
}
build_epilogue(&ctx);
/* 3. Extra pass to validate JITed code */
if (validate_ctx(&ctx)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
goto out_free;
}
/* And we're done */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
/* Update the icache */
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
int err;
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
goto out_free;
}
err = bpf_jit_binary_lock_ro(header);
if (err) {
pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
err);
if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) {
/* ro_header has been freed */
ro_header = NULL;
prog = orig_prog;
goto out_free;
}
/*
* The instructions have now been copied to the ROX region from
* where they will execute. Now the data cache has to be cleaned
* to the PoU and the I-cache has to be invalidated for the VAs.
*/
bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
jit_data->header = header;
jit_data->ro_header = ro_header;
}
prog->jited = 1;
prog->jited_len = prog_size;
prog->bpf_func = (void *)ctx.image;
prog->bpf_func = (void *)ctx.ro_image;
if (!prog->is_func || extra_pass) {
int i;
@@ -1982,17 +2077,39 @@ out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
return prog;
out_free:
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
if (header) {
bpf_arch_text_copy(&ro_header->size, &header->size, sizeof(header->size));
bpf_jit_binary_pack_free(ro_header, header);
}
goto out_offset;
}
/*
 * Free a JITed program. With the prog pack allocator the JITed image lives
 * in a shared read-only region, so teardown goes through
 * bpf_jit_binary_pack_free() (via the pack header) instead of a plain
 * bpf_jit_binary_free().
 */
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
struct jit_data *jit_data = prog->aux->jit_data;
struct bpf_binary_header *hdr;
/*
 * If we fail the final pass of JIT (from jit_subprogs), the
 * program may not be finalized yet. Call finalize here before
 * freeing it.
 */
if (jit_data) {
bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
kfree(jit_data);
}
/* Locate the pack header for this prog's image and release it */
hdr = bpf_jit_binary_pack_hdr(prog);
bpf_jit_binary_pack_free(hdr, NULL);
/* kallsyms entries for the JITed image must already be gone */
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
}
bpf_prog_unlock_free(prog);
}
bool bpf_jit_bypass_spec_v1(void)
{
return true;
@@ -2003,6 +2120,11 @@ bool bpf_jit_bypass_spec_v4(void)
return true;
}
/* Indicate the JIT backend supports the BPF arena. */
bool bpf_jit_supports_arena(void)
{
return true;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{

View file

@@ -20,11 +20,13 @@ struct jit_ctx {
union loongarch_instruction *image;
union loongarch_instruction *ro_image;
u32 stack_size;
u64 arena_vm_start;
u64 user_vm_start;
};
/*
 * Per-program JIT state stashed in prog->aux->jit_data between passes
 * (saved on the first pass, restored for the extra pass).
 */
struct jit_data {
struct bpf_binary_header *header;	/* RW image used while emitting insns */
u8 *image;				/* pointer into the RW image */
struct bpf_binary_header *ro_header;	/* RX image the prog will run from */
struct jit_ctx ctx;			/* saved codegen context */
};

View file

@@ -72,7 +72,7 @@ TEST_GEN_FILES += madv_populate
TEST_GEN_FILES += map_fixed_noreplace
TEST_GEN_FILES += map_hugetlb
TEST_GEN_FILES += map_populate
ifneq (,$(filter $(ARCH),arm64 riscv riscv64 x86 x86_64))
ifneq (,$(filter $(ARCH),arm64 riscv riscv64 x86 x86_64 loongarch32 loongarch64))
TEST_GEN_FILES += memfd_secret
endif
TEST_GEN_FILES += migration