bpf-fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmmsahoACgkQ6rmadz2v
 bToD4Q/9Hr2lAELsTRZIENBTYYTfk48Rzrr+rJbZWLfX4nOu9RwhVboTw0gvJr8r
 QEteYO8+gP5hrIuifcC+vdzW12CpgziBK7/Qj2NB7MGSX0ZCLK0ShQK6TAbNE8uZ
 xA4qMcJp0BWJpgfU51z4Ur5nMzG0Th2XlY+SGwHYZ/53qRMmP7Tr8R9wPltKuEll
 J+tS6BE6bFKMQe1CuIYlYfYj9kO3j4yx9S48j1e8x9LXc2/2arfsOIZQ6L11rnxZ
 YTa9jbcRL0YdLy7d4hMLyPQquF9hiHDrJLJih+YNmbyOCPGD3HP0ib2c2g0ip+qk
 WlNFLc2K9w0eS02uuAytaxwArdOpDUHG+skWe+dGt95Bk3Xld0hW/JSI5hrohM1E
 0YKjuZeGGvTkI5FJ5kk+BWoApy+FveiN1w7VsHZgr1tix5NRZRgJP/5swM+0eSp/
 neTzp08fC10gi+GOnUpdF/lI4wEayoGMpo68BTGiM/hP6Qh268wNWgTHaau3Sk/z
 wfJb90VTcDHk+N8mnbi4970RZ6OdqX2n7aBy8D7hBW31kHhP27zofzYX51CFD8Ln
 5NQDU3wX/hUw9tNwoghCNe12//gfDBi6rO/GoEztNFfVVef4QuzsfwKKdxzUMc9a
 jZvyDyGh87rGLauDWylq4s23vgxgIG/p114uv+eFp3AMkOfT/Dk=
 =knWl
 -----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix u32/s32 bounds when ranges cross min/max boundary (Eduard
   Zingerman)

 - Fix precision backtracking with linked registers (Eduard Zingerman)

 - Fix linker flags detection for resolve_btfids (Ihor Solodrai)

 - Fix race in update_ftrace_direct_add/del (Jiri Olsa)

 - Fix UAF in bpf_trampoline_link_cgroup_shim (Lang Xu)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  resolve_btfids: Fix linker flags detection
  selftests/bpf: add reproducer for spurious precision propagation through calls
  bpf: collect only live registers in linked regs
  Revert "selftests/bpf: Update reg_bound range refinement logic"
  selftests/bpf: test refining u32/s32 bounds when ranges cross min/max boundary
  bpf: Fix u32/s32 bounds when ranges cross min/max boundary
  bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
  ftrace: Add missing ftrace_lock to update_ftrace_direct_add/del
This commit is contained in:
Linus Torvalds 2026-03-07 12:20:37 -08:00
commit 8b7f4cd3ac
11 changed files with 268 additions and 62 deletions

View file

@ -23,6 +23,7 @@ RM ?= rm
HOSTCC ?= gcc
HOSTLD ?= ld
HOSTAR ?= ar
HOSTPKG_CONFIG ?= pkg-config
CROSS_COMPILE =
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@ -63,10 +64,14 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
$(abspath $@) install_headers
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
ifneq ($(filter -static,$(EXTRA_LDFLAGS)),)
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs --static 2>/dev/null || echo -lelf -lzstd)
else
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
endif
ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz)
ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd)
HOSTCFLAGS_resolve_btfids += -g \
-I$(srctree)/tools/include \
@ -76,7 +81,7 @@ HOSTCFLAGS_resolve_btfids += -g \
$(LIBELF_FLAGS) \
-Wall -Werror
LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS)
LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS)
export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include

View file

@ -409,6 +409,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
HOSTPKG_CONFIG=$(PKG_CONFIG) \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by

View file

@ -422,15 +422,69 @@ static bool is_valid_range(enum num_t t, struct range x)
}
}
static struct range range_improve(enum num_t t, struct range old, struct range new)
static struct range range_intersection(enum num_t t, struct range old, struct range new)
{
return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
}
/*
* Result is precise when 'x' and 'y' overlap or form a continuous range,
* result is an over-approximation if 'x' and 'y' do not overlap.
*/
static struct range range_union(enum num_t t, struct range x, struct range y)
{
if (!is_valid_range(t, x))
return y;
if (!is_valid_range(t, y))
return x;
return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
}
/*
* This function attempts to improve x range intersecting it with y.
* range_cast(... to_t ...) loses precision for ranges that pass to_t
* min/max boundaries. To avoid such precision losses this function
* splits both x and y into halves corresponding to non-overflowing
* sub-ranges: [0, smax] and [smin, -1].
* Final result is computed as follows:
*
* ((x ∩ [0, smax]) ∩ (y ∩ [0, smax]))
*   ∪
* ((x ∩ [smin, -1]) ∩ (y ∩ [smin, -1]))
*
* Precision might still be lost if final union is not a continuous range.
*/
static struct range range_refine_in_halves(enum num_t x_t, struct range x,
enum num_t y_t, struct range y)
{
struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
u64 smax, smin, neg_one;
if (t_is_32(x_t)) {
smax = (u64)(u32)S32_MAX;
smin = (u64)(u32)S32_MIN;
neg_one = (u64)(u32)(s32)(-1);
} else {
smax = (u64)S64_MAX;
smin = (u64)S64_MIN;
neg_one = U64_MAX;
}
x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
y_pos = range_intersection(y_t, y, range(x_t, 0, smax));
y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
return range_union(x_t, r_pos, r_neg);
}
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
{
struct range y_cast;
if (t_is_32(x_t) == t_is_32(y_t))
x = range_refine_in_halves(x_t, x, y_t, y);
y_cast = range_cast(y_t, x_t, y);
/* If we know that
@ -444,7 +498,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
*/
if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
return range_improve(x_t, x, y_cast);
return range_intersection(x_t, x, y_cast);
/* the case when new range knowledge, *y*, is a 32-bit subregister
* range, while previous range knowledge, *x*, is a full register
@ -462,25 +516,11 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
if (!is_valid_range(x_t, x_swap))
return x;
return range_improve(x_t, x, x_swap);
}
if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
if (x_t == S64 && x.a > x.b) {
if (x.b < y.a && x.a <= y.b)
return range(x_t, x.a, y.b);
if (x.a > y.b && x.b >= y.a)
return range(x_t, y.a, x.b);
} else if (x_t == U64 && y.a > y.b) {
if (y.b < x.a && y.a <= x.b)
return range(x_t, y.a, x.b);
if (y.a > x.b && y.b >= x.a)
return range(x_t, x.a, y.b);
}
return range_intersection(x_t, x, x_swap);
}
/* otherwise, plain range cast and intersection works */
return range_improve(x_t, x, y_cast);
return range_intersection(x_t, x, y_cast);
}
/* =======================

View file

@ -18,43 +18,43 @@
return *(u64 *)num; \
}
__msg(": R0=0xffffffff80000000")
__msg("R{{.}}=0xffffffff80000000")
check_assert(s64, ==, eq_int_min, INT_MIN);
__msg(": R0=0x7fffffff")
__msg("R{{.}}=0x7fffffff")
check_assert(s64, ==, eq_int_max, INT_MAX);
__msg(": R0=0")
__msg("R{{.}}=0")
check_assert(s64, ==, eq_zero, 0);
__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
__msg("R{{.}}=0x8000000000000000")
check_assert(s64, ==, eq_llong_min, LLONG_MIN);
__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
__msg("R{{.}}=0x7fffffffffffffff")
check_assert(s64, ==, eq_llong_max, LLONG_MAX);
__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
__msg("R{{.}}=scalar(id=1,smax=0x7ffffffe)")
check_assert(s64, <, lt_pos, INT_MAX);
__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg("R{{.}}=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, <, lt_zero, 0);
__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
__msg("R{{.}}=scalar(id=1,smax=0xffffffff7fffffff")
check_assert(s64, <, lt_neg, INT_MIN);
__msg(": R0=scalar(id=1,smax=0x7fffffff)")
__msg("R{{.}}=scalar(id=1,smax=0x7fffffff)")
check_assert(s64, <=, le_pos, INT_MAX);
__msg(": R0=scalar(id=1,smax=0)")
__msg("R{{.}}=scalar(id=1,smax=0)")
check_assert(s64, <=, le_zero, 0);
__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
__msg("R{{.}}=scalar(id=1,smax=0xffffffff80000000")
check_assert(s64, <=, le_neg, INT_MIN);
__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
__msg("R{{.}}=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >, gt_pos, INT_MAX);
__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
__msg("R{{.}}=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >, gt_zero, 0);
__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000001")
check_assert(s64, >, gt_neg, INT_MIN);
__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
__msg("R{{.}}=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >=, ge_pos, INT_MAX);
__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
__msg("R{{.}}=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >=, ge_zero, 0);
__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000000")
check_assert(s64, >=, ge_neg, INT_MIN);
SEC("?tc")

View file

@ -1148,7 +1148,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void crossing_32_bit_signed_boundary_2(void)
{
asm volatile (" \
@ -2000,4 +2000,41 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx)
: __clobber_all);
}
SEC("socket")
__success
__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void signed_unsigned_intersection32_case1(void *ctx)
{
asm volatile(" \
call %[bpf_get_prandom_u32]; \
w0 &= 0xffffffff; \
if w0 < 0x3 goto 1f; /* on fall-through u32 range [3..U32_MAX] */ \
if w0 s> 0x1 goto 1f; /* on fall-through s32 range [S32_MIN..1] */ \
if w0 s< 0x0 goto 1f; /* range can be narrowed to [S32_MIN..-1] */ \
r10 = 0; /* thus predicting the jump. */ \
1: exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__success
__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void signed_unsigned_intersection32_case2(void *ctx)
{
asm volatile(" \
call %[bpf_get_prandom_u32]; \
w0 &= 0xffffffff; \
if w0 > 0x80000003 goto 1f; /* on fall-through u32 range [0..S32_MIN+3] */ \
if w0 s< -3 goto 1f; /* on fall-through s32 range [-3..S32_MAX] */ \
if w0 s> 5 goto 1f; /* on fall-through s32 range [-3..5] */ \
if w0 <= 5 goto 1f; /* range can be narrowed to [0..5] */ \
r10 = 0; /* thus predicting the jump */ \
1: exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

View file

@ -363,4 +363,68 @@ void alu32_negative_offset(void)
__sink(path[0]);
}
void dummy_calls(void)
{
bpf_iter_num_new(0, 0, 0);
bpf_iter_num_next(0);
bpf_iter_num_destroy(0);
}
SEC("socket")
__success
__flag(BPF_F_TEST_STATE_FREQ)
int spurious_precision_marks(void *ctx)
{
struct bpf_iter_num iter;
asm volatile(
"r1 = %[iter];"
"r2 = 0;"
"r3 = 10;"
"call %[bpf_iter_num_new];"
"1:"
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto 4f;"
"r7 = *(u32 *)(r0 + 0);"
"r8 = *(u32 *)(r0 + 0);"
/* This jump can't be predicted and does not change r7 or r8 state. */
"if r7 > r8 goto 2f;"
/* Branch explored first ties r2 and r7 as having the same id. */
"r2 = r7;"
"goto 3f;"
"2:"
/* Branch explored second does not tie r2 and r7 but has a function call. */
"call %[bpf_get_prandom_u32];"
"3:"
/*
* A checkpoint.
* When first branch is explored, this would inject linked registers
* r2 and r7 into the jump history.
* When second branch is explored, this would be a cache hit point,
* triggering propagate_precision().
*/
"if r7 <= 42 goto +0;"
/*
* Mark r7 as precise using an if condition that is always true.
* When reached via the second branch, this triggered a bug in the backtrack_insn()
* because r2 (tied to r7) was propagated as precise to a call.
*/
"if r7 <= 0xffffFFFF goto +0;"
"goto 1b;"
"4:"
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter),
__imm(bpf_iter_num_new),
__imm(bpf_iter_num_next),
__imm(bpf_iter_num_destroy),
__imm(bpf_get_prandom_u32)
: __clobber_common, "r7", "r8"
);
return 0;
}
char _license[] SEC("license") = "GPL";

View file

@ -40,6 +40,9 @@ __naked void linked_regs_bpf_k(void)
*/
"r3 = r10;"
"r3 += r0;"
/* Mark r1 and r2 as alive. */
"r1 = r1;"
"r2 = r2;"
"r0 = 0;"
"exit;"
:
@ -73,6 +76,9 @@ __naked void linked_regs_bpf_x_src(void)
*/
"r4 = r10;"
"r4 += r0;"
/* Mark r1 and r2 as alive. */
"r1 = r1;"
"r2 = r2;"
"r0 = 0;"
"exit;"
:
@ -106,6 +112,10 @@ __naked void linked_regs_bpf_x_dst(void)
*/
"r4 = r10;"
"r4 += r3;"
/* Mark r1 and r2 as alive. */
"r0 = r0;"
"r1 = r1;"
"r2 = r2;"
"r0 = 0;"
"exit;"
:
@ -143,6 +153,9 @@ __naked void linked_regs_broken_link(void)
*/
"r3 = r10;"
"r3 += r0;"
/* Mark r1 and r2 as alive. */
"r1 = r1;"
"r2 = r2;"
"r0 = 0;"
"exit;"
:
@ -156,16 +169,16 @@ __naked void linked_regs_broken_link(void)
*/
SEC("socket")
__success __log_level(2)
__msg("12: (0f) r2 += r1")
__msg("17: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
__msg("frame2: last_idx 17 first_idx 14 subseq_idx -1 ")
__msg("frame2: regs=r1 stack= before 16: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
__msg("frame1: parent state regs= stack=")
__msg("frame0: parent state regs= stack=")
/* Parent state */
__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: last_idx 13 first_idx 13 subseq_idx 14 ")
__msg("frame2: regs=r1 stack= before 13: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
* looks for all registers with frame2.r1.id in the current state
@ -173,20 +186,20 @@ __msg("frame2: parent state regs=r1 stack=")
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
__msg("frame2: last_idx 9 first_idx 9 subseq_idx 13")
__msg("frame2: regs=r1 stack= before 9: (85) call pc+3")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: last_idx 8 first_idx 7 subseq_idx 9")
__msg("frame1: regs=r1,r6,r7 stack= before 8: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 7: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 7")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
@ -204,6 +217,7 @@ __naked void precision_many_frames(void)
"r1 = r0;"
"r6 = r0;"
"call precision_many_frames__foo;"
"r6 = r6;" /* mark r6 as live */
"exit;"
:
: __imm(bpf_ktime_get_ns)
@ -220,6 +234,8 @@ void precision_many_frames__foo(void)
"r6 = r1;"
"r7 = r1;"
"call precision_many_frames__bar;"
"r6 = r6;" /* mark r6 as live */
"r7 = r7;" /* mark r7 as live */
"exit"
::: __clobber_all);
}
@ -229,6 +245,8 @@ void precision_many_frames__bar(void)
{
asm volatile (
"if r1 > 7 goto +0;"
"r6 = 0;" /* mark r6 as live */
"r7 = 0;" /* mark r7 as live */
/* force r1 to be precise, this eventually marks:
* - bar frame r1
* - foo frame r{1,6,7}
@ -340,6 +358,8 @@ __naked void precision_two_ids(void)
"r3 += r7;"
/* force r9 to be precise, this also marks r8 */
"r3 += r9;"
"r6 = r6;" /* mark r6 as live */
"r8 = r8;" /* mark r8 as live */
"exit;"
:
: __imm(bpf_ktime_get_ns)
@ -353,7 +373,7 @@ __flag(BPF_F_TEST_STATE_FREQ)
* collect_linked_regs() can't tie more than 6 registers for a single insn.
*/
__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
__msg("9: (bf) r6 = r6 ; R6=scalar(id=2")
__msg("14: (bf) r6 = r6 ; R6=scalar(id=2")
/* check that r{0-5} are marked precise after 'if' */
__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
@ -372,6 +392,12 @@ __naked void linked_regs_too_many_regs(void)
"r6 = r0;"
/* propagate range for r{0-6} */
"if r0 > 7 goto +0;"
/* keep r{1-5} live */
"r1 = r1;"
"r2 = r2;"
"r3 = r3;"
"r4 = r4;"
"r5 = r5;"
/* make r6 appear in the log */
"r6 = r6;"
/* force r0 to be precise,
@ -517,7 +543,7 @@ __naked void check_ids_in_regsafe_2(void)
"*(u64*)(r10 - 8) = r1;"
/* r9 = pointer to stack */
"r9 = r10;"
"r9 += -8;"
"r9 += -16;"
/* r8 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r8 = r0;"
@ -538,6 +564,8 @@ __naked void check_ids_in_regsafe_2(void)
"if r7 > 4 goto l2_%=;"
/* Access memory at r9[r6] */
"r9 += r6;"
"r9 += r7;"
"r9 += r8;"
"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
"r0 = 0;"

View file

@ -44,9 +44,9 @@
mark_precise: frame0: regs=r2 stack= before 23\
mark_precise: frame0: regs=r2 stack= before 22\
mark_precise: frame0: regs=r2 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 19 first_idx 10\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r2 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
mark_precise: frame0: regs=r8,r9 stack= before 17\
mark_precise: frame0: regs=r0,r9 stack= before 15\
@ -107,9 +107,9 @@
mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 20 first_idx 20\
mark_precise: frame0: regs=r2 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 19 first_idx 17\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r2 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
mark_precise: frame0: regs=r8,r9 stack= before 17\
mark_precise: frame0: parent state regs= stack=:",