mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
bpf: Fix u32/s32 bounds when ranges cross min/max boundary
Same as in __reg64_deduce_bounds(), refine s32/u32 ranges in __reg32_deduce_bounds() in the following situations: - s32 range crosses U32_MAX/0 boundary, positive part of the s32 range overlaps with u32 range: 0 U32_MAX | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | |----------------------------|----------------------------| |xxxxx s32 range xxxxxxxxx] [xxxxxxx| 0 S32_MAX S32_MIN -1 - s32 range crosses U32_MAX/0 boundary, negative part of the s32 range overlaps with u32 range: 0 U32_MAX | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | |----------------------------|----------------------------| |xxxxxxxxx] [xxxxxxxxxxxx s32 range | 0 S32_MAX S32_MIN -1 - No refinement if ranges overlap in two intervals. This helps, for example, with the following program: call %[bpf_get_prandom_u32]; w0 &= 0xffffffff; if w0 < 0x3 goto 1f; // on fall-through u32 range [3..U32_MAX] if w0 s> 0x1 goto 1f; // on fall-through s32 range [S32_MIN..1] if w0 s< 0x0 goto 1f; // range can be narrowed to [S32_MIN..-1] r10 = 0; 1: ...; The reg_bounds.c selftest is updated to incorporate identical logic, refinement based on non-overflowing range halves: ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪ ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1])) Reported-by: Andrea Righi <arighi@nvidia.com> Reported-by: Emil Tsalapatis <emil@etsalapatis.com> Closes: https://lore.kernel.org/bpf/aakqucg4vcujVwif@gpd4/T/ Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com> Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/r/20260306-bpf-32-bit-range-overflow-v3-1-f7f67e060a6b@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
56145d2373
commit
fbc7aef517
2 changed files with 82 additions and 4 deletions
|
|
@ -422,15 +422,69 @@ static bool is_valid_range(enum num_t t, struct range x)
|
|||
}
|
||||
}
|
||||
|
||||
static struct range range_improve(enum num_t t, struct range old, struct range new)
|
||||
static struct range range_intersection(enum num_t t, struct range old, struct range new)
|
||||
{
|
||||
return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
|
||||
}
|
||||
|
||||
/*
|
||||
* Result is precise when 'x' and 'y' overlap or form a continuous range,
|
||||
* result is an over-approximation if 'x' and 'y' do not overlap.
|
||||
*/
|
||||
static struct range range_union(enum num_t t, struct range x, struct range y)
|
||||
{
|
||||
if (!is_valid_range(t, x))
|
||||
return y;
|
||||
if (!is_valid_range(t, y))
|
||||
return x;
|
||||
return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
|
||||
}
|
||||
|
||||
/*
|
||||
* This function attempts to improve x range intersecting it with y.
|
||||
* range_cast(... to_t ...) looses precision for ranges that pass to_t
|
||||
* min/max boundaries. To avoid such precision loses this function
|
||||
* splits both x and y into halves corresponding to non-overflowing
|
||||
* sub-ranges: [0, smin] and [smax, -1].
|
||||
* Final result is computed as follows:
|
||||
*
|
||||
* ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
|
||||
* ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
|
||||
*
|
||||
* Precision might still be lost if final union is not a continuous range.
|
||||
*/
|
||||
static struct range range_refine_in_halves(enum num_t x_t, struct range x,
|
||||
enum num_t y_t, struct range y)
|
||||
{
|
||||
struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
|
||||
u64 smax, smin, neg_one;
|
||||
|
||||
if (t_is_32(x_t)) {
|
||||
smax = (u64)(u32)S32_MAX;
|
||||
smin = (u64)(u32)S32_MIN;
|
||||
neg_one = (u64)(u32)(s32)(-1);
|
||||
} else {
|
||||
smax = (u64)S64_MAX;
|
||||
smin = (u64)S64_MIN;
|
||||
neg_one = U64_MAX;
|
||||
}
|
||||
x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
|
||||
x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
|
||||
y_pos = range_intersection(y_t, y, range(x_t, 0, smax));
|
||||
y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
|
||||
r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
|
||||
r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
|
||||
return range_union(x_t, r_pos, r_neg);
|
||||
|
||||
}
|
||||
|
||||
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
|
||||
{
|
||||
struct range y_cast;
|
||||
|
||||
if (t_is_32(x_t) == t_is_32(y_t))
|
||||
x = range_refine_in_halves(x_t, x, y_t, y);
|
||||
|
||||
y_cast = range_cast(y_t, x_t, y);
|
||||
|
||||
/* If we know that
|
||||
|
|
@ -444,7 +498,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
|
|||
*/
|
||||
if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
|
||||
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
|
||||
return range_improve(x_t, x, y_cast);
|
||||
return range_intersection(x_t, x, y_cast);
|
||||
|
||||
/* the case when new range knowledge, *y*, is a 32-bit subregister
|
||||
* range, while previous range knowledge, *x*, is a full register
|
||||
|
|
@ -462,7 +516,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
|
|||
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
|
||||
if (!is_valid_range(x_t, x_swap))
|
||||
return x;
|
||||
return range_improve(x_t, x, x_swap);
|
||||
return range_intersection(x_t, x, x_swap);
|
||||
}
|
||||
|
||||
if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
|
||||
|
|
@ -480,7 +534,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
|
|||
}
|
||||
|
||||
/* otherwise, plain range cast and intersection works */
|
||||
return range_improve(x_t, x, y_cast);
|
||||
return range_intersection(x_t, x, y_cast);
|
||||
}
|
||||
|
||||
/* =======================
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue