bpf-fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmmjmDIACgkQ6rmadz2v
 bTq3gg//QQLOT/FxP2/dDurliDTXvQRr1tUxmIw6s3P6hnz9j/LLEVKpLRVkqd8t
 XEwbubPd1TXDRsJ4f26Ew01YUtf9xi6ZQoMe/BL1okxi0ZwQGGRVMkiKOQgRT+rj
 qYSN5JMfPzA2AuM6FjBF/hhw24yVRdgKRYBam6D7XLfFf3s8TOhHHjJ925PqEo0t
 uJOy4ddDYB9BcGmfoeyiFgUtpPqcYrKIUCLBdwFvT2fnPJvrFFoCF3t7NS9UJu/O
 wd6ZPuGWSOl9A7vSheldP6cJUDX8L/5WEGO4/LjN7plkySF0HNv8uq/b1T3kKqoY
 Y3unXerLGJUAA9D5wpYAekx9YmvRTPQ/o39oTbquEB4SSJVU/SPUpvFw7m2Moq10
 51yuyXLcPlI3xtk0Bd8c/CESSmkRenjWzsuZQhDGhsR0I9mIaALrhf9LaatHtXI5
 f5ct73e+beK7Fc0Ze+b0JxDeFvzA3CKfAF0/fvGt0r9VZjBaMD+a3NnscBlyKztW
 UCXazcfndMhNfUUWanktbT5YhYPmY7hzVQEOl7HAMGn4yG6XbXXmzzY6BqEXIucM
 etueW2msZJHGBHQGe2RK3lxtmiB7/FglJHd86xebkIU2gCzqt8fGUha8AIuJ4rLS
 7wxC33DycCofRGWdseVu7PsTasdhSGsHKbXz2fOFOFESOczYRw8=
 =fj3P
 -----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix alignment of arm64 JIT buffer to prevent atomic tearing (Fuad
   Tabba)

 - Fix invariant violation for single value tnums in the verifier
   (Harishankar Vishwanathan, Paul Chaignon)

 - Fix a bunch of issues found by ASAN in selftests/bpf (Ihor Solodrai)

 - Fix race in devmap and cpumap on PREEMPT_RT (Jiayuan Chen)

 - Fix show_fdinfo of kprobe_multi when cookies are not present (Jiri
   Olsa)

 - Fix race in freeing special fields in BPF maps to prevent memory
   leaks (Kumar Kartikeya Dwivedi)

 - Fix OOB read in dmabuf_collector (T.J. Mercier)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (36 commits)
  selftests/bpf: Avoid simplification of crafted bounds test
  selftests/bpf: Test refinement of single-value tnum
  bpf: Improve bounds when tnum has a single possible value
  bpf: Introduce tnum_step to step through tnum's members
  bpf: Fix race in devmap on PREEMPT_RT
  bpf: Fix race in cpumap on PREEMPT_RT
  selftests/bpf: Add tests for special fields races
  bpf: Retire rcu_trace_implies_rcu_gp() from local storage
  bpf: Delay freeing fields in local storage
  bpf: Lose const-ness of map in map_check_btf()
  bpf: Register dtor for freeing special fields
  selftests/bpf: Fix OOB read in dmabuf_collector
  selftests/bpf: Fix a memory leak in xdp_flowtable test
  bpf: Fix stack-out-of-bounds write in devmap
  bpf: Fix kprobe_multi cookies access in show_fdinfo callback
  bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic tearing
  selftests/bpf: Don't override SIGSEGV handler with ASAN
  selftests/bpf: Check BPFTOOL env var in detect_bpftool_path()
  selftests/bpf: Fix out-of-bounds array access bugs reported by ASAN
  selftests/bpf: Fix array bounds warning in jit_disasm_helpers
  ...
This commit is contained in:
Linus Torvalds 2026-02-28 19:54:28 -08:00
commit eb71ab2bf7
73 changed files with 1181 additions and 237 deletions

View file

@ -2119,7 +2119,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
image_size = extable_offset + extable_size;
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
sizeof(u32), &header, &image_ptr,
sizeof(u64), &header, &image_ptr,
jit_fill_hole);
if (!ro_header) {
prog = orig_prog;

View file

@ -124,7 +124,7 @@ struct bpf_map_ops {
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
int (*map_check_btf)(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
@ -656,7 +656,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
map->ops->map_seq_show_elem;
}
int map_check_no_btf(const struct bpf_map *map,
int map_check_no_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);

View file

@ -176,7 +176,7 @@ u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
void bpf_local_storage_map_free(struct bpf_map *map,
struct bpf_local_storage_cache *cache);
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
int bpf_local_storage_map_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);

View file

@ -14,6 +14,8 @@ struct bpf_mem_alloc {
struct obj_cgroup *objcg;
bool percpu;
struct work_struct work;
void (*dtor_ctx_free)(void *ctx);
void *dtor_ctx;
};
/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
@ -32,6 +34,10 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
/* The percpu allocation with a specific unit size. */
int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma,
void (*dtor)(void *obj, void *ctx),
void (*dtor_ctx_free)(void *ctx),
void *ctx);
/* Check the allocation size for kmalloc equivalent allocator */
int bpf_mem_alloc_check_size(bool percpu, size_t size);

View file

@ -131,4 +131,7 @@ static inline bool tnum_subreg_is_const(struct tnum a)
return !(tnum_subreg(a)).mask;
}
/* Returns the smallest member of t larger than z */
u64 tnum_step(struct tnum t, u64 z);
#endif /* _LINUX_TNUM_H */

View file

@ -303,7 +303,7 @@ static long arena_map_update_elem(struct bpf_map *map, void *key,
return -EOPNOTSUPP;
}
static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
static int arena_map_check_btf(struct bpf_map *map, const struct btf *btf,
const struct btf_type *key_type, const struct btf_type *value_type)
{
return 0;

View file

@ -548,7 +548,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
static int array_map_check_btf(const struct bpf_map *map,
static int array_map_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -180,7 +180,7 @@ static long bloom_map_update_elem(struct bpf_map *map, void *key,
return -EINVAL;
}
static int bloom_map_check_btf(const struct bpf_map *map,
static int bloom_map_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -98,7 +98,7 @@ static long insn_array_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
}
static int insn_array_check_btf(const struct bpf_map *map,
static int insn_array_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -107,14 +107,12 @@ static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
struct bpf_local_storage *local_storage;
/* If RCU Tasks Trace grace period implies RCU grace period, do
* kfree(), else do kfree_rcu().
/*
* RCU Tasks Trace grace period implies RCU grace period, do
* kfree() directly.
*/
local_storage = container_of(rcu, struct bpf_local_storage, rcu);
if (rcu_trace_implies_rcu_gp())
kfree(local_storage);
else
kfree_rcu(local_storage, rcu);
kfree(local_storage);
}
/* Handle use_kmalloc_nolock == false */
@ -138,10 +136,11 @@ static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
if (rcu_trace_implies_rcu_gp())
bpf_local_storage_free_rcu(rcu);
else
call_rcu(rcu, bpf_local_storage_free_rcu);
/*
* RCU Tasks Trace grace period implies RCU grace period, do
* kfree() directly.
*/
bpf_local_storage_free_rcu(rcu);
}
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
@ -164,16 +163,29 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
bpf_local_storage_free_trace_rcu);
}
/* rcu callback for use_kmalloc_nolock == false */
static void __bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	/* bpf_selem_unlink_nofail may have already cleared smap and freed fields. */
	smap = rcu_dereference_check(SDATA(selem)->smap, 1);
	/* Only free the BTF-described special fields if smap is still set;
	 * otherwise they were already released (see note above).
	 */
	if (smap)
		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
	kfree(selem);
}
/* rcu tasks trace callback for use_kmalloc_nolock == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
struct bpf_local_storage_elem *selem;
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
if (rcu_trace_implies_rcu_gp())
kfree(selem);
else
kfree_rcu(selem, rcu);
/*
* RCU Tasks Trace grace period implies RCU grace period, do
* kfree() directly.
*/
__bpf_selem_free_rcu(rcu);
}
/* Handle use_kmalloc_nolock == false */
@ -181,7 +193,7 @@ static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
bool vanilla_rcu)
{
if (vanilla_rcu)
kfree_rcu(selem, rcu);
call_rcu(&selem->rcu, __bpf_selem_free_rcu);
else
call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}
@ -195,37 +207,29 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
/* The bpf_local_storage_map_free will wait for rcu_barrier */
smap = rcu_dereference_check(SDATA(selem)->smap, 1);
if (smap) {
migrate_disable();
if (smap)
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
migrate_enable();
}
kfree_nolock(selem);
}
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
if (rcu_trace_implies_rcu_gp())
bpf_selem_free_rcu(rcu);
else
call_rcu(rcu, bpf_selem_free_rcu);
/*
* RCU Tasks Trace grace period implies RCU grace period, do
* kfree() directly.
*/
bpf_selem_free_rcu(rcu);
}
void bpf_selem_free(struct bpf_local_storage_elem *selem,
bool reuse_now)
{
struct bpf_local_storage_map *smap;
smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
if (!selem->use_kmalloc_nolock) {
/*
* No uptr will be unpin even when reuse_now == false since uptr
* is only supported in task local storage, where
* smap->use_kmalloc_nolock == true.
*/
if (smap)
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
__bpf_selem_free(selem, reuse_now);
return;
}
@ -797,7 +801,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
return 0;
}
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
int bpf_local_storage_map_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
@ -958,10 +962,9 @@ restart:
*/
synchronize_rcu();
if (smap->use_kmalloc_nolock) {
rcu_barrier_tasks_trace();
rcu_barrier();
}
/* smap remains in use regardless of kmalloc_nolock, so wait unconditionally. */
rcu_barrier_tasks_trace();
rcu_barrier();
kvfree(smap->buckets);
bpf_map_area_free(smap);
}

View file

@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/local_lock.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>
@ -52,6 +53,7 @@ struct xdp_bulk_queue {
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
local_lock_t bq_lock;
};
/* Struct for every remote "destination" CPU in map */
@ -451,6 +453,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
for_each_possible_cpu(i) {
bq = per_cpu_ptr(rcpu->bulkq, i);
bq->obj = rcpu;
local_lock_init(&bq->bq_lock);
}
/* Alloc queue */
@ -722,6 +725,8 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
struct ptr_ring *q;
int i;
lockdep_assert_held(&bq->bq_lock);
if (unlikely(!bq->count))
return;
@ -749,11 +754,15 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
}
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
* Thus, safe percpu variable access.
* Thus, safe percpu variable access. PREEMPT_RT relies on
* local_lock_nested_bh() to serialise access to the per-CPU bq.
*/
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
struct xdp_bulk_queue *bq;
local_lock_nested_bh(&rcpu->bulkq->bq_lock);
bq = this_cpu_ptr(rcpu->bulkq);
if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
bq_flush_to_queue(bq);
@ -774,6 +783,8 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
list_add(&bq->flush_node, flush_list);
}
local_unlock_nested_bh(&rcpu->bulkq->bq_lock);
}
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
@ -810,7 +821,9 @@ void __cpu_map_flush(struct list_head *flush_list)
struct xdp_bulk_queue *bq, *tmp;
list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
local_lock_nested_bh(&bq->obj->bulkq->bq_lock);
bq_flush_to_queue(bq);
local_unlock_nested_bh(&bq->obj->bulkq->bq_lock);
/* If already running, costs spin_lock_irqsave + smb_mb */
wake_up_process(bq->obj->kthread);

View file

@ -45,6 +45,7 @@
* types of devmap; only the lookup and insertion is different.
*/
#include <linux/bpf.h>
#include <linux/local_lock.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
@ -60,6 +61,7 @@ struct xdp_dev_bulk_queue {
struct net_device *dev_rx;
struct bpf_prog *xdp_prog;
unsigned int count;
local_lock_t bq_lock;
};
struct bpf_dtab_netdev {
@ -381,6 +383,8 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
int to_send = cnt;
int i;
lockdep_assert_held(&bq->bq_lock);
if (unlikely(!cnt))
return;
@ -425,10 +429,12 @@ void __dev_flush(struct list_head *flush_list)
struct xdp_dev_bulk_queue *bq, *tmp;
list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
bq_xmit_all(bq, XDP_XMIT_FLUSH);
bq->dev_rx = NULL;
bq->xdp_prog = NULL;
__list_del_clearprev(&bq->flush_node);
local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
}
}
@ -451,12 +457,16 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
* variable access, and map elements stick around. See comment above
* xdp_do_flush() in filter.c.
* xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh()
* to serialise access to the per-CPU bq.
*/
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
struct xdp_dev_bulk_queue *bq;
local_lock_nested_bh(&dev->xdp_bulkq->bq_lock);
bq = this_cpu_ptr(dev->xdp_bulkq);
if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
bq_xmit_all(bq, 0);
@ -477,6 +487,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
}
bq->q[bq->count++] = xdpf;
local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock);
}
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
@ -588,18 +600,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
}
/* Get ifindex of each upper device. 'indexes' must be able to hold at
* least MAX_NEST_DEV elements.
* Returns the number of ifindexes added.
* least 'max' elements.
* Returns the number of ifindexes added, or -EOVERFLOW if there are too
* many upper devices.
*/
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	/* Walk every upper device of @dev and record its ifindex; refuse to
	 * write past the caller-supplied capacity @max.
	 */
	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		if (n >= max)
			return -EOVERFLOW;
		indexes[n++] = upper->ifindex;
	}
	return n;
}
@ -615,7 +631,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
int err;
if (exclude_ingress) {
num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
ARRAY_SIZE(excluded_devices) - 1);
if (num_excluded < 0)
return num_excluded;
excluded_devices[num_excluded++] = dev_rx->ifindex;
}
@ -733,7 +753,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
int err;
if (exclude_ingress) {
num_excluded = get_upper_ifindexes(dev, excluded_devices);
num_excluded = get_upper_ifindexes(dev, excluded_devices,
ARRAY_SIZE(excluded_devices) - 1);
if (num_excluded < 0)
return num_excluded;
excluded_devices[num_excluded++] = dev->ifindex;
}
@ -1115,8 +1139,13 @@ static int dev_map_notification(struct notifier_block *notifier,
if (!netdev->xdp_bulkq)
return NOTIFY_BAD;
for_each_possible_cpu(cpu)
per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
for_each_possible_cpu(cpu) {
struct xdp_dev_bulk_queue *bq;
bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
bq->dev = netdev;
local_lock_init(&bq->bq_lock);
}
break;
case NETDEV_UNREGISTER:
/* This rcu_read_lock/unlock pair is needed because

View file

@ -125,6 +125,11 @@ struct htab_elem {
char key[] __aligned(8);
};
/* Context handed to the bpf_mem_alloc destructor callbacks:
 * @record:   duplicated BTF record describing the map value's special fields
 *            (duplicated in htab_set_dtor() via btf_record_dup())
 * @key_size: the map's key size, needed to locate the value inside a
 *            struct htab_elem (see htab_elem_value())
 */
struct htab_btf_record {
	struct btf_record *record;
	u32 key_size;
};
static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
@ -457,6 +462,83 @@ static int htab_map_alloc_check(union bpf_attr *attr)
return 0;
}
/* bpf_mem_alloc destructor for non-percpu hash map elements: free the
 * BTF-described special fields embedded in the element's value.
 */
static void htab_mem_dtor(void *obj, void *ctx)
{
	struct htab_btf_record *hrec = ctx;
	struct htab_elem *elem = obj;
	void *map_value;

	/* No special fields recorded, nothing to free. */
	if (IS_ERR_OR_NULL(hrec->record))
		return;
	/* The value sits after the key inside the element. */
	map_value = htab_elem_value(elem, hrec->key_size);
	bpf_obj_free_fields(hrec->record, map_value);
}
/* bpf_mem_alloc destructor for percpu hash map elements: the object holds a
 * pointer to percpu storage, so free the special fields of the value on
 * every possible CPU.
 */
static void htab_pcpu_mem_dtor(void *obj, void *ctx)
{
	void __percpu *pptr = *(void __percpu **)obj;
	struct htab_btf_record *hrec = ctx;
	int cpu;

	if (IS_ERR_OR_NULL(hrec->record))
		return;
	for_each_possible_cpu(cpu)
		bpf_obj_free_fields(hrec->record, per_cpu_ptr(pptr, cpu));
}
/* Release the destructor context (struct htab_btf_record) once the
 * allocator no longer needs it.
 */
static void htab_dtor_ctx_free(void *ctx)
{
	struct htab_btf_record *hrec = ctx;

	btf_record_free(hrec->record);
	kfree(ctx);
}
/* Install @dtor on the hash table's element allocator so that special
 * BTF-described fields are freed when elements are reclaimed.  The map's
 * BTF record is duplicated and handed to the allocator as the destructor
 * context; htab_dtor_ctx_free() releases it later.
 *
 * Returns 0 on success or a negative errno.
 */
static int htab_set_dtor(struct bpf_htab *htab, void (*dtor)(void *, void *))
{
	struct htab_btf_record *hrec;
	struct bpf_mem_alloc *ma;

	/* Without special fields there is nothing for a dtor to do. */
	if (IS_ERR_OR_NULL(htab->map.record))
		return 0;

	hrec = kzalloc(sizeof(*hrec), GFP_KERNEL);
	if (!hrec)
		return -ENOMEM;

	hrec->key_size = htab->map.key_size;
	hrec->record = btf_record_dup(htab->map.record);
	if (IS_ERR(hrec->record)) {
		int err = PTR_ERR(hrec->record);

		kfree(hrec);
		return err;
	}

	/* Percpu maps allocate values from pcpu_ma, the rest from ma. */
	ma = htab_is_percpu(htab) ? &htab->pcpu_ma : &htab->ma;
	bpf_mem_alloc_set_dtor(ma, dtor, htab_dtor_ctx_free, hrec);
	return 0;
}
/* map_check_btf callback for hash maps.  Besides the (implicit) BTF
 * validation, this is where the element destructor is wired up: the map's
 * BTF record is not yet populated in htab_map_alloc(), so it must be done
 * from this later callback.
 */
static int htab_map_check_btf(struct bpf_map *map, const struct btf *btf,
			      const struct btf_type *key_type, const struct btf_type *value_type)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	void (*dtor)(void *, void *);

	/* Preallocated maps don't use the bpf_mem_alloc path; no dtor needed. */
	if (htab_is_prealloc(htab))
		return 0;

	dtor = htab_is_percpu(htab) ? htab_pcpu_mem_dtor : htab_mem_dtor;
	return htab_set_dtor(htab, dtor);
}
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@ -2281,6 +2363,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_seq_show_elem = htab_map_seq_show_elem,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_check_btf = htab_map_check_btf,
.map_mem_usage = htab_map_mem_usage,
BATCH_OPS(htab),
.map_btf_id = &htab_map_btf_ids[0],
@ -2303,6 +2386,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_seq_show_elem = htab_map_seq_show_elem,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_check_btf = htab_map_check_btf,
.map_mem_usage = htab_map_mem_usage,
BATCH_OPS(htab_lru),
.map_btf_id = &htab_map_btf_ids[0],
@ -2482,6 +2566,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_check_btf = htab_map_check_btf,
.map_mem_usage = htab_map_mem_usage,
BATCH_OPS(htab_percpu),
.map_btf_id = &htab_map_btf_ids[0],
@ -2502,6 +2587,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
.map_check_btf = htab_map_check_btf,
.map_mem_usage = htab_map_mem_usage,
BATCH_OPS(htab_lru_percpu),
.map_btf_id = &htab_map_btf_ids[0],

View file

@ -364,7 +364,7 @@ static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
}
static int cgroup_storage_check_btf(const struct bpf_map *map,
static int cgroup_storage_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -751,7 +751,7 @@ free_stack:
return err;
}
static int trie_check_btf(const struct bpf_map *map,
static int trie_check_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -102,6 +102,8 @@ struct bpf_mem_cache {
int percpu_size;
bool draining;
struct bpf_mem_cache *tgt;
void (*dtor)(void *obj, void *ctx);
void *dtor_ctx;
/* list of objects to be freed after RCU GP */
struct llist_head free_by_rcu;
@ -260,12 +262,14 @@ static void free_one(void *obj, bool percpu)
kfree(obj);
}
static int free_all(struct llist_node *llnode, bool percpu)
static int free_all(struct bpf_mem_cache *c, struct llist_node *llnode, bool percpu)
{
struct llist_node *pos, *t;
int cnt = 0;
llist_for_each_safe(pos, t, llnode) {
if (c->dtor)
c->dtor((void *)pos + LLIST_NODE_SZ, c->dtor_ctx);
free_one(pos, percpu);
cnt++;
}
@ -276,7 +280,7 @@ static void __free_rcu(struct rcu_head *head)
{
struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}
@ -308,7 +312,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
if (unlikely(READ_ONCE(c->draining))) {
llnode = llist_del_all(&c->free_by_rcu_ttrace);
free_all(llnode, !!c->percpu_size);
free_all(c, llnode, !!c->percpu_size);
}
return;
}
@ -417,7 +421,7 @@ static void check_free_by_rcu(struct bpf_mem_cache *c)
dec_active(c, &flags);
if (unlikely(READ_ONCE(c->draining))) {
free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
free_all(c, llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
atomic_set(&c->call_rcu_in_progress, 0);
} else {
call_rcu_hurry(&c->rcu, __free_by_rcu);
@ -635,13 +639,13 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
* Except for waiting_for_gp_ttrace list, there are no concurrent operations
* on these lists, so it is safe to use __llist_del_all().
*/
free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
free_all(__llist_del_all(&c->free_llist), percpu);
free_all(__llist_del_all(&c->free_llist_extra), percpu);
free_all(__llist_del_all(&c->free_by_rcu), percpu);
free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
free_all(llist_del_all(&c->waiting_for_gp), percpu);
free_all(c, llist_del_all(&c->free_by_rcu_ttrace), percpu);
free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), percpu);
free_all(c, __llist_del_all(&c->free_llist), percpu);
free_all(c, __llist_del_all(&c->free_llist_extra), percpu);
free_all(c, __llist_del_all(&c->free_by_rcu), percpu);
free_all(c, __llist_del_all(&c->free_llist_extra_rcu), percpu);
free_all(c, llist_del_all(&c->waiting_for_gp), percpu);
}
static void check_mem_cache(struct bpf_mem_cache *c)
@ -680,6 +684,9 @@ static void check_leaked_objs(struct bpf_mem_alloc *ma)
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
/* We can free dtor ctx only once all callbacks are done using it. */
if (ma->dtor_ctx_free)
ma->dtor_ctx_free(ma->dtor_ctx);
check_leaked_objs(ma);
free_percpu(ma->cache);
free_percpu(ma->caches);
@ -1014,3 +1021,32 @@ int bpf_mem_alloc_check_size(bool percpu, size_t size)
return 0;
}
/* Attach an object destructor to every per-CPU cache of @ma.  @dtor is
 * invoked with (@obj, @ctx) for each object as it is finally freed, and
 * @dtor_ctx_free releases @ctx when the allocator itself is destroyed
 * (see free_mem_alloc_no_barrier()).
 */
void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma, void (*dtor)(void *obj, void *ctx),
			    void (*dtor_ctx_free)(void *ctx), void *ctx)
{
	int cpu, idx;

	ma->dtor_ctx_free = dtor_ctx_free;
	ma->dtor_ctx = ctx;

	for_each_possible_cpu(cpu) {
		/* Fixed-size allocator: a single cache per CPU. */
		if (ma->cache) {
			struct bpf_mem_cache *c = per_cpu_ptr(ma->cache, cpu);

			c->dtor = dtor;
			c->dtor_ctx = ctx;
		}
		/* Size-bucketed allocator: NUM_CACHES caches per CPU. */
		if (ma->caches) {
			struct bpf_mem_caches *cc = per_cpu_ptr(ma->caches, cpu);

			for (idx = 0; idx < NUM_CACHES; idx++) {
				cc->cache[idx].dtor = dtor;
				cc->cache[idx].dtor_ctx = ctx;
			}
		}
	}
}

View file

@ -1234,7 +1234,7 @@ int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
}
EXPORT_SYMBOL_GPL(bpf_obj_name_cpy);
int map_check_no_btf(const struct bpf_map *map,
int map_check_no_btf(struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)

View file

@ -269,3 +269,59 @@ struct tnum tnum_bswap64(struct tnum a)
{
return TNUM(swab64(a.value), swab64(a.mask));
}
/* Given tnum t, and a number z such that tmin <= z < tmax, where tmin
* is the smallest member of the t (= t.value) and tmax is the largest
* member of t (= t.value | t.mask), returns the smallest member of t
* larger than z.
*
* For example,
* t = x11100x0
* z = 11110001 (241)
* result = 11110010 (242)
*
* Note: if this function is called with z >= tmax, it just returns
* early with tmax; if this function is called with z < tmin, the
* algorithm already returns tmin.
*/
u64 tnum_step(struct tnum t, u64 z)
{
	u64 tmax, j, p, q, r, s, v, u, w, res;
	u8 k;

	tmax = t.value | t.mask;
	/* if z >= largest member of t, return largest member of t */
	if (z >= tmax)
		return tmax;
	/* if z < smallest member of t, return smallest member of t */
	if (z < t.value)
		return t.value;
	/* keep t's known bits, and match all unknown bits to z.
	 * j is a member of t by construction; compare it against z to decide
	 * how to reach the smallest member greater than z.
	 */
	j = t.value | (z & t.mask);
	if (j > z) {
		/* j already exceeds z because a known-1 bit of t sits where z
		 * has a 0; minimize j below that bit.
		 */
		p = ~z & t.value & ~t.mask;
		k = fls64(p); /* k is the most-significant 0-to-1 flip */
		q = U64_MAX << k;
		r = q & z; /* positions > k matched to z */
		s = ~q & t.value; /* positions <= k matched to t.value */
		v = r | s;
		res = v;
	} else {
		/* j <= z because a known-0 bit of t sits where z has a 1;
		 * carry 1 past that bit through the unknown positions.
		 */
		p = z & ~t.value & ~t.mask;
		k = fls64(p); /* k is the most-significant 1-to-0 flip */
		q = U64_MAX << k;
		r = q & t.mask & z; /* unknown positions > k, matched to z */
		s = q & ~t.mask; /* known positions > k, set to 1 */
		v = r | s;
		/* add 1 to unknown positions > k to make value greater than z */
		u = v + (1ULL << k);
		/* extract bits in unknown positions > k from u, rest from t.value */
		w = (u & t.mask) | t.value;
		res = w;
	}
	return res;
}

View file

@ -2379,6 +2379,9 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg)
static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
u64 tnum_next, tmax;
bool umin_in_tnum;
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
@ -2388,6 +2391,33 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
/* Check if u64 and tnum overlap in a single value */
tnum_next = tnum_step(reg->var_off, reg->umin_value);
umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value;
tmax = reg->var_off.value | reg->var_off.mask;
if (umin_in_tnum && tnum_next > reg->umax_value) {
/* The u64 range and the tnum only overlap in umin.
* u64: ---[xxxxxx]-----
* tnum: --xx----------x-
*/
___mark_reg_known(reg, reg->umin_value);
} else if (!umin_in_tnum && tnum_next == tmax) {
/* The u64 range and the tnum only overlap in the maximum value
* represented by the tnum, called tmax.
* u64: ---[xxxxxx]-----
* tnum: xx-----x--------
*/
___mark_reg_known(reg, tmax);
} else if (!umin_in_tnum && tnum_next <= reg->umax_value &&
tnum_step(reg->var_off, tnum_next) > reg->umax_value) {
/* The u64 range and the tnum only overlap in between umin
* (excluded) and umax.
* u64: ---[xxxxxx]-----
* tnum: xx----x-------x-
*/
___mark_reg_known(reg, tnum_next);
}
}
static void __update_reg_bounds(struct bpf_reg_state *reg)

View file

@ -2454,8 +2454,10 @@ static void bpf_kprobe_multi_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_kprobe_multi_link *kmulti_link;
bool has_cookies;
kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
has_cookies = !!kmulti_link->cookies;
seq_printf(seq,
"kprobe_cnt:\t%u\n"
@ -2467,7 +2469,7 @@ static void bpf_kprobe_multi_show_fdinfo(const struct bpf_link *link,
for (int i = 0; i < kmulti_link->cnt; i++) {
seq_printf(seq,
"%llu\t %pS\n",
kmulti_link->cookies[i],
has_cookies ? kmulti_link->cookies[i] : 0,
(void *)kmulti_link->addrs[i]);
}
}

View file

@ -65,6 +65,9 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz)
ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd)
HOSTCFLAGS_resolve_btfids += -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
@ -73,7 +76,7 @@ HOSTCFLAGS_resolve_btfids += -g \
$(LIBELF_FLAGS) \
-Wall -Werror
LIBS = $(LIBELF_LIBS) -lz
LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS)
export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include
@ -83,7 +86,7 @@ $(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
$(call msg,LINK,$@)
$(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
$(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) $(EXTRA_LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
clean_objects := $(wildcard $(OUTPUT)/*.o \
$(OUTPUT)/.*.o.cmd \

View file

@ -226,7 +226,7 @@ static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
}
static struct btf_id *__btf_id__add(struct rb_root *root,
char *name,
const char *name,
enum btf_id_kind kind,
bool unique)
{
@ -250,7 +250,11 @@ static struct btf_id *__btf_id__add(struct rb_root *root,
id = zalloc(sizeof(*id));
if (id) {
pr_debug("adding symbol %s\n", name);
id->name = name;
id->name = strdup(name);
if (!id->name) {
free(id);
return NULL;
}
id->kind = kind;
rb_link_node(&id->rb_node, parent, p);
rb_insert_color(&id->rb_node, root);
@ -258,17 +262,21 @@ static struct btf_id *__btf_id__add(struct rb_root *root,
return id;
}
static inline struct btf_id *btf_id__add(struct rb_root *root, char *name, enum btf_id_kind kind)
static inline struct btf_id *btf_id__add(struct rb_root *root,
const char *name,
enum btf_id_kind kind)
{
return __btf_id__add(root, name, kind, false);
}
static inline struct btf_id *btf_id__add_unique(struct rb_root *root, char *name, enum btf_id_kind kind)
static inline struct btf_id *btf_id__add_unique(struct rb_root *root,
const char *name,
enum btf_id_kind kind)
{
return __btf_id__add(root, name, kind, true);
}
static char *get_id(const char *prefix_end)
static int get_id(const char *prefix_end, char *buf, size_t buf_sz)
{
/*
* __BTF_ID__func__vfs_truncate__0
@ -277,28 +285,28 @@ static char *get_id(const char *prefix_end)
*/
int len = strlen(prefix_end);
int pos = sizeof("__") - 1;
char *p, *id;
char *p;
if (pos >= len)
return NULL;
return -1;
id = strdup(prefix_end + pos);
if (id) {
/*
* __BTF_ID__func__vfs_truncate__0
* id = ^
*
* cut the unique id part
*/
p = strrchr(id, '_');
p--;
if (*p != '_') {
free(id);
return NULL;
}
*p = '\0';
}
return id;
if (len - pos >= buf_sz)
return -1;
strcpy(buf, prefix_end + pos);
/*
* __BTF_ID__func__vfs_truncate__0
* buf = ^
*
* cut the unique id part
*/
p = strrchr(buf, '_');
p--;
if (*p != '_')
return -1;
*p = '\0';
return 0;
}
static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind kind)
@ -335,10 +343,9 @@ static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind k
static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
{
char *id;
char id[KSYM_NAME_LEN];
id = get_id(name + size);
if (!id) {
if (get_id(name + size, id, sizeof(id))) {
pr_err("FAILED to parse symbol name: %s\n", name);
return NULL;
}
@ -346,6 +353,21 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
return btf_id__add(root, id, BTF_ID_KIND_SYM);
}
/*
 * Tear down the whole btf_id tree: unlink every node and release both
 * the node itself and the strdup()'ed name it owns, leaving the tree
 * empty.  Safe-against-erase iteration: the successor is fetched before
 * the current node is removed.
 */
static void btf_id__free_all(struct rb_root *root)
{
	struct rb_node *node = rb_first(root);

	while (node) {
		struct btf_id *id = rb_entry(node, struct btf_id, rb_node);

		node = rb_next(node);
		rb_erase(&id->rb_node, root);
		free(id->name);
		free(id);
	}
}
static void bswap_32_data(void *data, u32 nr_bytes)
{
u32 cnt, i;
@ -1547,6 +1569,11 @@ dump_btf:
out:
btf__free(obj.base_btf);
btf__free(obj.btf);
btf_id__free_all(&obj.structs);
btf_id__free_all(&obj.unions);
btf_id__free_all(&obj.typedefs);
btf_id__free_all(&obj.funcs);
btf_id__free_all(&obj.sets);
if (obj.efile.elf) {
elf_end(obj.efile.elf);
close(obj.efile.fd);

View file

@ -22,7 +22,11 @@
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/* Concatenate two parameters, but allow them to be expanded beforehand. */
#ifndef __CONCAT
#define __CONCAT(a, b) a ## b
#endif
#ifndef CONCATENATE
#define CONCATENATE(a, b) __CONCAT(a, b)
#endif
#endif /* _LINUX_ARGS_H */

View file

@ -0,0 +1,3 @@
*arena*
task_local_data
uprobe_multi_test

View file

@ -27,7 +27,11 @@ ifneq ($(wildcard $(GENHDR)),)
endif
BPF_GCC ?= $(shell command -v bpf-gcc;)
ifdef ASAN
SAN_CFLAGS ?= -fsanitize=address -fno-omit-frame-pointer
else
SAN_CFLAGS ?=
endif
SAN_LDFLAGS ?= $(SAN_CFLAGS)
RELEASE ?=
OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0)
@ -326,8 +330,8 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
@ -338,8 +342,8 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(BPFOBJ) | $(BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
OUTPUT=$(BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
@ -404,6 +408,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by

View file

@ -230,8 +230,8 @@ static void trigger_fentry_setup(void)
static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
char **syms = NULL;
size_t cnt = 0;
struct bpf_link *link = NULL;
struct ksyms *ksyms = NULL;
/* Some recursive functions will be skipped in
* bpf_get_ksyms -> skip_entry, as they can introduce sufficient
@ -241,16 +241,18 @@ static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
* So, don't run the kprobe-multi-all and kretprobe-multi-all on
* a debug kernel.
*/
if (bpf_get_ksyms(&syms, &cnt, true)) {
if (bpf_get_ksyms(&ksyms, true)) {
fprintf(stderr, "failed to get ksyms\n");
exit(1);
}
opts.syms = (const char **) syms;
opts.cnt = cnt;
opts.syms = (const char **)ksyms->filtered_syms;
opts.cnt = ksyms->filtered_cnt;
opts.retprobe = kretprobe;
/* attach empty to all the kernel functions except bpf_get_numa_node_id. */
if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) {
link = bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts);
free_kallsyms_local(ksyms);
if (!link) {
fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n");
exit(1);
}

View file

@ -8,6 +8,7 @@
#include <errno.h>
#include <syscall.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
#include <linux/args.h>
static inline unsigned int bpf_num_possible_cpus(void)
{
@ -21,25 +22,43 @@ static inline unsigned int bpf_num_possible_cpus(void)
return possible_cpus;
}
/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
* is zero-terminated string no matter what (unless sz == 0, in which case
* it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
* in what is returned. Given this is internal helper, it's trivial to extend
* this, when necessary. Use this instead of strncpy inside libbpf source code.
/*
* Simplified strscpy() implementation. The kernel one is in lib/string.c
*/
static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
static inline ssize_t sized_strscpy(char *dest, const char *src, size_t count)
{
size_t i;
long res = 0;
if (sz == 0)
return;
if (count == 0)
return -E2BIG;
sz--;
for (i = 0; i < sz && src[i]; i++)
dst[i] = src[i];
dst[i] = '\0';
while (count > 1) {
char c;
c = src[res];
dest[res] = c;
if (!c)
return res;
res++;
count--;
}
/* Force NUL-termination. */
dest[res] = '\0';
/* Return E2BIG if the source didn't stop */
return src[res] ? -E2BIG : res;
}
#define __strscpy0(dst, src, ...) \
sized_strscpy(dst, src, sizeof(dst))
#define __strscpy1(dst, src, size) \
sized_strscpy(dst, src, size)
#undef strscpy /* Redefine the placeholder from tools/include/linux/string.h */
#define strscpy(dst, src, ...) \
CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
#define __bpf_percpu_val_align __attribute__((__aligned__(8)))
#define BPF_DECLARE_PERCPU(type, name) \

View file

@ -1,24 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-only
#include "bpftool_helpers.h"
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include "bpf_util.h"
#include "bpftool_helpers.h"
#define BPFTOOL_PATH_MAX_LEN 64
#define BPFTOOL_FULL_CMD_MAX_LEN 512
#define BPFTOOL_DEFAULT_PATH "tools/sbin/bpftool"
static int detect_bpftool_path(char *buffer)
static int detect_bpftool_path(char *buffer, size_t size)
{
char tmp[BPFTOOL_PATH_MAX_LEN];
const char *env_path;
/* First, check if BPFTOOL environment variable is set */
env_path = getenv("BPFTOOL");
if (env_path && access(env_path, X_OK) == 0) {
strscpy(buffer, env_path, size);
return 0;
} else if (env_path) {
fprintf(stderr, "bpftool '%s' doesn't exist or is not executable\n", env_path);
return 1;
}
/* Check default bpftool location (will work if we are running the
* default flavor of test_progs)
*/
snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "./%s", BPFTOOL_DEFAULT_PATH);
if (access(tmp, X_OK) == 0) {
strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN);
strscpy(buffer, tmp, size);
return 0;
}
@ -27,11 +40,11 @@ static int detect_bpftool_path(char *buffer)
*/
snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "../%s", BPFTOOL_DEFAULT_PATH);
if (access(tmp, X_OK) == 0) {
strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN);
strscpy(buffer, tmp, size);
return 0;
}
/* Failed to find bpftool binary */
fprintf(stderr, "Failed to detect bpftool path, use BPFTOOL env var to override\n");
return 1;
}
@ -44,7 +57,7 @@ static int run_command(char *args, char *output_buf, size_t output_max_len)
int ret;
/* Detect and cache bpftool binary location */
if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path))
if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path, sizeof(bpftool_path)))
return 1;
ret = snprintf(command, BPFTOOL_FULL_CMD_MAX_LEN, "%s %s%s",

View file

@ -86,7 +86,7 @@ static int __enable_controllers(const char *cgroup_path, const char *controllers
enable[len] = 0;
close(fd);
} else {
bpf_strlcpy(enable, controllers, sizeof(enable));
strscpy(enable, controllers);
}
snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);

View file

@ -122,15 +122,15 @@ static int disasm_one_func(FILE *text_out, uint8_t *image, __u32 len)
pc += cnt;
}
qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
for (i = 0; i < labels.cnt; ++i)
/* gcc is unable to infer upper bound for labels.cnt and assumes
* it to be U32_MAX. U32_MAX takes 10 decimal digits.
* snprintf below prints into labels.names[*],
* which has space only for two digits and a letter.
* To avoid truncation warning use (i % MAX_LOCAL_LABELS),
* which informs gcc about printed value upper bound.
*/
snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS);
/* gcc is unable to infer upper bound for labels.cnt and
* assumes it to be U32_MAX. U32_MAX takes 10 decimal digits.
* snprintf below prints into labels.names[*], which has space
* only for two digits and a letter. To avoid truncation
* warning use (i < MAX_LOCAL_LABELS), which informs gcc about
* printed value upper bound.
*/
for (i = 0; i < labels.cnt && i < MAX_LOCAL_LABELS; ++i)
snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i);
/* now print with labels */
labels.print_phase = true;

View file

@ -432,7 +432,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
memset(addr, 0, sizeof(*sun));
sun->sun_family = family;
sun->sun_path[0] = 0;
strcpy(sun->sun_path + 1, addr_str);
strscpy(sun->sun_path + 1, addr_str, sizeof(sun->sun_path) - 1);
if (len)
*len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(addr_str);
return 0;
@ -581,8 +581,7 @@ int open_tuntap(const char *dev_name, bool need_mac)
return -1;
ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
strscpy(ifr.ifr_name, dev_name);
err = ioctl(fd, TUNSETIFF, &ifr);
if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {

View file

@ -346,8 +346,7 @@ static void test_task_sleepable(void)
close(finish_pipe[1]);
test_data = malloc(sizeof(char) * 10);
strncpy(test_data, "test_data", 10);
test_data[9] = '\0';
strscpy(test_data, "test_data", 10);
test_data_long = malloc(sizeof(char) * 5000);
for (int i = 0; i < 5000; ++i) {

View file

@ -281,7 +281,7 @@ static void test_dctcp_fallback(void)
dctcp_skel = bpf_dctcp__open();
if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
return;
strcpy(dctcp_skel->rodata->fallback_cc, "cubic");
strscpy(dctcp_skel->rodata->fallback_cc, "cubic");
if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
goto done;

View file

@ -202,7 +202,7 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
goto out;
goto out_link;
/* trigger the program run */
(void)read(iter_fd, buf, sizeof(buf));
@ -210,6 +210,8 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
close(iter_fd);
out_link:
bpf_link__destroy(link);
out:
cgrp_ls_sleepable__destroy(skel);
}

View file

@ -308,8 +308,10 @@ static int find_field_offset(struct btf *btf, char *pattern, regmatch_t *matches
return -1;
}
strncpy(type_str, type, type_sz);
strncpy(field_str, field, field_sz);
memcpy(type_str, type, type_sz);
type_str[type_sz] = '\0';
memcpy(field_str, field, field_sz);
field_str[field_sz] = '\0';
btf_id = btf__find_by_name(btf, type_str);
if (btf_id < 0) {
PRINT_FAIL("No BTF info for type %s\n", type_str);

View file

@ -137,11 +137,14 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
);
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
if (!ASSERT_OK_PTR(link, "bpf_program__attach")) {
bpf_object__close(obj);
goto cleanup;
}
err = bpf_prog_test_run_opts(aux_prog_fd, &topts);
bpf_link__destroy(link);
bpf_object__close(obj);
if (!ASSERT_OK(err, "test_run"))
goto cleanup;

View file

@ -412,8 +412,8 @@ static void check_fd_array_cnt__fd_array_too_big(void)
ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
cleanup_fds:
while (i > 0)
Close(extra_fds[--i]);
while (i-- > 0)
Close(extra_fds[i]);
}
void test_fd_array_cnt(void)

View file

@ -570,7 +570,7 @@ static int create_tap(const char *ifname)
};
int fd, ret;
strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
strscpy(ifr.ifr_name, ifname);
fd = open("/dev/net/tun", O_RDWR);
if (fd < 0)
@ -599,7 +599,7 @@ static int ifup(const char *ifname)
struct ifreq ifr = {};
int sk, ret;
strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
strscpy(ifr.ifr_name, ifname);
sk = socket(PF_INET, SOCK_DGRAM, 0);
if (sk < 0)

View file

@ -61,6 +61,7 @@ static void test_reenter_update(void)
ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
out:
free(value);
htab_update__destroy(skel);
}

View file

@ -104,11 +104,8 @@ void test_kmem_cache_iter(void)
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
goto destroy;
memset(buf, 0, sizeof(buf));
while (read(iter_fd, buf, sizeof(buf)) > 0) {
/* Read out all contents */
printf("%s", buf);
}
while (read(iter_fd, buf, sizeof(buf)) > 0)
; /* Read out all contents */
/* Next reads should return 0 */
ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");

View file

@ -456,25 +456,23 @@ static void test_kprobe_multi_bench_attach(bool kernel)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi_empty *skel = NULL;
char **syms = NULL;
size_t cnt = 0;
struct ksyms *ksyms = NULL;
if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
if (!ASSERT_OK(bpf_get_ksyms(&ksyms, kernel), "bpf_get_ksyms"))
return;
skel = kprobe_multi_empty__open_and_load();
if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
goto cleanup;
opts.syms = (const char **) syms;
opts.cnt = cnt;
opts.syms = (const char **)ksyms->filtered_syms;
opts.cnt = ksyms->filtered_cnt;
do_bench_test(skel, &opts);
cleanup:
kprobe_multi_empty__destroy(skel);
if (syms)
free(syms);
free_kallsyms_local(ksyms);
}
static void test_kprobe_multi_bench_attach_addr(bool kernel)

View file

@ -117,7 +117,7 @@ void test_lwt_seg6local(void)
const char *ns1 = NETNS_BASE "1";
const char *ns6 = NETNS_BASE "6";
struct nstoken *nstoken = NULL;
const char *foobar = "foobar";
const char foobar[] = "foobar";
ssize_t bytes;
int sfd, cfd;
char buf[7];

View file

@ -0,0 +1,218 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "map_kptr_race.skel.h"
static int get_map_id(int map_fd)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
if (!ASSERT_OK(bpf_map_get_info_by_fd(map_fd, &info, &len), "get_map_info"))
return -1;
return info.id;
}
static int read_refs(struct map_kptr_race *skel)
{
LIBBPF_OPTS(bpf_test_run_opts, opts);
int ret;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &opts);
if (!ASSERT_OK(ret, "count_ref run"))
return -1;
if (!ASSERT_OK(opts.retval, "count_ref retval"))
return -1;
return skel->bss->num_of_refs;
}
/*
 * Check that a kptr stored into a hashtab element after the element was
 * deleted is still released when the map is freed (no memory leak).
 *
 * First skeleton ("skel") runs the test_htab_leak BPF program, which
 * exercises the delete-then-store race on race_hash_map.  A second
 * skeleton ("watcher") then observes the map being freed: it is told the
 * victim map id, attaches its map_put/htab_map_free programs (fentry and
 * fexit per the assert names), and sets map_freed from the free path.
 */
static void test_htab_leak(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct map_kptr_race *skel, *watcher;
	int ret, map_id;

	skel = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	/* Trigger the racy kptr store in the BPF side. */
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_htab_leak), &opts);
	if (!ASSERT_OK(ret, "test_htab_leak run"))
		goto out_skel;
	if (!ASSERT_OK(opts.retval, "test_htab_leak retval"))
		goto out_skel;

	/* Identify the victim map so the watcher can filter on it. */
	map_id = get_map_id(bpf_map__fd(skel->maps.race_hash_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;
	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
		goto out_watcher;

	/*
	 * Drop the only reference to the map and wait for the kernel to
	 * actually free it (the free may happen after an RCU grace period,
	 * hence the sync + busy-wait on the watcher's flag).
	 */
	map_kptr_race__destroy(skel);
	skel = NULL;
	kern_sync_rcu();
	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	/* 2 outstanding refs expected after free; see count_ref prog. */
	ASSERT_EQ(read_refs(watcher), 2, "htab refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	map_kptr_race__destroy(skel);
}
/*
 * Same delete-then-store leak check as test_htab_leak, but against the
 * per-CPU hashtab (race_percpu_hash_map): the BPF program stores a kptr
 * into every per-CPU copy of a deleted element.
 *
 * nr_cpus is a const volatile in the BPF object, so it must be set
 * between open and load; it is capped at 16 to match the fixed-size
 * array used by the BPF program.
 */
static void test_percpu_htab_leak(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct map_kptr_race *skel, *watcher;
	int ret, map_id;

	skel = map_kptr_race__open();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	/* Must be set before load; cap to the BPF-side array size. */
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
	if (skel->rodata->nr_cpus > 16)
		skel->rodata->nr_cpus = 16;

	ret = map_kptr_race__load(skel);
	if (!ASSERT_OK(ret, "load"))
		goto out_skel;

	/* Trigger the racy per-CPU kptr stores. */
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_percpu_htab_leak), &opts);
	if (!ASSERT_OK(ret, "test_percpu_htab_leak run"))
		goto out_skel;
	if (!ASSERT_OK(opts.retval, "test_percpu_htab_leak retval"))
		goto out_skel;

	map_id = get_map_id(bpf_map__fd(skel->maps.race_percpu_hash_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	/* Second skeleton watches the victim map being freed. */
	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;
	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
		goto out_watcher;

	/* Drop the map and wait for its (RCU-deferred) free. */
	map_kptr_race__destroy(skel);
	skel = NULL;
	kern_sync_rcu();
	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	ASSERT_EQ(read_refs(watcher), 2, "percpu_htab refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	map_kptr_race__destroy(skel);
}
/*
 * Same leak check for sk local storage (race_sk_ls_map).  Here the racy
 * store is triggered from the tp_btf/inet_sock_set_state program, so a
 * real TCP connection over ::1 is made to fire the tracepoint; the BPF
 * side sets sk_ls_leak_done once it has run.
 */
static void test_sk_ls_leak(void)
{
	struct map_kptr_race *skel, *watcher;
	int listen_fd = -1, client_fd = -1, map_id;

	skel = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	if (!ASSERT_OK(map_kptr_race__attach(skel), "attach"))
		goto out_skel;

	/* Connect to ourselves to drive inet_sock_set_state. */
	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(listen_fd, 0, "start_server"))
		goto out_skel;
	client_fd = connect_to_fd(listen_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto out_skel;
	if (!ASSERT_EQ(skel->bss->sk_ls_leak_done, 1, "sk_ls_leak_done"))
		goto out_skel;

	/* Close the sockets so the storage owner socket can go away. */
	close(client_fd);
	client_fd = -1;
	close(listen_fd);
	listen_fd = -1;

	map_id = get_map_id(bpf_map__fd(skel->maps.race_sk_ls_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	/* Second skeleton watches the victim map being freed. */
	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;
	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.sk_map_free = bpf_program__attach(watcher->progs.sk_map_free);
	if (!ASSERT_OK_PTR(watcher->links.sk_map_free, "attach fexit"))
		goto out_watcher;

	/* Drop the map and wait for its (RCU-deferred) free. */
	map_kptr_race__destroy(skel);
	skel = NULL;
	kern_sync_rcu();
	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	ASSERT_EQ(read_refs(watcher), 2, "sk_ls refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	if (client_fd >= 0)
		close(client_fd);
	if (listen_fd >= 0)
		close(listen_fd);
	map_kptr_race__destroy(skel);
}
void serial_test_map_kptr_race(void)
{
if (test__start_subtest("htab_leak"))
test_htab_leak();
if (test__start_subtest("percpu_htab_leak"))
test_percpu_htab_leak();
if (test__start_subtest("sk_ls_leak"))
test_sk_ls_leak();
}

View file

@ -28,9 +28,9 @@ static void test_queue_stack_map_by_type(int type)
vals[i] = rand();
if (type == QUEUE)
strncpy(file, "./test_queue_map.bpf.o", sizeof(file));
strscpy(file, "./test_queue_map.bpf.o");
else if (type == STACK)
strncpy(file, "./test_stack_map.bpf.o", sizeof(file));
strscpy(file, "./test_stack_map.bpf.o");
else
return;

View file

@ -2091,7 +2091,7 @@ static struct subtest_case crafted_cases[] = {
{U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},
{U64, U32, {0, 0x100000000}, {0, 0}},
{U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}},
{U64, U32, {0xfffffffe, 0x300000000}, {0x80000000, 0x80000000}},
{U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
/* these are tricky cases where lower 32 bits allow to tighten 64

View file

@ -212,7 +212,7 @@ void test_setget_sockopt(void)
if (!ASSERT_OK_PTR(skel, "open skel"))
goto done;
strcpy(skel->rodata->veth, "binddevtest1");
strscpy(skel->rodata->veth, "binddevtest1");
skel->rodata->veth_ifindex = if_nametoindex("binddevtest1");
if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex"))
goto done;

View file

@ -34,7 +34,7 @@ void test_skc_to_unix_sock(void)
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_UNIX;
strncpy(sockaddr.sun_path, sock_path, strlen(sock_path));
strscpy(sockaddr.sun_path, sock_path);
sockaddr.sun_path[0] = '\0';
err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));

View file

@ -204,7 +204,7 @@ static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
/* Fail since bpf_link for the same prog type has been created. */
link2 = bpf_program__attach_sockmap(prog_clone, map);
if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
bpf_link__detach(link2);
bpf_link__destroy(link2);
goto out;
}
@ -230,7 +230,7 @@ static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
if (!ASSERT_OK(err, "bpf_link_update"))
goto out;
out:
bpf_link__detach(link);
bpf_link__destroy(link);
test_skmsg_load_helpers__destroy(skel);
}
@ -417,7 +417,7 @@ static void test_sockmap_skb_verdict_attach_with_link(void)
if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
goto out;
bpf_link__detach(link);
bpf_link__destroy(link);
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
@ -426,7 +426,7 @@ static void test_sockmap_skb_verdict_attach_with_link(void)
/* Fail since attaching with the same prog/map has been done. */
link = bpf_program__attach_sockmap(prog, map);
if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
bpf_link__detach(link);
bpf_link__destroy(link);
err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
if (!ASSERT_OK(err, "bpf_prog_detach2"))
@ -747,13 +747,13 @@ static void test_sockmap_skb_verdict_peek_with_link(void)
test_sockmap_skb_verdict_peek_helper(map);
ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
bpf_link__detach(link);
bpf_link__destroy(link);
test_sockmap_pass_prog__destroy(pass);
}
static void test_sockmap_unconnected_unix(void)
{
int err, map, stream = 0, dgram = 0, zero = 0;
int err, map, stream = -1, dgram = -1, zero = 0;
struct test_sockmap_pass_prog *skel;
skel = test_sockmap_pass_prog__open_and_load();
@ -764,22 +764,22 @@ static void test_sockmap_unconnected_unix(void)
stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
if (stream < 0)
return;
goto out;
dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
if (dgram < 0) {
close(stream);
return;
}
if (dgram < 0)
goto out;
err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
ASSERT_ERR(err, "bpf_map_update_elem(stream)");
if (!ASSERT_ERR(err, "bpf_map_update_elem(stream)"))
goto out;
err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
ASSERT_OK(err, "bpf_map_update_elem(dgram)");
out:
close(stream);
close(dgram);
test_sockmap_pass_prog__destroy(skel);
}
static void test_sockmap_many_socket(void)
@ -1027,7 +1027,7 @@ static void test_sockmap_skb_verdict_vsock_poll(void)
if (xrecv_nonblock(conn, &buf, 1, 0) != 1)
FAIL("xrecv_nonblock");
detach:
bpf_link__detach(link);
bpf_link__destroy(link);
close:
xclose(conn);
xclose(peer);

View file

@ -899,7 +899,7 @@ static void test_msg_redir_to_listening_with_link(struct test_sockmap_listen *sk
redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
bpf_link__detach(link);
bpf_link__destroy(link);
}
static void redir_partial(int family, int sotype, int sock_map, int parser_map)

View file

@ -142,7 +142,7 @@ static int getsetsockopt(void)
/* TCP_CONGESTION can extend the string */
strcpy(buf.cc, "nv");
strscpy(buf.cc, "nv");
err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv"));
if (err) {
log_err("Failed to call setsockopt(TCP_CONGESTION)");

View file

@ -54,9 +54,7 @@ static void test_private_stack_fail(void)
}
err = struct_ops_private_stack_fail__load(skel);
if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load"))
goto cleanup;
return;
ASSERT_ERR(err, "struct_ops_private_stack_fail__load");
cleanup:
struct_ops_private_stack_fail__destroy(skel);

View file

@ -262,7 +262,7 @@ retry:
if (!atomic_compare_exchange_strong(&tld_meta_p->cnt, &cnt, cnt + 1))
goto retry;
strncpy(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN);
strscpy(tld_meta_p->metadata[i].name, name);
atomic_store(&tld_meta_p->metadata[i].size, size);
return (tld_key_t){(__s16)off};
}

View file

@ -1360,10 +1360,8 @@ static void test_tc_opts_dev_cleanup_target(int target)
assert_mprog_count_ifindex(ifindex, target, 4);
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
return;
goto cleanup;
cleanup3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");

View file

@ -1095,7 +1095,7 @@ static int tun_open(char *name)
ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
if (*name)
strncpy(ifr.ifr_name, name, IFNAMSIZ);
strscpy(ifr.ifr_name, name);
err = ioctl(fd, TUNSETIFF, &ifr);
if (!ASSERT_OK(err, "ioctl TUNSETIFF"))

View file

@ -27,6 +27,7 @@ struct sysctl_test {
OP_EPERM,
SUCCESS,
} result;
struct bpf_object *obj;
};
static struct sysctl_test tests[] = {
@ -1471,6 +1472,7 @@ static int load_sysctl_prog_file(struct sysctl_test *test)
return -1;
}
test->obj = obj;
return prog_fd;
}
@ -1573,6 +1575,7 @@ out:
/* Detaching w/o checking return code: best effort attempt. */
if (progfd != -1)
bpf_prog_detach(cgfd, atype);
bpf_object__close(test->obj);
close(progfd);
printf("[%s]\n", err ? "FAIL" : "PASS");
return err;

View file

@ -699,7 +699,7 @@ void test_tc_tunnel(void)
return;
if (!ASSERT_OK(setup(), "global setup"))
return;
goto out;
for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) {
cfg = &subtests_cfg[i];
@ -711,4 +711,7 @@ void test_tc_tunnel(void)
subtest_cleanup(cfg);
}
cleanup();
out:
test_tc_tunnel__destroy(skel);
}

View file

@ -24,9 +24,9 @@ static struct fixture *init_fixture(void)
/* for no_alu32 and cpuv4 veristat is in parent folder */
if (access("./veristat", F_OK) == 0)
strcpy(fix->veristat, "./veristat");
strscpy(fix->veristat, "./veristat");
else if (access("../veristat", F_OK) == 0)
strcpy(fix->veristat, "../veristat");
strscpy(fix->veristat, "../veristat");
else
PRINT_FAIL("Can't find veristat binary");

View file

@ -2003,9 +2003,17 @@ int testapp_stats_tx_invalid_descs(struct test_spec *test)
int testapp_stats_rx_full(struct test_spec *test)
{
if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
struct pkt_stream *tmp;
tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
if (!tmp)
return TEST_FAILURE;
test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_tx->xsk->pkt_stream = tmp;
tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
if (!tmp)
return TEST_FAILURE;
test->ifobj_rx->xsk->pkt_stream = tmp;
test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
test->ifobj_rx->release_rx = false;
@ -2015,9 +2023,17 @@ int testapp_stats_rx_full(struct test_spec *test)
int testapp_stats_fill_empty(struct test_spec *test)
{
if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
struct pkt_stream *tmp;
tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
if (!tmp)
return TEST_FAILURE;
test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_tx->xsk->pkt_stream = tmp;
tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
if (!tmp)
return TEST_FAILURE;
test->ifobj_rx->xsk->pkt_stream = tmp;
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;

View file

@ -62,8 +62,10 @@ static void release_child(struct child *child)
return;
close(child->go[1]);
close(child->go[0]);
if (child->thread)
if (child->thread) {
pthread_join(child->thread, NULL);
child->thread = 0;
}
close(child->c2p[0]);
close(child->c2p[1]);
if (child->pid > 0)
@ -331,6 +333,8 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi
{
static struct child child;
memset(&child, 0, sizeof(child));
/* no pid filter */
__test_attach_api(binary, pattern, opts, NULL);

View file

@ -47,7 +47,7 @@ static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts);
char *exp_log, prog_name[16], op_name[32];
char *exp_log, prog_name[24], op_name[32];
struct test_log_buf *skel;
struct bpf_program *prog;
size_t fixed_log_sz;

View file

@ -67,7 +67,7 @@ void test_xdp_flowtable(void)
struct nstoken *tok = NULL;
int iifindex, stats_fd;
__u32 value, key = 0;
struct bpf_link *link;
struct bpf_link *link = NULL;
if (SYS_NOFAIL("nft -v")) {
fprintf(stdout, "Missing required nft tool\n");
@ -160,6 +160,7 @@ void test_xdp_flowtable(void)
ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed");
out:
bpf_link__destroy(link);
xdp_flowtable__destroy(skel);
if (tok)
close_netns(tok);

View file

@ -126,10 +126,10 @@ static int open_xsk(int ifindex, struct xsk *xsk)
static void close_xsk(struct xsk *xsk)
{
if (xsk->umem)
xsk_umem__delete(xsk->umem);
if (xsk->socket)
xsk_socket__delete(xsk->socket);
if (xsk->umem)
xsk_umem__delete(xsk->umem);
munmap(xsk->umem_area, UMEM_SIZE);
}

View file

@ -48,7 +48,7 @@ int dmabuf_collector(struct bpf_iter__dmabuf *ctx)
/* Buffers are not required to be named */
if (pname) {
if (bpf_probe_read_kernel(name, sizeof(name), pname))
if (bpf_probe_read_kernel_str(name, sizeof(name), pname) < 0)
return 1;
/* Name strings can be provided by userspace */

View file

@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod_kfunc.h"
/* Map value holding a single referenced kptr to the testmod object. */
struct map_value {
	struct prog_test_ref_kfunc __kptr *ref_ptr;
};

/* Hashtab victim map for the delete-then-store race. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} race_hash_map SEC(".maps");

/* Per-CPU hashtab victim map; same race, one kptr per CPU copy. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} race_percpu_hash_map SEC(".maps");

/* sk local storage victim map for the tracepoint-driven variant. */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} race_sk_ls_map SEC(".maps");

/* Refcount observed after map free; read by userspace via read_refs(). */
int num_of_refs;
/* Set once the inet_sock_set_state prog has performed its racy store. */
int sk_ls_leak_done;
/* Map id the watcher skeleton filters its free-path probes on. */
int target_map_id;
/* Raised by the watcher's fexit prog when the target map is freed. */
int map_freed;
/* Number of CPUs to walk in the per-CPU test; set before load, max 16. */
const volatile int nr_cpus;
/*
 * Exercise the hashtab kptr race: store an acquired kptr into a map
 * value both before and AFTER the element has been deleted, using the
 * value pointer obtained from the pre-delete lookup.  Non-zero return
 * values identify which step failed (checked as retval by userspace).
 */
SEC("tc")
int test_htab_leak(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *p, *old;
	struct map_value val = {};
	struct map_value *v;
	int key = 0;

	/* Create the element and keep a direct pointer to its value. */
	if (bpf_map_update_elem(&race_hash_map, &key, &val, BPF_ANY))
		return 1;
	v = bpf_map_lookup_elem(&race_hash_map, &key);
	if (!v)
		return 2;

	/* Store a referenced kptr into the live element. */
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 3;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);

	/*
	 * Delete the element, then store another kptr through the stale
	 * value pointer — the race the kernel fix must not leak on.
	 */
	bpf_map_delete_elem(&race_hash_map, &key);
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 4;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);
	return 0;
}
/* Store a freshly acquired refcounted kptr into v->ref_ptr, releasing
 * any previous occupant.  Returns 0 on success, 1 if the acquire
 * kfunc returned NULL.
 */
static int fill_percpu_kptr(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *old;

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 1;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);
	return 0;
}
/* Per-CPU variant of test_htab_leak: snapshot the per-CPU value
 * pointers for every CPU, delete the element, then write kptrs
 * through the cached (stale) pointers.
 * NOTE(review): arr[] caps this at 16 CPUs — assumes nr_cpus <= 16,
 * presumably enforced by the userspace side; verify against caller.
 */
SEC("tc")
int test_percpu_htab_leak(struct __sk_buff *skb)
{
	struct map_value *v, *arr[16] = {};
	struct map_value val = {};
	int key = 0;
	int err = 0;

	if (bpf_map_update_elem(&race_percpu_hash_map, &key, &val, BPF_ANY))
		return 1;
	/* Cache the value pointer for each CPU while the element lives. */
	for (int i = 0; i < nr_cpus; i++) {
		v = bpf_map_lookup_percpu_elem(&race_percpu_hash_map, &key, i);
		if (!v)
			return 2;
		arr[i] = v;
	}
	bpf_map_delete_elem(&race_percpu_hash_map, &key);
	/* Store into every per-CPU slot after the delete. */
	for (int i = 0; i < nr_cpus; i++) {
		v = arr[i];
		err = fill_percpu_kptr(v);
		if (err)
			return 3;
	}
	return 0;
}
/* Socket-local-storage variant: on a TCP socket entering SYN_SENT,
 * create storage for it, plant a kptr, delete the storage, then store
 * again through the stale value pointer.  Runs only once, gated by
 * sk_ls_leak_done.  All exits return 0 since tracepoint return values
 * carry no test result here.
 */
SEC("tp_btf/inet_sock_set_state")
int BPF_PROG(test_sk_ls_leak, struct sock *sk, int oldstate, int newstate)
{
	struct prog_test_ref_kfunc *p, *old;
	struct map_value *v;

	/* Only act on the connect path, and only the first time. */
	if (newstate != BPF_TCP_SYN_SENT)
		return 0;
	if (sk_ls_leak_done)
		return 0;
	v = bpf_sk_storage_get(&race_sk_ls_map, sk, NULL,
			       BPF_SK_STORAGE_GET_F_CREATE);
	if (!v)
		return 0;
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);
	/* Delete the storage, then store into the stale value. */
	bpf_sk_storage_delete(&race_sk_ls_map, sk);
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);
	sk_ls_leak_done = 1;
	return 0;
}
/* Kernel address of the watched map, captured on its final put. */
long target_map_ptr;

/* Record the address of the map whose id matches target_map_id so
 * the *_map_free fexit programs can identify its free.
 */
SEC("fentry/bpf_map_put")
int BPF_PROG(map_put, struct bpf_map *map)
{
	if (target_map_id && map->id == (u32)target_map_id)
		target_map_ptr = (long)map;
	return 0;
}
/* Signal userspace once the watched hashmap has been fully freed. */
SEC("fexit/htab_map_free")
int BPF_PROG(htab_map_free, struct bpf_map *map)
{
	if (target_map_ptr && (long)map == target_map_ptr)
		map_freed = 1;
	return 0;
}
/* Same as htab_map_free, but for the sk_storage map's free path. */
SEC("fexit/bpf_sk_storage_map_free")
int BPF_PROG(sk_map_free, struct bpf_map *map)
{
	if (target_map_ptr && (long)map == target_map_ptr)
		map_freed = 1;
	return 0;
}
/* Snapshot the current refcount of the shared test object into
 * num_of_refs so userspace can detect leaked references.  The local
 * acquire/release pair is balanced, so the stored count reflects the
 * object's state plus our temporary hold.
 */
SEC("syscall")
int count_ref(void *ctx)
{
	struct prog_test_ref_kfunc *p;
	unsigned long arg = 0;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 1;
	num_of_refs = p->cnt.refs.counter;
	bpf_kfunc_call_test_release(p);
	return 0;
}
char _license[] SEC("license") = "GPL";

View file

@ -1863,4 +1863,141 @@ l1_%=: r0 = 1; \
: __clobber_all);
}
/* This test covers the bounds deduction when the u64 range and the tnum
 * overlap only at umax. After instruction 3, the ranges look as follows:
 *
 * 0 umin=0xe01 umax=0xf00 U64_MAX
 * | [xxxxxxxxxxxxxx] |
 * |----------------------------|------------------------------|
 * | x x | tnum values
 *
 * The verifier can therefore deduce that R0=0xf0=240.
 */
SEC("socket")
__description("bounds refinement with single-value tnum on umax")
__msg("3: (15) if r0 == 0xe0 {{.*}} R0=240")
__success __log_level(2)
__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void bounds_refinement_tnum_umax(void *ctx)
{
	/* After |= 0xe0 and &= 0xf0 the only possible values are
	 * {0xe0, 0xf0}; ruling out 0xe0 must pin r0 to 0xf0.  The
	 * "r10 = 0" store is rejected by the verifier if reached, so
	 * the program only loads when both branches are pruned.
	 */
	asm volatile("			\
	call %[bpf_get_prandom_u32];	\
	r0 |= 0xe0;			\
	r0 &= 0xf0;			\
	if r0 == 0xe0 goto +2;		\
	if r0 == 0xf0 goto +1;		\
	r10 = 0;			\
	exit;				\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* This test covers the bounds deduction when the u64 range and the tnum
 * overlap only at umin. After instruction 3, the ranges look as follows:
 *
 * 0 umin=0xe00 umax=0xeff U64_MAX
 * | [xxxxxxxxxxxxxx] |
 * |----------------------------|------------------------------|
 * | x x | tnum values
 *
 * The verifier can therefore deduce that R0=0xe0=224.
 */
SEC("socket")
__description("bounds refinement with single-value tnum on umin")
__msg("3: (15) if r0 == 0xf0 {{.*}} R0=224")
__success __log_level(2)
__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void bounds_refinement_tnum_umin(void *ctx)
{
	/* Mirror of the umax test: {0xe0, 0xf0} are the only possible
	 * values, and ruling out 0xf0 must pin r0 to 0xe0.  "r10 = 0"
	 * is verifier-rejected dead code that both branches must prune.
	 */
	asm volatile("			\
	call %[bpf_get_prandom_u32];	\
	r0 |= 0xe0;			\
	r0 &= 0xf0;			\
	if r0 == 0xf0 goto +2;		\
	if r0 == 0xe0 goto +1;		\
	r10 = 0;			\
	exit;				\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* This test covers the bounds deduction when the only possible tnum value is
 * in the middle of the u64 range. After instruction 3, the ranges look as
 * follows:
 *
 * 0 umin=0x7cf umax=0x7df U64_MAX
 * | [xxxxxxxxxxxx] |
 * |----------------------------|------------------------------|
 * | x x x x x | tnum values
 * | +--- 0x7e0
 * +--- 0x7d0
 *
 * Since the lower four bits are zero, the tnum and the u64 range only overlap
 * in R0=0x7d0=2000. Instruction 5 is therefore dead code.
 */
SEC("socket")
__description("bounds refinement with single-value tnum in middle of range")
__msg("3: (a5) if r0 < 0x7cf {{.*}} R0=2000")
__success __log_level(2)
__naked void bounds_refinement_tnum_middle(void *ctx)
{
	/* The "r0 & 0x0f" test forces the low nibble to zero on the
	 * fallthrough path; combined with [0x7cf, 0x7df] only 0x7d0
	 * survives, so the equality check and "r10 = 0" are pruned.
	 */
	asm volatile("			\
	call %[bpf_get_prandom_u32];	\
	if r0 & 0x0f goto +4;		\
	if r0 > 0x7df goto +3;		\
	if r0 < 0x7cf goto +2;		\
	if r0 == 0x7d0 goto +1;		\
	r10 = 0;			\
	exit;				\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* This test covers the negative case for the tnum/u64 overlap. Since
 * they contain the same two values (i.e., {0, 1}), we can't deduce
 * anything more.
 */
SEC("socket")
__description("bounds refinement: several overlaps between tnum and u64")
__msg("2: (25) if r0 > 0x1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=1,var_off=(0x0; 0x1))")
__failure __log_level(2)
__naked void bounds_refinement_several_overlaps(void *ctx)
{
	/* No single-value deduction is possible, so the verifier must
	 * keep both {0, 1} and eventually reach the "r10 = 0" store,
	 * which it rejects — hence __failure above.
	 * NOTE(review): "if r0 < 0" is an unsigned compare and never
	 * taken; presumably kept so instruction numbering matches the
	 * other tests — confirm against the expected __msg index.
	 */
	asm volatile("			\
	call %[bpf_get_prandom_u32];	\
	if r0 < 0 goto +3;		\
	if r0 > 1 goto +2;		\
	if r0 == 1 goto +1;		\
	r10 = 0;			\
	exit;				\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* This test covers the negative case for the tnum/u64 overlap. Since
 * they overlap in the two values contained by the u64 range (i.e.,
 * {0xf, 0x10}), we can't deduce anything more.
 */
SEC("socket")
__description("bounds refinement: multiple overlaps between tnum and u64")
__msg("2: (25) if r0 > 0x10 {{.*}} R0=scalar(smin=umin=smin32=umin32=15,smax=umax=smax32=umax32=16,var_off=(0x0; 0x1f))")
__failure __log_level(2)
__naked void bounds_refinement_multiple_overlaps(void *ctx)
{
	/* Both 0xf and 0x10 remain possible, so no branch can be
	 * pruned and the verifier reaches the rejected "r10 = 0"
	 * store — hence __failure above.
	 */
	asm volatile("			\
	call %[bpf_get_prandom_u32];	\
	if r0 < 0xf goto +3;		\
	if r0 > 0x10 goto +2;		\
	if r0 == 0x10 goto +1;		\
	r10 = 0;			\
	exit;				\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
char _license[] SEC("license") = "GPL";

View file

@ -1261,14 +1261,8 @@ int get_bpf_max_tramp_links(void)
return ret;
}
#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
static void dump_crash_log(void)
{
void *bt[MAX_BACKTRACE_SZ];
size_t sz;
sz = backtrace(bt, ARRAY_SIZE(bt));
fflush(stdout);
stdout = env.stdout_saved;
stderr = env.stderr_saved;
@ -1277,12 +1271,32 @@ void crash_handler(int signum)
env.test_state->error_cnt++;
dump_test_log(env.test, env.test_state, true, false, NULL);
}
}
/* Fatal-signal handler (installed with SA_RESETHAND in main): flush
 * the captured test log via dump_crash_log(), then write a symbolized
 * backtrace of the crash site to stderr.  Only async-signal-tolerant
 * backtrace_symbols_fd() is used for the symbolization itself.
 */
#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *frames[MAX_BACKTRACE_SZ];
	size_t depth = backtrace(frames, ARRAY_SIZE(frames));

	dump_crash_log();

	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(frames, depth, STDERR_FILENO);
}
#ifdef __SANITIZE_ADDRESS__
/* ASan hook invoked by the runtime when it detects an error: flush
 * the captured test log so the report is attributed to the failing
 * test, mirroring what crash_handler does for fatal signals.
 */
void __asan_on_error(void)
{
	dump_crash_log();
}
#endif
void hexdump(const char *prefix, const void *buf, size_t len)
{
for (int i = 0; i < len; i++) {
@ -1799,7 +1813,7 @@ static int worker_main_send_subtests(int sock, struct test_state *state)
msg.subtest_done.num = i;
strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
strscpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
msg.subtest_done.error_cnt = subtest_state->error_cnt;
msg.subtest_done.skipped = subtest_state->skipped;
@ -1944,13 +1958,15 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
int err, i;
#ifndef __SANITIZE_ADDRESS__
struct sigaction sigact = {
.sa_handler = crash_handler,
.sa_flags = SA_RESETHAND,
};
int err, i;
};
sigaction(SIGSEGV, &sigact, NULL);
#endif
env.stdout_saved = stdout;
env.stderr_saved = stderr;

View file

@ -1320,7 +1320,7 @@ static bool cmp_str_seq(const char *log, const char *exp)
printf("FAIL\nTestcase bug\n");
return false;
}
strncpy(needle, exp, len);
memcpy(needle, exp, len);
needle[len] = 0;
q = strstr(log, needle);
if (!q) {

View file

@ -212,6 +212,7 @@ int parse_test_list_file(const char *path,
break;
}
free(buf);
fclose(f);
return err;
}

View file

@ -24,12 +24,6 @@
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
struct ksyms {
struct ksym *syms;
size_t sym_cap;
size_t sym_cnt;
};
static struct ksyms *ksyms;
static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER;
@ -54,6 +48,8 @@ void free_kallsyms_local(struct ksyms *ksyms)
if (!ksyms)
return;
free(ksyms->filtered_syms);
if (!ksyms->syms) {
free(ksyms);
return;
@ -610,7 +606,7 @@ static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
return compare_name(p1, p2->name);
}
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel)
{
size_t cap = 0, cnt = 0;
char *name = NULL, *ksym_name, **syms = NULL;
@ -637,8 +633,10 @@ int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
else
f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
if (!f)
if (!f) {
free_kallsyms_local(ksyms);
return -EINVAL;
}
map = hashmap__new(symbol_hash, symbol_equal, NULL);
if (IS_ERR(map)) {
@ -679,15 +677,18 @@ int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
syms[cnt++] = ksym_name;
}
*symsp = syms;
*cntp = cnt;
ksyms->filtered_syms = syms;
ksyms->filtered_cnt = cnt;
*ksymsp = ksyms;
error:
free(name);
fclose(f);
hashmap__free(map);
if (err)
if (err) {
free(syms);
free_kallsyms_local(ksyms);
}
return err;
}

View file

@ -23,7 +23,14 @@ struct ksym {
long addr;
char *name;
};
struct ksyms;
struct ksyms {
struct ksym *syms;
size_t sym_cap;
size_t sym_cnt;
char **filtered_syms;
size_t filtered_cnt;
};
typedef int (*ksym_cmp_t)(const void *p1, const void *p2);
typedef int (*ksym_search_cmp_t)(const void *p1, const struct ksym *p2);
@ -53,7 +60,7 @@ ssize_t get_rel_offset(uintptr_t addr);
int read_build_id(const char *path, char *build_id, size_t size);
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel);
int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel);
int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel);
#endif

View file

@ -3378,6 +3378,8 @@ int main(int argc, char **argv)
}
}
free(env.presets[i].atoms);
if (env.presets[i].value.type == ENUMERATOR)
free(env.presets[i].value.svalue);
}
free(env.presets);
return -err;

View file

@ -16,6 +16,7 @@
#include <network_helpers.h>
#include "bpf_util.h"
#include "xdp_features.skel.h"
#include "xdp_features.h"
@ -212,7 +213,7 @@ static void set_env_default(void)
env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
env.feature.action = -EINVAL;
env.ifindex = -ENODEV;
strcpy(env.ifname, "unknown");
strscpy(env.ifname, "unknown");
make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT,
&env.dut_ctrl_addr, NULL);
make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,

View file

@ -550,7 +550,7 @@ static int rxq_num(const char *ifname)
struct ifreq ifr = {
.ifr_data = (void *)&ch,
};
strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
strscpy(ifr.ifr_name, ifname);
int fd, ret;
fd = socket(AF_UNIX, SOCK_DGRAM, 0);
@ -571,7 +571,7 @@ static void hwtstamp_ioctl(int op, const char *ifname, struct hwtstamp_config *c
struct ifreq ifr = {
.ifr_data = (void *)cfg,
};
strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
strscpy(ifr.ifr_name, ifname);
int fd, ret;
fd = socket(AF_UNIX, SOCK_DGRAM, 0);