Merge branch 'xsk-fixes-for-af_xdp-fragment-handling'

Nikhil P. Rao says:

====================
xsk: Fixes for AF_XDP fragment handling

This series fixes two issues in AF_XDP zero-copy fragment handling:

Patch 1 fixes a buffer leak caused by incorrect list node handling after
commit b692bf9a75. The list_node field is now reused for both the xskb
pool list and the buffer free list. list_del() leaves the node in a
non-empty state, so the subsequent list_empty() check in xp_free() fails
and the buffer is never added to the free list; list_del_init() must be
used instead so the node is reinitialized after removal.

Patch 2 fixes partial packet delivery to userspace. In the zero-copy path,
if the Rx queue fills up while enqueuing fragments, the remaining fragments
are dropped, causing the application to receive incomplete packets. The fix
ensures the Rx queue has sufficient space for all fragments before starting
to enqueue them.

Build reports from the kernel test robot addressed while developing this series:
[1] https://lore.kernel.org/oe-kbuild-all/202602051720.YfZO23pZ-lkp@intel.com/
[2] https://lore.kernel.org/oe-kbuild-all/202602172046.vf9DtpdF-lkp@intel.com/
====================

Link: https://patch.msgid.link/20260225000456.107806-1-nikhil.rao@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2026-02-28 08:55:14 -08:00
commit 0eb5965b29
2 changed files with 20 additions and 14 deletions

View file

@ -122,7 +122,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
goto out;
list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
list_del(&pos->list_node);
list_del_init(&pos->list_node);
xp_free(pos);
}
@ -157,7 +157,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
frag = list_first_entry_or_null(&xskb->pool->xskb_list,
struct xdp_buff_xsk, list_node);
if (frag) {
list_del(&frag->list_node);
list_del_init(&frag->list_node);
ret = &frag->xdp;
}
@ -168,7 +168,7 @@ static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
list_del(&xskb->list_node);
list_del_init(&xskb->list_node);
}
static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)

View file

@ -167,26 +167,32 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
struct xdp_buff_xsk *pos, *tmp;
struct list_head *xskb_list;
u32 contd = 0;
u32 num_desc;
int err;
if (frags)
contd = XDP_PKT_CONTD;
err = __xsk_rcv_zc(xs, xskb, len, contd);
if (err)
goto err;
if (likely(!frags))
if (likely(!frags)) {
err = __xsk_rcv_zc(xs, xskb, len, contd);
if (err)
goto err;
return 0;
}
contd = XDP_PKT_CONTD;
num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
xs->rx_queue_full++;
err = -ENOBUFS;
goto err;
}
__xsk_rcv_zc(xs, xskb, len, contd);
xskb_list = &xskb->pool->xskb_list;
list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
if (list_is_singular(xskb_list))
contd = 0;
len = pos->xdp.data_end - pos->xdp.data;
err = __xsk_rcv_zc(xs, pos, len, contd);
if (err)
goto err;
list_del(&pos->list_node);
__xsk_rcv_zc(xs, pos, len, contd);
list_del_init(&pos->list_node);
}
return 0;