mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
ice: fix rxq info registering in mbuf packets
XDP RxQ info contains frag_size, which depends on the MTU. This makes the
old way of registering RxQ info before calculating new buffer sizes
invalid. Currently, it leads to frag_size being outdated, making it
sometimes impossible to grow tailroom in an mbuf packet. E.g. fragments are
actually 3K+, but frag_size is still as if the MTU were 1500.
Always register new XDP RxQ info after reconfiguring memory pools.
Fixes: 2fba7dc515 ("ice: Add support for XDP multi-buffer on Rx side")
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Link: https://patch.msgid.link/20260305111253.2317394-4-larysa.zaremba@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
16394d8053
commit
02852b47c7
4 changed files with 14 additions and 20 deletions
|
|
@ -666,23 +666,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
|
|||
|
||||
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
|
||||
ring->vsi->type == ICE_VSI_LB) {
|
||||
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
|
||||
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
||||
ring->q_index,
|
||||
ring->q_vector->napi.napi_id,
|
||||
ring->rx_buf_len);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
ice_rx_xsk_pool(ring);
|
||||
err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (ring->xsk_pool) {
|
||||
xdp_rxq_info_unreg(&ring->xdp_rxq);
|
||||
|
||||
rx_buf_len =
|
||||
xsk_pool_get_rx_frame_size(ring->xsk_pool);
|
||||
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
||||
|
|
@ -705,14 +694,13 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
|
||||
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
||||
ring->q_index,
|
||||
ring->q_vector->napi.napi_id,
|
||||
ring->rx_buf_len);
|
||||
if (err)
|
||||
goto err_destroy_fq;
|
||||
}
|
||||
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
||||
ring->q_index,
|
||||
ring->q_vector->napi.napi_id,
|
||||
ring->rx_buf_len);
|
||||
if (err)
|
||||
goto err_destroy_fq;
|
||||
|
||||
xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
|
||||
ring->pp);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3342,6 +3342,7 @@ process_rx:
|
|||
rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
|
||||
rx_rings[i].desc = NULL;
|
||||
rx_rings[i].xdp_buf = NULL;
|
||||
rx_rings[i].xdp_rxq = (struct xdp_rxq_info){ };
|
||||
|
||||
/* this is to allow wr32 to have something to write to
|
||||
* during early allocation of Rx buffers
|
||||
|
|
|
|||
|
|
@ -560,7 +560,9 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
|
|||
i = 0;
|
||||
}
|
||||
|
||||
if (rx_ring->vsi->type == ICE_VSI_PF &&
|
||||
if ((rx_ring->vsi->type == ICE_VSI_PF ||
|
||||
rx_ring->vsi->type == ICE_VSI_SF ||
|
||||
rx_ring->vsi->type == ICE_VSI_LB) &&
|
||||
xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
|
||||
xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
|
||||
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
|
||||
|
|
|
|||
|
|
@ -899,6 +899,9 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
|
|||
u16 ntc = rx_ring->next_to_clean;
|
||||
u16 ntu = rx_ring->next_to_use;
|
||||
|
||||
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
|
||||
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
|
||||
|
||||
while (ntc != ntu) {
|
||||
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue