Merge branch 'net-mana-enforce-tx-sge-limit-and-fix-error-cleanup'

Aditya Garg says:

====================
net: mana: Enforce TX SGE limit and fix error cleanup

Add pre-transmission checks so that SKBs exceeding the hardware's SGE
limit are never posted: force software segmentation for GSO traffic and
linearize non-GSO packets as needed.

Update TX error handling to drop failed SKBs and unmap their DMA
resources immediately, instead of returning NETDEV_TX_BUSY.
====================

Link: https://patch.msgid.link/1763464269-10431-1-git-send-email-gargaditya@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Merged by Jakub Kicinski on 2025-11-19 20:12:01 -08:00 (commit 6152f41da6);
5 changed files with 53 additions and 12 deletions.
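
Before the per-file diffs, a minimal sketch of the ndo_features_check
pattern the series relies on. All example_ and EXAMPLE_ names here are
hypothetical stand-ins; the real driver code, which uses
MAX_TX_WQE_SGL_ENTRIES, follows below.

/* Sketch only: a driver whose hardware supports at most
 * EXAMPLE_HW_SGE_LIMIT SGEs per TX WQE clears the GSO feature bits for
 * any skb whose worst-case SGE count would not fit, so the core
 * segments it in software before handing it back to the driver.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_HW_SGE_LIMIT	30	/* stand-in for MAX_TX_WQE_SGL_ENTRIES */

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *ndev,
						netdev_features_t features)
{
	/* nr_frags + 2 mirrors the worst-case accounting the patch below
	 * uses for the skb's linear part.
	 */
	if (skb_shinfo(skb)->nr_frags + 2 > EXAMPLE_HW_SGE_LIMIT)
		features &= ~NETIF_F_GSO_MASK;	/* force software GSO */

	return features;
}

The hook is wired up via .ndo_features_check in net_device_ops, exactly
as the MANA diff does below.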

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c

@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 				  struct gdma_posted_wqe_info *wqe_info)
 {
 	u32 client_oob_size = wqe_req->inline_oob_size;
-	struct gdma_context *gc;
 	u32 sgl_data_size;
 	u32 max_wqe_size;
 	u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 	if (wqe_size > max_wqe_size)
 		return -EINVAL;
 
-	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
-		gc = wq->gdma_dev->gdma_context;
-		dev_err(gc->dev, "unsuccessful flow control!\n");
+	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
 		return -ENOSPC;
-	}
 
 	if (wqe_info)
 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c

@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/skbuff.h>
 
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -329,6 +330,21 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cq = &apc->tx_qp[txq_idx].tx_cq;
 	tx_stats = &txq->stats;
 
+	BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+
+	if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+	    skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* A GSO skb exceeding the hardware SGE limit is not
+		 * expected here, as those are handled in the
+		 * mana_features_check() callback.
+		 */
+		if (skb_linearize(skb)) {
+			netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+					 skb_shinfo(skb)->nr_frags,
+					 skb_is_gso(skb));
+			goto tx_drop_count;
+		}
+		apc->eth_stats.tx_linear_pkt_cnt++;
+	}
 
 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -442,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 		pkg.wqe_req.sgl = pkg.sgl_array;
 	} else {
@@ -478,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (err) {
 		(void)skb_dequeue_tail(&txq->pending_skbs);
+		mana_unmap_skb(skb, apc);
 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-		err = NETDEV_TX_BUSY;
-		goto tx_busy;
+		goto free_sgl_ptr;
 	}
 
 	err = NETDEV_TX_OK;
@@ -500,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
 	u64_stats_update_end(&tx_stats->syncp);
 
-tx_busy:
 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 		netif_tx_wake_queue(net_txq);
 		apc->eth_stats.wake_queue++;
@@ -518,6 +531,25 @@ tx_drop:
 	return NETDEV_TX_OK;
 }
 
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+					     struct net_device *ndev,
+					     netdev_features_t features)
+{
+	if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* Exceeds HW SGE limit.
+		 * GSO case:
+		 * Disable GSO so the stack will software-segment the skb
+		 * into smaller skbs that fit the SGE budget.
+		 * Non-GSO case:
+		 * The xmit path will attempt skb_linearize() as a fallback.
+		 */
+		features &= ~NETIF_F_GSO_MASK;
+	}
+	return features;
+}
+#endif
+
 static void mana_get_stats64(struct net_device *ndev,
 			     struct rtnl_link_stats64 *st)
 {
@@ -883,6 +915,9 @@ static const struct net_device_ops mana_devops = {
 	.ndo_open		= mana_open,
 	.ndo_stop		= mana_close,
 	.ndo_select_queue	= mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+	.ndo_features_check	= mana_features_check,
+#endif
 	.ndo_start_xmit		= mana_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_get_stats64	= mana_get_stats64,
@@ -1651,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 	return 0;
 }
 
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
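
The mana_start_xmit() hunks above replace the NETDEV_TX_BUSY path with
immediate cleanup. As a hedged sketch of that rule in isolation (the
example_ helpers are hypothetical stand-ins, not MANA code):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-ins for the driver's real post/unmap helpers. */
static int example_post_descriptor(struct sk_buff *skb) { return -ENOSPC; }
static void example_unmap_dma(struct sk_buff *skb) { }

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (example_post_descriptor(skb)) {
		example_unmap_dma(skb);		/* release DMA mappings first */
		dev_kfree_skb_any(skb);		/* then drop the skb */
		ndev->stats.tx_dropped++;
		/* Return NETDEV_TX_OK: the skb was consumed (dropped).
		 * NETDEV_TX_BUSY would make the core requeue an skb whose
		 * DMA state the driver has already touched.
		 */
	}
	return NETDEV_TX_OK;
}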

diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c

@@ -18,6 +18,8 @@ static const struct mana_stats_desc mana_eth_stats[] = {
 	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
 	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
 					 tx_cqe_unknown_type)},
+	{"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+				       tx_linear_pkt_cnt)},
 	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
 				      rx_coalesced_err)},
 	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,

diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h

@@ -486,6 +486,8 @@ struct gdma_wqe {
 #define INLINE_OOB_SMALL_SIZE 8
 #define INLINE_OOB_LARGE_SIZE 24
 
+#define MANA_MAX_TX_WQE_SGL_ENTRIES	30
+
 #define MAX_TX_WQE_SIZE 512
 #define MAX_RX_WQE_SIZE 256
@@ -592,6 +594,9 @@ enum {
 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
 #define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
 
+/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
+#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
+
 /* Driver can send HWC periodically to query stats */
 #define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
@@ -605,7 +610,8 @@ enum {
 	GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
 	GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
 	GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
-	GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY)
+	GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
+	GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE)
 
 #define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h

@@ -377,6 +377,7 @@ struct mana_ethtool_stats {
 	u64 wake_queue;
 	u64 tx_cqe_err;
 	u64 tx_cqe_unknown_type;
+	u64 tx_linear_pkt_cnt;
 	u64 rx_coalesced_err;
 	u64 rx_cqe_unknown_type;
 };
@@ -592,6 +593,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 void mana_query_phy_stats(struct mana_port_context *apc);
 int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
 void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
 
 extern const struct ethtool_ops mana_ethtool_ops;
 extern struct dentry *mana_debugfs_root;