Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes in preparation for the net-next PR.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Paolo Abeni 2026-02-11 15:14:35 +01:00
commit 83310d6133
38 changed files with 317 additions and 118 deletions

View file

@ -15,6 +15,7 @@ definitions:
type: enum
name: event-type
enum-name: mptcp-event-type
doc: Netlink MPTCP event types
name-prefix: mptcp-event-
entries:
-

View file

@ -1018,10 +1018,8 @@ zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
out_id = zl3073x_output_pin_out_get(pin->id);
out = zl3073x_out_state_get(zldev, out_id);
/* Convert value to ps and reverse two's complement negation applied
* during 'set'
*/
*phase_adjust = -out->phase_comp * pin->phase_gran;
/* The value in the register is expressed in half synth clock cycles. */
*phase_adjust = out->phase_comp * pin->phase_gran;
return 0;
}
@ -1043,10 +1041,8 @@ zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
out_id = zl3073x_output_pin_out_get(pin->id);
out = *zl3073x_out_state_get(zldev, out_id);
/* The value in the register is stored as two's complement negation
* of requested value and expressed in half synth clock cycles.
*/
out.phase_comp = -phase_adjust / pin->phase_gran;
/* The value in the register is expressed in half synth clock cycles. */
out.phase_comp = phase_adjust / pin->phase_gran;
/* Update output configuration from mailbox */
return zl3073x_out_state_set(zldev, out_id, &out);

View file

@ -791,26 +791,29 @@ static int bond_update_speed_duplex(struct slave *slave)
struct ethtool_link_ksettings ecmd;
int res;
slave->speed = SPEED_UNKNOWN;
slave->duplex = DUPLEX_UNKNOWN;
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
return 1;
goto speed_duplex_unknown;
if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
return 1;
goto speed_duplex_unknown;
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
return 1;
goto speed_duplex_unknown;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
return 0;
speed_duplex_unknown:
slave->speed = SPEED_UNKNOWN;
slave->duplex = DUPLEX_UNKNOWN;
return 1;
}
const char *bond_slave_link_status(s8 link)

View file

@ -284,6 +284,7 @@ static void ser_release(struct work_struct *work)
{
struct list_head list;
struct ser_device *ser, *tmp;
struct tty_struct *tty;
spin_lock(&ser_lock);
list_replace_init(&ser_release_list, &list);
@ -292,9 +293,11 @@ static void ser_release(struct work_struct *work)
if (!list_empty(&list)) {
rtnl_lock();
list_for_each_entry_safe(ser, tmp, &list, node) {
tty = ser->tty;
dev_close(ser->dev);
unregister_netdevice(ser->dev);
debugfs_deinit(ser);
tty_kref_put(tty);
}
rtnl_unlock();
}
@ -355,8 +358,6 @@ static void ldisc_close(struct tty_struct *tty)
{
struct ser_device *ser = tty->disc_data;
tty_kref_put(ser->tty);
spin_lock(&ser_lock);
list_move(&ser->node, &ser_release_list);
spin_unlock(&ser_lock);

View file

@ -165,7 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
select NET_SELFTESTS
imply NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.

View file

@ -705,14 +705,12 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue->tx_head = 0;
queue->tx_tail = 0;
queue_writel(queue, IER,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
}
}
macb_or_gem_writel(bp, NCFGR, ctrl);
@ -2954,6 +2952,7 @@ static int macb_open(struct net_device *dev)
}
bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);

View file

@ -1049,13 +1049,13 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
int order;
if (!alloc_size)
return;
goto not_init;
order = get_order(alloc_size);
if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
return;
goto not_init;
}
tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
@ -1093,6 +1093,13 @@ alloc_pages_error:
devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
not_init:
/* When driver init or reset_init, the ring->tx_spare is always NULL;
* but when called from hns3_set_ringparam, it's usually not NULL, and
* will be restored if hns3_init_all_ring() failed. So it's safe to set
* ring->tx_spare to NULL here.
*/
ring->tx_spare = NULL;
}
/* Use hns3_tx_spare_space() to make sure there is enough buffer

View file

@ -307,7 +307,7 @@ static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
static int octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
{
u64 reg_val;
u64 oq_ctl = 0ULL;
@ -355,6 +355,7 @@ static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
reg_val = ((u64)time_threshold << 32) |
CFG_GET_OQ_INTR_PKT(oct->conf);
octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
return 0;
}
/* Setup registers for a PF mailbox */
@ -709,14 +710,26 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
{
u64 intr_mask = 0ULL;
u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (i = 0; i < num_rings; i++)
intr_mask |= (0x1ULL << (srn + i));
for (i = 0; i < num_rings; i++) {
intr_mask |= BIT_ULL(srn + i);
reg_val = octep_read_csr64(oct,
CN93_SDP_R_IN_INT_LEVELS(srn + i));
reg_val &= ~CN93_INT_ENA_BIT;
octep_write_csr64(oct,
CN93_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
reg_val = octep_read_csr64(oct,
CN93_SDP_R_OUT_INT_LEVELS(srn + i));
reg_val &= ~CN93_INT_ENA_BIT;
octep_write_csr64(oct,
CN93_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
}
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);

View file

@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include "octep_config.h"
#include "octep_main.h"
@ -327,12 +328,14 @@ static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
static int octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
{
u64 reg_val;
u64 oq_ctl = 0ULL;
u32 time_threshold = 0;
struct octep_oq *oq = oct->oq[oq_no];
unsigned long t_out_jiffies;
u32 time_threshold = 0;
u64 oq_ctl = 0ULL;
u64 reg_ba_val;
u64 reg_val;
oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@ -343,6 +346,36 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
}
octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), oq->max_count);
/* Wait for WMARK to get applied */
usleep_range(10, 15);
octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
oq->desc_ring_dma);
octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
oq->max_count);
reg_ba_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
if (reg_ba_val != oq->desc_ring_dma) {
t_out_jiffies = jiffies + 10 * HZ;
do {
if (reg_ba_val == ULLONG_MAX)
return -EFAULT;
octep_write_csr64(oct,
CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
oq->desc_ring_dma);
octep_write_csr64(oct,
CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
oq->max_count);
reg_ba_val =
octep_read_csr64(oct,
CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
} while ((reg_ba_val != oq->desc_ring_dma) &&
time_before(jiffies, t_out_jiffies));
if (reg_ba_val != oq->desc_ring_dma)
return -EAGAIN;
}
reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
@ -356,10 +389,6 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val |= (CNXK_R_OUT_CTL_ES_P);
octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
oq->desc_ring_dma);
octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
oq->max_count);
oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@ -385,6 +414,7 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val &= ~0xFFFFFFFFULL;
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
return 0;
}
/* Setup registers for a PF mailbox */
@ -720,14 +750,26 @@ static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
{
u64 intr_mask = 0ULL;
u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (i = 0; i < num_rings; i++)
intr_mask |= (0x1ULL << (srn + i));
for (i = 0; i < num_rings; i++) {
intr_mask |= BIT_ULL(srn + i);
reg_val = octep_read_csr64(oct,
CNXK_SDP_R_IN_INT_LEVELS(srn + i));
reg_val &= ~CNXK_INT_ENA_BIT;
octep_write_csr64(oct,
CNXK_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
reg_val = octep_read_csr64(oct,
CNXK_SDP_R_OUT_INT_LEVELS(srn + i));
reg_val &= ~CNXK_INT_ENA_BIT;
octep_write_csr64(oct,
CNXK_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
}
octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);

View file

@ -77,7 +77,7 @@ struct octep_pci_win_regs {
struct octep_hw_ops {
void (*setup_iq_regs)(struct octep_device *oct, int q);
void (*setup_oq_regs)(struct octep_device *oct, int q);
int (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
irqreturn_t (*mbox_intr_handler)(void *ioq_vector);

View file

@ -416,5 +416,6 @@ static inline u64 cn9k_pemx_pfx_csx_pfcfgx(u64 pem, u32 pf, u32 offset)
#define CN93_PEM_BAR4_INDEX 7
#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
#define CN93_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CN9K_PF_H_ */

View file

@ -413,5 +413,6 @@
#define CNXK_PEM_BAR4_INDEX 7
#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
#define CNXK_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CNXK_PF_H_ */

View file

@ -12,6 +12,8 @@
#include "octep_config.h"
#include "octep_main.h"
static void octep_oq_free_ring_buffers(struct octep_oq *oq);
static void octep_oq_reset_indices(struct octep_oq *oq)
{
oq->host_read_idx = 0;
@ -170,11 +172,15 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto oq_fill_buff_err;
octep_oq_reset_indices(oq);
oct->hw_ops.setup_oq_regs(oct, q_no);
if (oct->hw_ops.setup_oq_regs(oct, q_no))
goto oq_setup_err;
oct->num_oqs++;
return 0;
oq_setup_err:
octep_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;

View file

@ -196,7 +196,7 @@ static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
static int octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
u32 time_threshold = 0;
@ -239,6 +239,7 @@ static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
return 0;
}
/* Setup registers for a VF mailbox */

View file

@ -199,11 +199,13 @@ static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
static int octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
unsigned long t_out_jiffies;
u32 time_threshold = 0;
u64 oq_ctl = ULL(0);
u64 reg_ba_val;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
@ -214,6 +216,38 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
}
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no),
oq->max_count);
/* Wait for WMARK to get applied */
usleep_range(10, 15);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no),
oq->desc_ring_dma);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no),
oq->max_count);
reg_ba_val = octep_vf_read_csr64(oct,
CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no));
if (reg_ba_val != oq->desc_ring_dma) {
t_out_jiffies = jiffies + 10 * HZ;
do {
if (reg_ba_val == ULLONG_MAX)
return -EFAULT;
octep_vf_write_csr64(oct,
CNXK_VF_SDP_R_OUT_SLIST_BADDR
(oq_no), oq->desc_ring_dma);
octep_vf_write_csr64(oct,
CNXK_VF_SDP_R_OUT_SLIST_RSIZE
(oq_no), oq->max_count);
reg_ba_val =
octep_vf_read_csr64(oct,
CNXK_VF_SDP_R_OUT_SLIST_BADDR
(oq_no));
} while ((reg_ba_val != oq->desc_ring_dma) &&
time_before(jiffies, t_out_jiffies));
if (reg_ba_val != oq->desc_ring_dma)
return -EAGAIN;
}
reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
@ -227,8 +261,6 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
@ -250,6 +282,7 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val &= ~GENMASK_ULL(31, 0);
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
return 0;
}
/* Setup registers for a VF mailbox */

View file

@ -55,7 +55,7 @@ struct octep_vf_mmio {
struct octep_vf_hw_ops {
void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
int (*setup_oq_regs)(struct octep_vf_device *oct, int q);
void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);

View file

@ -12,6 +12,8 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq);
static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
{
oq->host_read_idx = 0;
@ -171,11 +173,15 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
goto oq_fill_buff_err;
octep_vf_oq_reset_indices(oq);
oct->hw_ops.setup_oq_regs(oct, q_no);
if (oct->hw_ops.setup_oq_regs(oct, q_no))
goto oq_setup_err;
oct->num_oqs++;
return 0;
oq_setup_err:
octep_vf_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;

View file

@ -1823,6 +1823,8 @@ static int cgx_lmac_exit(struct cgx *cgx)
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->mac_to_index_bmap.bmap);
rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
kfree(lmac->name);
kfree(lmac);
}

View file

@ -3632,11 +3632,22 @@ static void rvu_remove(struct pci_dev *pdev)
devm_kfree(&pdev->dev, rvu);
}
/*
 * PCI .shutdown callback: clear the RVUM block revision ID before the
 * system goes down (reboot/kexec), mirroring the cleanup done on remove.
 */
static void rvu_shutdown(struct pci_dev *pdev)
{
struct rvu *rvu = pci_get_drvdata(pdev);
/* drvdata may be NULL if probe never completed for this device */
if (!rvu)
return;
rvu_clear_rvum_blk_revid(rvu);
}
/* PCI driver registration table; .shutdown ensures RVUM revid is cleared
 * on system shutdown, not only on device removal.
 */
static struct pci_driver rvu_driver = {
.name = DRV_NAME,
.id_table = rvu_id_table,
.probe = rvu_probe,
.remove = rvu_remove,
.shutdown = rvu_shutdown,
};
static int __init rvu_init_module(void)

View file

@ -3315,6 +3315,7 @@ err_free_zc_bmap:
err_sriov_cleannup:
otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_unregister_dl(pf);
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);

View file

@ -78,7 +78,6 @@ static const struct pci_device_id skge_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */

View file

@ -263,9 +263,10 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
/* This means there's no module plugged in */
break;
default:
dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n",
idev->port_info->status.xcvr.pid,
idev->port_info->status.xcvr.pid);
dev_dbg_ratelimited(lif->ionic->dev,
"unknown xcvr type pid=%d / 0x%x\n",
idev->port_info->status.xcvr.pid,
idev->port_info->status.xcvr.pid);
break;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
* Copyright (C) 2025 Renesas Electronics Corporation
* Copyright (C) 2025 - 2026 Renesas Electronics Corporation
*/
#include <linux/err.h>
@ -60,6 +60,7 @@ static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
{
struct rswitch_device *rdev;
bool new_forwarding_offload;
unsigned int fwd_mask;
/* calculate fwd_mask with zeroes in bits corresponding to ports that
@ -73,8 +74,9 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
}
rswitch_for_all_ports(priv, rdev) {
if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
rdev->forwarding_offloaded) {
new_forwarding_offload = (rdev_for_l2_offload(rdev) && rdev->forwarding_requested);
if (new_forwarding_offload || rdev->forwarding_offloaded) {
/* Update allowed offload destinations even for ports
* with L2 offload enabled earlier.
*
@ -84,13 +86,10 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
priv->addr + FWPC2(rdev->port));
}
if (rdev_for_l2_offload(rdev) &&
rdev->forwarding_requested &&
!rdev->forwarding_offloaded) {
if (new_forwarding_offload && !rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, true, false);
} else if (rdev->forwarding_offloaded) {
else if (!new_forwarding_offload && rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, false, false);
}
}
}

View file

@ -91,8 +91,8 @@ static void loongson_default_data(struct pci_dev *pdev,
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
plat->clk_csr = STMMAC_CSR_20_35M;
/* clk_csr_i = 100-150MHz & MDC = clk_csr_i/62 */
plat->clk_csr = STMMAC_CSR_100_150M;
plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;

View file

@ -2551,6 +2551,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_clear_quattro;
}
/* BIGMAC may have bogus sizes */
if ((op->resource[3].end - op->resource[3].start) >= BMAC_REG_SIZE)
op->resource[3].end = op->resource[3].start + BMAC_REG_SIZE - 1;
hp->bigmacregs = devm_platform_ioremap_resource(op, 3);
if (IS_ERR(hp->bigmacregs)) {
dev_err(&op->dev, "Cannot map BIGMAC registers.\n");

View file

@ -192,6 +192,7 @@ config TI_ICSSG_PRUETH
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
depends on HSR || !HSR
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.

View file

@ -1472,7 +1472,7 @@ static void cpsw_unregister_ports(struct cpsw_common *cpsw)
for (i = 0; i < cpsw->data.slaves; i++) {
ndev = cpsw->slaves[i].ndev;
if (!ndev)
if (!ndev || ndev->reg_state != NETREG_REGISTERED)
continue;
priv = netdev_priv(ndev);
@ -1494,7 +1494,6 @@ static int cpsw_register_ports(struct cpsw_common *cpsw)
if (ret) {
dev_err(cpsw->dev,
"cpsw: err registering net device%d\n", i);
cpsw->slaves[i].ndev = NULL;
break;
}
}
@ -2003,7 +2002,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* setup netdevs */
ret = cpsw_create_ports(cpsw);
if (ret)
goto clean_unregister_netdev;
goto clean_cpts;
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
@ -2017,14 +2016,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
goto clean_unregister_netdev;
goto clean_cpts;
}
ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
goto clean_unregister_netdev;
goto clean_cpts;
}
if (!cpsw->cpts)
@ -2034,7 +2033,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching misc irq (%d)\n", ret);
goto clean_unregister_netdev;
goto clean_cpts;
}
/* Enable misc CPTS evnt_pend IRQ */
@ -2043,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
goto clean_unregister_netdev;
goto clean_cpts;
ret = cpsw_register_devlink(cpsw);
if (ret)
@ -2065,8 +2064,6 @@ skip_cpts:
clean_unregister_notifiers:
cpsw_unregister_notifiers(cpsw);
clean_unregister_netdev:
cpsw_unregister_ports(cpsw);
clean_cpts:
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);

View file

@ -790,18 +790,14 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->rx_buffer) {
dma_free_coherent(priv->dev,
RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
(RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
priv->rx_buffer, priv->dma_rx_addr);
priv->rx_buffer = NULL;
priv->dma_rx_addr = 0;
}
if (priv->tx_buffer) {
dma_free_coherent(priv->dev,
TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
priv->tx_buffer, priv->dma_tx_addr);
priv->tx_buffer = NULL;
priv->dma_tx_addr = 0;
}
}

View file

@ -11,7 +11,7 @@
#define MPTCP_PM_VER 1
/**
* enum mptcp_event_type
* enum mptcp_event_type - Netlink MPTCP event types
* @MPTCP_EVENT_UNSPEC: unused event
* @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is the
* good time to allocate memory and send ADD_ADDR if needed. Depending on the

View file

@ -22,6 +22,36 @@
struct atm_vcc *sigd = NULL;
/**
 * find_get_vcc - validate and get a reference to a vcc pointer
 * @vcc: the vcc pointer to validate
 *
 * This function validates that @vcc points to a registered VCC in vcc_hash.
 * If found, it increments the socket reference count and returns the vcc.
 * The caller must call sock_put(sk_atm(vcc)) when done.
 *
 * Returns the vcc pointer if valid, NULL otherwise.
 */
static struct atm_vcc *find_get_vcc(struct atm_vcc *vcc)
{
int i;
read_lock(&vcc_sklist_lock);
/* Walk every bucket of vcc_hash looking for a socket whose atm_vcc
 * matches the candidate pointer.
 */
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct sock *s;
sk_for_each(s, &vcc_hash[i]) {
if (atm_sk(s) == vcc) {
/* Take the sock reference while still holding the
 * list lock so the entry cannot vanish underneath us.
 */
sock_hold(s);
read_unlock(&vcc_sklist_lock);
return vcc;
}
}
}
read_unlock(&vcc_sklist_lock);
/* Not found in the hash: the pointer is stale or bogus. */
return NULL;
}
static void sigd_put_skb(struct sk_buff *skb)
{
if (!sigd) {
@ -69,7 +99,14 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
msg = (struct atmsvc_msg *) skb->data;
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
vcc = *(struct atm_vcc **) &msg->vcc;
vcc = find_get_vcc(*(struct atm_vcc **)&msg->vcc);
if (!vcc) {
pr_debug("invalid vcc pointer in msg\n");
dev_kfree_skb(skb);
return -EINVAL;
}
pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);
@ -100,7 +137,16 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
case as_indicate:
vcc = *(struct atm_vcc **)&msg->listen_vcc;
/* Release the reference from msg->vcc, we'll use msg->listen_vcc instead */
sock_put(sk);
vcc = find_get_vcc(*(struct atm_vcc **)&msg->listen_vcc);
if (!vcc) {
pr_debug("invalid listen_vcc pointer in msg\n");
dev_kfree_skb(skb);
return -EINVAL;
}
sk = sk_atm(vcc);
pr_debug("as_indicate!!!\n");
lock_sock(sk);
@ -115,6 +161,8 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
/* Paired with find_get_vcc(msg->listen_vcc) above */
sock_put(sk);
return 0;
case as_close:
set_bit(ATM_VF_RELEASED, &vcc->flags);
@ -131,11 +179,15 @@ as_indicate_complete:
break;
default:
pr_alert("bad message type %d\n", (int)msg->type);
/* Paired with find_get_vcc(msg->vcc) above */
sock_put(sk);
return -EINVAL;
}
sk->sk_state_change(sk);
out:
dev_kfree_skb(skb);
/* Paired with find_get_vcc(msg->vcc) above */
sock_put(sk);
return 0;
}

View file

@ -559,6 +559,21 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
/* steal dst entry from skb_in, don't drop refcnt */
skb_dstref_steal(skb_in);
skb_dstref_restore(skb_in, orefdst);
/*
* At this point, fl4_dec.daddr should NOT be local (we
* checked fl4_dec.saddr above). However, a race condition
* may occur if the address is added to the interface
* concurrently. In that case, ip_route_input() returns a
* LOCAL route with dst.output=ip_rt_bug, which must not
* be used for output.
*/
if (!err && rt2 && rt2->rt_type == RTN_LOCAL) {
net_warn_ratelimited("detected local route for %pI4 during ICMP sending, src %pI4\n",
&fl4_dec.daddr, &fl4_dec.saddr);
dst_release(&rt2->dst);
err = -EINVAL;
}
}
if (err)
@ -1037,16 +1052,22 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
/* Checkin full IP header plus 8 bytes of protocol to
* avoid additional coding at protocol handlers.
*/
if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
return;
}
if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
goto out;
/* IPPROTO_RAW sockets are not supposed to receive anything. */
if (protocol == IPPROTO_RAW)
goto out;
raw_icmp_error(skb, protocol, info);
ipprot = rcu_dereference(inet_protos[protocol]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, info);
return;
out:
__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
}
static bool icmp_tag_validation(int proto)

View file

@ -1067,6 +1067,12 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
if (reason != SKB_NOT_DROPPED_YET)
goto out;
if (nexthdr == IPPROTO_RAW) {
/* Add a more specific reason later ? */
reason = SKB_DROP_REASON_NOT_SPECIFIED;
goto out;
}
/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
Without this we will not able f.e. to make source routed
pmtu discovery.

View file

@ -1044,26 +1044,23 @@ out_free:
return ret;
}
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
static void mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool force)
{
struct mptcp_rm_list list = { .nr = 0 };
bool ret;
bool announced;
list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
ret = mptcp_remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
announced = mptcp_remove_anno_list_by_saddr(msk, addr);
if (announced || force) {
spin_lock_bh(&msk->pm.lock);
if (ret) {
__set_bit(addr->id, msk->pm.id_avail_bitmap);
if (announced)
msk->pm.add_addr_signaled--;
}
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
return ret;
}
static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
@ -1097,17 +1094,15 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
list.ids[0] = mptcp_endp_get_local_id(msk, addr);
if (remove_subflow) {
spin_lock_bh(&msk->pm.lock);
mptcp_pm_rm_subflow(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
spin_lock_bh(&msk->pm.lock);
spin_lock_bh(&msk->pm.lock);
if (remove_subflow)
mptcp_pm_rm_subflow(msk, &list);
if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
__mark_subflow_endp_available(msk, list.ids[0]);
spin_unlock_bh(&msk->pm.lock);
}
else /* mark endp ID as available, e.g. Signal or MPC endp */
__set_bit(addr->id, msk->pm.id_avail_bitmap);
spin_unlock_bh(&msk->pm.lock);
if (msk->mpc_endpoint_id == entry->addr.id)
msk->mpc_endpoint_id = 0;

View file

@ -103,7 +103,7 @@ static void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn)
* It creates a unique token to identify the new mptcp connection,
* a secret local key and the initial data sequence number (idsn).
*
* Returns 0 on success.
* Return: 0 on success.
*/
int mptcp_token_new_request(struct request_sock *req)
{
@ -146,7 +146,7 @@ int mptcp_token_new_request(struct request_sock *req)
* the computed token at a later time, this is needed to process
* join requests.
*
* returns 0 on success.
* Return: 0 on success.
*/
int mptcp_token_new_connect(struct sock *ssk)
{
@ -241,7 +241,7 @@ found:
* This function returns the mptcp connection structure with the given token.
* A reference count on the mptcp socket returned is taken.
*
* returns NULL if no connection with the given token value exists.
* Return: NULL if no connection with the given token value exists.
*/
struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
{
@ -288,11 +288,13 @@ EXPORT_SYMBOL_GPL(mptcp_token_get_sock);
* @s_slot: start slot number
* @s_num: start number inside the given lock
*
* This function returns the first mptcp connection structure found inside the
* token container starting from the specified position, or NULL.
* Description:
* On successful iteration, the iterator is moved to the next position and a
* reference to the returned socket is acquired.
*
* On successful iteration, the iterator is moved to the next position and
* a reference to the returned socket is acquired.
* Return:
* The first mptcp connection structure found inside the token container
* starting from the specified position, or NULL.
*/
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
long *s_num)

View file

@ -762,6 +762,14 @@ static void llc_shdlc_deinit(struct nfc_llc *llc)
{
struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
timer_shutdown_sync(&shdlc->connect_timer);
timer_shutdown_sync(&shdlc->t1_timer);
timer_shutdown_sync(&shdlc->t2_timer);
shdlc->t1_active = false;
shdlc->t2_active = false;
cancel_work_sync(&shdlc->sm_work);
skb_queue_purge(&shdlc->rcv_q);
skb_queue_purge(&shdlc->send_q);
skb_queue_purge(&shdlc->ack_pending_q);

View file

@ -460,7 +460,7 @@ static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
rcu_read_lock();
tmp = rcu_dereference(aead);
if (tmp)
atomic_add_unless(&rcu_dereference(aead)->users, -1, lim);
atomic_add_unless(&tmp->users, -1, lim);
rcu_read_unlock();
}

View file

@ -1650,10 +1650,9 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uad
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
* If we will make it after state is locked,
* we will have to recheck all again in any case.
*/
err = prepare_peercred(&peercred);
if (err)
goto out;
/* create new sock for complete connection */
newsk = unix_create1(net, NULL, 0, sock->type);
@ -1662,10 +1661,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uad
goto out;
}
err = prepare_peercred(&peercred);
if (err)
goto out;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
if (!skb) {

View file

@ -1367,8 +1367,8 @@ void xdisconnect(int fd)
int main_loop(void)
{
struct addrinfo *peer = NULL;
int fd = 0, ret, fd_in = 0;
struct addrinfo *peer;
struct wstate winfo;
if (cfg_input && cfg_sockopt_types.mptfo) {