Merge branch 'big-tcp-without-hbh-in-ipv6'

Alice Mikityanska says:

====================
BIG TCP without HBH in IPv6

Resubmitting after the grace period.

This series is part 1 of "BIG TCP for UDP tunnels". Due to the number of
patches, I'm splitting it into two logical parts:

* Remove hop-by-hop header for BIG TCP IPv6 to align with BIG TCP IPv4.
* Fix up things that prevent BIG TCP from working with UDP tunnels.

The current BIG TCP IPv6 code inserts a hop-by-hop extension header with
the 32-bit length of the packet. When the packet is encapsulated and the
outer protocol, the inner protocol, or both are IPv6, there will be
1 or 2 HBH headers that need to be dealt with. The issues that arise:

1. The drivers don't strip it, and they'd all need to know the structure
of each tunnel protocol in order to strip it correctly, also taking into
account all combinations of IPv4/IPv6 inner/outer protocols.

2. Even if (1) is implemented, it would incur an additional performance
penalty per aggregated packet.

3. The skb_gso_validate_network_len check is skipped in
ip6_finish_output_gso when IP6SKB_FAKEJUMBO is set, but it seems that it
would make sense to do the actual validation, just taking into account
the length of the HBH header. When the support for tunnels is added, it
becomes trickier, because there may be one or two HBH headers, depending
on whether it's IPv6 in IPv6 or not.

At the same time, having an HBH header to store the 32-bit length is not
strictly necessary, as BIG TCP IPv4 doesn't do anything like this and
just restores the length from skb->len. The same thing can be done for
BIG TCP IPv6. Removing HBH from BIG TCP would make it possible to
simplify the implementation significantly and to align it with BIG TCP
IPv4, which has been a long-standing goal.
====================

Link: https://patch.msgid.link/20260205133925.526371-1-alice.kernel@fastmail.im
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2026-02-06 20:48:35 -08:00
commit 1fdad81d88
22 changed files with 61 additions and 282 deletions

View file

@ -1463,9 +1463,6 @@ netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto tx_free;
last_frag = skb_shinfo(skb)->nr_frags;
txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];

View file

@ -517,9 +517,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto tx_free;
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
@ -13881,7 +13878,6 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
struct hop_jumbo_hdr *jhdr;
int hdr_count = 0;
u8 *nexthdr;
int start;
@ -13910,24 +13906,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
if (hdrlen > 64)
return false;
/* The ext header may be a hop-by-hop header inserted for
* big TCP purposes. This will be removed before sending
* from NIC, so do not count it.
*/
if (*nexthdr == NEXTHDR_HOP) {
if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
goto increment_hdr;
jhdr = (struct hop_jumbo_hdr *)hp;
if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
jhdr->nexthdr != IPPROTO_TCP)
goto increment_hdr;
goto next_hdr;
}
increment_hdr:
hdr_count++;
next_hdr:
nexthdr = &hp->nexthdr;
start += hdrlen;
}

View file

@ -963,9 +963,6 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto drop;
if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers

View file

@ -2156,9 +2156,6 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto out_drop;
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))

View file

@ -636,28 +636,20 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
void **pfrag,
int *hopbyhop)
void **pfrag)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
*lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
* mainly so that tcpdump can dissect them.
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
*lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@ -884,7 +876,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@ -893,7 +884,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@ -903,7 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr, &hopbyhop);
&inline_ok, &fragptr);
if (unlikely(!real_size))
goto tx_drop_count;
@ -956,7 +946,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@ -1021,31 +1011,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
/* Copy headers;
* note that we already verified that it is linear
*/
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
if (unlikely(hopbyhop)) {
/* remove the HBH header.
* Layout: [Ethernet header][IPv6 header][HBH][TCP header]
*/
memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
h6->nexthdr = IPPROTO_TCP;
/* Copy the TCP header after the IPv6 one */
memcpy(h6 + 1,
skb->data + ETH_HLEN + sizeof(*h6) +
sizeof(struct hop_jumbo_hdr),
tcp_hdrlen(skb));
/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
} else {
/* Copy headers;
* note that we already verified that it is linear
*/
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
}
ring->tso_packets++;
i = shinfo->gso_segs;

View file

@ -152,12 +152,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
* to inline later in the transmit descriptor
*/
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
*hopbyhop = 0;
if (skb->encapsulation) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_inner_transport_offset(skb) +
@ -167,17 +166,12 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
else
ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
}
}
stats->tso_packets++;
stats->tso_bytes += skb->len - ihs - *hopbyhop;
stats->tso_bytes += skb->len - ihs;
}
return ihs;
@ -239,7 +233,6 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@ -275,16 +268,14 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
int hopbyhop;
u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
.headlen = skb_headlen(skb) - ihs - hopbyhop,
.hopbyhop = hopbyhop,
.headlen = skb_headlen(skb) - ihs,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@ -439,7 +430,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
u16 ihs = attr->ihs;
struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@ -456,28 +446,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (ihs) {
u8 *start = eseg->inline_hdr.start;
if (unlikely(attr->hopbyhop)) {
/* remove the HBH header.
* Layout: [Ethernet header][IPv6 header][HBH][TCP header]
*/
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
ihs += VLAN_HLEN;
h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
} else {
unsafe_memcpy(start, skb->data,
ETH_HLEN + sizeof(*h6),
MLX5_UNSAFE_MEMCPY_DISCLAIMER);
h6 = (struct ipv6hdr *)(start + ETH_HLEN);
}
h6->nexthdr = IPPROTO_TCP;
/* Copy the TCP header after the IPv6 one */
memcpy(h6 + 1,
skb->data + ETH_HLEN + sizeof(*h6) +
sizeof(struct hop_jumbo_hdr),
tcp_hdrlen(skb));
/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
} else if (skb_vlan_tag_present(skb)) {
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(start, skb, ihs);
ihs += VLAN_HLEN;
stats->added_vlan_packets++;
@ -491,7 +460,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@ -1019,34 +988,14 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
if (unlikely(attr.hopbyhop)) {
struct ipv6hdr *h6;
/* remove the HBH header.
* Layout: [Ethernet header][IPv6 header][HBH][TCP header]
*/
unsafe_memcpy(eseg->inline_hdr.start, skb->data,
ETH_HLEN + sizeof(*h6),
MLX5_UNSAFE_MEMCPY_DISCLAIMER);
h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
h6->nexthdr = IPPROTO_TCP;
/* Copy the TCP header after the IPv6 one */
unsafe_memcpy(h6 + 1,
skb->data + ETH_HLEN + sizeof(*h6) +
sizeof(struct hop_jumbo_hdr),
tcp_hdrlen(skb),
MLX5_UNSAFE_MEMCPY_DISCLAIMER);
/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
} else {
unsafe_memcpy(eseg->inline_hdr.start, skb->data,
attr.ihs,
MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}
unsafe_memcpy(eseg->inline_hdr.start, skb->data,
attr.ihs,
MLX5_UNSAFE_MEMCPY_DISCLAIMER);
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;

View file

@ -355,9 +355,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto tx_drop_count;
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;

View file

@ -126,6 +126,28 @@ static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
skb_network_header_len(skb);
}
static inline unsigned int
ipv6_payload_len(const struct sk_buff *skb, const struct ipv6hdr *ip6)
{
u32 len = ntohs(ip6->payload_len);
return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
len :
skb->len - skb_network_offset(skb) - sizeof(struct ipv6hdr);
}
static inline unsigned int skb_ipv6_payload_len(const struct sk_buff *skb)
{
return ipv6_payload_len(skb, ipv6_hdr(skb));
}
#define IPV6_MAXPLEN 65535
static inline void ipv6_set_payload_len(struct ipv6hdr *ip6, unsigned int len)
{
ip6->payload_len = len <= IPV6_MAXPLEN ? htons(len) : 0;
}
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@ -155,7 +177,6 @@ struct inet6_skb_parm {
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
#define IP6SKB_MCROUTE 2048
};

View file

@ -25,8 +25,6 @@ struct ip_tunnel_info;
#define SIN6_LEN_RFC2133 24
#define IPV6_MAXPLEN 65535
/*
* NextHeader field of IPv6 header
*/
@ -151,17 +149,6 @@ struct frag_hdr {
__be32 identification;
};
/*
* Jumbo payload option, as described in RFC 2675 2.
*/
struct hop_jumbo_hdr {
u8 nexthdr;
u8 hdrlen;
u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
u8 tlv_len; /* 4 */
__be32 jumbo_payload_len;
};
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
@ -464,72 +451,6 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);
/* This helper is specialized for BIG TCP needs.
* It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
* It assumes headers are already in skb->head.
* Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there.
*/
static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
{
const struct hop_jumbo_hdr *jhdr;
const struct ipv6hdr *nhdr;
if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
return 0;
if (skb->protocol != htons(ETH_P_IPV6))
return 0;
if (skb_network_offset(skb) +
sizeof(struct ipv6hdr) +
sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
return 0;
nhdr = ipv6_hdr(skb);
if (nhdr->nexthdr != NEXTHDR_HOP)
return 0;
jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
jhdr->nexthdr != IPPROTO_TCP)
return 0;
return jhdr->nexthdr;
}
/* Return 0 if HBH header is successfully removed
* Or if HBH removal is unnecessary (packet is not big TCP)
* Return error to indicate dropping the packet
*/
static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
{
const int hophdr_len = sizeof(struct hop_jumbo_hdr);
int nexthdr = ipv6_has_hopopt_jumbo(skb);
struct ipv6hdr *h6;
if (!nexthdr)
return 0;
if (skb_cow_head(skb, 0))
return -1;
/* Remove the HBH header.
* Layout: [Ethernet header][IPv6 header][HBH][L4 Header]
*/
memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb),
skb_network_header(skb) - skb_mac_header(skb) +
sizeof(struct ipv6hdr));
__skb_pull(skb, hophdr_len);
skb->network_header += hophdr_len;
skb->mac_header += hophdr_len;
h6 = ipv6_hdr(skb);
h6->nexthdr = nexthdr;
return 0;
}
static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
{
s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);

View file

@ -42,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
if (ip6h->version != 6)
return -1;
pkt_len = ntohs(ip6h->payload_len);
pkt_len = ipv6_payload_len(pkt->skb, ip6h);
skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
@ -86,7 +86,7 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
if (ip6h->version != 6)
goto inhdr_error;
pkt_len = ntohs(ip6h->payload_len);
pkt_len = ipv6_payload_len(pkt->skb, ip6h);
if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
idev = __in6_dev_get(nft_in(pkt));
__IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);

View file

@ -58,7 +58,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
if (hdr->version != 6)
goto inhdr_error;
pkt_len = ntohs(hdr->payload_len);
pkt_len = ipv6_payload_len(skb, hdr);
if (hdr->nexthdr == NEXTHDR_HOP && nf_ip6_check_hbh_len(skb, &pkt_len))
goto drop;

View file

@ -229,7 +229,7 @@ static int nf_ct_br_ipv6_check(const struct sk_buff *skb)
if (hdr->version != 6)
return -1;
len = ntohs(hdr->payload_len) + sizeof(struct ipv6hdr) + nhoff;
len = ipv6_payload_len(skb, hdr) + sizeof(struct ipv6hdr) + nhoff;
if (skb->len < len)
return -1;
@ -269,7 +269,7 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NF_ACCEPT;
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
len = sizeof(struct ipv6hdr) + skb_ipv6_payload_len(skb);
if (pskb_trim_rcsum(skb, len))
return NF_ACCEPT;

View file

@ -3813,8 +3813,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
skb_transport_header_was_set(skb) &&
skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
!ipv6_has_hopopt_jumbo(skb))
skb_network_header_len(skb) != sizeof(struct ipv6hdr))
features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
return features;
@ -3917,8 +3916,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
!ipv6_has_hopopt_jumbo(skb))
skb_network_header_len(skb) != sizeof(struct ipv6hdr))
goto sw_checksum;
switch (skb->csum_offset) {

View file

@ -115,8 +115,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
(p->protocol == htons(ETH_P_IPV6) &&
skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
p->encapsulation)
return -E2BIG;
}

View file

@ -262,7 +262,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
pkt_len = ntohs(hdr->payload_len);
pkt_len = ipv6_payload_len(skb, hdr);
/* pkt_len may be zero if Jumbo payload option is present */
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {

View file

@ -94,7 +94,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
const struct net_offload *ops;
int proto, err;
int proto;
struct frag_hdr *fptr;
unsigned int payload_len;
u8 *prevhdr;
@ -104,9 +104,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
bool gso_partial;
skb_reset_network_header(skb);
err = ipv6_hopopt_jumbo_remove(skb);
if (err)
return ERR_PTR(err);
nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
@ -336,41 +333,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
const struct net_offload *ops;
struct ipv6hdr *iph;
int err = -ENOSYS;
u32 payload_len;
if (skb->encapsulation) {
skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
skb_set_inner_network_header(skb, nhoff);
}
payload_len = skb->len - nhoff - sizeof(*iph);
if (unlikely(payload_len > IPV6_MAXPLEN)) {
struct hop_jumbo_hdr *hop_jumbo;
int hoplen = sizeof(*hop_jumbo);
/* Move network header left */
memmove(skb_mac_header(skb) - hoplen, skb_mac_header(skb),
skb->transport_header - skb->mac_header);
skb->data -= hoplen;
skb->len += hoplen;
skb->mac_header -= hoplen;
skb->network_header -= hoplen;
iph = (struct ipv6hdr *)(skb->data + nhoff);
hop_jumbo = (struct hop_jumbo_hdr *)(iph + 1);
/* Build hop-by-hop options */
hop_jumbo->nexthdr = iph->nexthdr;
hop_jumbo->hdrlen = 0;
hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
hop_jumbo->tlv_len = 4;
hop_jumbo->jumbo_payload_len = htonl(payload_len + hoplen);
iph->nexthdr = NEXTHDR_HOP;
iph->payload_len = 0;
} else {
iph = (struct ipv6hdr *)(skb->data + nhoff);
iph->payload_len = htons(payload_len);
}
iph = (struct ipv6hdr *)(skb->data + nhoff);
ipv6_set_payload_len(iph, skb->len - nhoff - sizeof(*iph));
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);

View file

@ -179,8 +179,7 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
static int ip6_finish_output_gso(struct net *net, struct sock *sk,
struct sk_buff *skb, unsigned int mtu)
{
if (unlikely(!(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
!skb_gso_validate_network_len(skb, mtu)))
if (unlikely(!skb_gso_validate_network_len(skb, mtu)))
return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
return ip6_finish_output2(net, sk, skb);
@ -273,8 +272,6 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
struct inet6_dev *idev = ip6_dst_idev(dst);
struct hop_jumbo_hdr *hop_jumbo;
int hoplen = sizeof(*hop_jumbo);
struct net *net = sock_net(sk);
unsigned int head_room;
struct net_device *dev;
@ -287,7 +284,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
rcu_read_lock();
dev = dst_dev_rcu(dst);
head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
@ -313,19 +310,8 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
&fl6->saddr);
}
if (unlikely(seg_len > IPV6_MAXPLEN)) {
hop_jumbo = __skb_push(skb, hoplen);
hop_jumbo->nexthdr = proto;
hop_jumbo->hdrlen = 0;
hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
hop_jumbo->tlv_len = 4;
hop_jumbo->jumbo_payload_len = htonl(seg_len + hoplen);
proto = IPPROTO_HOPOPTS;
if (unlikely(seg_len > IPV6_MAXPLEN))
seg_len = 0;
IP6CB(skb)->flags |= IP6SKB_FAKEJUMBO;
}
__skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);

View file

@ -125,12 +125,7 @@ EXPORT_SYMBOL(ip6_dst_hoplimit);
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int len;
len = skb->len - sizeof(struct ipv6hdr);
if (len > IPV6_MAXPLEN)
len = 0;
ipv6_hdr(skb)->payload_len = htons(len);
ipv6_set_payload_len(ipv6_hdr(skb), skb->len - sizeof(struct ipv6hdr));
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
/* if egress device is enslaved to an L3 master device pass the

View file

@ -949,7 +949,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
*next_protocol = IPPROTO_IPV6;
if (payload_len)
*payload_len =
ntohs(old_ipv6h->payload_len) +
ipv6_payload_len(skb, old_ipv6h) +
sizeof(*old_ipv6h);
old_dsfield = ipv6_get_dsfield(old_ipv6h);
*ttl = old_ipv6h->hop_limit;

View file

@ -121,7 +121,7 @@ int nf_ct_skb_network_trim(struct sk_buff *skb, int family)
len = skb_ip_totlen(skb);
break;
case NFPROTO_IPV6:
len = ntohs(ipv6_hdr(skb)->payload_len);
len = skb_ipv6_payload_len(skb);
if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP) {
int err = nf_ip6_check_hbh_len(skb, &len);

View file

@ -561,7 +561,7 @@ dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
ipv6_payload_len(skb, ih) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));

View file

@ -1279,7 +1279,7 @@ static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
continue;
seglen = ntohs(ipv6h_check->payload_len);
seglen = ipv6_payload_len(skb, ipv6h_check);
} else {
WARN_ON(1); /* shouldn't happen */
continue;