mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
tcp: secure_seq: add back ports to TS offset
This reverts commit 28ee1b746f ("secure_seq: downgrade to per-host timestamp offsets"). tcp_tw_recycle went away in 2017. Zhouyan Deng reported off-path TCP source port leakage via a SYN cookie side-channel that can be fixed in multiple ways. One of them is to bring back TCP ports in TS offset randomization. As a bonus, we perform a single siphash() computation to provide both an ISN and a TS offset. Fixes: 28ee1b746f ("secure_seq: downgrade to per-host timestamp offsets") Reported-by: Zhouyan Deng <dengzhouyan_nwpu@163.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> Acked-by: Florian Westphal <fw@strlen.de> Link: https://patch.msgid.link/20260302205527.1982836-1-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
7f083faf59
commit
165573e41f
8 changed files with 127 additions and 108 deletions
|
|
@ -20,7 +20,6 @@
|
|||
#include <net/tcp.h>
|
||||
|
||||
static siphash_aligned_key_t net_secret;
|
||||
static siphash_aligned_key_t ts_secret;
|
||||
|
||||
#define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ)
|
||||
|
||||
|
|
@ -28,11 +27,6 @@ static __always_inline void net_secret_init(void)
|
|||
{
|
||||
net_get_random_once(&net_secret, sizeof(net_secret));
|
||||
}
|
||||
|
||||
static __always_inline void ts_secret_init(void)
|
||||
{
|
||||
net_get_random_once(&ts_secret, sizeof(ts_secret));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
|
|
@ -53,28 +47,9 @@ static u32 seq_scale(u32 seq)
|
|||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
u32 secure_tcpv6_ts_off(const struct net *net,
|
||||
const __be32 *saddr, const __be32 *daddr)
|
||||
{
|
||||
const struct {
|
||||
struct in6_addr saddr;
|
||||
struct in6_addr daddr;
|
||||
} __aligned(SIPHASH_ALIGNMENT) combined = {
|
||||
.saddr = *(struct in6_addr *)saddr,
|
||||
.daddr = *(struct in6_addr *)daddr,
|
||||
};
|
||||
|
||||
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
|
||||
return 0;
|
||||
|
||||
ts_secret_init();
|
||||
return siphash(&combined, offsetofend(typeof(combined), daddr),
|
||||
&ts_secret);
|
||||
}
|
||||
EXPORT_IPV6_MOD(secure_tcpv6_ts_off);
|
||||
|
||||
u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
|
||||
__be16 sport, __be16 dport)
|
||||
union tcp_seq_and_ts_off
|
||||
secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
|
||||
const __be32 *daddr, __be16 sport, __be16 dport)
|
||||
{
|
||||
const struct {
|
||||
struct in6_addr saddr;
|
||||
|
|
@ -87,14 +62,20 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
|
|||
.sport = sport,
|
||||
.dport = dport
|
||||
};
|
||||
u32 hash;
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
net_secret_init();
|
||||
hash = siphash(&combined, offsetofend(typeof(combined), dport),
|
||||
&net_secret);
|
||||
return seq_scale(hash);
|
||||
|
||||
st.hash64 = siphash(&combined, offsetofend(typeof(combined), dport),
|
||||
&net_secret);
|
||||
|
||||
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
|
||||
st.ts_off = 0;
|
||||
|
||||
st.seq = seq_scale(st.seq);
|
||||
return st;
|
||||
}
|
||||
EXPORT_SYMBOL(secure_tcpv6_seq);
|
||||
EXPORT_SYMBOL(secure_tcpv6_seq_and_ts_off);
|
||||
|
||||
u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
|
||||
__be16 dport)
|
||||
|
|
@ -118,33 +99,30 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
|
||||
{
|
||||
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
|
||||
return 0;
|
||||
|
||||
ts_secret_init();
|
||||
return siphash_2u32((__force u32)saddr, (__force u32)daddr,
|
||||
&ts_secret);
|
||||
}
|
||||
|
||||
/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
|
||||
* but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
|
||||
* it would be easy enough to have the former function use siphash_4u32, passing
|
||||
* the arguments as separate u32.
|
||||
*/
|
||||
u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
|
||||
__be16 sport, __be16 dport)
|
||||
union tcp_seq_and_ts_off
|
||||
secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
|
||||
__be16 sport, __be16 dport)
|
||||
{
|
||||
u32 hash;
|
||||
u32 ports = (__force u32)sport << 16 | (__force u32)dport;
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
net_secret_init();
|
||||
hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
|
||||
(__force u32)sport << 16 | (__force u32)dport,
|
||||
&net_secret);
|
||||
return seq_scale(hash);
|
||||
|
||||
st.hash64 = siphash_3u32((__force u32)saddr, (__force u32)daddr,
|
||||
ports, &net_secret);
|
||||
|
||||
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
|
||||
st.ts_off = 0;
|
||||
|
||||
st.seq = seq_scale(st.seq);
|
||||
return st;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(secure_tcp_seq);
|
||||
EXPORT_SYMBOL_GPL(secure_tcp_seq_and_ts_off);
|
||||
|
||||
u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -378,9 +378,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
|
|||
tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
|
||||
|
||||
if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
|
||||
tsoff = secure_tcp_ts_off(net,
|
||||
ip_hdr(skb)->daddr,
|
||||
ip_hdr(skb)->saddr);
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
st = secure_tcp_seq_and_ts_off(net,
|
||||
ip_hdr(skb)->daddr,
|
||||
ip_hdr(skb)->saddr,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
tsoff = st.ts_off;
|
||||
tcp_opt.rcv_tsecr -= tsoff;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7646,6 +7646,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
|||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct net *net = sock_net(sk);
|
||||
struct sock *fastopen_sk = NULL;
|
||||
union tcp_seq_and_ts_off st;
|
||||
struct request_sock *req;
|
||||
bool want_cookie = false;
|
||||
struct dst_entry *dst;
|
||||
|
|
@ -7715,9 +7716,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
|||
if (!dst)
|
||||
goto drop_and_free;
|
||||
|
||||
if (tmp_opt.tstamp_ok || (!want_cookie && !isn))
|
||||
st = af_ops->init_seq_and_ts_off(net, skb);
|
||||
|
||||
if (tmp_opt.tstamp_ok) {
|
||||
tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
|
||||
tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
|
||||
tcp_rsk(req)->ts_off = st.ts_off;
|
||||
}
|
||||
if (!want_cookie && !isn) {
|
||||
int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
|
||||
|
|
@ -7739,7 +7743,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
|||
goto drop_and_release;
|
||||
}
|
||||
|
||||
isn = af_ops->init_seq(skb);
|
||||
isn = st.seq;
|
||||
}
|
||||
|
||||
tcp_ecn_create_request(req, skb, sk, dst);
|
||||
|
|
|
|||
|
|
@ -105,17 +105,14 @@ static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
|
|||
|
||||
static DEFINE_MUTEX(tcp_exit_batch_mutex);
|
||||
|
||||
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
|
||||
static union tcp_seq_and_ts_off
|
||||
tcp_v4_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
|
||||
{
|
||||
return secure_tcp_seq(ip_hdr(skb)->daddr,
|
||||
ip_hdr(skb)->saddr,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
}
|
||||
|
||||
static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
|
||||
{
|
||||
return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
|
||||
return secure_tcp_seq_and_ts_off(net,
|
||||
ip_hdr(skb)->daddr,
|
||||
ip_hdr(skb)->saddr,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
}
|
||||
|
||||
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
|
||||
|
|
@ -327,15 +324,16 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len
|
|||
rt = NULL;
|
||||
|
||||
if (likely(!tp->repair)) {
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
st = secure_tcp_seq_and_ts_off(net,
|
||||
inet->inet_saddr,
|
||||
inet->inet_daddr,
|
||||
inet->inet_sport,
|
||||
usin->sin_port);
|
||||
if (!tp->write_seq)
|
||||
WRITE_ONCE(tp->write_seq,
|
||||
secure_tcp_seq(inet->inet_saddr,
|
||||
inet->inet_daddr,
|
||||
inet->inet_sport,
|
||||
usin->sin_port));
|
||||
WRITE_ONCE(tp->tsoffset,
|
||||
secure_tcp_ts_off(net, inet->inet_saddr,
|
||||
inet->inet_daddr));
|
||||
WRITE_ONCE(tp->write_seq, st.seq);
|
||||
WRITE_ONCE(tp->tsoffset, st.ts_off);
|
||||
}
|
||||
|
||||
atomic_set(&inet->inet_id, get_random_u16());
|
||||
|
|
@ -1677,8 +1675,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
|
|||
.cookie_init_seq = cookie_v4_init_sequence,
|
||||
#endif
|
||||
.route_req = tcp_v4_route_req,
|
||||
.init_seq = tcp_v4_init_seq,
|
||||
.init_ts_off = tcp_v4_init_ts_off,
|
||||
.init_seq_and_ts_off = tcp_v4_init_seq_and_ts_off,
|
||||
.send_synack = tcp_v4_send_synack,
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -151,9 +151,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
|
|||
tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
|
||||
|
||||
if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
|
||||
tsoff = secure_tcpv6_ts_off(net,
|
||||
ipv6_hdr(skb)->daddr.s6_addr32,
|
||||
ipv6_hdr(skb)->saddr.s6_addr32);
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
st = secure_tcpv6_seq_and_ts_off(net,
|
||||
ipv6_hdr(skb)->daddr.s6_addr32,
|
||||
ipv6_hdr(skb)->saddr.s6_addr32,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
tsoff = st.ts_off;
|
||||
tcp_opt.rcv_tsecr -= tsoff;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -105,18 +105,14 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
|
|||
}
|
||||
}
|
||||
|
||||
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
|
||||
static union tcp_seq_and_ts_off
|
||||
tcp_v6_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
|
||||
{
|
||||
return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
|
||||
ipv6_hdr(skb)->saddr.s6_addr32,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
}
|
||||
|
||||
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
|
||||
{
|
||||
return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
|
||||
ipv6_hdr(skb)->saddr.s6_addr32);
|
||||
return secure_tcpv6_seq_and_ts_off(net,
|
||||
ipv6_hdr(skb)->daddr.s6_addr32,
|
||||
ipv6_hdr(skb)->saddr.s6_addr32,
|
||||
tcp_hdr(skb)->dest,
|
||||
tcp_hdr(skb)->source);
|
||||
}
|
||||
|
||||
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
|
||||
|
|
@ -320,14 +316,16 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
|
|||
sk_set_txhash(sk);
|
||||
|
||||
if (likely(!tp->repair)) {
|
||||
union tcp_seq_and_ts_off st;
|
||||
|
||||
st = secure_tcpv6_seq_and_ts_off(net,
|
||||
np->saddr.s6_addr32,
|
||||
sk->sk_v6_daddr.s6_addr32,
|
||||
inet->inet_sport,
|
||||
inet->inet_dport);
|
||||
if (!tp->write_seq)
|
||||
WRITE_ONCE(tp->write_seq,
|
||||
secure_tcpv6_seq(np->saddr.s6_addr32,
|
||||
sk->sk_v6_daddr.s6_addr32,
|
||||
inet->inet_sport,
|
||||
inet->inet_dport));
|
||||
tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
|
||||
sk->sk_v6_daddr.s6_addr32);
|
||||
WRITE_ONCE(tp->write_seq, st.seq);
|
||||
tp->tsoffset = st.ts_off;
|
||||
}
|
||||
|
||||
if (tcp_fastopen_defer_connect(sk, &err))
|
||||
|
|
@ -817,8 +815,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
|
|||
.cookie_init_seq = cookie_v6_init_sequence,
|
||||
#endif
|
||||
.route_req = tcp_v6_route_req,
|
||||
.init_seq = tcp_v6_init_seq,
|
||||
.init_ts_off = tcp_v6_init_ts_off,
|
||||
.init_seq_and_ts_off = tcp_v6_init_seq_and_ts_off,
|
||||
.send_synack = tcp_v6_send_synack,
|
||||
};
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue