xfrm: Use nested-BH locking for nat_keepalive_sk_ipv[46]

nat_keepalive_sk_ipv[46] are per-CPU variables that rely on disabled BH
for their locking. On PREEMPT_RT, local_bh_disable() does not provide
this per-CPU locking, so the data structure requires explicit locking.

Use struct sock_bh_locked, which bundles the sock pointer with a
local_lock_t, and take local_lock_nested_bh() around each use. On
!PREEMPT_RT this change only adds lockdep coverage and does not alter
the functional behaviour.
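
For reference, struct sock_bh_locked already exists in
include/net/sock.h for exactly this purpose; from memory of the tree
around this series (treat as a sketch, not a quote) it bundles the two
fields used here:

	struct sock_bh_locked {
		struct sock	*sock;
		local_lock_t	bh_lock;
	};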

Cc: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20250512092736.229935-7-bigeasy@linutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

--- a/net/xfrm/xfrm_nat_keepalive.c
+++ b/net/xfrm/xfrm_nat_keepalive.c
@@ -9,9 +9,13 @@
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 
-static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4);
+static DEFINE_PER_CPU(struct sock_bh_locked, nat_keepalive_sk_ipv4) = {
+	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 #if IS_ENABLED(CONFIG_IPV6)
-static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6);
+static DEFINE_PER_CPU(struct sock_bh_locked, nat_keepalive_sk_ipv6) = {
+	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 #endif
 
 struct nat_keepalive {
@@ -56,10 +60,12 @@ static int nat_keepalive_send_ipv4(struct sk_buff *skb,
 	skb_dst_set(skb, &rt->dst);
 
-	sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4);
+	local_lock_nested_bh(&nat_keepalive_sk_ipv4.bh_lock);
+	sk = this_cpu_read(nat_keepalive_sk_ipv4.sock);
 	sock_net_set(sk, net);
 	err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos);
 	sock_net_set(sk, &init_net);
+	local_unlock_nested_bh(&nat_keepalive_sk_ipv4.bh_lock);
 	return err;
 }
@@ -89,15 +95,19 @@ static int nat_keepalive_send_ipv6(struct sk_buff *skb,
 	fl6.fl6_sport = ka->encap_sport;
 	fl6.fl6_dport = ka->encap_dport;
 
-	sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6);
+	local_lock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
+	sk = this_cpu_read(nat_keepalive_sk_ipv6.sock);
 	sock_net_set(sk, net);
 	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
-	if (IS_ERR(dst))
+	if (IS_ERR(dst)) {
+		local_unlock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
 		return PTR_ERR(dst);
+	}
 
 	skb_dst_set(skb, dst);
 	err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0);
 	sock_net_set(sk, &init_net);
+	local_unlock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
 	return err;
 }
 #endif
@@ -202,7 +212,7 @@ static void nat_keepalive_work(struct work_struct *work)
 			   (ctx.next_run - ctx.now) * HZ);
 }
 
-static int nat_keepalive_sk_init(struct sock * __percpu *socks,
+static int nat_keepalive_sk_init(struct sock_bh_locked __percpu *socks,
 				 unsigned short family)
 {
 	struct sock *sk;
@@ -214,22 +224,22 @@ static int nat_keepalive_sk_init(struct sock * __percpu *socks,
 		if (err < 0)
 			goto err;
 
-		*per_cpu_ptr(socks, i) = sk;
+		per_cpu_ptr(socks, i)->sock = sk;
 	}
 
 	return 0;
 
 err:
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+		inet_ctl_sock_destroy(per_cpu_ptr(socks, i)->sock);
 	return err;
 }
 
-static void nat_keepalive_sk_fini(struct sock * __percpu *socks)
+static void nat_keepalive_sk_fini(struct sock_bh_locked __percpu *socks)
 {
 	int i;
 
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+		inet_ctl_sock_destroy(per_cpu_ptr(socks, i)->sock);
 }
 
 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x)
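
The access pattern above, reduced to a self-contained sketch; the names
example_sk and example_xmit are illustrative and not part of the patch:

	static DEFINE_PER_CPU(struct sock_bh_locked, example_sk) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	static void example_xmit(void)
	{
		struct sock *sk;

		/* Caller runs with BH disabled. On !PREEMPT_RT this
		 * only annotates the lock for lockdep; on PREEMPT_RT
		 * it acquires the per-CPU bh_lock.
		 */
		local_lock_nested_bh(&example_sk.bh_lock);
		sk = this_cpu_read(example_sk.sock);
		/* ... transmit via sk ... */
		local_unlock_nested_bh(&example_sk.bh_lock);
	}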