udp: udplite is unlikely
Add some unlikely() annotations to speed up the fast path, at least with clang compiler.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260105101719.2378881-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
e4bc5dd53b
commit
e9cd04b281
|
|
@@ -236,7 +236,7 @@ static inline void udp_allow_gso(struct sock *sk)
|
|||
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
|
||||
#endif
|
||||
|
||||
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
|
||||
#define IS_UDPLITE(__sk) (unlikely(__sk->sk_protocol == IPPROTO_UDPLITE))
|
||||
|
||||
static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -527,18 +527,18 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
|
|||
* SNMP statistics for UDP and UDP-Lite
|
||||
*/
|
||||
#define UDP_INC_STATS(net, field, is_udplite) do { \
|
||||
if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
|
||||
if (unlikely(is_udplite)) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
|
||||
else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
|
||||
#define __UDP_INC_STATS(net, field, is_udplite) do { \
|
||||
if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
|
||||
if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
|
||||
else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
|
||||
|
||||
#define __UDP6_INC_STATS(net, field, is_udplite) do { \
|
||||
if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
|
||||
if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
|
||||
else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
|
||||
} while(0)
|
||||
#define UDP6_INC_STATS(net, field, __lite) do { \
|
||||
if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
|
||||
if (unlikely(__lite)) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
|
||||
else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
|
||||
} while(0)
|
||||
|
||||
|
|
|
|||
|
|
@@ -1193,7 +1193,7 @@ csum_partial:
|
|||
|
||||
send:
|
||||
err = ip_send_skb(sock_net(sk), skb);
|
||||
if (err) {
|
||||
if (unlikely(err)) {
|
||||
if (err == -ENOBUFS &&
|
||||
!inet_test_bit(RECVERR, sk)) {
|
||||
UDP_INC_STATS(sock_net(sk),
|
||||
|
|
@@ -2429,7 +2429,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
|
|||
/*
|
||||
* UDP-Lite specific tests, ignored on UDP sockets
|
||||
*/
|
||||
if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
|
||||
if (unlikely(udp_test_bit(UDPLITE_RECV_CC, sk) &&
|
||||
UDP_SKB_CB(skb)->partial_cov)) {
|
||||
u16 pcrlen = READ_ONCE(up->pcrlen);
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@@ -875,7 +875,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
|
|||
/*
|
||||
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
|
||||
*/
|
||||
if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
|
||||
if (unlikely(udp_test_bit(UDPLITE_RECV_CC, sk) &&
|
||||
UDP_SKB_CB(skb)->partial_cov)) {
|
||||
u16 pcrlen = READ_ONCE(up->pcrlen);
|
||||
|
||||
if (pcrlen == 0) { /* full coverage was set */
|
||||
|
|
@@ -1439,7 +1440,7 @@ csum_partial:
|
|||
|
||||
send:
|
||||
err = ip6_send_skb(skb);
|
||||
if (err) {
|
||||
if (unlikely(err)) {
|
||||
if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
|
||||
UDP6_INC_STATS(sock_net(sk),
|
||||
UDP_MIB_SNDBUFERRORS, is_udplite);
|
||||
|
|
|
|||
Loading…
Reference in New Issue