tcp: move tcp_rate_skb_delivered() to tcp_input.c
tcp_rate_skb_delivered() is only called from tcp_input.c. Move it
there and make it static.

Both gcc and clang (auto)inline it, so TCP performance increases at
a small space cost.

$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/2 grow/shrink: 3/0 up/down: 509/-187 (322)
Function                                     old     new   delta
tcp_sacktag_walk                            1682    1867    +185
tcp_ack                                     5230    5405    +175
tcp_shifted_skb                              437     586    +149
__pfx_tcp_rate_skb_delivered                  16       -     -16
tcp_rate_skb_delivered                       171       -    -171
Total: Before=22566192, After=22566514, chg +0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Link: https://patch.msgid.link/20260118123204.2315993-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 2d265e2fe6
commit 670ade3bfa
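The win comes from linkage: once the declaration is gone from the header
and the definition is static, every caller is visible in a single
translation unit, so gcc and clang can inline the body at each call site
and drop the out-of-line copy (the __pfx_ prefix symbol disappears along
with it, as the bloat-o-meter output shows). A minimal standalone sketch
of the pattern, with made-up helper names, assuming -O2:

/* With external linkage, the compiler must keep a callable copy:
 * other translation units may reference it.
 */
int add_extern(int a, int b)
{
	return a + b;
}

/* With internal linkage and every caller visible in this file, the
 * compiler is free to inline each call and discard the standalone
 * body, trading a little text size for fewer calls.
 */
static int add_static(int a, int b)
{
	return a + b;
}

int sum_one(int x)
{
	return add_static(x, 1);	/* typically folded to x + 1 */
}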
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1356,8 +1356,6 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
 
 /* From tcp_rate.c */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-			    struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 		  bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1637,6 +1637,50 @@ static u8 tcp_sacktag_one(struct sock *sk,
 	return sacked;
 }
 
+/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
+ * delivery information when the skb was last transmitted.
+ *
+ * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
+ * called multiple times. We favor the information from the most recently
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
+ */
+static void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+				   struct rate_sample *rs)
+{
+	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+	struct tcp_sock *tp = tcp_sk(sk);
+	u64 tx_tstamp;
+
+	if (!scb->tx.delivered_mstamp)
+		return;
+
+	tx_tstamp = tcp_skb_timestamp_us(skb);
+	if (!rs->prior_delivered ||
+	    tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+			       scb->end_seq, rs->last_end_seq)) {
+		rs->prior_delivered_ce = scb->tx.delivered_ce;
+		rs->prior_delivered = scb->tx.delivered;
+		rs->prior_mstamp = scb->tx.delivered_mstamp;
+		rs->is_app_limited = scb->tx.is_app_limited;
+		rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+		rs->last_end_seq = scb->end_seq;
+
+		/* Record send time of most recently ACKed packet: */
+		tp->first_tx_mstamp = tx_tstamp;
+		/* Find the duration of the "send phase" of this window: */
+		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+						     scb->tx.first_tx_mstamp);
+
+	}
+	/* Mark off the skb delivered once it's sacked to avoid being
+	 * used again when it's cumulatively acked. For acked packets
+	 * we don't need to reset since it'll be freed soon.
+	 */
+	if (scb->sacked & TCPCB_SACKED_ACKED)
+		scb->tx.delivered_mstamp = 0;
+}
+
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
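The selection above hinges on tcp_skb_sent_after() deciding which skb was
sent more recently. For reference (this helper lives in include/net/tcp.h
and is not part of this diff), a self-contained sketch of its logic, with
the kernel's wrap-safe after() comparison written out inline:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;
typedef int32_t s32;

/* The later transmit timestamp wins; on a timestamp tie (e.g. two
 * skbs sent in the same microsecond), the higher end sequence wins,
 * compared modulo 2^32 the way the kernel's after() macro does.
 */
static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && (s32)(seq2 - seq1) < 0);
}

Timestamp ties are common when a burst of skbs goes out back-to-back,
which is why the sequence tie-break matters for stretched ACKs.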
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -34,50 +34,6 @@
  * ready to send in the write queue.
  */
 
-/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
- * delivery information when the skb was last transmitted.
- *
- * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
- * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the most recently sent time and the highest
- * sequence.
- */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-			    struct rate_sample *rs)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-	u64 tx_tstamp;
-
-	if (!scb->tx.delivered_mstamp)
-		return;
-
-	tx_tstamp = tcp_skb_timestamp_us(skb);
-	if (!rs->prior_delivered ||
-	    tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
-			       scb->end_seq, rs->last_end_seq)) {
-		rs->prior_delivered_ce = scb->tx.delivered_ce;
-		rs->prior_delivered = scb->tx.delivered;
-		rs->prior_mstamp = scb->tx.delivered_mstamp;
-		rs->is_app_limited = scb->tx.is_app_limited;
-		rs->is_retrans = scb->sacked & TCPCB_RETRANS;
-		rs->last_end_seq = scb->end_seq;
-
-		/* Record send time of most recently ACKed packet: */
-		tp->first_tx_mstamp = tx_tstamp;
-		/* Find the duration of the "send phase" of this window: */
-		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
-						     scb->tx.first_tx_mstamp);
-
-	}
-	/* Mark off the skb delivered once it's sacked to avoid being
-	 * used again when it's cumulatively acked. For acked packets
-	 * we don't need to reset since it'll be freed soon.
-	 */
-	if (scb->sacked & TCPCB_SACKED_ACKED)
-		scb->tx.delivered_mstamp = 0;
-}
-
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 		  bool is_sack_reneg, struct rate_sample *rs)
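To see the "favor the most recently sent skb" rule end to end, here is a
toy model of one stretched ACK covering two skbs. The struct and helper
names are invented for this sketch and deliberately much simpler than the
kernel's rate_sample:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_state {		/* per-skb snapshot taken at send time */
	uint64_t sent_us;
	uint32_t end_seq;
	uint32_t delivered;
};

struct sample {			/* accumulated across one ACK */
	uint32_t prior_delivered;
	uint32_t last_end_seq;
	uint64_t sent_us;
};

static bool sent_after(uint64_t t1, uint64_t t2, uint32_t s1, uint32_t s2)
{
	return t1 > t2 || (t1 == t2 && (int32_t)(s2 - s1) < 0);
}

/* Called once per (s)acked skb; only the newest skb's state sticks. */
static void skb_delivered(struct sample *rs, const struct tx_state *tx)
{
	if (!rs->prior_delivered ||
	    sent_after(tx->sent_us, rs->sent_us, tx->end_seq, rs->last_end_seq)) {
		rs->prior_delivered = tx->delivered;
		rs->last_end_seq = tx->end_seq;
		rs->sent_us = tx->sent_us;
	}
}

int main(void)
{
	struct tx_state a = { .sent_us = 100, .end_seq = 1000, .delivered = 10 };
	struct tx_state b = { .sent_us = 200, .end_seq = 2000, .delivered = 20 };
	struct sample rs = { 0 };

	skb_delivered(&rs, &a);		/* first skb seeds the sample */
	skb_delivered(&rs, &b);		/* later-sent skb overrides it */
	printf("prior_delivered=%u last_end_seq=%u\n",
	       (unsigned)rs.prior_delivered, (unsigned)rs.last_end_seq);
	return 0;
}

Run in either order, the sample ends up reflecting skb b, mirroring how
the kernel keeps the freshest delivery snapshot no matter how the SACK
scoreboard walks the queue.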