static int tcp_try_undo_recovery(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) { /* undo is possible */
		/* Happy end! We did not retransmit anything
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwr(sk, 1); /* the actual undo work, see below */
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
		tp->undo_marker = 0;
	}
	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
		/* Hold old state until something *above* high_seq
		 * is ACKed. For Reno it is a MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe. */
		tcp_moderate_cwnd(tp); /* rein in the window size */
		return 1;
	}
	tcp_set_ca_state(sk, TCP_CA_Open);
	return 0;
}
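For reference, the tcp_may_undo() check that gates this function looks roughly like the following in kernels of this vintage (exact text varies by version): undo is allowed while undo_marker is set, provided either every retransmission has been D-SACKed (undo_retrans == 0) or timestamps prove that the original transmission, not the retransmission, was what the peer ACKed.

static inline int tcp_packet_delayed(struct tcp_sock *tp)
{
	/* Either nothing was retransmitted, or the timestamp echoed by
	 * the peer predates our first retransmission, i.e. the original
	 * packet (not the retransmit) is what got through. */
	return !tp->retrans_stamp ||
		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}

static inline int tcp_may_undo(struct tcp_sock *tp)
{
	return tp->undo_marker &&
		(!tp->undo_retrans || tcp_packet_delayed(tp));
}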
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && !tp->undo_retrans) { /* every retransmitted segment was D-SACKed */
		DBGUNDO(sk, "D-SACK");
		tcp_undo_cwr(sk, 1); /* perform the undo, see below */
		tp->undo_marker = 0;
		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
	}
}
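The bookkeeping behind this test: undo_marker is set to snd_una when the sender enters loss/recovery, and undo_retrans counts retransmitted segments that have not yet been reported back as duplicates. A hedged sketch of the decrement, which lives in the SACK-tagging path (exact placement varies by kernel version):

	/* Inside the SACK-tagging loop: a D-SACK covering a retransmitted
	 * skb means the peer received both copies, so that retransmission
	 * is proven spurious. */
	if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS))
		tp->undo_retrans--;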
The undo function itself, tcp_undo_cwr:
static void tcp_undo_cwr(struct sock *sk, const int undo)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->prior_ssthresh) { /* the old slow-start threshold was saved */
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); /* congestion control modules may provide this hook */
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); /* no hook defined: simple fallback */

		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			TCP_ECN_withdraw_cwr(tp);
		}
	} else { /* no old threshold was saved */
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	tcp_moderate_cwnd(tp); /* discussed earlier */
	tp->snd_cwnd_stamp = tcp_time_stamp;

	/* There is something screwy going on with the retrans hints after
	   an undo */
	tcp_clear_all_retrans_hints(tp); /* clear all retransmit hints */
}
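For reference, tcp_moderate_cwnd() caps the window to roughly what is currently in flight plus a small burst allowance, so an undo can never trigger a line-rate burst; in kernels of this era it looks roughly like:

static void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

As for the undo_cwnd hook, CUBIC for example restores max(snd_cwnd, last_max_cwnd) from its private per-socket state rather than using the generic ssthresh-based fallback.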
When a duplicate ACK arrives on a SACK-less connection, sacked_out has to be maintained by hand; see tcp_add_reno_sack:
/* Emulate SACKs for SACKless connection: account for a new dupack. */
static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->sacked_out++; /* one more duplicate ACK */
	tcp_check_reno_reordering(sk, 0); /* check for reordering, see below */
	tcp_verify_left_out(tp);
}
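tcp_verify_left_out() is just a sanity check; in kernels of this era it is roughly a macro asserting that the segments counted as SACKed or lost never exceed what is actually outstanding:

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)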
Now the reordering check:
/* If we receive more dupacks than we expected counting segments
 * in assumption of absent reordering, interpret this as reordering.
 * The only other reason could be a bug in the receiver's TCP.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_limit_reno_sacked(tp)) /* did sacked_out exceed its bound? */
		tcp_update_reordering(sk, tp->packets_out + addend, 0); /* then update the reordering estimate */
}
/* Limits sacked_out so that sum with lost_out isn't ever larger than
 * packets_out. Returns zero if sacked_out adjustment wasn't necessary.
 */
int tcp_limit_reno_sacked(struct tcp_sock *tp) /* enforce sacked_out + lost_out <= packets_out */
{
	u32 holes;

	holes = max(tp->lost_out, 1U); /* holes in the receive sequence */
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) { /* more dupacks than outstanding data can explain */
		tp->sacked_out = tp->packets_out - holes; /* the excess dupacks must come from reordering */
		return 1;
	}
	return 0;
}
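A concrete example (hypothetical numbers): with packets_out = 10, lost_out = 3 and sacked_out = 9, we get holes = 3; since 9 + 3 > 10, sacked_out is clamped to 10 - 3 = 7 and the function returns 1, so tcp_check_reno_reordering() treats the excess duplicate ACKs as evidence of reordering and raises the estimate toward packets_out + addend.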
Next, the function that updates the reordering estimate, tcp_update_reordering:
static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (metric > tp->reordering) { /* new evidence exceeds the current estimate */
		tp->reordering = min(TCP_MAX_REORDERING, metric); /* raise it, but never past the cap */

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER); /* statistics */
		else if (tcp_is_reno(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
		else if (tcp_is_fack(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp); /* FACK assumes in-order delivery, so once reordering is seen it must be disabled */
	}
}
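The estimate feeds back into loss detection: tp->reordering effectively becomes the duplicate-ACK threshold. A hedged excerpt of how tcp_time_to_recover() consumes it in kernels of this era (abridged):

	/* Abridged: more SACKed/FACKed-past segments than the reordering
	 * estimate can explain means real loss, so enter recovery. */
	if (tcp_fackets_out(tp) > tp->reordering)
		return 1;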
/* Undo during fast recovery after partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Partial ACK arrived. Force Hoe's retransmit. */
	/* A partial ACK needs no retransmit under SACK, but Reno does,
	 * as does the case where fackets_out already exceeds reordering. */
	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);

	if (tcp_may_undo(tp)) { /* undo allowed? (see tcp_may_undo above) */
		/* Plain luck! Hole is filled with delayed
		 * packet, rather than with a retransmit.
		 */
		if (tp->retrans_out == 0) /* no retransmits left in flight */
			tp->retrans_stamp = 0; /* reset the retransmit timestamp */

		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); /* update the estimate (see above) */

		DBGUNDO(sk, "Hoe");
		tcp_undo_cwr(sk, 0); /* undo the cwnd reduction (see above) */
		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * ones are most probably delayed as well.
		 */
		failed = 0; /* no retransmit needed; new data may be sent */
	}
	return failed; /* whether a retransmit is still required */
}
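For context, the caller is tcp_fastretrans_alert(); a heavily abridged, hedged sketch of how the return value steers recovery in kernels of this era:

	case TCP_CA_Recovery:
		if (tcp_is_reno(tp) && is_dupack)
			tcp_add_reno_sack(sk);
		do_lost = tcp_try_undo_partial(sk, pkts_acked);
		break;
	/* ... later in the same function: a nonzero do_lost makes the
	 * scoreboard mark segments lost so they get retransmitted;
	 * zero means the partial ACK was explained by delay, and new
	 * data can be sent instead. */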