
TCP Timer Series: The Keepalive Timer

http://blog.csdn.net/zhangskd/article/details/44177475

Main topics: the implementation of the keepalive timer and of the TCP_USER_TIMEOUT option.
Kernel version: 3.15.2

Principle

HTTP has a Keepalive feature and so does TCP. Although both are called Keepalive, their purposes differ. To see why, first look at the definitions of short and long connections.

What does the "length" of a connection mean?
Short connection: establish a connection, transmit one request, and close the connection immediately.
Long connection: establish a connection, transmit a request, transmit several more requests some time later, and only then close the connection.

The benefit of a long connection is obvious: multiple requests can reuse one connection, saving the time and the system calls spent on connection setup and teardown. It also means, however, that part of the server's resources stays occupied for a long time.

HTTP's Keepalive, as the name suggests, aims to extend the lifetime of a connection so that multiple HTTP requests can be carried over the same connection.

HTTP servers usually expose a Keepalive Timeout parameter that decides how long a connection is kept open and when it is closed.

When Keepalive is in effect, the server sends a response to each request from the client and then starts a timer. If the client sends no further request within the Timeout period, the server closes the connection instead of keeping it open.

TCP's Keepalive, despite the name, serves a different purpose: it checks whether the peer has run into trouble and, if so, closes the connection promptly.

As long as neither side actively closes the connection, the connection stays valid even if no data is exchanged at all.

If the peer, or the network in between, then fails and the connection becomes unusable, how does the local end find out?

The answer is the keepalive timer. It expires periodically; on expiry it checks whether the connection has been idle for too long. If the idle time exceeds the configured value, it sends a probe segment. Whether the peer responds, and whether the response looks as expected, tells the local end whether the peer is still healthy; if not, the connection is closed actively instead of waiting for the application layer to do it.

When the server sends a probe, the client can be in one of four states: still running normally, crashed, crashed and rebooted, or unreachable because of a problem on the path. The server gets different feedback in each case.

(1) The client host is still running normally and is reachable from the server

The client's TCP responds normally, so the server knows the peer is fine. The keepalive timer fires again two hours later.

(2) The client host has crashed and is down or rebooting

The client's TCP does not respond. The server receives no reply to its probes and keeps sending them every 75 s, up to 9 times in total.

The pending socket call (e.g. read()) then returns -1 with errno set to ETIMEDOUT, indicating that the connection timed out.

(3) The client host has crashed and has already rebooted

The client's TCP sends an RST, and the server closes the connection when it receives it.

The socket call returns -1 with errno set to ECONNRESET, indicating that the connection was reset by the peer.

(4) The client host is still running normally but is unreachable from the server

Both sides behave as in case (2), because the server cannot distinguish a failed peer from a failed path.

The socket call returns -1 with errno set to EHOSTUNREACH (when an ICMP host-unreachable error was received; otherwise ETIMEDOUT, as in case 2), indicating that the peer is unreachable.
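
From the application's point of view, these four outcomes show up as the return value and errno of a blocked socket call. The sketch below is not from the original post; it assumes fd is an already-connected TCP socket with SO_KEEPALIVE enabled and simply maps the errno values above back to the four cases.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static void wait_for_peer(int fd)
{
	char buf[4096];
	ssize_t n = recv(fd, buf, sizeof(buf), 0); /* blocks until data arrives or keepalive gives a verdict */

	if (n > 0)
		printf("peer alive, got %zd bytes\n", n);                        /* case (1) */
	else if (n == 0)
		printf("peer closed the connection cleanly\n");
	else if (errno == ETIMEDOUT)
		printf("keepalive probes unanswered: %s\n", strerror(errno));    /* case (2) or (4) */
	else if (errno == ECONNRESET)
		printf("peer rebooted and sent RST: %s\n", strerror(errno));     /* case (3) */
	else if (errno == EHOSTUNREACH)
		printf("peer unreachable (ICMP error): %s\n", strerror(errno));  /* case (4) */
	else
		printf("recv failed: %s\n", strerror(errno));
}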

Options

The kernel does not enable TCP Keepalive by default; it is used only when the application sets the SO_KEEPALIVE socket option.

There are two ways to tune the keepalive timer's parameters: the system-wide TCP sysctl parameters, and per-connection TCP-level socket options.

(1) TCP sysctl parameters

tcp_keepalive_time

The time from the last data exchange until TCP sends the first keepalive probe, i.e. how long the connection may stay idle. Default: 7200 s.

tcp_keepalive_intvl

The retransmission interval between keepalive probes. Default: 75 s.

tcp_keepalive_probes

The number of keepalive probes to send. Default: 9.

Q: How long does a complete round of keepalive probing take?

A: tcp_keepalive_time + tcp_keepalive_intvl * tcp_keepalive_probes, which is 7875 s with the defaults. If more than two hours is too long, adjust the parameters above.
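
As a quick sanity check, the sketch below (not part of the original post) reads the three sysctls from /proc on a Linux host and prints the worst-case detection time; with the defaults it prints 7200 + 75 * 9 = 7875 s.

#include <stdio.h>

static long read_sysctl(const char *path)
{
	long v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long idle  = read_sysctl("/proc/sys/net/ipv4/tcp_keepalive_time");
	long intvl = read_sysctl("/proc/sys/net/ipv4/tcp_keepalive_intvl");
	long cnt   = read_sysctl("/proc/sys/net/ipv4/tcp_keepalive_probes");

	/* Worst case: idle period plus one full round of unanswered probes */
	printf("worst case: %ld + %ld * %ld = %ld s\n",
	       idle, intvl, cnt, idle + intvl * cnt);
	return 0;
}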

(2) TCP-level socket options

TCP_KEEPIDLE: same meaning as tcp_keepalive_time.

TCP_KEEPINTVL: same meaning as tcp_keepalive_intvl.

TCP_KEEPCNT: same meaning as tcp_keepalive_probes.

Q: Since the sysctl parameters can already be tuned, why were these TCP-level options added?

A: The sysctl parameters apply to every TCP connection on the host; changing them affects all connections. A TCP-level option applies to a single connection; changing it affects only that connection.

Activation

Once the connection is established, the keepalive timer can be activated by setting the SO_KEEPALIVE option.

int keepalive = 1;
setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &keepalive, sizeof(keepalive));
int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval,
	unsigned int optlen)
{
	...
	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool); /* activate or delete the keepalive timer */
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); /* set or clear the SOCK_KEEPOPEN flag */
		break;
	...
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}
void tcp_set_keepalive(struct sock *sk, int val)
{
	/* Do not set the keepalive timer in these two states:
	 * TCP_CLOSE:  sk_timer is used as the FIN_WAIT2 timer
	 * TCP_LISTEN: sk_timer is used as the SYNACK retransmission timer
	 */
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* If the SO_KEEPALIVE option value is 1 and the SOCK_KEEPOPEN flag was not
	 * set before, activate sk_timer as the keepalive timer.
	 */
	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		/* If the SO_KEEPALIVE option value is 0, delete the keepalive timer */
		inet_csk_delete_keepalive_timer(sk);
}

/* Timeout of the keepalive timer */
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

The TCP-level options can be used to adjust the keepalive timer's parameters dynamically on a live connection.

int keepidle = 600;
int keepintvl = 10;
int keepcnt = 6;

setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &keepidle, sizeof(keepidle));
setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &keepintvl, sizeof(keepintvl));
setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &keepcnt, sizeof(keepcnt));
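
To confirm that the per-connection values took effect, they can be read back with getsockopt(). This is a minimal sketch, not part of the original post, assuming fd is the same connected socket as above:

int idle, intvl, cnt;
socklen_t len = sizeof(int);

if (getsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &idle, &len) == 0 &&
    getsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &intvl, &len) == 0 &&
    getsockopt(fd, SOL_TCP, TCP_KEEPCNT, &cnt, &len) == 0)
	printf("keepalive: idle=%ds intvl=%ds cnt=%d\n", idle, intvl, cnt);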
struct tcp_sock {
	...
	/* Time the last ACK was received */
	u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
	...
	/* time before keep alive takes place: how long the connection must stay idle before probing */
	unsigned int keepalive_time;
	/* time interval between keep alive probes */
	unsigned int keepalive_intvl; /* interval between probe segments */
	/* num of allowed keep alive probes */
	u8 keepalive_probes; /* number of probe segments to send */
	...
	struct {
		...
		/* Time the last data-bearing segment was received */
		__u32 lrcvtime; /* timestamp of last received data packet */
		...
	} icsk_ack;
	...
};

#define TCP_KEEPIDLE 4 /* Start keepalives after this period */
#define TCP_KEEPINTVL 5 /* Interval between keepalives */
#define TCP_KEEPCNT 6 /* Number of keepalives before death */

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
static int do_tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
	unsigned int optlen)
{
	...
	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ; /* set the new idle time */

			/* If SO_KEEPALIVE is in use and the connection is neither listening nor
			 * closed, the keepalive timer is already running, so re-arm it here with
			 * the new timeout.
			 */
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
				!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
				u32 elapsed = keepalive_time_elapsed(tp); /* idle time already elapsed */

				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed; /* time left before expiry */
				else
					elapsed = 0; /* expires immediately */
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;

	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ; /* set the new probe interval */
		break;

	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val; /* set the new probe count */
		break;
	...
}

The idle time the connection has accumulated so far, i.e. the time elapsed since the last segment was received, is computed as follows.

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	/* lrcvtime is the time the last data segment was received;
	 * rcv_tstamp is the time the last ACK was received.
	 * The return value is the time since the last segment was received,
	 * i.e. the idle time elapsed so far.
	 */
	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
		tcp_time_stamp - tp->rcv_tstamp);
}

Timeout handler

The keepalive timer, the SYNACK retransmission timer, and the FIN_WAIT2 timer share one timer instance, sk->sk_timer, so they also share one timeout handler, tcp_keepalive_timer(). Inside the handler, the connection's current state determines which of the timers has actually fired.

Q: When is the peer judged to be dead and the connection closed?

A: There are two cases.

  1. The user has set the TCP_USER_TIMEOUT option: when the connection's idle time exceeds the user-specified value and at least one probe has already been sent.

  2. The user has not set TCP_USER_TIMEOUT: when the number of keepalive probes sent reaches the configured maximum.

static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);

	/* Take the lock so that the connection state cannot be changed by a user
	 * process in the meantime. If a user process currently owns this sock,
	 * try again in 50 ms.
	 */
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	/* During the three-way handshake, sk_timer acts as the SYNACK timer */
	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	/* During connection teardown, sk_timer acts as the FIN_WAIT2 timer */
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		...
	}

	/* From here on, sk_timer acts as the keepalive timer */
	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp); /* once idle longer than this, send a probe */

	/* It is alive without keepalive.
	 * If there are sent-but-unacknowledged packets in flight, or the send queue
	 * is not empty, the connection is not idle, so there is no need to probe
	 * the peer; simply restart the keepalive timer.
	 *
	 * In fact, even with unacknowledged packets in flight, the peer may have
	 * failed without responding. The packets are then retransmitted over and
	 * over, and the connection is only declared dead once retransmission has
	 * exceeded the maximum allowed time. TCP_USER_TIMEOUT was introduced to
	 * address this, letting the user specify the timeout explicitly; see below :)
	 */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched; /* restart the keepalive timer */

	/* Idle time of the connection, i.e. time since the last segment was received */
	elapsed = keepalive_time_elapsed(tp);

	/* If the connection has been idle longer than the configured value */
	if (elapsed >= keepalive_time_when(tp)) {

		/* When is the connection closed?
		 * 1. TCP_USER_TIMEOUT is set: when the idle time exceeds the
		 *    user-specified value and at least one probe has been sent.
		 * 2. TCP_USER_TIMEOUT is not set: when the number of probes sent
		 *    reaches the configured maximum.
		 */
		if ((icsk->icsk_user_timeout != 0 && elapsed >= icsk->icsk_user_timeout &&
			icsk->icsk_probes_out > 0) || (icsk->icsk_user_timeout == 0 &&
			icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC); /* build and send an RST */
			tcp_write_err(sk); /* report the error and close the connection */
			goto out;
		}

		/* If it is not yet time to close the connection, keep sending probes */
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++; /* number of probes already sent */
			elapsed = keepalive_intvl_when(tp); /* next timeout, 75 s by default */
		} else {
			/* If keepalive was lost due to local congestion, try harder. */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL; /* 500 ms by default, so the timer fires sooner */
		}

	} else {
		/* If the connection has not yet been idle long enough, keep waiting */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched: /* re-arm the keepalive timer */
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

Q: How does TCP send a keepalive probe?

A: There are two cases.

  1. There is new data queued for transmission and the peer's receive window is not yet full: send a new data segment as the probe.

  2. There is no new data to send, or the peer's receive window is full: send a zero-length ACK with sequence number snd_una - 1 as the probe.

/* Initiate keepalive or window probe from timer. */

int tcp_write_wakeup(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (sk->sk_state == TCP_CLOSE)
		return -1;

	/* If there is a segment not yet sent and the peer's receive window is not full */
	if ((skb = tcp_send_head(sk)) != NULL && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		int err;
		unsigned int mss = tcp_current_mss(sk); /* current MSS */
		/* Maximum segment length allowed by the peer's receive window */
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		/* pushed_seq records the sequence number of the last byte pushed out */
		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* If the peer's receive window is smaller than this segment, or the segment
		 * is longer than the MSS, the segment has to be split */
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || skb->len > mss) {
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; /* set PSH so the peer delivers the data to the application at once */
			if (tcp_fragment(sk, skb, seg_size, mss)) /* split the segment */
				return -1;
		} else if (!tcp_skb_pcount(skb)) /* TSO segmentation */
			tcp_set_skb_tso_segs(sk, skb, mss); /* initialize the TSO-related fields */

		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
		TCP_SKB_CB(skb)->when = tcp_time_stamp;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); /* transmit this segment */
		if (!err)
			tcp_event_new_data_sent(sk, skb); /* new data sent: update the bookkeeping */
		return err;

	} else { /* No new segment available as a probe, or the peer's receive window is zero */

		/* In urgent mode, additionally send an ACK with sequence number snd_una to convey the urgent pointer */
		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1);

		/* Send a zero-length ACK with sequence number snd_una - 1, i.e. a segment
		 * with an out-of-date sequence number.
		 * snd_una is the first byte we want an ACK for, so byte snd_una - 1 has
		 * already been acknowledged. The peer will respond with an ACK.
		 */
		return tcp_xmit_probe_skb(sk, 0);
	}
}

Q: What happens when there is no new data to use as a probe, or the peer's receive window is zero?

A: A zero-length ACK with sequence number snd_una - 1 is sent. On receiving it the peer replies with an ACK, so the local end learns whether the peer is still alive and whether its receive window has opened.

/* This routine sends a packet with an out of date sequence number. 
 * It assumes the other end will try to ack it. 
 *  
 * Question: what should we make while urgent mode? 
 * 4.4BSD forces sending single byte of data. We cannot send out of window 
 * data, because we have SND.NXT == SND.MAX... 
 *  
 * Current solution: to send TWO zero-length segments in urgent mode: 
 * one is with SEG.SEG=SND.UNA to deliver urgent pointer, another is out-of-date with 
 * SND.UNA - 1 to probe window. 
 */  
  
static int tcp_xmit_probe_skb (struct sock *sk, int urgent)  
{  
	struct tcp_sock *tp = tcp_sk(sk);  
	struct sk_buff *skb;  
  
	/* We don't queue it, tcp_transmit_skb() sets ownership. */  
	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));  
	if (skb == NULL)  
		return -1;  
  
	/* Reserve space for headers and set control bits. */  
	skb_reserve(skb, MAX_TCP_HEADER);  
  
	/* Use a previous sequence. This should cause the other end to send an ack. 
	 * Don't queue or clone SKB, just send it. 
	 */  
	/* If not in urgent mode, the sequence number sent is snd_una - 1; otherwise it is snd_una */
	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);  
	TCP_SKB_CB(skb)->when = tcp_time_stamp;  
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); /* 发送探测包 */  
}  

Sending the RST segment.

/* We get here when a process closes a file descriptor (either due to an explicit close() 
 * or as a byproduct of exit()'ing) and there was unread data in the receive queue. 
 * This behavior is recommended by RFC 2525, section 2.17. -DaveM 
 */  
  
void tcp_send_active_reset (struct sock *sk, gfp_t priority)  
{  
	struct sk_buff *skb;  
	/* NOTE: No TCP options attached and we never retransmit this. */  
	skb = alloc_skb(MAX_TCP_HEADER, priority);  
	if (!skb) {  
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);  
		return;  
	}  
  
	/* Reserve space for headers and prepare control bits. */  
	skb_reserve(skb, MAX_TCP_HEADER); /* reserve room for the headers */
	/* Initialize the control fields of an skb that carries no data */
	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST);

	/* Send it off: transmit the RST segment */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;  
	if (tcp_transmit_skb(sk, skb, 0, priority))  
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);  
	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);  
}  
  
static inline __u32 tcp_acceptable_seq (const struct sock *sk)  
{  
	const struct tcp_sock *tp = tcp_sk(sk);  
  
	/* If snd_nxt lies within the peer's receive window */
	if (! before(tcp_wnd_end(tp), tp->snd_nxt))  
		return tp->snd_nxt;  
	else  
		return tcp_wnd_end(tp);  
}  

The TCP_USER_TIMEOUT option

From the above, the keepalive timer sends a probe only when all of the following hold:

  1. There are no sent-but-unacknowledged packets in flight.

  2. The send queue is empty.

  3. The connection's idle time exceeds the configured value.

Q: If there are unacknowledged packets in flight, or the send queue is not empty, the keepalive timer does nothing. Does that mean a peer failure can no longer be detected?

A: TCP_USER_TIMEOUT can be used to state explicitly how long transmitted data may remain unacknowledged before the connection is declared dead and actively closed.
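
For example, a minimal sketch of enabling it (not from the original post; TCP_USER_TIMEOUT takes a value in milliseconds, is available since Linux 2.6.37, and is declared in <netinet/tcp.h> on reasonably recent systems):

/* Declare the connection dead if transmitted data stays unacknowledged
 * for more than 30 seconds; the value is in milliseconds. */
unsigned int user_timeout_ms = 30 * 1000;

if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
               &user_timeout_ms, sizeof(user_timeout_ms)) < 0)
	perror("setsockopt(TCP_USER_TIMEOUT)");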

The TCP_USER_TIMEOUT option affects both the retransmission timer and the keepalive timer.

(1) Retransmission timer

Whether the connection has timed out is decided in three cases:

  1. SYN segment: the connection times out when the SYN retransmission count reaches its limit. (By default 5 retransmissions are allowed, with an initial timeout of 1 s, about 31 s in total.)

  2. Non-SYN segment, TCP_USER_TIMEOUT set: the connection times out when the time elapsed since the packet was sent exceeds the user-specified value.

  3. Non-SYN segment, TCP_USER_TIMEOUT not set: the connection times out when the time elapsed since the packet was sent exceeds the time that boundary retransmissions would take starting from an initial timeout of TCP_RTO_MIN. (boundary is at most tcp_retries2, which defaults to 15.)

(2) Keepalive timer

Whether the peer is judged to have failed is decided in two cases:

  1. TCP_USER_TIMEOUT is set: when the connection's idle time exceeds the user-specified value and at least one probe has been sent.

  2. TCP_USER_TIMEOUT is not set: when the number of keepalive probes sent reaches the configured maximum.

ACK loop

Patches

commit 4fb17a6091674f469e8ac85dc770fbf9a9ba7cc8
Author: Neal Cardwell <ncardwell@google.com>
Date:   Fri Feb 6 16:04:41 2015 -0500

	tcp: mitigate ACK loops for connections as tcp_timewait_sock
	
	Ensure that in state FIN_WAIT2 or TIME_WAIT, where the connection is
	represented by a tcp_timewait_sock, we rate limit dupacks in response
	to incoming packets (a) with TCP timestamps that fail PAWS checks, or
	(b) with sequence numbers that are out of the acceptable window.
	
	We do not send a dupack in response to out-of-window packets if it has
	been less than sysctl_tcp_invalid_ratelimit (default 500ms) since we
	last sent a dupack in response to an out-of-window packet.
	
	Reported-by: Avery Fay <avery@mixpanel.com>
	Signed-off-by: Neal Cardwell <ncardwell@google.com>
	Signed-off-by: Yuchung Cheng <ycheng@google.com>
	Signed-off-by: Eric Dumazet <edumazet@google.com>
	Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 66d85a8..1a7adb4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -342,6 +342,10 @@ struct tcp_timewait_sock {
  u32           tw_rcv_wnd;
  u32           tw_ts_offset;
  u32           tw_ts_recent;
+
+ /* The time we sent the last out-of-window ACK: */
+ u32           tw_last_oow_ack_time;
+
  long              tw_ts_recent_stamp;
 #ifdef CONFIG_TCP_MD5SIG
  struct tcp_md5sig_key     *tw_md5_key;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 98a8405..dd11ac7 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
  return seq == e_win && seq == end_seq;
 }
 
+static enum tcp_tw_status
+tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
+               const struct sk_buff *skb, int mib_idx)
+{
+ struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+ if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
+               &tcptw->tw_last_oow_ack_time)) {
+     /* Send ACK. Note, we do not put the bucket,
+      * it will be released by caller.
+      */
+     return TCP_TW_ACK;
+ }
+
+ /* We are rate-limiting, so just release the tw sock and drop skb. */
+ inet_twsk_put(tw);
+ return TCP_TW_SUCCESS;
+}
+
 /*
  * * Main purpose of TIME-WAIT state is to close connection gracefully,
  *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
          !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                 tcptw->tw_rcv_nxt,
                 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
-         return TCP_TW_ACK;
+         return tcp_timewait_check_oow_rate_limit(
+             tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
 
      if (th->rst)
          goto kill;
@@ -250,10 +270,8 @@ kill:
          inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
                     TCP_TIMEWAIT_LEN);
 
-     /* Send ACK. Note, we do not put the bucket,
-      * it will be released by caller.
-      */
-     return TCP_TW_ACK;
+     return tcp_timewait_check_oow_rate_limit(
+         tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
  }
  inet_twsk_put(tw);
  return TCP_TW_SUCCESS;
@@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
      tcptw->tw_ts_recent  = tp->rx_opt.ts_recent;
      tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
      tcptw->tw_ts_offset  = tp->tsoffset;
+     tcptw->tw_last_oow_ack_time = 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
      if (tw->tw_family == PF_INET6) {

commit f2b2c582e82429270d5818fbabe653f4359d7024
Author: Neal Cardwell <ncardwell@google.com>
Date:   Fri Feb 6 16:04:40 2015 -0500

	tcp: mitigate ACK loops for connections as tcp_sock
	
	Ensure that in state ESTABLISHED, where the connection is represented
	by a tcp_sock, we rate limit dupacks in response to incoming packets
	(a) with TCP timestamps that fail PAWS checks, or (b) with sequence
	numbers or ACK numbers that are out of the acceptable window.
	
	We do not send a dupack in response to out-of-window packets if it has
	been less than sysctl_tcp_invalid_ratelimit (default 500ms) since we
	last sent a dupack in response to an out-of-window packet.
	
	There is already a similar (although global) rate-limiting mechanism
	for "challenge ACKs". When deciding whether to send a challence ACK,
	we first consult the new per-connection rate limit, and then the
	global rate limit.
	
	Reported-by: Avery Fay <avery@mixpanel.com>
	Signed-off-by: Neal Cardwell <ncardwell@google.com>
	Signed-off-by: Yuchung Cheng <ycheng@google.com>
	Signed-off-by: Eric Dumazet <edumazet@google.com>
	Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index bcc828d..66d85a8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -153,6 +153,7 @@ struct tcp_sock {
      u32 snd_sml;    /* Last byte of the most recently transmitted small packet */
  u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
  u32 lsndtime;   /* timestamp of last sent data packet (for restart window) */
+ u32 last_oow_ack_time;  /* timestamp of last out-of-window ACK */
 
  u32 tsoffset;   /* timestamp offset */
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9401aa43..8fdd27b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3322,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
 }
 
 /* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk)
+static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
  /* unprotected vars, we dont care of overwrites */
  static u32 challenge_timestamp;
  static unsigned int challenge_count;
- u32 now = jiffies / HZ;
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 now;
+
+ /* First check our per-socket dupack rate limit. */
+ if (tcp_oow_rate_limited(sock_net(sk), skb,
+              LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+              &tp->last_oow_ack_time))
+     return;
 
+ /* Then check the check host-wide RFC 5961 rate limit. */
+ now = jiffies / HZ;
  if (now != challenge_timestamp) {
      challenge_timestamp = now;
      challenge_count = 0;
@@ -3424,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
  if (before(ack, prior_snd_una)) {
      /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
      if (before(ack, prior_snd_una - tp->max_window)) {
-         tcp_send_challenge_ack(sk);
+         tcp_send_challenge_ack(sk, skb);
          return -1;
      }
      goto old_ack;
@@ -4993,7 +5002,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
      tcp_paws_discard(sk, skb)) {
      if (!th->rst) {
          NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
-         tcp_send_dupack(sk, skb);
+         if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                       LINUX_MIB_TCPACKSKIPPEDPAWS,
+                       &tp->last_oow_ack_time))
+             tcp_send_dupack(sk, skb);
          goto discard;
      }
      /* Reset is accepted even if it did not pass PAWS. */
@@ -5010,7 +5022,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
      if (!th->rst) {
          if (th->syn)
              goto syn_challenge;
-         tcp_send_dupack(sk, skb);
+         if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                       LINUX_MIB_TCPACKSKIPPEDSEQ,
+                       &tp->last_oow_ack_time))
+             tcp_send_dupack(sk, skb);
      }
      goto discard;
  }
@@ -5026,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
      if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
          tcp_reset(sk);
      else
-         tcp_send_challenge_ack(sk);
+         tcp_send_challenge_ack(sk, skb);
      goto discard;
  }
 
@@ -5040,7 +5055,7 @@ syn_challenge:
      if (syn_inerr)
          TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
-     tcp_send_challenge_ack(sk);
+     tcp_send_challenge_ack(sk, skb);
      goto discard;
  }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 131aa49..98a8405 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -467,6 +467,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
      tcp_enable_early_retrans(newtp);
      newtp->tlp_high_seq = 0;
      newtp->lsndtime = treq->snt_synack;
+     newtp->last_oow_ack_time = 0;
      newtp->total_retrans = req->num_retrans;
 
      /* So many TCP implementations out there (incorrectly) count the

commit a9b2c06dbef48ed31cff1764c5ce824829106f4f
Author: Neal Cardwell <ncardwell@google.com>
Date:   Fri Feb 6 16:04:39 2015 -0500

	tcp: mitigate ACK loops for connections as tcp_request_sock
	
	In the SYN_RECV state, where the TCP connection is represented by
	tcp_request_sock, we now rate-limit SYNACKs in response to a client's
	retransmitted SYNs: we do not send a SYNACK in response to client SYN
	if it has been less than sysctl_tcp_invalid_ratelimit (default 500ms)
	since we last sent a SYNACK in response to a client's retransmitted
	SYN.
	
	This allows the vast majority of legitimate client connections to
	proceed unimpeded, even for the most aggressive platforms, iOS and
	MacOS, which actually retransmit SYNs 1-second intervals for several
	times in a row. They use SYN RTO timeouts following the progression:
	1,1,1,1,1,2,4,8,16,32.
	
	Reported-by: Avery Fay <avery@mixpanel.com>
	Signed-off-by: Neal Cardwell <ncardwell@google.com>
	Signed-off-by: Yuchung Cheng <ycheng@google.com>
	Signed-off-by: Eric Dumazet <edumazet@google.com>
	Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 67309ec..bcc828d 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -115,6 +115,7 @@ struct tcp_request_sock {
  u32             rcv_isn;
  u32             snt_isn;
  u32             snt_synack; /* synack sent time */
+ u32             last_oow_ack_time; /* last SYNACK */
  u32             rcv_nxt; /* the ack # by SYNACK. For
                        * FastOpen it's the seq#
                        * after data-in-SYN.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b81f45c..da4196fb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1145,6 +1145,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
  tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
  tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
  tcp_rsk(req)->snt_synack = tcp_time_stamp;
+ tcp_rsk(req)->last_oow_ack_time = 0;
  req->mss = rx_opt->mss_clamp;
  req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
  ireq->tstamp_ok = rx_opt->tstamp_ok;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index bc9216d..131aa49 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -605,7 +605,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
       * Reset timer after retransmitting SYNACK, similar to
       * the idea of fast retransmit in recovery.
       */
-     if (!inet_rtx_syn_ack(sk, req))
+     if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                   LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+                   &tcp_rsk(req)->last_oow_ack_time) &&
+
+         !inet_rtx_syn_ack(sk, req))
          req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
                     TCP_RTO_MAX) + jiffies;
      return NULL;

commit 032ee4236954eb214651cb9bfc1b38ffa8fd7a01
Author: Neal Cardwell <ncardwell@google.com>
Date:   Fri Feb 6 16:04:38 2015 -0500

	tcp: helpers to mitigate ACK loops by rate-limiting out-of-window dupacks
	
	Helpers for mitigating ACK loops by rate-limiting dupacks sent in
	response to incoming out-of-window packets.
	
	This patch includes:
	
	- rate-limiting logic
	- sysctl to control how often we allow dupacks to out-of-window packets
	- SNMP counter for cases where we rate-limited our dupack sending
	
	The rate-limiting logic in this patch decides to not send dupacks in
	response to out-of-window segments if (a) they are SYNs or pure ACKs
	and (b) the remote endpoint is sending them faster than the configured
	rate limit.
	
	We rate-limit our responses rather than blocking them entirely or
	resetting the connection, because legitimate connections can rely on
	dupacks in response to some out-of-window segments. For example, zero
	window probes are typically sent with a sequence number that is below
	the current window, and ZWPs thus expect to thus elicit a dupack in
	response.
	
	We allow dupacks in response to TCP segments with data, because these
	may be spurious retransmissions for which the remote endpoint wants to
	receive DSACKs. This is safe because segments with data can't
	realistically be part of ACK loops, which by their nature consist of
	each side sending pure/data-less ACKs to each other.
	
	The dupack interval is controlled by a new sysctl knob,
	tcp_invalid_ratelimit, given in milliseconds, in case an administrator
	needs to dial this upward in the face of a high-rate DoS attack. The
	name and units are chosen to be analogous to the existing analogous
	knob for ICMP, icmp_ratelimit.
	
	The default value for tcp_invalid_ratelimit is 500ms, which allows at
	most one such dupack per 500ms. This is chosen to be 2x faster than
	the 1-second minimum RTO interval allowed by RFC 6298 (section 2, rule
	2.4). We allow the extra 2x factor because network delay variations
	can cause packets sent at 1 second intervals to be compressed and
	arrive much closer.
	
	Reported-by: Avery Fay <avery@mixpanel.com>
	Signed-off-by: Neal Cardwell <ncardwell@google.com>
	Signed-off-by: Yuchung Cheng <ycheng@google.com>
	Signed-off-by: Eric Dumazet <edumazet@google.com>
	Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a5e4c81..1b8c964 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -290,6 +290,28 @@ tcp_frto - INTEGER
 
  By default it's enabled with a non-zero value. 0 disables F-RTO.
 
+tcp_invalid_ratelimit - INTEGER
+ Limit the maximal rate for sending duplicate acknowledgments
+ in response to incoming TCP packets that are for an existing
+ connection but that are invalid due to any of these reasons:
+
+   (a) out-of-window sequence number,
+   (b) out-of-window acknowledgment number, or
+   (c) PAWS (Protection Against Wrapped Sequence numbers) check failure
+
+ This can help mitigate simple "ack loop" DoS attacks, wherein
+ a buggy or malicious middlebox or man-in-the-middle can
+ rewrite TCP header fields in manner that causes each endpoint
+ to think that the other is sending invalid TCP segments, thus
+ causing each side to send an unterminating stream of duplicate
+ acknowledgments for invalid segments.
+
+ Using 0 disables rate-limiting of dupacks in response to
+ invalid segments; otherwise this value specifies the minimal
+ space between sending such dupacks, in milliseconds.
+
+ Default: 500 (milliseconds).
+
 tcp_keepalive_time - INTEGER
  How often TCP sends out keepalive messages when keepalive is enabled.
  Default: 2hours.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 28e9bd3..b81f45c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -274,6 +274,7 @@ extern int sysctl_tcp_challenge_ack_limit;
 extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_autocorking;
+extern int sysctl_tcp_invalid_ratelimit;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -1236,6 +1237,37 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
  return true;
 }
 
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+static inline bool tcp_oow_rate_limited(struct net *net,
+                 const struct sk_buff *skb,
+                 int mib_idx, u32 *last_oow_ack_time)
+{
+ /* Data packets without SYNs are not likely part of an ACK loop. */
+ if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+     !tcp_hdr(skb)->syn)
+     goto not_rate_limited;
+
+ if (*last_oow_ack_time) {
+     s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+     if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+         NET_INC_STATS_BH(net, mib_idx);
+         return true;    /* rate-limited: don't send yet! */
+     }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+ return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 static inline void tcp_mib_init(struct net *net)
 {
  /* See RFC 2012 */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index b222241..6a6fb74 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -270,6 +270,12 @@ enum
  LINUX_MIB_TCPHYSTARTTRAINCWND,      /* TCPHystartTrainCwnd */
  LINUX_MIB_TCPHYSTARTDELAYDETECT,    /* TCPHystartDelayDetect */
  LINUX_MIB_TCPHYSTARTDELAYCWND,      /* TCPHystartDelayCwnd */
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV,     /* TCPACKSkippedSynRecv */
+ LINUX_MIB_TCPACKSKIPPEDPAWS,        /* TCPACKSkippedPAWS */
+ LINUX_MIB_TCPACKSKIPPEDSEQ,     /* TCPACKSkippedSeq */
+ LINUX_MIB_TCPACKSKIPPEDFINWAIT2,    /* TCPACKSkippedFinWait2 */
+ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT,    /* TCPACKSkippedTimeWait */
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,   /* TCPACKSkippedChallenge */
  __LINUX_MIB_MAX
 };
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f9cd20..d8953ef 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -292,6 +292,12 @@ static const struct snmp_mib snmp4_net_list[] = {
  SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
  SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
  SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
+ SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV),
+ SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS),
+ SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ),
+ SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
+ SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
+ SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
  SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e0ee384..82601a6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -729,6 +729,13 @@ static struct ctl_table ipv4_table[] = {
      .extra2     = &one,
  },
  {
+     .procname   = "tcp_invalid_ratelimit",
+     .data       = &sysctl_tcp_invalid_ratelimit,
+     .maxlen     = sizeof(int),
+     .mode       = 0644,
+     .proc_handler   = proc_dointvec_ms_jiffies,
+ },
+ {
      .procname   = "icmp_msgs_per_sec",
      .data       = &sysctl_icmp_msgs_per_sec,
      .maxlen     = sizeof(int),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d3dfff7..9401aa43 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -100,6 +100,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 
 #define FLAG_DATA        0x01 /* Incoming frame contained data.      */
 #define FLAG_WIN_UPDATE      0x02 /* Incoming ACK was a window update.   */

Sample (a netfilter module that rewrites TCP sequence numbers on a matching connection to force out-of-window segments, which can be used to reproduce the ACK loop)

#define KMSG_COMPONENT "synflood"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netdevice.h>

#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>

__be16 cport = 80;
char *selfip = NULL;

module_param(cport, short, S_IRUGO);
module_param(selfip, charp, S_IRUGO);

void skbcsum(struct sk_buff *skb)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	int iphl;
	int tcphl;
	int tcplen;

	iph = (struct iphdr *)skb->data;
	iphl = iph->ihl << 2;
	tcph = (struct tcphdr *)(skb->data + iphl);
	tcphl = tcph->doff << 2;

	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	tcph->check    = 0;
	tcplen        = skb->len - (iph->ihl << 2);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
				tcplen, IPPROTO_TCP, 0);
		skb->csum_start    = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	}
	else {
		skb->csum = 0;
		skb->csum = skb_checksum(skb, iph->ihl << 2, tcplen, 0);
		tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
				tcplen, IPPROTO_TCP, skb->csum);

	}
}

int pktcome = 0;
int fincome = 0;
static int check(__be32 ip, __be16 port, int syn, int fin)
{
	if ((selfip == NULL || ip == in_aton(selfip)) && ntohs(port) == cport) {
		if (syn) {
			pktcome = 0;
			fincome = 0;
		}
		pktcome ++;
		if (pktcome > 30 || fincome == 3)
			return 1;
		fincome |= fin;
	}
	return 0;
}

static unsigned int local_in(unsigned int hooknum, 
	struct sk_buff *skb, const struct net_device *in, 
	const struct net_device *out, int (*okfn) (struct sk_buff *))
{
	struct iphdr *iph;
	struct tcphdr *th;

	if (unlikely(skb->pkt_type != PACKET_HOST))
		goto exit;
	if (unlikely(skb->protocol != __constant_htons(ETH_P_IP)))
		goto exit;
	iph = (struct iphdr *)skb_network_header(skb);
	if (iph->protocol != IPPROTO_TCP)
		goto exit;
	if (unlikely(!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct tcphdr))))
		goto drop_out;
	skb_set_transport_header(skb, iph->ihl * 4);
	th = tcp_hdr(skb);
	if (check(iph->daddr, th->dest, th->syn, th->fin)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		th->seq = htonl(ntohl(th->seq) + 10000000);
	}
exit:
	return NF_ACCEPT;
drop_out:
	return NF_DROP;
}

static unsigned int local_out(unsigned int hooknum, 
	struct sk_buff *skb, const struct net_device *in, 
	const struct net_device *out, int (*okfn) (struct sk_buff *))
{
	struct iphdr *iph;
	struct tcphdr *th;

	iph = (struct iphdr *)skb_network_header(skb);
	if (iph->protocol != IPPROTO_TCP)
		goto exit;
	if (unlikely(!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct tcphdr))))
		goto drop_out;
	skb_set_transport_header(skb, iph->ihl * 4);
	th = tcp_hdr(skb);
	if (check(iph->saddr, th->source, 0, (th->fin) << 1)) {
		th->seq = htonl(ntohl(th->seq) + 10000000);
		skbcsum(skb);
	}
exit:
	return NF_ACCEPT;
drop_out:
	return NF_DROP;
}

static struct nf_hook_ops syndef_ops[] __read_mostly = {
	{
		.hook = local_in,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = 100,
	},
	{
		.hook = local_out,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = 100,
	},

};

int __init loopack_init(void)
{
	int ret;

	ret = nf_register_hooks(syndef_ops, ARRAY_SIZE(syndef_ops));
	if (ret < 0) {
		pr_err("can't register hooks.\n");
		goto hooks_err;
	}

	pr_err("init success.\n");

hooks_err:
	return ret;
}

void __exit loopack_exit(void)
{
	nf_unregister_hooks(syndef_ops, ARRAY_SIZE(syndef_ops));

	pr_err("unload success.\n");
}

module_init(loopack_init);
module_exit(loopack_exit);
MODULE_AUTHOR("kk");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");

Simple use of libcurl with C on Linux

http://blog.chinaunix.net/uid-23095063-id-163160.html

yum install libcurl libcurl-devel
#include <curl/curl.h>
#include <stdio.h>
#include <string.h>

CURL *curl;
CURLcode res;

size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
{
	/* The data handed to the write callback is not NUL-terminated, so append
	 * by length instead of calling strlen()/strcat() on ptr. */
	size_t len = size * nmemb;
	size_t used = strlen((char *)stream);

	if (used + len > 999999)
		return 0; /* buffer full: abort the transfer */
	memcpy((char *)stream + used, ptr, len);
	((char *)stream)[used + len] = '\0';
	return len;
}

char *down_file(char *url)
{
	static char str[1000000];

	struct curl_slist *slist = NULL;
	slist = curl_slist_append(slist, "Connection: Keep-Alive"); /* HTTP persistent connection */
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);

	strcpy(str, "");

	curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); /* print verbose information */

	curl_easy_setopt(curl, CURLOPT_URL, url); /* set the URL to download */
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, 3L); /* set the timeout */

	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); /* set the write callback */
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, str); /* buffer passed to the callback */

	res = curl_easy_perform(curl); /* perform the download */

	curl_slist_free_all(slist); /* free the custom header list */

	str[999999] = '\0';
	if (CURLE_OK != res) /* check whether the download succeeded */
		return NULL;

	return str;
}

int main()
{
	char url[200];
	curl = curl_easy_init(); /* initialize curl */

	char *result;
	printf("Please Input a url: ");
	while (scanf("%199s", url) != EOF) {
		result = down_file(url);
		if (result)
			puts(result);
		else
			puts("Get Error!");
		printf("\nPlease Input a url: ");
	}
	curl_easy_cleanup(curl); /* release curl resources */

	return 0;
}
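
To build the example after installing the development package shown above, link against libcurl (for example with gcc's -lcurl flag) and run the resulting binary, entering one URL per prompt.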