1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
|
// net/ipv4/tcp_input.c
// Call chain: tcp_v4_rcv -> tcp_v4_do_rcv -> tcp_rcv_state_process -> tcp_v4_conn_request -> tcp_conn_request
// Handles an incoming SYN on a listening socket: allocates a request_sock,
// and either queues it in the SYN backlog or answers with a SYN cookie.
// (Excerpt — elided with "..."; full logic in the kernel source.)
int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb)
{
...
struct request_sock *req;
bool want_cookie = false;
...
// Decide whether to use syncookies for this SYN:
// tcp_syncookies=2 forces syncookies unconditionally (SYN backlog is ignored);
// otherwise cookies are used only when the listener's accept/SYN queue is full.
if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
if (!want_cookie)
goto drop;
}
...
// Allocate the request_sock that represents this half-open connection.
// With want_cookie set, !want_cookie == false, so req is NOT attached to the
// listener; after the SYNACK is sent, req is freed, keeping per-connection
// server-side state minimal during a SYN flood.
req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
if (!req)
goto drop;
req->syncookie = want_cookie;
...
if (want_cookie) {
// Encode connection state into the SYNACK sequence number (the cookie).
isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
if (!tmp_opt.tstamp_ok)
inet_rsk(req)->ecn_ok = 0;
}
...
if (!want_cookie) {
tcp_reqsk_record_syn(sk, req, skb);
fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
}
if (fastopen_sk) {
// TCP Fast Open path: reply from the already-created child socket.
af_ops->send_synack(fastopen_sk, dst, &fl, req,
&foc, TCP_SYNACK_FASTOPEN, skb);
...
} else {
tcp_rsk(req)->tfo_listener = false;
if (!want_cookie) // no syncookies: add req to the SYN backlog (hash + timer)
inet_csk_reqsk_queue_hash_add(sk, req,
tcp_timeout_init((struct sock *)req));
// Send the SYNACK packet
af_ops->send_synack(sk, dst, &fl, req, &foc,
!want_cookie ? TCP_SYNACK_NORMAL :
TCP_SYNACK_COOKIE,
skb);
if (want_cookie) {
// After sending the SYNACK, free req immediately:
// under a SYN flood this avoids keeping any per-connection state,
// reducing server resource usage (state lives in the cookie instead).
reqsk_free(req);
return 0;
}
}
...
return 0;
...
}
// net/ipv4/tcp_input.c
// Thin wrapper around reqsk_alloc(); attach_listener controls whether the
// new request_sock is bound to the listening socket (false on the
// syncookie path — see tcp_conn_request). Excerpt, body elided.
struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk_listener,
bool attach_listener)
{
struct request_sock *req = reqsk_alloc(ops, sk_listener,
attach_listener);
...
return req;
}
// include/net/request_sock.h
// Allocate a request_sock from the per-protocol slab cache.
// Returns NULL on allocation failure (GFP_ATOMIC | __GFP_NOWARN: no sleeping,
// no allocation-failure warning — failures are expected under memory pressure).
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
{
struct request_sock *req;
req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
if (!req)
return NULL;
req->rsk_listener = NULL;
if (attach_listener) { // when syncookies are in use, attach_listener is false
...
req->rsk_listener = sk_listener;
}
...
return req;
}
// net/ipv4/tcp_ipv4.c
// IPv4 implementation of af_ops->send_synack (see tcp_conn_request):
// builds the SYNACK skb via tcp_make_synack() and transmits it.
// Returns 0 on success, negative error otherwise. Excerpt, body elided.
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb)
{
...
int err = -1;
struct sk_buff *skb;
...
skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
if (skb) {
...
}
return err;
}
/// net/ipv4/tcp_output.c
/**
 * tcp_make_synack - allocate an skb and build a SYNACK packet
 *
 * Whether the skb is charged to a socket depends on synack_type:
 * only the syncookie path (TCP_SYNACK_COOKIE) leaves it unowned.
 * Excerpt, body elided.
 */
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb)
{
...
struct sk_buff *skb;
...
skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
...
switch (synack_type) {
case TCP_SYNACK_NORMAL:
// Normal path: charge the skb to the request_sock.
skb_set_owner_w(skb, req_to_sk(req));
break;
case TCP_SYNACK_COOKIE:
// Under a SYN flood, the skb is deliberately not bound to any socket,
// so no per-connection memory is accounted — reduces server resource usage.
break;
case TCP_SYNACK_FASTOPEN:
// Fast Open: charge to the listener socket.
skb_set_owner_w(skb, (struct sock *)sk);
break;
}
...
return skb;
}
|