/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif
struct inet_hashinfo tcp_hashinfo;
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
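
/* Decide whether a TIME-WAIT socket may be reused for a new outgoing
 * connection to the same destination.  Returns 1 when reuse is safe;
 * the new write_seq is then seeded past tw_snd_nxt so the sequence
 * spaces of the two incarnations cannot overlap.
 */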
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete the initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
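
/* Prepare the TCP checksum for an outgoing segment.  With hardware
 * checksum offload (CHECKSUM_PARTIAL) only the pseudo-header sum is
 * filled in and the device computes the rest; otherwise the complete
 * checksum over header and payload is computed in software here.
 */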
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
579 struct tcphdr *th = tcp_hdr(skb);
582 #ifdef CONFIG_TCP_MD5SIG
583 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 struct ip_reply_arg arg;
587 #ifdef CONFIG_TCP_MD5SIG
588 struct tcp_md5sig_key *key;
592 /* Never send a reset in response to a reset. */
596 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
599 /* Swap the send and the receive. */
600 memset(&rep, 0, sizeof(rep));
601 rep.th.dest = th->source;
602 rep.th.source = th->dest;
603 rep.th.doff = sizeof(struct tcphdr) / 4;
607 rep.th.seq = th->ack_seq;
610 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
611 skb->len - (th->doff << 2));
614 memset(&arg, 0, sizeof(arg));
615 arg.iov[0].iov_base = (unsigned char *)&rep;
616 arg.iov[0].iov_len = sizeof(rep.th);
618 #ifdef CONFIG_TCP_MD5SIG
619 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
621 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
623 (TCPOPT_MD5SIG << 8) |
625 /* Update length and the length the header thinks exists */
626 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
627 rep.th.doff = arg.iov[0].iov_len / 4;
629 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
630 key, ip_hdr(skb)->saddr,
631 ip_hdr(skb)->daddr, &rep.th);
634 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
635 ip_hdr(skb)->saddr, /* XXX */
636 arg.iov[0].iov_len, IPPROTO_TCP, 0);
637 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
638 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
640 net = dev_net(skb_dst(skb)->dev);
641 ip_send_reply(net->ipv4.tcp_sock, skb,
642 &arg, arg.iov[0].iov_len);
644 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
645 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
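
/* Rate-limited warning printed when the SYN queue overflows; the message
 * reflects whether syncookies will be used to keep accepting connections
 * or the request will simply be dropped.
 */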
static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the key array itself,
	 * and then drop our hold on the crypto pool.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
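
/* Feed the TCP pseudo-header into an MD5 hash in progress.  RFC 2385
 * signs, in order, the pseudo-header, the TCP header (with the checksum
 * field zeroed), the segment data and finally the key itself; this
 * helper covers the first of those steps.
 */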
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
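
/* Handle an incoming SYN on a listening socket: validate it, optionally
 * fall back to syncookies under SYN-queue pressure, allocate a request
 * sock, pick an initial sequence number and answer with a SYN-ACK.
 */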
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	__inet_hash_nolisten(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
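
/* Map an incoming segment on a listening socket to the request sock,
 * established child or (via syncookies) reconstructed connection it
 * belongs to; returns the socket to process it on, or NULL to drop.
 */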
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
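
/* Verify, or set up for deferred verification, the checksum of an
 * incoming segment: short packets are fully checked right away, longer
 * ones keep a pseudo-header sum and are completed later during the copy
 * to user space.
 */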
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
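
/* Main IPv4 TCP receive routine, called from the IP layer for every
 * segment: validate the header, look the owning socket up in the hash
 * tables and hand the segment to the state machine, prequeue or backlog.
 */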
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
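
/* GRO (generic receive offload) hooks: validate the checksum on receive
 * before segments are merged, and fix up the header checksum and GSO
 * type once merging is complete.
 */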
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);
int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);
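
/* The protocol descriptor the socket layer uses for IPv4 TCP: it wires
 * the generic socket calls (connect, sendmsg, setsockopt, ...) to the
 * TCP implementations above and in tcp.c.
 */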
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);