/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *	See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller	:	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen	:	Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen	:	Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen	:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a year's coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind to a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/inet_hashtables.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.

	   Actually, the idea is close to VJ's: the timestamp cache is held
	   not per host, but per port pair, and the TW bucket is used as the
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
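/*
 * Editor's note: the reuse rule above, restated as a standalone predicate
 * (illustrative sketch only; names are hypothetical). A TIME-WAIT socket
 * may be reused for a new outgoing connection when it cached a timestamp
 * and at least one second has elapsed, which guarantees the new
 * connection's timestamps are strictly larger, so stray old duplicates
 * fail the PAWS check.
 */
#if 0
static int tw_reusable(unsigned long ts_recent_stamp, unsigned long now,
		       int tw_reuse_sysctl, int implicit)
{
	/* No timestamp cached: cannot prove old segments are stale. */
	if (!ts_recent_stamp)
		return 0;
	/* Implicit reuse (twp == NULL) is always considered safe here;
	 * explicit reuse additionally requires sysctl_tcp_tw_reuse and
	 * a >1 s gap since the last timestamp was seen. */
	return implicit || (tw_reuse_sysctl && now - ts_recent_stamp > 1);
}
#endif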
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering
		 * TIME-WAIT state, and initialize rx_opt.ts_recent
		 * from it when trying a new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However, we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
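/*
 * Editor's note: tcp_v4_connect() is what ultimately runs when a user
 * process calls connect(2) on an AF_INET stream socket. A minimal
 * userspace sketch of that call path (illustration only, not part of the
 * kernel source; address and port are examples, error handling trimmed):
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int tcp_client_example(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	dst.sin_port = htons(80);			/* example port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* example address */

	/* Triggers tcp_v4_connect(): route lookup, source port selection,
	 * initial sequence number choice, and the SYN transmission. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif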
/*
 * This routine does path MTU discovery as defined in RFC 1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go
	 * through unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry whether PMTU discovery is
	 * forbidden on this route. We just assume that no packet-too-big
	 * packets are sent back when PMTU discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong. Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path MTU
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
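/*
 * Editor's note: the per-socket PMTU state that do_pmtu_discovery()
 * updates is visible from userspace. A hedged sketch (illustration only):
 * force PMTU discovery on a connected socket, then read back the kernel's
 * current path MTU estimate.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int read_path_mtu(int connected_fd)
{
	int pmtudisc = IP_PMTUDISC_DO;	/* set DF, never fragment locally */
	int mtu;
	socklen_t len = sizeof(mtu);

	setsockopt(connected_fd, IPPROTO_IP, IP_MTU_DISCOVER,
		   &pmtudisc, sizeof(pmtudisc));

	/* IP_MTU returns the kernel's current estimate for this path;
	 * it shrinks when an ICMP fragmentation-needed message arrives. */
	if (getsockopt(connected_fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}
#endif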
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment,
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC 1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by PMTU discovery).
	 *
	 * Note that on the modern Internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors as ordered by their masters, even these two messages
	 * finally lose their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
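/*
 * Editor's note: the sk_err / sk_err_soft distinction above is what
 * userspace observes through getsockopt(SO_ERROR): hard errors abort the
 * connection and are reported immediately, soft errors only surface if
 * the connection eventually times out. A hedged sketch (illustration
 * only):
 */
#if 0
#include <sys/socket.h>

static int pending_socket_error(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	/* Reads and clears sk->sk_err; returns e.g. ECONNREFUSED after
	 * an ICMP port-unreachable arrived for a pending connect(). */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;
	return err;
}
#endif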
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->inet_saddr,
					  inet->inet_daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->inet_saddr,
					 inet->inet_daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
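/*
 * Editor's note: tcp_v4_check() folds the IPv4 pseudo-header (source
 * address, destination address, protocol, TCP length) into the ones'
 * complement checksum of the TCP header and payload. A self-contained
 * userspace sketch of the same computation (illustration only; addresses
 * are taken in host byte order, the result is stored MSB-first, i.e. via
 * htons on little-endian machines):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: addresses, zero + protocol (6), TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;			/* IPPROTO_TCP */
	sum += (uint32_t)len;

	/* TCP header + data, as 16-bit big-endian words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += (uint32_t)seg[len - 1] << 8;

	/* Fold the carries and take the ones' complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif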
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
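/*
 * Editor's note: the seq/ack selection above follows RFC 793: a reset
 * answering a segment that carried an ACK takes its sequence number from
 * that ACK field; otherwise the reset has sequence number zero and itself
 * carries an ACK covering everything the offending segment occupied in
 * sequence space (SYN and FIN each count as one unit). A standalone
 * restatement (illustrative sketch, hypothetical types):
 */
#if 0
#include <stdint.h>

struct rst_choice { uint32_t seq, ack; int ack_valid; };

static struct rst_choice pick_rst_numbers(uint32_t seg_seq, uint32_t seg_ack,
					  int has_ack, int syn, int fin,
					  uint32_t payload_len)
{
	struct rst_choice r = { 0, 0, 0 };	/* RST.SEQ defaults to 0 */

	if (has_ack) {
		r.seq = seg_ack;		/* RST.SEQ = SEG.ACK */
	} else {
		r.ack_valid = 1;		/* RST carries an ACK */
		r.ack = seg_seq + syn + fin + payload_len;
	}
	return r;
}
#endif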
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC 2385 MD5 checksumming requires a mapping of
 * IP address -> MD5 key.
 * We need to maintain these in the sk structure.
 */
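/*
 * Editor's note: userspace installs these per-peer keys with the
 * TCP_MD5SIG socket option, passing a struct tcp_md5sig. A hedged sketch
 * of a server-side setup (illustration only; key and address are
 * examples):
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int install_md5_key(int listen_fd, const char *peer_ip,
			   const void *key, int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer_ip, &sin->sin_addr);

	md5.tcpm_keylen = keylen;	/* <= TCP_MD5SIG_MAXKEYLEN (80) */
	memcpy(md5.tcpm_key, key, keylen);

	/* Ends up in tcp_v4_parse_md5_keys() -> tcp_v4_md5_do_add(). */
	return setsockopt(listen_fd, IPPROTO_TCP, TCP_MD5SIG,
			  &md5, sizeof(md5));
}
#endif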
/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys, the crypto
	 * element, and then decrement our hold on the last
	 * resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
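/*
 * Editor's note: the digest computed above follows RFC 2385: MD5 over the
 * pseudo-header, the TCP header with its checksum field zeroed, the
 * segment data, and finally the key itself. A hedged userspace sketch
 * using OpenSSL's MD5 (illustration only; not the kernel crypto API):
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <openssl/md5.h>

struct pseudo4 {			/* mirrors struct tcp4_pseudohdr */
	uint32_t saddr, daddr;		/* network byte order */
	uint8_t  pad, protocol;		/* pad = 0, protocol = 6 */
	uint16_t len;			/* TCP segment length, big endian */
};

static void rfc2385_digest(uint8_t out[16], const struct pseudo4 *ph,
			   const uint8_t *tcp_hdr_csum0, size_t hdrlen,
			   const uint8_t *data, size_t datalen,
			   const uint8_t *key, size_t keylen)
{
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, ph, sizeof(*ph));		/* 1. pseudo-header */
	MD5_Update(&ctx, tcp_hdr_csum0, hdrlen);	/* 2. header, check = 0 */
	MD5_Update(&ctx, data, datalen);		/* 3. segment data */
	MD5_Update(&ctx, key, keylen);			/* 4. the key itself */
	MD5_Final(out, &ctx);
}
#endif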
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitation; they conserve resources, and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* The accept backlog is full. If we have already queued enough
	 * warm entries in the SYN queue, drop the request. It is better
	 * than clogging the SYN queue with openreqs with exponentially
	 * increasing timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit a live
		 * time-wait bucket, so all the necessary checks
		 * are made by the function processing the time-wait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause if you dislike this approach. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is reserved for destinations proven
			 * to be alive. It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the SYN flood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
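/*
 * Editor's note: the "last quarter" clause above reserves the final 25%
 * of the SYN backlog for peers we have previously seen, once syncookies
 * are off. Restated as a standalone predicate (illustrative sketch,
 * hypothetical names):
 */
#if 0
static int syn_backlog_nearly_full(int max_syn_backlog, int queue_len)
{
	/* True once fewer than a quarter of the slots remain, i.e. the
	 * queue is more than 3/4 full; at that point unproven peers are
	 * dropped in favour of previously-remembered ones. */
	return max_syn_backlog - queue_len < (max_syn_backlog >> 2);
}
#endif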
/*
 * The three-way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * when:
	 * o We're expecting an MD5'd packet and there is no MD5 tcp option
	 * o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
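/*
 * Editor's note: the control-block setup in tcp_v4_rcv() treats SYN and
 * FIN as each occupying one unit of sequence space, so end_seq =
 * seq + syn + fin + payload length. A standalone restatement
 * (illustrative sketch):
 */
#if 0
#include <stdint.h>

static uint32_t tcp_end_seq(uint32_t seq, int syn, int fin,
			    uint32_t skb_len, uint32_t doff_words)
{
	/* skb_len covers the TCP header too; doff_words * 4 is the header
	 * length, so the difference is the payload length. */
	return seq + syn + fin + (skb_len - doff_words * 4);
}
#endif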
/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal time-wait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
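/*
 * Editor's note: several of the values initialized above (RTO, ssthresh,
 * cwnd) can be observed from userspace via the TCP_INFO socket option.
 * A hedged sketch (illustration only):
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int read_cwnd(int fd, unsigned *cwnd, unsigned *ssthresh)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
		return -1;
	*cwnd = ti.tcpi_snd_cwnd;		/* segments; starts small */
	*ssthresh = ti.tcpi_snd_ssthresh;	/* "infinite" until loss */
	return 0;
}
#endif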
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean the prequeue; it must really be empty */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}

static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
					     (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
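/*
 * Editor's note: the rows emitted above form /proc/net/tcp, with
 * addresses and ports printed as hex. A hedged userspace sketch that
 * parses the address columns back out (illustration only):
 */
#if 0
#include <stdio.h>

static void dump_proc_net_tcp(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return;
	fgets(line, sizeof(line), f);	/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		unsigned int local, rem, lport, rport, state;

		/* " sl local_address rem_address st ..." */
		if (sscanf(line, "%*d: %x:%x %x:%x %x",
			   &local, &lport, &rem, &rport, &state) == 5)
			printf("%08X:%04X -> %08X:%04X st=%02X\n",
			       local, lport, rem, rport, state);
	}
	fclose(f);
}
#endif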
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);