3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
66 #include <asm/uaccess.h>
68 #include <linux/proc_fs.h>
69 #include <linux/seq_file.h>
71 #include <linux/crypto.h>
72 #include <linux/scatterlist.h>
/* Forward declarations for functions defined later in this file. */
74 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 static void __tcp_v6_send_check(struct sk_buff *skb,
80 struct in6_addr *saddr,
81 struct in6_addr *daddr);
/* Per-family connection-sock ops: ipv6_mapped is installed when the
 * socket talks to an IPv4-mapped peer, ipv6_specific for native IPv6. */
83 static const struct inet_connection_sock_af_ops ipv6_mapped;
84 static const struct inet_connection_sock_af_ops ipv6_specific;
85 #ifdef CONFIG_TCP_MD5SIG
86 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
/* MD5 key lookup by peer IPv6 address (real definition further down,
 * under CONFIG_TCP_MD5SIG). */
89 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
90 struct in6_addr *addr)
/* Insert a non-closed socket into the TCP hash tables.  Sockets using
 * the IPv4-mapped ops are dispatched to the v4 hashing path (that call
 * is on an elided line); native IPv6 sockets go through __inet6_hash(). */
96 static void tcp_v6_hash(struct sock *sk)
98 if (sk->sk_state != TCP_CLOSE) {
99 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
104 __inet6_hash(sk, NULL);
/* Compute the TCP checksum over the IPv6 pseudo-header (RFC 2460) plus
 * the segment payload checksum already accumulated in 'base'. */
109 static __inline__ __sum16 tcp_v6_check(int len,
110 struct in6_addr *saddr,
111 struct in6_addr *daddr,
114 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
/* Pick the initial sequence number for an incoming connection, keyed on
 * the 4-tuple taken from the received SYN (note the swap: skb's daddr is
 * our local address, so it is passed as the "source" side). */
117 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
119 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
120 ipv6_hdr(skb)->saddr.s6_addr32,
122 tcp_hdr(skb)->source);
/* Active-open (connect()) for an AF_INET6 TCP socket: validate the
 * sockaddr, resolve flow label / scope id, route the flow, handle the
 * IPv4-mapped fallback, then hash the socket and send the SYN. */
125 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
128 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
129 struct inet_sock *inet = inet_sk(sk);
130 struct inet_connection_sock *icsk = inet_csk(sk);
131 struct ipv6_pinfo *np = inet6_sk(sk);
132 struct tcp_sock *tp = tcp_sk(sk);
133 struct in6_addr *saddr = NULL, *final_p, final;
136 struct dst_entry *dst;
/* Basic sockaddr validation. */
140 if (addr_len < SIN6_LEN_RFC2133)
143 if (usin->sin6_family != AF_INET6)
144 return -EAFNOSUPPORT;
146 memset(&fl, 0, sizeof(fl));
/* If the caller supplied a flow label, look up the matching flow-label
 * entry and take the destination address from it. */
149 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
150 IP6_ECN_flow_init(fl.fl6_flowlabel);
151 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
152 struct ip6_flowlabel *flowlabel;
153 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
154 if (flowlabel == NULL)
156 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
157 fl6_sock_release(flowlabel);
162 * connect() to INADDR_ANY means loopback (BSD'ism).
165 if(ipv6_addr_any(&usin->sin6_addr))
166 usin->sin6_addr.s6_addr[15] = 0x1;
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
/* Connecting to multicast is rejected (error path elided here). */
170 if(addr_type & IPV6_ADDR_MULTICAST)
/* Link-local destinations need a device: take it from sin6_scope_id,
 * and refuse a scope id that conflicts with an existing binding. */
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If interface is set while binding, indices
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
186 /* Connect to link-local address requires an interface */
187 if (!sk->sk_bound_dev_if)
/* Reconnecting to a different peer invalidates cached PAWS timestamps. */
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
198 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
199 np->flow_label = fl.fl6_flowlabel;
/* IPv4-mapped destination: switch the socket to the mapped ops and
 * delegate to tcp_v4_connect(); on failure restore the IPv6 ops. */
205 if (addr_type == IPV6_ADDR_MAPPED) {
206 u32 exthdrlen = icsk->icsk_ext_hdr_len;
207 struct sockaddr_in sin;
209 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211 if (__ipv6_only_sock(sk))
214 sin.sin_family = AF_INET;
215 sin.sin_port = usin->sin6_port;
216 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218 icsk->icsk_af_ops = &ipv6_mapped;
219 sk->sk_backlog_rcv = tcp_v4_do_rcv;
220 #ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
224 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
/* Failure path: undo the switch to the mapped ops. */
227 icsk->icsk_ext_hdr_len = exthdrlen;
228 icsk->icsk_af_ops = &ipv6_specific;
229 sk->sk_backlog_rcv = tcp_v6_do_rcv;
230 #ifdef CONFIG_TCP_MD5SIG
231 tp->af_specific = &tcp_sock_ipv6_specific;
/* Success: mirror the chosen IPv4 addresses as v4-mapped IPv6. */
235 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
236 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
243 if (!ipv6_addr_any(&np->rcv_saddr))
244 saddr = &np->rcv_saddr;
/* Build the flow and route it (dst lookup + IPsec/xfrm resolution). */
246 fl.proto = IPPROTO_TCP;
247 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
248 ipv6_addr_copy(&fl.fl6_src,
249 (saddr ? saddr : &np->saddr));
250 fl.oif = sk->sk_bound_dev_if;
251 fl.mark = sk->sk_mark;
252 fl.fl_ip_dport = usin->sin6_port;
253 fl.fl_ip_sport = inet->inet_sport;
255 final_p = fl6_update_dst(&fl, np->opt, &final);
257 security_sk_classify_flow(sk, &fl);
259 err = ip6_dst_lookup(sk, &dst, &fl);
263 ipv6_addr_copy(&fl.fl6_dst, final_p);
265 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
268 err = ip6_dst_blackhole(sk, &dst, &fl);
275 ipv6_addr_copy(&np->rcv_saddr, saddr);
278 /* set the source address */
279 ipv6_addr_copy(&np->saddr, saddr);
280 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
282 sk->sk_gso_type = SKB_GSO_TCPV6;
283 __ip6_dst_store(sk, dst, NULL, NULL);
/* tw_recycle: seed rx_opt.ts_recent from the inet_peer cache so PAWS
 * can validate against the last timestamp seen from this destination. */
285 rt = (struct rt6_info *) dst;
286 if (tcp_death_row.sysctl_tw_recycle &&
287 !tp->rx_opt.ts_recent_stamp &&
288 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
289 struct inet_peer *peer = rt6_get_peer(rt);
291 * VJ's idea. We save last timestamp seen from
292 * the destination in peer table, when entering state
293 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
294 * when trying new connection.
297 inet_peer_refcheck(peer);
298 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
299 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
300 tp->rx_opt.ts_recent = peer->tcp_ts;
305 icsk->icsk_ext_hdr_len = 0;
307 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
/* Clamp MSS so a full segment always fits the IPv6 minimum MTU. */
310 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
312 inet->inet_dport = usin->sin6_port;
/* Pick a source port / hash the socket, then transmit the SYN. */
314 tcp_set_state(sk, TCP_SYN_SENT);
315 err = inet6_hash_connect(&tcp_death_row, sk);
320 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
325 err = tcp_connect(sk);
/* Common failure path: tear the socket back down to CLOSE. */
332 tcp_set_state(sk, TCP_CLOSE);
335 inet->inet_dport = 0;
336 sk->sk_route_caps = 0;
/* ICMPv6 error handler for TCP: locate the socket for the offending
 * segment, validate the sequence number, handle PMTU discovery, and
 * propagate the error to the socket or the pending request_sock. */
340 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
341 u8 type, u8 code, int offset, __be32 info)
343 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
344 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
345 struct ipv6_pinfo *np;
350 struct net *net = dev_net(skb->dev);
352 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
353 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
/* No matching socket: count and drop (return is on an elided line). */
356 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
361 if (sk->sk_state == TCP_TIME_WAIT) {
362 inet_twsk_put(inet_twsk(sk));
/* If the socket is owned by user context we cannot process the error
 * inline; only bump the stat. */
367 if (sock_owned_by_user(sk))
368 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
370 if (sk->sk_state == TCP_CLOSE)
/* min_hopcount filter (IP_MINTTL equivalent for v6). */
373 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
374 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
/* The quoted sequence number must fall inside the send window. */
379 seq = ntohl(th->seq);
380 if (sk->sk_state != TCP_LISTEN &&
381 !between(seq, tp->snd_una, tp->snd_nxt)) {
382 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
/* Path MTU discovery: refresh the route if needed and shrink the MSS. */
388 if (type == ICMPV6_PKT_TOOBIG) {
389 struct dst_entry *dst = NULL;
391 if (sock_owned_by_user(sk))
393 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
396 /* icmp should have updated the destination cache entry */
397 dst = __sk_dst_check(sk, np->dst_cookie);
400 struct inet_sock *inet = inet_sk(sk);
403 /* BUGGG_FUTURE: Again, it is not clear how
404 to handle rthdr case. Ignore this complexity
407 memset(&fl, 0, sizeof(fl));
408 fl.proto = IPPROTO_TCP;
409 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
410 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
411 fl.oif = sk->sk_bound_dev_if;
412 fl.mark = sk->sk_mark;
413 fl.fl_ip_dport = inet->inet_dport;
414 fl.fl_ip_sport = inet->inet_sport;
415 security_skb_classify_flow(skb, &fl);
417 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
418 sk->sk_err_soft = -err;
422 if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
423 sk->sk_err_soft = -err;
430 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
431 tcp_sync_mss(sk, dst_mtu(dst));
432 tcp_simple_retransmit(sk);
433 } /* else let the usual retransmit timer handle it */
438 icmpv6_err_convert(type, code, &err);
440 /* Might be for an request_sock */
441 switch (sk->sk_state) {
442 struct request_sock *req, **prev;
444 if (sock_owned_by_user(sk))
447 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
448 &hdr->saddr, inet6_iif(skb));
452 /* ICMPs are not backlogged, hence we cannot get
453 * an established socket here.
455 WARN_ON(req->sk != NULL);
/* The quoted seq must match the ISN we sent in the SYN|ACK. */
457 if (seq != tcp_rsk(req)->snt_isn) {
458 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
462 inet_csk_reqsk_queue_drop(sk, req, prev);
466 case TCP_SYN_RECV: /* Cannot happen.
467 It can, it SYNs are crossed. --ANK */
468 if (!sock_owned_by_user(sk)) {
470 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
474 sk->sk_err_soft = err;
478 if (!sock_owned_by_user(sk) && np->recverr) {
480 sk->sk_error_report(sk);
482 sk->sk_err_soft = err;
/* Build and transmit a SYN|ACK for a pending request_sock: route the
 * flow back to the requester, construct the segment with
 * tcp_make_synack(), checksum it and emit via ip6_xmit(). */
490 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
491 struct request_values *rvp)
493 struct inet6_request_sock *treq = inet6_rsk(req);
494 struct ipv6_pinfo *np = inet6_sk(sk);
495 struct sk_buff * skb;
496 struct ipv6_txoptions *opt = NULL;
497 struct in6_addr * final_p, final;
499 struct dst_entry *dst;
502 memset(&fl, 0, sizeof(fl));
503 fl.proto = IPPROTO_TCP;
504 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
505 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
506 fl.fl6_flowlabel = 0;
508 fl.mark = sk->sk_mark;
509 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
510 fl.fl_ip_sport = inet_rsk(req)->loc_port;
511 security_req_classify_flow(req, &fl);
514 final_p = fl6_update_dst(&fl, opt, &final);
516 err = ip6_dst_lookup(sk, &dst, &fl);
520 ipv6_addr_copy(&fl.fl6_dst, final_p);
521 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
524 skb = tcp_make_synack(sk, dst, req, rvp);
526 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
/* Restore the real destination (fl6_dst may have been rewritten to the
 * routing-header "final" hop above). */
528 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
529 err = ip6_xmit(sk, skb, &fl, opt);
530 err = net_xmit_eval(err);
/* Free options only if they were allocated here rather than borrowed
 * from the socket (np->opt). */
534 if (opt && opt != np->opt)
535 sock_kfree_s(sk, opt, opt->tot_len);
/* Retransmit a SYN|ACK for a request_sock, counting it as a TCP
 * retransmission. */
540 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
541 struct request_values *rvp)
543 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
544 return tcp_v6_send_synack(sk, req, rvp);
/* Log a possible SYN-flood on the destination port; the message differs
 * depending on whether syncookies will rescue the connection attempt. */
547 static inline void syn_flood_warning(struct sk_buff *skb)
549 #ifdef CONFIG_SYN_COOKIES
550 if (sysctl_tcp_syncookies)
552 "TCPv6: Possible SYN flooding on port %d. "
553 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
557 "TCPv6: Possible SYN flooding on port %d. "
558 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
/* request_sock destructor: release the pktoptions skb saved at SYN time. */
561 static void tcp_v6_reqsk_destructor(struct request_sock *req)
563 kfree_skb(inet6_rsk(req)->pktopts);
566 #ifdef CONFIG_TCP_MD5SIG
/* Linear scan of the socket's IPv6 MD5 key list for an exact peer
 * address match; returns NULL when no key is configured. */
567 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
568 struct in6_addr *addr)
570 struct tcp_sock *tp = tcp_sk(sk);
575 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
578 for (i = 0; i < tp->md5sig_info->entries6; i++) {
579 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
580 return &tp->md5sig_info->keys6[i].base;
/* MD5 key lookup keyed by a peer socket's destination address. */
585 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
586 struct sock *addr_sk)
588 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
/* MD5 key lookup keyed by a request_sock's remote address. */
591 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
592 struct request_sock *req)
594 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
/* Add (or replace) an MD5 key for an IPv6 peer.  Takes ownership of
 * 'newkey'.  Grows keys6[] by one slot at a time when full. */
597 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
598 char *newkey, u8 newkeylen)
600 /* Add key to the list */
601 struct tcp_md5sig_key *key;
602 struct tcp_sock *tp = tcp_sk(sk);
603 struct tcp6_md5sig_key *keys;
605 key = tcp_v6_md5_do_lookup(sk, peer);
607 /* modify existing entry - just update that one */
610 key->keylen = newkeylen;
612 /* reallocate new list if current one is full. */
613 if (!tp->md5sig_info) {
614 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
615 if (!tp->md5sig_info) {
/* MD5 segments cannot be offloaded; disable GSO for this socket. */
619 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
621 if (tcp_alloc_md5sig_pool(sk) == NULL) {
625 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
626 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
627 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
630 tcp_free_md5sig_pool();
635 if (tp->md5sig_info->entries6)
636 memmove(keys, tp->md5sig_info->keys6,
637 (sizeof (tp->md5sig_info->keys6[0]) *
638 tp->md5sig_info->entries6));
640 kfree(tp->md5sig_info->keys6);
641 tp->md5sig_info->keys6 = keys;
642 tp->md5sig_info->alloced6++;
/* Append the new entry at the tail of the list. */
645 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
647 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
648 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
650 tp->md5sig_info->entries6++;
/* tcp_sock_af_ops hook: add an MD5 key keyed by the peer socket's
 * destination address. */
655 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
656 u8 *newkey, __u8 newkeylen)
658 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
/* Delete the MD5 key for 'peer': free the key material, compact the
 * array in place, and free the whole list once it becomes empty. */
662 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
664 struct tcp_sock *tp = tcp_sk(sk);
667 for (i = 0; i < tp->md5sig_info->entries6; i++) {
668 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
670 kfree(tp->md5sig_info->keys6[i].base.key);
671 tp->md5sig_info->entries6--;
673 if (tp->md5sig_info->entries6 == 0) {
674 kfree(tp->md5sig_info->keys6);
675 tp->md5sig_info->keys6 = NULL;
676 tp->md5sig_info->alloced6 = 0;
678 /* shrink the database */
679 if (tp->md5sig_info->entries6 != i)
680 memmove(&tp->md5sig_info->keys6[i],
681 &tp->md5sig_info->keys6[i+1],
682 (tp->md5sig_info->entries6 - i)
683 * sizeof (tp->md5sig_info->keys6[0]));
685 tcp_free_md5sig_pool();
/* Free every MD5 key on the socket (both the IPv6 and the IPv4 lists),
 * dropping one md5sig-pool reference per non-empty list. */
692 static void tcp_v6_clear_md5_list (struct sock *sk)
694 struct tcp_sock *tp = tcp_sk(sk);
697 if (tp->md5sig_info->entries6) {
698 for (i = 0; i < tp->md5sig_info->entries6; i++)
699 kfree(tp->md5sig_info->keys6[i].base.key);
700 tp->md5sig_info->entries6 = 0;
701 tcp_free_md5sig_pool();
704 kfree(tp->md5sig_info->keys6);
705 tp->md5sig_info->keys6 = NULL;
706 tp->md5sig_info->alloced6 = 0;
708 if (tp->md5sig_info->entries4) {
709 for (i = 0; i < tp->md5sig_info->entries4; i++)
710 kfree(tp->md5sig_info->keys4[i].base.key);
711 tp->md5sig_info->entries4 = 0;
712 tcp_free_md5sig_pool();
715 kfree(tp->md5sig_info->keys4);
716 tp->md5sig_info->keys4 = NULL;
717 tp->md5sig_info->alloced4 = 0;
/* setsockopt(TCP_MD5SIG) handler: copy the request from userspace and
 * dispatch to the v4 or v6 add/del path depending on whether the peer
 * address is IPv4-mapped.  A zero key length means "delete". */
720 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
723 struct tcp_md5sig cmd;
724 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
727 if (optlen < sizeof(cmd))
730 if (copy_from_user(&cmd, optval, sizeof(cmd)))
733 if (sin6->sin6_family != AF_INET6)
736 if (!cmd.tcpm_keylen) {
737 if (!tcp_sk(sk)->md5sig_info)
739 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
740 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
741 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
744 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
747 if (!tcp_sk(sk)->md5sig_info) {
748 struct tcp_sock *tp = tcp_sk(sk);
749 struct tcp_md5sig_info *p;
751 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
756 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
/* NOTE(review): no NULL check of kmemdup() is visible in this view —
 * confirm the -ENOMEM check exists on the elided line following it. */
759 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
762 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
763 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
764 newkey, cmd.tcpm_keylen);
766 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
/* Feed the IPv6 TCP pseudo-header (saddr, daddr, length, protocol) into
 * the running MD5 hash; returns the crypto layer's status code. */
769 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
770 struct in6_addr *daddr,
771 struct in6_addr *saddr, int nbytes)
773 struct tcp6_pseudohdr *bp;
774 struct scatterlist sg;
776 bp = &hp->md5_blk.ip6;
777 /* 1. TCP pseudo-header (RFC2460) */
778 ipv6_addr_copy(&bp->saddr, saddr);
779 ipv6_addr_copy(&bp->daddr, daddr);
780 bp->protocol = cpu_to_be32(IPPROTO_TCP);
781 bp->len = cpu_to_be32(nbytes);
783 sg_init_one(&sg, bp, sizeof(*bp));
784 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
/* Compute the MD5 signature over pseudo-header + TCP header + key,
 * writing 16 bytes into md5_hash.  On any crypto failure the output is
 * zeroed and a non-zero status is returned (paths partly elided). */
787 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
788 struct in6_addr *daddr, struct in6_addr *saddr,
791 struct tcp_md5sig_pool *hp;
792 struct hash_desc *desc;
794 hp = tcp_get_md5sig_pool();
796 goto clear_hash_noput;
797 desc = &hp->md5_desc;
799 if (crypto_hash_init(desc))
801 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
803 if (tcp_md5_hash_header(hp, th))
805 if (tcp_md5_hash_key(hp, key))
807 if (crypto_hash_final(desc, md5_hash))
810 tcp_put_md5sig_pool();
814 tcp_put_md5sig_pool();
816 memset(md5_hash, 0, 16);
/* Compute the MD5 signature for a full segment (pseudo-header, TCP
 * header, payload, key).  The addresses come from the full socket, the
 * request_sock, or the skb itself, whichever is available. */
820 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
821 struct sock *sk, struct request_sock *req,
824 struct in6_addr *saddr, *daddr;
825 struct tcp_md5sig_pool *hp;
826 struct hash_desc *desc;
827 struct tcphdr *th = tcp_hdr(skb);
830 saddr = &inet6_sk(sk)->saddr;
831 daddr = &inet6_sk(sk)->daddr;
833 saddr = &inet6_rsk(req)->loc_addr;
834 daddr = &inet6_rsk(req)->rmt_addr;
836 struct ipv6hdr *ip6h = ipv6_hdr(skb);
837 saddr = &ip6h->saddr;
838 daddr = &ip6h->daddr;
841 hp = tcp_get_md5sig_pool();
843 goto clear_hash_noput;
844 desc = &hp->md5_desc;
846 if (crypto_hash_init(desc))
849 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
851 if (tcp_md5_hash_header(hp, th))
853 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
855 if (tcp_md5_hash_key(hp, key))
857 if (crypto_hash_final(desc, md5_hash))
860 tcp_put_md5sig_pool();
/* Error path: zero the output so callers never use a partial digest. */
864 tcp_put_md5sig_pool();
866 memset(md5_hash, 0, 16);
/* Verify the MD5 option on an inbound segment: both the configured key
 * and the option must be present together, and the recomputed digest
 * must match the one carried in the packet. */
870 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
872 __u8 *hash_location = NULL;
873 struct tcp_md5sig_key *hash_expected;
874 struct ipv6hdr *ip6h = ipv6_hdr(skb);
875 struct tcphdr *th = tcp_hdr(skb);
879 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
880 hash_location = tcp_parse_md5sig_option(th);
882 /* We've parsed the options - do we have a hash? */
883 if (!hash_expected && !hash_location)
886 if (hash_expected && !hash_location) {
887 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
891 if (!hash_expected && hash_location) {
892 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
896 /* check the signature */
897 genhash = tcp_v6_md5_hash_skb(newhash,
901 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
902 if (net_ratelimit()) {
903 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
904 genhash ? "failed" : "mismatch",
905 &ip6h->saddr, ntohs(th->source),
906 &ip6h->daddr, ntohs(th->dest));
/* request_sock operations used for IPv6 TCP connection requests. */
914 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
916 .obj_size = sizeof(struct tcp6_request_sock),
917 .rtx_syn_ack = tcp_v6_rtx_synack,
918 .send_ack = tcp_v6_reqsk_send_ack,
919 .destructor = tcp_v6_reqsk_destructor,
920 .send_reset = tcp_v6_send_reset,
921 .syn_ack_timeout = tcp_syn_ack_timeout,
924 #ifdef CONFIG_TCP_MD5SIG
/* MD5 hooks attached to IPv6 request socks. */
925 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
926 .md5_lookup = tcp_v6_reqsk_md5_lookup,
927 .calc_md5_hash = tcp_v6_md5_hash_skb,
/* Fill in th->check for an outgoing segment: for CHECKSUM_PARTIAL only
 * the pseudo-header sum is stored and the hardware/stack completes it;
 * otherwise the full checksum is computed in software. */
931 static void __tcp_v6_send_check(struct sk_buff *skb,
932 struct in6_addr *saddr, struct in6_addr *daddr)
934 struct tcphdr *th = tcp_hdr(skb);
936 if (skb->ip_summed == CHECKSUM_PARTIAL) {
937 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
938 skb->csum_start = skb_transport_header(skb) - skb->head;
939 skb->csum_offset = offsetof(struct tcphdr, check);
941 th->check = tcp_v6_check(skb->len, saddr, daddr,
942 csum_partial(th, th->doff << 2,
/* Checksum an outgoing segment using the socket's own address pair. */
947 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
949 struct ipv6_pinfo *np = inet6_sk(sk);
951 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
/* GSO checksum setup: ensure the TCP header is linear, then prime a
 * partial checksum from the skb's own IPv6 addresses. */
954 static int tcp_v6_gso_send_check(struct sk_buff *skb)
956 struct ipv6hdr *ipv6h;
959 if (!pskb_may_pull(skb, sizeof(*th)))
962 ipv6h = ipv6_hdr(skb);
966 skb->ip_summed = CHECKSUM_PARTIAL;
967 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
/* GRO receive hook: validate the hardware checksum against the IPv6
 * pseudo-header before handing the skb to the generic TCP GRO engine;
 * otherwise mark the flow for flushing. */
971 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
974 struct ipv6hdr *iph = skb_gro_network_header(skb);
976 switch (skb->ip_summed) {
977 case CHECKSUM_COMPLETE:
978 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
980 skb->ip_summed = CHECKSUM_UNNECESSARY;
986 NAPI_GRO_CB(skb)->flush = 1;
990 return tcp_gro_receive(head, skb);
/* GRO complete hook: recompute the partial checksum for the merged
 * super-packet and tag it as TCPv6 GSO before re-segmentation. */
993 static int tcp6_gro_complete(struct sk_buff *skb)
995 struct ipv6hdr *iph = ipv6_hdr(skb);
996 struct tcphdr *th = tcp_hdr(skb);
998 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
999 &iph->saddr, &iph->daddr, 0);
1000 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1002 return tcp_gro_complete(skb);
/* Build and send a bare control segment (RST when rst != 0, otherwise a
 * pure ACK) in response to 'skb', using the per-net control socket.
 * Optional timestamp and MD5 options are appended when requested. */
1005 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1006 u32 ts, struct tcp_md5sig_key *key, int rst)
1008 struct tcphdr *th = tcp_hdr(skb), *t1;
1009 struct sk_buff *buff;
1011 struct net *net = dev_net(skb_dst(skb)->dev);
1012 struct sock *ctl_sk = net->ipv6.tcp_sk;
1013 unsigned int tot_len = sizeof(struct tcphdr);
1014 struct dst_entry *dst;
/* Grow the header for each option actually present. */
1018 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1019 #ifdef CONFIG_TCP_MD5SIG
1021 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1024 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1029 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1031 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1032 skb_reset_transport_header(buff);
1034 /* Swap the send and the receive. */
1035 memset(t1, 0, sizeof(*t1));
1036 t1->dest = th->source;
1037 t1->source = th->dest;
1038 t1->doff = tot_len / 4;
1039 t1->seq = htonl(seq);
1040 t1->ack_seq = htonl(ack);
/* An RST replying to a segment that itself carried ACK has no ACK bit. */
1041 t1->ack = !rst || !th->ack;
1043 t1->window = htons(win);
1045 topt = (__be32 *)(t1 + 1);
/* Timestamp option (NOP NOP TIMESTAMP). */
1048 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1049 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1050 *topt++ = htonl(tcp_time_stamp);
1051 *topt++ = htonl(ts);
1054 #ifdef CONFIG_TCP_MD5SIG
/* MD5 option: sign header-only (no payload) with the reply addresses. */
1056 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1057 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1058 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1059 &ipv6_hdr(skb)->saddr,
1060 &ipv6_hdr(skb)->daddr, t1);
/* Reply flow: source and destination swapped relative to 'skb'. */
1064 memset(&fl, 0, sizeof(fl));
1065 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1066 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1068 buff->ip_summed = CHECKSUM_PARTIAL;
1071 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1073 fl.proto = IPPROTO_TCP;
1074 fl.oif = inet6_iif(skb);
1075 fl.fl_ip_dport = t1->dest;
1076 fl.fl_ip_sport = t1->source;
1077 security_skb_classify_flow(skb, &fl);
1079 /* Pass a socket to ip6_dst_lookup either it is for RST
1080 * Underlying function will use this to retrieve the network
1083 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1084 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1085 skb_dst_set(buff, dst);
1086 ip6_xmit(ctl_sk, buff, &fl, NULL);
1087 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1089 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
/* Send a RST in reply to 'skb'.  Never resets for non-unicast
 * destinations.  seq/ack are derived from the offending segment per
 * RFC 793 (use its ack_seq when present, otherwise ack its data). */
1097 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1099 struct tcphdr *th = tcp_hdr(skb);
1100 u32 seq = 0, ack_seq = 0;
1101 struct tcp_md5sig_key *key = NULL;
1106 if (!ipv6_unicast_destination(skb))
1109 #ifdef CONFIG_TCP_MD5SIG
1111 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1115 seq = ntohl(th->ack_seq);
1117 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1120 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
/* Thin wrapper: send a pure ACK (rst = 0) via tcp_v6_send_response(). */
1123 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1124 struct tcp_md5sig_key *key)
1126 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
/* ACK segments arriving for a TIME-WAIT socket using the state saved in
 * the timewait sock (snd_nxt/rcv_nxt, scaled window, last timestamp). */
1129 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1131 struct inet_timewait_sock *tw = inet_twsk(sk);
1132 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1134 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1135 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1136 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
/* ACK on behalf of a pending request_sock (SYN_RECV equivalent),
 * quoting the ISNs recorded when the SYN arrived. */
1141 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1142 struct request_sock *req)
1144 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1145 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
/* For a segment hitting a LISTEN socket: match it against a pending
 * request_sock, an established socket, or (as a last resort) validate a
 * SYN cookie.  Returns the socket to process the segment on. */
1149 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1151 struct request_sock *req, **prev;
1152 const struct tcphdr *th = tcp_hdr(skb);
1155 /* Find possible connection requests. */
1156 req = inet6_csk_search_req(sk, &prev, th->source,
1157 &ipv6_hdr(skb)->saddr,
1158 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1160 return tcp_check_req(sk, skb, req, prev);
1162 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1163 &ipv6_hdr(skb)->saddr, th->source,
1164 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1167 if (nsk->sk_state != TCP_TIME_WAIT) {
1171 inet_twsk_put(inet_twsk(nsk));
1175 #ifdef CONFIG_SYN_COOKIES
1177 sk = cookie_v6_check(sk, skb);
1182 /* FIXME: this is substantially similar to the ipv4 code.
1183 * Can some kind of merge be done? -- erics
/* Handle an incoming SYN on a LISTEN socket: allocate a request_sock,
 * parse options (including the experimental TCP cookie extension),
 * choose an ISN (possibly a syncookie), apply tw_recycle / backlog
 * drop heuristics, then send the SYN|ACK and queue the request. */
1185 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1187 struct tcp_extend_values tmp_ext;
1188 struct tcp_options_received tmp_opt;
1190 struct request_sock *req;
1191 struct inet6_request_sock *treq;
1192 struct ipv6_pinfo *np = inet6_sk(sk);
1193 struct tcp_sock *tp = tcp_sk(sk);
1194 __u32 isn = TCP_SKB_CB(skb)->when;
1195 struct dst_entry *dst = NULL;
1196 #ifdef CONFIG_SYN_COOKIES
1197 int want_cookie = 0;
1199 #define want_cookie 0
/* IPv4 packets on a v6 socket (mapped) take the v4 path entirely. */
1202 if (skb->protocol == htons(ETH_P_IP))
1203 return tcp_v4_conn_request(sk, skb);
1205 if (!ipv6_unicast_destination(skb))
/* SYN queue full and not a timewait recycle: warn, maybe use cookies. */
1208 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1209 if (net_ratelimit())
1210 syn_flood_warning(skb);
1211 #ifdef CONFIG_SYN_COOKIES
1212 if (sysctl_tcp_syncookies)
1219 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1222 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1226 #ifdef CONFIG_TCP_MD5SIG
1227 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1230 tcp_clear_options(&tmp_opt);
1231 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1232 tmp_opt.user_mss = tp->rx_opt.user_mss;
1233 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
/* Experimental TCP cookie-pair option (tcp_extend_values): mix local
 * secret with addresses and the initiator's cookie. */
1235 if (tmp_opt.cookie_plus > 0 &&
1236 tmp_opt.saw_tstamp &&
1237 !tp->rx_opt.cookie_out_never &&
1238 (sysctl_tcp_cookie_size > 0 ||
1239 (tp->cookie_values != NULL &&
1240 tp->cookie_values->cookie_desired > 0))) {
1243 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1244 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1246 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1249 /* Secret recipe starts with IP addresses */
1250 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1255 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1261 /* plus variable length Initiator Cookie */
1264 *c++ ^= *hash_location++;
1266 #ifdef CONFIG_SYN_COOKIES
1267 want_cookie = 0; /* not our kind of cookie */
1269 tmp_ext.cookie_out_never = 0; /* false */
1270 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1271 } else if (!tp->rx_opt.cookie_in_always) {
1272 /* redundant indications, but ensure initialization. */
1273 tmp_ext.cookie_out_never = 1; /* true */
1274 tmp_ext.cookie_plus = 0;
1278 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
/* Syncookies cannot encode options unless a timestamp was present. */
1280 if (want_cookie && !tmp_opt.saw_tstamp)
1281 tcp_clear_options(&tmp_opt);
1283 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1284 tcp_openreq_init(req, &tmp_opt, skb);
1286 treq = inet6_rsk(req);
1287 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1288 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1289 if (!want_cookie || tmp_opt.tstamp_ok)
1290 TCP_ECN_create_request(req, tcp_hdr(skb));
1293 struct inet_peer *peer = NULL;
/* Keep the SYN skb around if the user asked for packet options. */
1295 if (ipv6_opt_accepted(sk, skb) ||
1296 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1297 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1298 atomic_inc(&skb->users);
1299 treq->pktopts = skb;
1301 treq->iif = sk->sk_bound_dev_if;
1303 /* So that link locals have meaning */
1304 if (!sk->sk_bound_dev_if &&
1305 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1306 treq->iif = inet6_iif(skb);
1309 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1310 req->cookie_ts = tmp_opt.tstamp_ok;
1314 /* VJ's idea. We save last timestamp seen
1315 * from the destination in peer table, when entering
1316 * state TIME-WAIT, and check against it before
1317 * accepting new connection request.
1319 * If "isn" is not zero, this request hit alive
1320 * timewait bucket, so that all the necessary checks
1321 * are made in the function processing timewait state.
1323 if (tmp_opt.saw_tstamp &&
1324 tcp_death_row.sysctl_tw_recycle &&
1325 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1326 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1327 ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
1329 inet_peer_refcheck(peer);
1330 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1331 (s32)(peer->tcp_ts - req->ts_recent) >
1333 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1334 goto drop_and_release;
1337 /* Kill the following clause, if you dislike this way. */
1338 else if (!sysctl_tcp_syncookies &&
1339 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1340 (sysctl_max_syn_backlog >> 2)) &&
1341 (!peer || !peer->tcp_ts_stamp) &&
1342 (!dst || !dst_metric(dst, RTAX_RTT))) {
1343 /* Without syncookies last quarter of
1344 * backlog is filled with destinations,
1345 * proven to be alive.
1346 * It means that we continue to communicate
1347 * to destinations, already remembered
1348 * to the moment of synflood.
1350 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1351 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1352 goto drop_and_release;
1355 isn = tcp_v6_init_sequence(skb);
1358 tcp_rsk(req)->snt_isn = isn;
1360 security_inet_conn_request(sk, skb, req);
1362 if (tcp_v6_send_synack(sk, req,
1363 (struct request_values *)&tmp_ext) ||
1367 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1375 return 0; /* don't send reset */
/*
 * tcp_v6_syn_recv_sock() - build the child socket once the 3-way
 * handshake for a queued request on listener @sk has completed.
 *
 * Two paths are visible below:
 *  - skb->protocol == ETH_P_IP: the peer is IPv4 on a v6 listener, so
 *    the child is created via tcp_v4_syn_recv_sock() and then patched
 *    to use the v4-mapped address ops (&ipv6_mapped, tcp_v4_do_rcv).
 *  - native IPv6: route the request, clone the listener via
 *    tcp_create_openreq_child(), copy addresses from the request sock,
 *    clone SYN pktoptions and the listener's IPv6 txoptions, then set
 *    up MSS/MTU and (if CONFIG_TCP_MD5SIG) duplicate the MD5 key.
 *
 * Returns the new sock, or NULL on failure (accept queue full, route
 * or allocation failure — tail error labels bump LISTENOVERFLOWS /
 * LISTENDROPS MIB counters and free @opt when it is not np->opt).
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * gotos, return statements); code lines are kept byte-identical.
 */
1378 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1379 struct request_sock *req,
1380 struct dst_entry *dst)
1382 struct inet6_request_sock *treq;
1383 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1384 struct tcp6_sock *newtcp6sk;
1385 struct inet_sock *newinet;
1386 struct tcp_sock *newtp;
1388 struct ipv6_txoptions *opt;
1389 #ifdef CONFIG_TCP_MD5SIG
1390 struct tcp_md5sig_key *key;
/* v4-mapped case: an IPv4 SYN arrived on this IPv6 listener. */
1393 if (skb->protocol == htons(ETH_P_IP)) {
1398 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1403 newtcp6sk = (struct tcp6_sock *)newsk;
1404 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1406 newinet = inet_sk(newsk);
1407 newnp = inet6_sk(newsk);
1408 newtp = tcp_sk(newsk);
1410 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
/* Expose the v4 endpoints as ::ffff:a.b.c.d mapped addresses. */
1412 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1414 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1416 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
/* From here on, this child behaves as an IPv4 socket. */
1418 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1419 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1420 #ifdef CONFIG_TCP_MD5SIG
1421 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1424 newnp->pktoptions = NULL;
1426 newnp->mcast_oif = inet6_iif(skb);
1427 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1430 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1431 * here, tcp_create_openreq_child now does this for us, see the comment in
1432 * that function for the gory details. -acme
1435 /* It is tricky place. Until this moment IPv4 tcp
1436 worked with IPv6 icsk.icsk_af_ops.
1439 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
/* Native IPv6 case starts here. */
1444 treq = inet6_rsk(req);
1447 if (sk_acceptq_is_full(sk))
1451 dst = inet6_csk_route_req(sk, req);
1456 newsk = tcp_create_openreq_child(sk, req, skb);
1461 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1462 * count here, tcp_create_openreq_child now does this for us, see the
1463 * comment in that function for the gory details. -acme
1466 newsk->sk_gso_type = SKB_GSO_TCPV6;
1467 __ip6_dst_store(newsk, dst, NULL, NULL);
1469 newtcp6sk = (struct tcp6_sock *)newsk;
1470 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1472 newtp = tcp_sk(newsk);
1473 newinet = inet_sk(newsk);
1474 newnp = inet6_sk(newsk);
1476 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
/* Addresses come from the request sock, not the listener. */
1478 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1479 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1480 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1481 newsk->sk_bound_dev_if = treq->iif;
1483 /* Now IPv6 options...
1485 First: no IPv4 options.
1487 newinet->opt = NULL;
1488 newnp->ipv6_fl_list = NULL;
1491 newnp->rxopt.all = np->rxopt.all;
1493 /* Clone pktoptions received with SYN */
1494 newnp->pktoptions = NULL;
1495 if (treq->pktopts != NULL) {
1496 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1497 kfree_skb(treq->pktopts);
1498 treq->pktopts = NULL;
1499 if (newnp->pktoptions)
1500 skb_set_owner_r(newnp->pktoptions, newsk);
1503 newnp->mcast_oif = inet6_iif(skb);
1504 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1506 /* Clone native IPv6 options from listening socket (if any)
1508 Yes, keeping reference count would be much more clever,
1509 but we make one more one thing there: reattach optmem
1513 newnp->opt = ipv6_dup_options(newsk, opt);
1515 sock_kfree_s(sk, opt, opt->tot_len);
/* Account IPv6 extension headers in the header-length budget. */
1518 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1520 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1521 newnp->opt->opt_flen);
1523 tcp_mtup_init(newsk);
1524 tcp_sync_mss(newsk, dst_mtu(dst));
1525 newtp->advmss = dst_metric_advmss(dst);
1526 tcp_initialize_rcv_mss(newsk);
/* The v4 fields of a pure v6 socket are parked on the loopback marker. */
1528 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1529 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1531 #ifdef CONFIG_TCP_MD5SIG
1532 /* Copy over the MD5 key from the original socket */
1533 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1534 /* We're using one, so create a matching key
1535 * on the newsk structure. If we fail to get
1536 * memory, then we end up not copying the key
1539 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1541 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1542 newkey, key->keylen);
1546 if (__inet_inherit_port(sk, newsk) < 0) {
1550 __inet6_hash(newsk, NULL);
/* Error tails: account listen overflows/drops and free cloned opts. */
1555 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1557 if (opt && opt != np->opt)
1558 sock_kfree_s(sk, opt, opt->tot_len);
1561 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
/*
 * tcp_v6_checksum_init() - validate/seed the TCP checksum for an
 * incoming IPv6 segment.
 *
 * If the device provided CHECKSUM_COMPLETE and the pseudo-header check
 * passes, mark the skb CHECKSUM_UNNECESSARY. Otherwise seed skb->csum
 * with the folded pseudo-header sum; short packets (len <= 76) are
 * checksummed immediately via __skb_checksum_complete().
 * Returns 0 on success / non-zero pseudo-sum on failure (__sum16).
 * NOTE(review): trailing lines of this function are missing from the
 * extraction; code lines kept byte-identical.
 */
1565 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1567 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1568 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1569 &ipv6_hdr(skb)->daddr, skb->csum)) {
1570 skb->ip_summed = CHECKSUM_UNNECESSARY;
1575 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1576 &ipv6_hdr(skb)->saddr,
1577 &ipv6_hdr(skb)->daddr, 0));
/* Cheap to verify small packets right away. */
1579 if (skb->len <= 76) {
1580 return __skb_checksum_complete(skb);
1585 /* The socket must have it's spinlock held when we get
1588 * We have a potential double-lock case here, so even when
1589 * doing backlog processing we use the BH locking scheme.
1590 * This is because we cannot sleep with the original spinlock
/*
 * tcp_v6_do_rcv() - per-socket receive path for an IPv6 TCP segment,
 * called with the socket spinlock held (directly or via backlog).
 *
 * v4-mapped skbs are handed straight to tcp_v4_do_rcv(). Otherwise the
 * segment is run through the MD5 check (CONFIG_TCP_MD5SIG), the socket
 * filter, and then either the established fast path
 * (tcp_rcv_established), the listener path (tcp_v6_hnd_req +
 * tcp_child_process), or tcp_rcv_state_process. A clone of the skb is
 * kept in @opt_skb so IPV6_PKTOPTIONS can be latched at the end.
 * Returns 0 when consumed; error paths send a reset and/or bump
 * TCP_MIB_INERRS. NOTE(review): interior lines (labels, returns) are
 * missing from this extraction; code lines kept byte-identical.
 */
1593 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1595 struct ipv6_pinfo *np = inet6_sk(sk);
1596 struct tcp_sock *tp;
1597 struct sk_buff *opt_skb = NULL;
1599 /* Imagine: socket is IPv6. IPv4 packet arrives,
1600 goes to IPv4 receive handler and backlogged.
1601 From backlog it always goes here. Kerboom...
1602 Fortunately, tcp_rcv_established and rcv_established
1603 handle them correctly, but it is not case with
1604 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1607 if (skb->protocol == htons(ETH_P_IP))
1608 return tcp_v4_do_rcv(sk, skb);
1610 #ifdef CONFIG_TCP_MD5SIG
1611 if (tcp_v6_inbound_md5_hash (sk, skb))
1615 if (sk_filter(sk, skb))
1619 * socket locking is here for SMP purposes as backlog rcv
1620 * is currently called with bh processing disabled.
1623 /* Do Stevens' IPV6_PKTOPTIONS.
1625 Yes, guys, it is the only place in our code, where we
1626 may make it not affecting IPv4.
1627 The rest of code is protocol independent,
1628 and I do not like idea to uglify IPv4.
1630 Actually, all the idea behind IPV6_PKTOPTIONS
1631 looks not very well thought. For now we latch
1632 options, received in the last packet, enqueued
1633 by tcp. Feel free to propose better solution.
1637 opt_skb = skb_clone(skb, GFP_ATOMIC);
1639 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1640 TCP_CHECK_TIMER(sk);
1641 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1643 TCP_CHECK_TIMER(sk);
1645 goto ipv6_pktoptions;
/* Slow path: header/checksum sanity before state processing. */
1649 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1652 if (sk->sk_state == TCP_LISTEN) {
1653 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1658 * Queue it on the new socket if the new socket is active,
1659 * otherwise we just shortcircuit this and continue with
1663 if (tcp_child_process(sk, nsk, skb))
1666 __kfree_skb(opt_skb);
1671 TCP_CHECK_TIMER(sk);
1672 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1674 TCP_CHECK_TIMER(sk);
1676 goto ipv6_pktoptions;
/* Error tails: reset the peer, free the pktoptions clone, count errors. */
1680 tcp_v6_send_reset(sk, skb);
1683 __kfree_skb(opt_skb);
1687 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1692 /* Do you ask, what is it?
1694 1. skb was enqueued by tcp.
1695 2. skb is added to tail of read queue, rather than out of order.
1696 3. socket is not in passive state.
1697 4. Finally, it really contains options, which user wants to receive.
1700 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1701 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1702 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1703 np->mcast_oif = inet6_iif(opt_skb);
1704 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1705 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1706 if (ipv6_opt_accepted(sk, opt_skb)) {
1707 skb_set_owner_r(opt_skb, sk);
/* Latch the newest pktoptions; xchg returns the old one to free. */
1708 opt_skb = xchg(&np->pktoptions, opt_skb);
1710 __kfree_skb(opt_skb);
1711 opt_skb = xchg(&np->pktoptions, NULL);
/*
 * tcp_v6_rcv() - IPv6 protocol handler entry point for TCP
 * (registered as tcpv6_protocol.handler).
 *
 * Validates the header (pskb_may_pull, doff, checksum), fills
 * TCP_SKB_CB() from the wire header, looks the socket up in
 * tcp_hashinfo, applies min_hopcount / XFRM policy / socket-filter
 * drops, then delivers via tcp_v6_do_rcv() directly, through the
 * prequeue, or onto the backlog under bh_lock_sock_nested(). The tail
 * handles no-socket (send RST) and TIME_WAIT (tcp_timewait_state_process
 * switch, possibly re-targeting a new listener on TCP_TW_SYN).
 * NOTE(review): interior lines (labels, some branches) are missing from
 * this extraction; code lines kept byte-identical.
 */
1719 static int tcp_v6_rcv(struct sk_buff *skb)
1722 struct ipv6hdr *hdr;
1725 struct net *net = dev_net(skb->dev);
1727 if (skb->pkt_type != PACKET_HOST)
1731 * Count it even if it's bad.
1733 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1735 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1740 if (th->doff < sizeof(struct tcphdr)/4)
1742 if (!pskb_may_pull(skb, th->doff*4))
1745 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
/* Stash parsed header fields in the skb control block. */
1749 hdr = ipv6_hdr(skb);
1750 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1751 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1752 skb->len - th->doff*4);
1753 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1754 TCP_SKB_CB(skb)->when = 0;
1755 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1756 TCP_SKB_CB(skb)->sacked = 0;
1758 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1763 if (sk->sk_state == TCP_TIME_WAIT)
/* Generalized-TTL-security style drop (IP_MINTTL analogue). */
1766 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1767 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1768 goto discard_and_relse;
1771 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1772 goto discard_and_relse;
1774 if (sk_filter(sk, skb))
1775 goto discard_and_relse;
1779 bh_lock_sock_nested(sk);
1781 if (!sock_owned_by_user(sk)) {
1782 #ifdef CONFIG_NET_DMA
1783 struct tcp_sock *tp = tcp_sk(sk);
1784 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1785 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1786 if (tp->ucopy.dma_chan)
1787 ret = tcp_v6_do_rcv(sk, skb);
1791 if (!tcp_prequeue(sk, skb))
1792 ret = tcp_v6_do_rcv(sk, skb);
/* Socket owned by user context: queue on the backlog (may drop). */
1794 } else if (unlikely(sk_add_backlog(sk, skb))) {
1796 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1797 goto discard_and_relse;
1802 return ret ? -1 : 0;
/* No matching socket: policy-check then reset the sender. */
1805 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1808 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1810 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1812 tcp_v6_send_reset(NULL, skb);
/* TIME_WAIT handling. */
1829 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1830 inet_twsk_put(inet_twsk(sk));
1834 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1835 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1836 inet_twsk_put(inet_twsk(sk));
1840 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
/* TCP_TW_SYN: a new SYN may legitimately reuse this tuple —
 * look for a current listener and retarget the segment. */
1845 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1846 &ipv6_hdr(skb)->daddr,
1847 ntohs(th->dest), inet6_iif(skb));
1849 struct inet_timewait_sock *tw = inet_twsk(sk);
1850 inet_twsk_deschedule(tw, &tcp_death_row);
1855 /* Fall through to ACK */
1858 tcp_v6_timewait_ack(sk, skb);
1862 case TCP_TW_SUCCESS:;
/*
 * tcp_v6_get_peer() - return the inet_peer for this connection's
 * destination address.
 *
 * When no cached route matches np->daddr, a standalone peer is looked
 * up with inet_getpeer_v6() (caller must release — presumably
 * *release_it is set true on that path; the line is missing from this
 * extraction). Otherwise the peer bound to the route (rt6i_peer) is
 * used and *release_it is false. Code lines kept byte-identical.
 */
1867 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1869 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1870 struct ipv6_pinfo *np = inet6_sk(sk);
1871 struct inet_peer *peer;
1874 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1875 peer = inet_getpeer_v6(&np->daddr, 1);
1879 rt6_bind_peer(rt, 1);
1880 peer = rt->rt6i_peer;
1881 *release_it = false;
/*
 * tcp_v6_tw_get_peer() - twsk_getpeer callback: fetch the inet_peer
 * for a timewait socket. AF_INET timewait sockets (v4-mapped) are
 * delegated to tcp_v4_tw_get_peer(); otherwise the peer is looked up
 * by the stored IPv6 destination address.
 */
1887 static void *tcp_v6_tw_get_peer(struct sock *sk)
1889 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1890 struct inet_timewait_sock *tw = inet_twsk(sk);
1892 if (tw->tw_family == AF_INET)
1893 return tcp_v4_tw_get_peer(sk);
1895 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
/* Timewait-socket operations for TCPv6 (size, uniqueness check,
 * destructor, and peer lookup for TIME_WAIT sockets). */
1898 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1899 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1900 .twsk_unique = tcp_twsk_unique,
1901 .twsk_destructor= tcp_twsk_destructor,
1902 .twsk_getpeer = tcp_v6_tw_get_peer,
/* Address-family operations for native TCP-over-IPv6 sockets:
 * transmit, header rebuild, connection-request handling, and
 * sockopt plumbing all go through the IPv6 implementations. */
1905 static const struct inet_connection_sock_af_ops ipv6_specific = {
1906 .queue_xmit = inet6_csk_xmit,
1907 .send_check = tcp_v6_send_check,
1908 .rebuild_header = inet6_sk_rebuild_header,
1909 .conn_request = tcp_v6_conn_request,
1910 .syn_recv_sock = tcp_v6_syn_recv_sock,
1911 .get_peer = tcp_v6_get_peer,
1912 .net_header_len = sizeof(struct ipv6hdr),
1913 .setsockopt = ipv6_setsockopt,
1914 .getsockopt = ipv6_getsockopt,
1915 .addr2sockaddr = inet6_csk_addr2sockaddr,
1916 .sockaddr_len = sizeof(struct sockaddr_in6),
1917 .bind_conflict = inet6_csk_bind_conflict,
1918 #ifdef CONFIG_COMPAT
1919 .compat_setsockopt = compat_ipv6_setsockopt,
1920 .compat_getsockopt = compat_ipv6_getsockopt,
/* TCP-MD5 (RFC 2385) callbacks for native IPv6 sockets. */
1924 #ifdef CONFIG_TCP_MD5SIG
1925 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1926 .md5_lookup = tcp_v6_md5_lookup,
1927 .calc_md5_hash = tcp_v6_md5_hash_skb,
1928 .md5_add = tcp_v6_md5_add_func,
1929 .md5_parse = tcp_v6_parse_md5_keys,
1934 * TCP over IPv4 via INET6 API
/* af_ops for v4-mapped sockets: an AF_INET6 socket talking to an IPv4
 * peer. Transmit/checksum/header-rebuild use the IPv4 routines, while
 * sockopts and sockaddr conversion remain IPv6 (sockaddr_in6). */
1937 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1938 .queue_xmit = ip_queue_xmit,
1939 .send_check = tcp_v4_send_check,
1940 .rebuild_header = inet_sk_rebuild_header,
1941 .conn_request = tcp_v6_conn_request,
1942 .syn_recv_sock = tcp_v6_syn_recv_sock,
1943 .get_peer = tcp_v4_get_peer,
1944 .net_header_len = sizeof(struct iphdr),
1945 .setsockopt = ipv6_setsockopt,
1946 .getsockopt = ipv6_getsockopt,
1947 .addr2sockaddr = inet6_csk_addr2sockaddr,
1948 .sockaddr_len = sizeof(struct sockaddr_in6),
1949 .bind_conflict = inet6_csk_bind_conflict,
1950 #ifdef CONFIG_COMPAT
1951 .compat_setsockopt = compat_ipv6_setsockopt,
1952 .compat_getsockopt = compat_ipv6_getsockopt,
/* TCP-MD5 callbacks for v4-mapped sockets: hash computation uses the
 * IPv4 pseudo-header, key add/parse stay on the v6 entry points. */
1956 #ifdef CONFIG_TCP_MD5SIG
1957 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1958 .md5_lookup = tcp_v4_md5_lookup,
1959 .calc_md5_hash = tcp_v4_md5_hash_skb,
1960 .md5_add = tcp_v6_md5_add_func,
1961 .md5_parse = tcp_v6_parse_md5_keys,
1965 /* NOTE: A lot of things set to zero explicitly by call to
1966 * sk_alloc() so need not be done here.
/*
 * tcp_v6_init_sock() - proto .init hook for new TCPv6 sockets.
 * Initializes timers, queues, RTO/ssthresh/cwnd defaults, wires in
 * &ipv6_specific af_ops (and the MD5 af_specific under
 * CONFIG_TCP_MD5SIG), optionally allocates TCP-cookie-transaction
 * state, and sizes the send/receive buffers from the tcp_{w,r}mem
 * sysctls. NOTE(review): some lines (e.g. the return) are missing
 * from this extraction; code lines kept byte-identical.
 */
1968 static int tcp_v6_init_sock(struct sock *sk)
1970 struct inet_connection_sock *icsk = inet_csk(sk);
1971 struct tcp_sock *tp = tcp_sk(sk);
1973 skb_queue_head_init(&tp->out_of_order_queue);
1974 tcp_init_xmit_timers(sk);
1975 tcp_prequeue_init(tp);
1977 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1978 tp->mdev = TCP_TIMEOUT_INIT;
1980 /* So many TCP implementations out there (incorrectly) count the
1981 * initial SYN frame in their delayed-ACK and congestion control
1982 * algorithms that we must have the following bandaid to talk
1983 * efficiently to them. -DaveM
1987 /* See draft-stevens-tcpca-spec-01 for discussion of the
1988 * initialization of these values.
1990 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1991 tp->snd_cwnd_clamp = ~0;
1992 tp->mss_cache = TCP_MSS_DEFAULT;
1994 tp->reordering = sysctl_tcp_reordering;
1996 sk->sk_state = TCP_CLOSE;
1998 icsk->icsk_af_ops = &ipv6_specific;
1999 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
2000 icsk->icsk_sync_mss = tcp_sync_mss;
2001 sk->sk_write_space = sk_stream_write_space;
2002 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2004 #ifdef CONFIG_TCP_MD5SIG
2005 tp->af_specific = &tcp_sock_ipv6_specific;
2008 /* TCP Cookie Transactions */
2009 if (sysctl_tcp_cookie_size > 0) {
2010 /* Default, cookies without s_data_payload. */
2012 kzalloc(sizeof(*tp->cookie_values),
2014 if (tp->cookie_values != NULL)
2015 kref_init(&tp->cookie_values->kref);
2017 /* Presumed zeroed, in order of appearance:
2018 * cookie_in_always, cookie_out_never,
2019 * s_data_constant, s_data_in, s_data_out
2021 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2022 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2025 percpu_counter_inc(&tcp_sockets_allocated);
/*
 * tcp_v6_destroy_sock() - proto .destroy hook: free the MD5 key list
 * (if configured), then run the shared v4 TCP teardown followed by
 * generic IPv6 socket teardown.
 */
2031 static void tcp_v6_destroy_sock(struct sock *sk)
2033 #ifdef CONFIG_TCP_MD5SIG
2034 /* Clean up the MD5 key list */
2035 if (tcp_sk(sk)->md5sig_info)
2036 tcp_v6_clear_md5_list(sk);
2038 tcp_v4_destroy_sock(sk);
2039 inet6_destroy_sock(sk);
2042 #ifdef CONFIG_PROC_FS
2043 /* Proc filesystem TCPv6 sock list dumping. */
/*
 * get_openreq6() - emit one /proc/net/tcp6 line for a pending open
 * request (SYN_RECV) @req on listener @sk: v6 addresses/ports, the
 * remaining expire time, and the owning uid. @i is the row number.
 */
2044 static void get_openreq6(struct seq_file *seq,
2045 struct sock *sk, struct request_sock *req, int i, int uid)
2047 int ttd = req->expires - jiffies;
2048 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2049 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2055 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2056 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2058 src->s6_addr32[0], src->s6_addr32[1],
2059 src->s6_addr32[2], src->s6_addr32[3],
2060 ntohs(inet_rsk(req)->loc_port),
2061 dest->s6_addr32[0], dest->s6_addr32[1],
2062 dest->s6_addr32[2], dest->s6_addr32[3],
2063 ntohs(inet_rsk(req)->rmt_port),
2065 0,0, /* could print option size, but that is af dependent. */
2066 1, /* timers active (only the expire timer) */
2067 jiffies_to_clock_t(ttd),
2070 0, /* non standard timer */
2071 0, /* open_requests have no inode */
/*
 * get_tcp6_sock() - emit one /proc/net/tcp6 line for a full socket
 * @sp (listening or established): addresses/ports, state, tx/rx queue
 * depths, which timer is pending and when it fires, retransmit and
 * probe counts, and congestion-control state.
 */
2075 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2077 struct in6_addr *dest, *src;
2080 unsigned long timer_expires;
2081 struct inet_sock *inet = inet_sk(sp);
2082 struct tcp_sock *tp = tcp_sk(sp);
2083 const struct inet_connection_sock *icsk = inet_csk(sp);
2084 struct ipv6_pinfo *np = inet6_sk(sp);
2087 src = &np->rcv_saddr;
2088 destp = ntohs(inet->inet_dport);
2089 srcp = ntohs(inet->inet_sport);
/* Pick whichever timer is pending: retransmit, zero-window probe,
 * or the keepalive sk_timer; fall back to "now". */
2091 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2093 timer_expires = icsk->icsk_timeout;
2094 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2096 timer_expires = icsk->icsk_timeout;
2097 } else if (timer_pending(&sp->sk_timer)) {
2099 timer_expires = sp->sk_timer.expires;
2102 timer_expires = jiffies;
2106 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2107 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2109 src->s6_addr32[0], src->s6_addr32[1],
2110 src->s6_addr32[2], src->s6_addr32[3], srcp,
2111 dest->s6_addr32[0], dest->s6_addr32[1],
2112 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2114 tp->write_seq-tp->snd_una,
2115 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2117 jiffies_to_clock_t(timer_expires - jiffies),
2118 icsk->icsk_retransmits,
2120 icsk->icsk_probes_out,
2122 atomic_read(&sp->sk_refcnt), sp,
2123 jiffies_to_clock_t(icsk->icsk_rto),
2124 jiffies_to_clock_t(icsk->icsk_ack.ato),
2125 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2127 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
/*
 * get_timewait6_sock() - emit one /proc/net/tcp6 line for a TIME_WAIT
 * socket @tw: v6 addresses/ports, substate, and remaining timewait
 * timer (timer type printed as 3). @i is the row number.
 */
2131 static void get_timewait6_sock(struct seq_file *seq,
2132 struct inet_timewait_sock *tw, int i)
2134 struct in6_addr *dest, *src;
2136 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2137 int ttd = tw->tw_ttd - jiffies;
2142 dest = &tw6->tw_v6_daddr;
2143 src = &tw6->tw_v6_rcv_saddr;
2144 destp = ntohs(tw->tw_dport);
2145 srcp = ntohs(tw->tw_sport);
2148 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2149 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2151 src->s6_addr32[0], src->s6_addr32[1],
2152 src->s6_addr32[2], src->s6_addr32[3], srcp,
2153 dest->s6_addr32[0], dest->s6_addr32[1],
2154 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2155 tw->tw_substate, 0, 0,
2156 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2157 atomic_read(&tw->tw_refcnt), tw);
/*
 * tcp6_seq_show() - seq_file .show callback for /proc/net/tcp6.
 * Prints the column header for SEQ_START_TOKEN, otherwise dispatches
 * on the iterator state to the matching row formatter (full socket,
 * open request, or timewait socket).
 */
2160 static int tcp6_seq_show(struct seq_file *seq, void *v)
2162 struct tcp_iter_state *st;
2164 if (v == SEQ_START_TOKEN) {
2169 "st tx_queue rx_queue tr tm->when retrnsmt"
2170 " uid timeout inode\n");
2175 switch (st->state) {
2176 case TCP_SEQ_STATE_LISTENING:
2177 case TCP_SEQ_STATE_ESTABLISHED:
2178 get_tcp6_sock(seq, v, st->num);
2180 case TCP_SEQ_STATE_OPENREQ:
2181 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2183 case TCP_SEQ_STATE_TIME_WAIT:
2184 get_timewait6_sock(seq, v, st->num);
/* seq_file descriptor used to register /proc/net/tcp6. */
2191 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2195 .owner = THIS_MODULE,
2198 .show = tcp6_seq_show,
/* Register /proc/net/tcp6 for network namespace @net. */
2202 int __net_init tcp6_proc_init(struct net *net)
2204 return tcp_proc_register(net, &tcp6_seq_afinfo);
/* Unregister /proc/net/tcp6 for network namespace @net. */
2207 void tcp6_proc_exit(struct net *net)
2209 tcp_proc_unregister(net, &tcp6_seq_afinfo);
/* struct proto for SOCK_STREAM/IPPROTO_TCP over AF_INET6: the
 * per-socket operation table plus memory-accounting knobs shared with
 * IPv4 TCP (tcp_sockets_allocated, sysctl_tcp_*mem, tcp_hashinfo). */
2213 struct proto tcpv6_prot = {
2215 .owner = THIS_MODULE,
2217 .connect = tcp_v6_connect,
2218 .disconnect = tcp_disconnect,
2219 .accept = inet_csk_accept,
2221 .init = tcp_v6_init_sock,
2222 .destroy = tcp_v6_destroy_sock,
2223 .shutdown = tcp_shutdown,
2224 .setsockopt = tcp_setsockopt,
2225 .getsockopt = tcp_getsockopt,
2226 .recvmsg = tcp_recvmsg,
2227 .sendmsg = tcp_sendmsg,
2228 .sendpage = tcp_sendpage,
2229 .backlog_rcv = tcp_v6_do_rcv,
2230 .hash = tcp_v6_hash,
2231 .unhash = inet_unhash,
2232 .get_port = inet_csk_get_port,
2233 .enter_memory_pressure = tcp_enter_memory_pressure,
2234 .sockets_allocated = &tcp_sockets_allocated,
2235 .memory_allocated = &tcp_memory_allocated,
2236 .memory_pressure = &tcp_memory_pressure,
2237 .orphan_count = &tcp_orphan_count,
2238 .sysctl_mem = sysctl_tcp_mem,
2239 .sysctl_wmem = sysctl_tcp_wmem,
2240 .sysctl_rmem = sysctl_tcp_rmem,
2241 .max_header = MAX_TCP_HEADER,
2242 .obj_size = sizeof(struct tcp6_sock),
2243 .slab_flags = SLAB_DESTROY_BY_RCU,
2244 .twsk_prot = &tcp6_timewait_sock_ops,
2245 .rsk_prot = &tcp6_request_sock_ops,
2246 .h.hashinfo = &tcp_hashinfo,
2247 .no_autobind = true,
2248 #ifdef CONFIG_COMPAT
2249 .compat_setsockopt = compat_tcp_setsockopt,
2250 .compat_getsockopt = compat_tcp_getsockopt,
/* inet6 protocol handler for IPPROTO_TCP: receive entry point, ICMPv6
 * error handler, and GSO/GRO offload hooks. NOPOLICY/FINAL: XFRM
 * policy is checked inside tcp_v6_rcv() itself. */
2254 static const struct inet6_protocol tcpv6_protocol = {
2255 .handler = tcp_v6_rcv,
2256 .err_handler = tcp_v6_err,
2257 .gso_send_check = tcp_v6_gso_send_check,
2258 .gso_segment = tcp_tso_segment,
2259 .gro_receive = tcp6_gro_receive,
2260 .gro_complete = tcp6_gro_complete,
2261 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* Protocol-switch entry mapping socket(AF_INET6, SOCK_STREAM,
 * IPPROTO_TCP) onto tcpv6_prot / inet6_stream_ops. */
2264 static struct inet_protosw tcpv6_protosw = {
2265 .type = SOCK_STREAM,
2266 .protocol = IPPROTO_TCP,
2267 .prot = &tcpv6_prot,
2268 .ops = &inet6_stream_ops,
2270 .flags = INET_PROTOSW_PERMANENT |
/* Per-netns init: create the kernel control socket used for sending
 * TCPv6 resets/ACKs on behalf of this namespace. */
2274 static int __net_init tcpv6_net_init(struct net *net)
2276 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2277 SOCK_RAW, IPPROTO_TCP, net);
/* Per-netns exit: tear down the namespace's control socket. */
2280 static void __net_exit tcpv6_net_exit(struct net *net)
2282 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
/* Batched netns exit: purge AF_INET6 timewait sockets belonging to the
 * dying namespaces in one pass over the hash table. */
2285 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2287 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
/* Per-network-namespace lifecycle hooks for TCPv6. */
2290 static struct pernet_operations tcpv6_net_ops = {
2291 .init = tcpv6_net_init,
2292 .exit = tcpv6_net_exit,
2293 .exit_batch = tcpv6_net_exit_batch,
/*
 * tcpv6_init() - module/boot-time registration: inet6 protocol
 * handler, protocol-switch entry, then pernet ops; error labels
 * unwind in reverse order. Returns 0 on success or a negative errno.
 * NOTE(review): goto targets and some return lines are missing from
 * this extraction; code lines kept byte-identical.
 */
2296 int __init tcpv6_init(void)
2300 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2304 /* register inet6 protocol */
2305 ret = inet6_register_protosw(&tcpv6_protosw);
2307 goto out_tcpv6_protocol;
2309 ret = register_pernet_subsys(&tcpv6_net_ops);
2311 goto out_tcpv6_protosw;
2316 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2318 inet6_unregister_protosw(&tcpv6_protosw);
2322 void tcpv6_exit(void)
2324 unregister_pernet_subsys(&tcpv6_net_ops);
2325 inet6_unregister_protosw(&tcpv6_protosw);
2326 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);