net: Compute protocol sequence numbers and fragment IDs using MD5, CVE-2011-3188
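
The change named in the subject line touches this file only lightly: the new
<net/secure_seq.h> include is the home of secure_tcpv6_sequence_number(),
whose rebuilt implementation (in net/core/secure_seq.c, outside this diff)
derives the ISN from an MD5 hash over the connection 4-tuple and a random
secret, plus a coarse clock component; the rest of the hunks are unrelated
tcp_ipv6.c updates bundled into the same commit.  The standalone userspace
sketch below shows that structure only as an illustration: it is not the
kernel code, the names isn_sketch.c, isn_v6() and isn_secret are invented
for the example, and it leans on OpenSSL's MD5() and RAND_bytes().

/*
 * Sketch of RFC 1948-style initial sequence number generation, assuming
 * the OpenSSL development headers are available.
 *
 * Build:  cc -o isn_sketch isn_sketch.c -lcrypto
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <openssl/md5.h>
#include <openssl/rand.h>

/* Per-boot random secret; the kernel keeps an equivalent secret pool. */
static unsigned char isn_secret[16];

static void isn_secret_init(void)
{
	RAND_bytes(isn_secret, sizeof(isn_secret));
}

/*
 * ISN = MD5(saddr, daddr, sport, dport, secret) truncated to 32 bits,
 * plus a coarse clock.  The keyed hash makes the sequence space
 * unpredictable per 4-tuple (the point of CVE-2011-3188); the clock
 * keeps successive connections monotonically advancing.
 */
static uint32_t isn_v6(const uint8_t saddr[16], const uint8_t daddr[16],
		       uint16_t sport, uint16_t dport)
{
	unsigned char msg[16 + 16 + 2 + 2 + sizeof(isn_secret)];
	unsigned char digest[MD5_DIGEST_LENGTH];
	uint32_t hash;
	struct timespec ts;

	memcpy(msg, saddr, 16);
	memcpy(msg + 16, daddr, 16);
	memcpy(msg + 32, &sport, 2);
	memcpy(msg + 34, &dport, 2);
	memcpy(msg + 36, isn_secret, sizeof(isn_secret));

	MD5(msg, sizeof(msg), digest);
	memcpy(&hash, digest, sizeof(hash));

	clock_gettime(CLOCK_MONOTONIC, &ts);
	/* Roughly 64 ns per tick, comparable to the kernel's seq_scale(). */
	return hash + (uint32_t)(((uint64_t)ts.tv_sec * 1000000000ull +
				  (uint64_t)ts.tv_nsec) >> 6);
}

int main(void)
{
	uint8_t src[16] = { [15] = 1 };		/* ::1 */
	uint8_t dst[16] = { [15] = 1 };

	isn_secret_init();
	printf("isn = %u\n", (unsigned int)isn_v6(src, dst, 12345, 80));
	return 0;
}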

[linux-flexiantxendom0-natty.git]
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 424d9c4..869772c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -23,6 +23,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -37,6 +38,7 @@
 #include <linux/jhash.h>
 #include <linux/ipsec.h>
 #include <linux/times.h>
+#include <linux/slab.h>
 
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
@@ -59,6 +61,7 @@
 #include <net/timewait_sock.h>
 #include <net/netdma.h>
 #include <net/inet_common.h>
+#include <net/secure_seq.h>
 
 #include <asm/uaccess.h>
 
@@ -73,12 +76,15 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req);
 
 static int     tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+static void    __tcp_v6_send_check(struct sk_buff *skb,
+                                   struct in6_addr *saddr,
+                                   struct in6_addr *daddr);
 
-static struct inet_connection_sock_af_ops ipv6_mapped;
-static struct inet_connection_sock_af_ops ipv6_specific;
+static const struct inet_connection_sock_af_ops ipv6_mapped;
+static const struct inet_connection_sock_af_ops ipv6_specific;
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
-static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
+static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #else
 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
                                                   struct in6_addr *addr)
@@ -95,12 +101,12 @@ static void tcp_v6_hash(struct sock *sk)
                        return;
                }
                local_bh_disable();
-               __inet6_hash(sk);
+               __inet6_hash(sk, NULL);
                local_bh_enable();
        }
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   __wsum base)
@@ -124,7 +130,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       struct in6_addr *saddr = NULL, *final_p = NULL, final;
+       struct in6_addr *saddr = NULL, *final_p, final;
+       struct rt6_info *rt;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
@@ -134,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                return -EINVAL;
 
        if (usin->sin6_family != AF_INET6)
-               return(-EAFNOSUPPORT);
+               return -EAFNOSUPPORT;
 
        memset(&fl, 0, sizeof(fl));
 
@@ -225,10 +232,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 #endif
                        goto failure;
                } else {
-                       ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
-                                     inet->saddr);
-                       ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
-                                     inet->rcv_saddr);
+                       ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
+                       ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
+                                              &np->rcv_saddr);
                }
 
                return err;
@@ -242,15 +248,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
+       fl.mark = sk->sk_mark;
        fl.fl_ip_dport = usin->sin6_port;
-       fl.fl_ip_sport = inet->sport;
+       fl.fl_ip_sport = inet->inet_sport;
 
-       if (np->opt && np->opt->srcrt) {
-               struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-               ipv6_addr_copy(&final, &fl.fl6_dst);
-               ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-               final_p = &final;
-       }
+       final_p = fl6_update_dst(&fl, np->opt, &final);
 
        security_sk_classify_flow(sk, &fl);
 
@@ -260,7 +262,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);
 
-       if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
+       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
+       if (err < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
@@ -274,11 +277,31 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
-       inet->rcv_saddr = LOOPBACK4_IPV6;
+       inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        sk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(sk, dst, NULL, NULL);
 
+       rt = (struct rt6_info *) dst;
+       if (tcp_death_row.sysctl_tw_recycle &&
+           !tp->rx_opt.ts_recent_stamp &&
+           ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
+               struct inet_peer *peer = rt6_get_peer(rt);
+               /*
+                * VJ's idea. We save last timestamp seen from
+                * the destination in peer table, when entering state
+                * TIME-WAIT * and initialize rx_opt.ts_recent from it,
+                * when trying new connection.
+                */
+               if (peer) {
+                       inet_peer_refcheck(peer);
+                       if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+                               tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+                               tp->rx_opt.ts_recent = peer->tcp_ts;
+                       }
+               }
+       }
+
        icsk->icsk_ext_hdr_len = 0;
        if (np->opt)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
@@ -286,7 +309,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 
-       inet->dport = usin->sin6_port;
+       inet->inet_dport = usin->sin6_port;
 
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(&tcp_death_row, sk);
@@ -296,8 +319,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
-                                                            inet->sport,
-                                                            inet->dport);
+                                                            inet->inet_sport,
+                                                            inet->inet_dport);
 
        err = tcp_connect(sk);
        if (err)
@@ -309,13 +332,13 @@ late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
 failure:
-       inet->dport = 0;
+       inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
 }
 
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               int type, int code, int offset, __be32 info)
+               u8 type, u8 code, int offset, __be32 info)
 {
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
@@ -347,6 +370,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (sk->sk_state == TCP_CLOSE)
                goto out;
 
+       if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
+               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               goto out;
+       }
+
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
@@ -381,8 +409,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
-                       fl.fl_ip_dport = inet->dport;
-                       fl.fl_ip_sport = inet->sport;
+                       fl.mark = sk->sk_mark;
+                       fl.fl_ip_dport = inet->inet_dport;
+                       fl.fl_ip_sport = inet->inet_sport;
                        security_skb_classify_flow(skb, &fl);
 
                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
@@ -390,7 +419,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                goto out;
                        }
 
-                       if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
+                       if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
@@ -458,13 +487,14 @@ out:
 }
 
 
-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
+static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
+                             struct request_values *rvp)
 {
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
-       struct in6_addr * final_p = NULL, final;
+       struct in6_addr * final_p, final;
        struct flowi fl;
        struct dst_entry *dst;
        int err = -1;
@@ -475,36 +505,28 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
        ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = treq->iif;
+       fl.mark = sk->sk_mark;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-       fl.fl_ip_sport = inet_sk(sk)->sport;
+       fl.fl_ip_sport = inet_rsk(req)->loc_port;
        security_req_classify_flow(req, &fl);
 
        opt = np->opt;
-       if (opt && opt->srcrt) {
-               struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-               ipv6_addr_copy(&final, &fl.fl6_dst);
-               ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-               final_p = &final;
-       }
+       final_p = fl6_update_dst(&fl, opt, &final);
 
        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);
-       if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+       if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req);
+       skb = tcp_make_synack(sk, dst, req, rvp);
        if (skb) {
-               struct tcphdr *th = tcp_hdr(skb);
-
-               th->check = tcp_v6_check(th, skb->len,
-                                        &treq->loc_addr, &treq->rmt_addr,
-                                        csum_partial((char *)th, skb->len, skb->csum));
+               __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-               err = ip6_xmit(sk, skb, &fl, opt, 0);
+               err = ip6_xmit(sk, skb, &fl, opt);
                err = net_xmit_eval(err);
        }
 
@@ -515,6 +537,13 @@ done:
        return err;
 }
 
+static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
+                            struct request_values *rvp)
+{
+       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+       return tcp_v6_send_synack(sk, req, rvp);
+}
+
 static inline void syn_flood_warning(struct sk_buff *skb)
 {
 #ifdef CONFIG_SYN_COOKIES
@@ -531,8 +560,7 @@ static inline void syn_flood_warning(struct sk_buff *skb)
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
-       if (inet6_rsk(req)->pktopts)
-               kfree_skb(inet6_rsk(req)->pktopts);
+       kfree_skb(inet6_rsk(req)->pktopts);
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -588,9 +616,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
                                kfree(newkey);
                                return -ENOMEM;
                        }
-                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+                       sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
-               if (tcp_alloc_md5sig_pool() == NULL) {
+               if (tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
@@ -725,7 +753,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
                        return -ENOMEM;
 
                tp->md5sig_info = p;
-               sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+               sk_nocaps_add(sk, NETIF_F_GSO_MASK);
        }
 
        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
@@ -872,12 +900,10 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                if (net_ratelimit()) {
-                       printk(KERN_INFO "MD5 Hash %s for "
-                              "(" NIP6_FMT ", %u)->"
-                              "(" NIP6_FMT ", %u)\n",
+                       printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                               genhash ? "failed" : "mismatch",
-                              NIP6(ip6h->saddr), ntohs(th->source),
-                              NIP6(ip6h->daddr), ntohs(th->dest));
+                              &ip6h->saddr, ntohs(th->source),
+                              &ip6h->daddr, ntohs(th->dest));
                }
                return 1;
        }
@@ -888,40 +914,43 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
-       .rtx_syn_ack    =       tcp_v6_send_synack,
+       .rtx_syn_ack    =       tcp_v6_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
-       .send_reset     =       tcp_v6_send_reset
+       .send_reset     =       tcp_v6_send_reset,
+       .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
+       .calc_md5_hash  =       tcp_v6_md5_hash_skb,
 };
 #endif
 
-static struct timewait_sock_ops tcp6_timewait_sock_ops = {
-       .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
-       .twsk_unique    = tcp_twsk_unique,
-       .twsk_destructor= tcp_twsk_destructor,
-};
-
-static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static void __tcp_v6_send_check(struct sk_buff *skb,
+                               struct in6_addr *saddr, struct in6_addr *daddr)
 {
-       struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
+               th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
-               th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
-                                           csum_partial((char *)th, th->doff<<2,
-                                                        skb->csum));
+               th->check = tcp_v6_check(skb->len, saddr, daddr,
+                                        csum_partial(th, th->doff << 2,
+                                                     skb->csum));
        }
 }
 
+static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+}
+
 static int tcp_v6_gso_send_check(struct sk_buff *skb)
 {
        struct ipv6hdr *ipv6h;
@@ -934,126 +963,55 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
        th = tcp_hdr(skb);
 
        th->check = 0;
-       th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
-                                    IPPROTO_TCP, 0);
-       skb->csum_start = skb_transport_header(skb) - skb->head;
-       skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;
+       __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
        return 0;
 }
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
 {
-       struct tcphdr *th = tcp_hdr(skb), *t1;
-       struct sk_buff *buff;
-       struct flowi fl;
-       struct net *net = dev_net(skb->dst->dev);
-       struct sock *ctl_sk = net->ipv6.tcp_sk;
-       unsigned int tot_len = sizeof(*th);
-#ifdef CONFIG_TCP_MD5SIG
-       struct tcp_md5sig_key *key;
-#endif
-
-       if (th->rst)
-               return;
-
-       if (!ipv6_unicast_destination(skb))
-               return;
-
-#ifdef CONFIG_TCP_MD5SIG
-       if (sk)
-               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
-       else
-               key = NULL;
-
-       if (key)
-               tot_len += TCPOLEN_MD5SIG_ALIGNED;
-#endif
+       struct ipv6hdr *iph = skb_gro_network_header(skb);
 
-       /*
-        * We need to grab some memory, and put together an RST,
-        * and then put it into the queue to be sent.
-        */
-
-       buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
-                        GFP_ATOMIC);
-       if (buff == NULL)
-               return;
-
-       skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
-
-       t1 = (struct tcphdr *) skb_push(buff, tot_len);
-
-       /* Swap the send and the receive. */
-       memset(t1, 0, sizeof(*t1));
-       t1->dest = th->source;
-       t1->source = th->dest;
-       t1->doff = tot_len / 4;
-       t1->rst = 1;
-
-       if(th->ack) {
-               t1->seq = th->ack_seq;
-       } else {
-               t1->ack = 1;
-               t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
-                                   + skb->len - (th->doff<<2));
-       }
+       switch (skb->ip_summed) {
+       case CHECKSUM_COMPLETE:
+               if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
+                                 skb->csum)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       break;
+               }
 
-#ifdef CONFIG_TCP_MD5SIG
-       if (key) {
-               __be32 *opt = (__be32*)(t1 + 1);
-               opt[0] = htonl((TCPOPT_NOP << 24) |
-                              (TCPOPT_NOP << 16) |
-                              (TCPOPT_MD5SIG << 8) |
-                              TCPOLEN_MD5SIG);
-               tcp_v6_md5_hash_hdr((__u8 *)&opt[1], key,
-                                   &ipv6_hdr(skb)->daddr,
-                                   &ipv6_hdr(skb)->saddr, t1);
+               /* fall through */
+       case CHECKSUM_NONE:
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
        }
-#endif
-
-       buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
-
-       memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
-
-       t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
-                                   sizeof(*t1), IPPROTO_TCP,
-                                   buff->csum);
 
-       fl.proto = IPPROTO_TCP;
-       fl.oif = inet6_iif(skb);
-       fl.fl_ip_dport = t1->dest;
-       fl.fl_ip_sport = t1->source;
-       security_skb_classify_flow(skb, &fl);
+       return tcp_gro_receive(head, skb);
+}
 
-       /* Pass a socket to ip6_dst_lookup either it is for RST
-        * Underlying function will use this to retrieve the network
-        * namespace
-        */
-       if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
+static int tcp6_gro_complete(struct sk_buff *skb)
+{
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct tcphdr *th = tcp_hdr(skb);
 
-               if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
-                       ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
-                       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-                       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
-                       return;
-               }
-       }
+       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+                                 &iph->saddr, &iph->daddr, 0);
+       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 
-       kfree_skb(buff);
+       return tcp_gro_complete(skb);
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
-                           struct tcp_md5sig_key *key)
+static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+                                u32 ts, struct tcp_md5sig_key *key, int rst)
 {
        struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
-       struct net *net = dev_net(skb->dst->dev);
+       struct net *net = dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
+       struct dst_entry *dst;
        __be32 *topt;
 
        if (ts)
@@ -1070,16 +1028,18 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 
        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 
-       t1 = (struct tcphdr *) skb_push(buff,tot_len);
+       t1 = (struct tcphdr *) skb_push(buff, tot_len);
+       skb_reset_transport_header(buff);
 
        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
-       t1->doff = tot_len/4;
+       t1->doff = tot_len / 4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
-       t1->ack = 1;
+       t1->ack = !rst || !th->ack;
+       t1->rst = rst;
        t1->window = htons(win);
 
        topt = (__be32 *)(t1 + 1);
@@ -1088,7 +1048,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tcp_time_stamp);
-               *topt = htonl(ts);
+               *topt++ = htonl(ts);
        }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1101,15 +1061,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
        }
 #endif
 
-       buff->csum = csum_partial((char *)t1, tot_len, 0);
-
        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
 
-       t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
-                                   tot_len, IPPROTO_TCP,
-                                   buff->csum);
+       buff->ip_summed = CHECKSUM_PARTIAL;
+       buff->csum = 0;
+
+       __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
 
        fl.proto = IPPROTO_TCP;
        fl.oif = inet6_iif(skb);
@@ -1117,10 +1076,17 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
        fl.fl_ip_sport = t1->source;
        security_skb_classify_flow(skb, &fl);
 
-       if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
-               if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
-                       ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
+       /* Pass a socket to ip6_dst_lookup either it is for RST
+        * Underlying function will use this to retrieve the network
+        * namespace
+        */
+       if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
+               if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
+                       skb_dst_set(buff, dst);
+                       ip6_xmit(ctl_sk, buff, &fl, NULL);
                        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+                       if (rst)
+                               TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
                        return;
                }
        }
@@ -1128,6 +1094,38 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
        kfree_skb(buff);
 }
 
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcphdr *th = tcp_hdr(skb);
+       u32 seq = 0, ack_seq = 0;
+       struct tcp_md5sig_key *key = NULL;
+
+       if (th->rst)
+               return;
+
+       if (!ipv6_unicast_destination(skb))
+               return;
+
+#ifdef CONFIG_TCP_MD5SIG
+       if (sk)
+               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+#endif
+
+       if (th->ack)
+               seq = ntohl(th->ack_seq);
+       else
+               ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
+                         (th->doff << 2);
+
+       tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+}
+
+static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
+                           struct tcp_md5sig_key *key)
+{
+       tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
+}
+
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
        struct inet_timewait_sock *tw = inet_twsk(sk);
@@ -1175,7 +1173,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
        }
 
 #ifdef CONFIG_SYN_COOKIES
-       if (!th->rst && !th->syn && th->ack)
+       if (!th->syn)
                sk = cookie_v6_check(sk, skb);
 #endif
        return sk;
@@ -1186,12 +1184,15 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
  */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_extend_values tmp_ext;
+       struct tcp_options_received tmp_opt;
+       u8 *hash_location;
+       struct request_sock *req;
        struct inet6_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct tcp_options_received tmp_opt;
        struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock *req = NULL;
        __u32 isn = TCP_SKB_CB(skb)->when;
+       struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
 #else
@@ -1229,8 +1230,52 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;
+       tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+
+       if (tmp_opt.cookie_plus > 0 &&
+           tmp_opt.saw_tstamp &&
+           !tp->rx_opt.cookie_out_never &&
+           (sysctl_tcp_cookie_size > 0 ||
+            (tp->cookie_values != NULL &&
+             tp->cookie_values->cookie_desired > 0))) {
+               u8 *c;
+               u32 *d;
+               u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
+               int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
+
+               if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
+                       goto drop_and_free;
+
+               /* Secret recipe starts with IP addresses */
+               d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+               d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+               *mess++ ^= *d++;
+
+               /* plus variable length Initiator Cookie */
+               c = (u8 *)mess;
+               while (l-- > 0)
+                       *c++ ^= *hash_location++;
 
-       tcp_parse_options(skb, &tmp_opt, 0);
+#ifdef CONFIG_SYN_COOKIES
+               want_cookie = 0;        /* not our kind of cookie */
+#endif
+               tmp_ext.cookie_out_never = 0; /* false */
+               tmp_ext.cookie_plus = tmp_opt.cookie_plus;
+       } else if (!tp->rx_opt.cookie_in_always) {
+               /* redundant indications, but ensure initialization. */
+               tmp_ext.cookie_out_never = 1; /* true */
+               tmp_ext.cookie_plus = 0;
+       } else {
+               goto drop_and_free;
+       }
+       tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
 
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);
@@ -1241,13 +1286,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        treq = inet6_rsk(req);
        ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-       if (!want_cookie)
+       if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));
 
-       if (want_cookie) {
-               isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-               req->cookie_ts = tmp_opt.tstamp_ok;
-       } else if (!isn) {
+       if (!isn) {
+               struct inet_peer *peer = NULL;
+
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1261,25 +1305,73 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                        treq->iif = inet6_iif(skb);
 
+               if (want_cookie) {
+                       isn = cookie_v6_init_sequence(sk, skb, &req->mss);
+                       req->cookie_ts = tmp_opt.tstamp_ok;
+                       goto have_isn;
+               }
+
+               /* VJ's idea. We save last timestamp seen
+                * from the destination in peer table, when entering
+                * state TIME-WAIT, and check against it before
+                * accepting new connection request.
+                *
+                * If "isn" is not zero, this request hit alive
+                * timewait bucket, so that all the necessary checks
+                * are made in the function processing timewait state.
+                */
+               if (tmp_opt.saw_tstamp &&
+                   tcp_death_row.sysctl_tw_recycle &&
+                   (dst = inet6_csk_route_req(sk, req)) != NULL &&
+                   (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
+                   ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+                                   &treq->rmt_addr)) {
+                       inet_peer_refcheck(peer);
+                       if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
+                           (s32)(peer->tcp_ts - req->ts_recent) >
+                                                       TCP_PAWS_WINDOW) {
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                               goto drop_and_release;
+                       }
+               }
+               /* Kill the following clause, if you dislike this way. */
+               else if (!sysctl_tcp_syncookies &&
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (sysctl_max_syn_backlog >> 2)) &&
+                        (!peer || !peer->tcp_ts_stamp) &&
+                        (!dst || !dst_metric(dst, RTAX_RTT))) {
+                       /* Without syncookies last quarter of
+                        * backlog is filled with destinations,
+                        * proven to be alive.
+                        * It means that we continue to communicate
+                        * to destinations, already remembered
+                        * to the moment of synflood.
+                        */
+                       LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
+                                      &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+                       goto drop_and_release;
+               }
+
                isn = tcp_v6_init_sequence(skb);
        }
-
+have_isn:
        tcp_rsk(req)->snt_isn = isn;
 
        security_inet_conn_request(sk, skb, req);
 
-       if (tcp_v6_send_synack(sk, req))
-               goto drop;
+       if (tcp_v6_send_synack(sk, req,
+                              (struct request_values *)&tmp_ext) ||
+           want_cookie)
+               goto drop_and_free;
 
-       if (!want_cookie) {
-               inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-               return 0;
-       }
+       inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       return 0;
 
+drop_and_release:
+       dst_release(dst);
+drop_and_free:
+       reqsk_free(req);
 drop:
-       if (req)
-               reqsk_free(req);
-
        return 0; /* don't send reset */
 }
 
@@ -1317,11 +1409,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
-                             newinet->daddr);
+               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
 
-               ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
-                             newinet->saddr);
+               ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
 
@@ -1357,38 +1447,15 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        if (sk_acceptq_is_full(sk))
                goto out_overflow;
 
-       if (dst == NULL) {
-               struct in6_addr *final_p = NULL, final;
-               struct flowi fl;
-
-               memset(&fl, 0, sizeof(fl));
-               fl.proto = IPPROTO_TCP;
-               ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-               if (opt && opt->srcrt) {
-                       struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-                       ipv6_addr_copy(&final, &fl.fl6_dst);
-                       ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-                       final_p = &final;
-               }
-               ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-               fl.oif = sk->sk_bound_dev_if;
-               fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-               fl.fl_ip_sport = inet_sk(sk)->sport;
-               security_req_classify_flow(req, &fl);
-
-               if (ip6_dst_lookup(sk, &dst, &fl))
-                       goto out;
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-               if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+       if (!dst) {
+               dst = inet6_csk_route_req(sk, req);
+               if (!dst)
                        goto out;
        }
 
        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
-               goto out;
+               goto out_nonewsk;
 
        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
@@ -1455,10 +1522,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
-       newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+       newtp->advmss = dst_metric_advmss(dst);
        tcp_initialize_rcv_mss(newsk);
 
-       newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
+       newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
+       newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
@@ -1470,37 +1538,41 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 */
                char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
                if (newkey != NULL)
-                       tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+                       tcp_v6_md5_do_add(newsk, &newnp->daddr,
                                          newkey, key->keylen);
        }
 #endif
 
-       __inet6_hash(newsk);
-       __inet_inherit_port(sk, newsk);
+       if (__inet_inherit_port(sk, newsk) < 0) {
+               sock_put(newsk);
+               goto out;
+       }
+       __inet6_hash(newsk, NULL);
 
        return newsk;
 
 out_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-out:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+out_nonewsk:
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
+out:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 }
 
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+               if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
                                  &ipv6_hdr(skb)->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }
 
-       skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+       skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
                                              &ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr, 0));
 
@@ -1640,14 +1712,14 @@ ipv6_pktoptions:
                }
        }
 
-       if (opt_skb)
-               kfree_skb(opt_skb);
+       kfree_skb(opt_skb);
        return 0;
 }
 
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
        struct tcphdr *th;
+       struct ipv6hdr *hdr;
        struct sock *sk;
        int ret;
        struct net *net = dev_net(skb->dev);
@@ -1674,12 +1746,13 @@ static int tcp_v6_rcv(struct sk_buff *skb)
                goto bad_packet;
 
        th = tcp_hdr(skb);
+       hdr = ipv6_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when = 0;
-       TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
+       TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
        TCP_SKB_CB(skb)->sacked = 0;
 
        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1690,6 +1763,11 @@ process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;
 
+       if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               goto discard_and_relse;
+       }
+
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
@@ -1704,7 +1782,7 @@ process:
 #ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                       tp->ucopy.dma_chan = get_softnet_dma();
+                       tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
                if (tp->ucopy.dma_chan)
                        ret = tcp_v6_do_rcv(sk, skb);
                else
@@ -1713,8 +1791,11 @@ process:
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v6_do_rcv(sk, skb);
                }
-       } else
-               sk_add_backlog(sk, skb);
+       } else if (unlikely(sk_add_backlog(sk, skb))) {
+               bh_unlock_sock(sk);
+               NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+               goto discard_and_relse;
+       }
        bh_unlock_sock(sk);
 
        sock_put(sk);
@@ -1783,19 +1864,51 @@ do_time_wait:
        goto discard_it;
 }
 
-static int tcp_v6_remember_stamp(struct sock *sk)
+static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
 {
-       /* Alas, not yet... */
-       return 0;
+       struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct inet_peer *peer;
+
+       if (!rt ||
+           !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
+               peer = inet_getpeer_v6(&np->daddr, 1);
+               *release_it = true;
+       } else {
+               if (!rt->rt6i_peer)
+                       rt6_bind_peer(rt, 1);
+               peer = rt->rt6i_peer;
+               *release_it = false;
+       }
+
+       return peer;
 }
 
-static struct inet_connection_sock_af_ops ipv6_specific = {
+static void *tcp_v6_tw_get_peer(struct sock *sk)
+{
+       struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+       struct inet_timewait_sock *tw = inet_twsk(sk);
+
+       if (tw->tw_family == AF_INET)
+               return tcp_v4_tw_get_peer(sk);
+
+       return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
+}
+
+static struct timewait_sock_ops tcp6_timewait_sock_ops = {
+       .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
+       .twsk_unique    = tcp_twsk_unique,
+       .twsk_destructor= tcp_twsk_destructor,
+       .twsk_getpeer   = tcp_v6_tw_get_peer,
+};
+
+static const struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
-       .remember_stamp    = tcp_v6_remember_stamp,
+       .get_peer          = tcp_v6_get_peer,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
@@ -1809,7 +1922,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
 };
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
        .md5_lookup     =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
        .md5_add        =       tcp_v6_md5_add_func,
@@ -1821,13 +1934,13 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
  *     TCP over IPv4 via INET6 API
  */
 
-static struct inet_connection_sock_af_ops ipv6_mapped = {
+static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
-       .remember_stamp    = tcp_v4_remember_stamp,
+       .get_peer          = tcp_v4_get_peer,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
@@ -1841,7 +1954,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
 };
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
        .md5_lookup     =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
        .md5_add        =       tcp_v6_md5_add_func,
@@ -1874,9 +1987,9 @@ static int tcp_v6_init_sock(struct sock *sk)
        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
-       tp->snd_ssthresh = 0x7fffffff;
+       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
-       tp->mss_cache = 536;
+       tp->mss_cache = TCP_MSS_DEFAULT;
 
        tp->reordering = sysctl_tcp_reordering;
 
@@ -1892,10 +2005,25 @@ static int tcp_v6_init_sock(struct sock *sk)
        tp->af_specific = &tcp_sock_ipv6_specific;
 #endif
 
+       /* TCP Cookie Transactions */
+       if (sysctl_tcp_cookie_size > 0) {
+               /* Default, cookies without s_data_payload. */
+               tp->cookie_values =
+                       kzalloc(sizeof(*tp->cookie_values),
+                               sk->sk_allocation);
+               if (tp->cookie_values != NULL)
+                       kref_init(&tp->cookie_values->kref);
+       }
+       /* Presumed zeroed, in order of appearance:
+        *      cookie_in_always, cookie_out_never,
+        *      s_data_constant, s_data_in, s_data_out
+        */
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
-       atomic_inc(&tcp_sockets_allocated);
+       local_bh_disable();
+       percpu_counter_inc(&tcp_sockets_allocated);
+       local_bh_enable();
 
        return 0;
 }
@@ -1925,11 +2053,11 @@ static void get_openreq6(struct seq_file *seq,
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
-                  ntohs(inet_sk(sk)->sport),
+                  ntohs(inet_rsk(req)->loc_port),
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->rmt_port),
@@ -1957,8 +2085,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
        dest  = &np->daddr;
        src   = &np->rcv_saddr;
-       destp = ntohs(inet->dport);
-       srcp  = ntohs(inet->sport);
+       destp = ntohs(inet->inet_dport);
+       srcp  = ntohs(inet->inet_sport);
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
@@ -1976,7 +2104,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -1995,7 +2123,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
-                  tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
+                  tp->snd_cwnd,
+                  tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
                   );
 }
 
@@ -2017,7 +2146,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -2070,7 +2199,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        },
 };
 
-int tcp6_proc_init(struct net *net)
+int __net_init tcp6_proc_init(struct net *net)
 {
        return tcp_proc_register(net, &tcp6_seq_afinfo);
 }
@@ -2095,6 +2224,8 @@ struct proto tcpv6_prot = {
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
+       .sendmsg                = tcp_sendmsg,
+       .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
@@ -2109,20 +2240,24 @@ struct proto tcpv6_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
+       .slab_flags             = SLAB_DESTROY_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
+       .no_autobind            = true,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
 };
 
-static struct inet6_protocol tcpv6_protocol = {
+static const struct inet6_protocol tcpv6_protocol = {
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .gso_send_check =       tcp_v6_gso_send_check,
        .gso_segment    =       tcp_tso_segment,
+       .gro_receive    =       tcp6_gro_receive,
+       .gro_complete   =       tcp6_gro_complete,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
@@ -2131,27 +2266,31 @@ static struct inet_protosw tcpv6_protosw = {
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
-       .capability     =       -1,
        .no_check       =       0,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
 };
 
-static int tcpv6_net_init(struct net *net)
+static int __net_init tcpv6_net_init(struct net *net)
 {
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
 }
 
-static void tcpv6_net_exit(struct net *net)
+static void __net_exit tcpv6_net_exit(struct net *net)
 {
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
-       inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
+}
+
+static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
+{
+       inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
 }
 
 static struct pernet_operations tcpv6_net_ops = {
-       .init = tcpv6_net_init,
-       .exit = tcpv6_net_exit,
+       .init       = tcpv6_net_init,
+       .exit       = tcpv6_net_exit,
+       .exit_batch = tcpv6_net_exit_batch,
 };
 
 int __init tcpv6_init(void)