tcp: tcp_sendpages() should call tcp_push() once
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf13726..5d54ed3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
  *     TCP_CLOSE               socket is finished
  */
 
+#define pr_fmt(fmt) "TCP: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/cache.h>
 #include <linux/err.h>
 #include <linux/crypto.h>
+#include <linux/time.h>
+#include <linux/slab.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -280,15 +284,13 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-int sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
-EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
-atomic_t tcp_memory_allocated; /* Current allocated memory. */
+atomic_long_t tcp_memory_allocated;    /* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);
 
 /*
@@ -313,7 +315,6 @@ struct tcp_splice_state {
  * is strict, actions are advisory and have some latency.
  */
 int tcp_memory_pressure __read_mostly;
-
 EXPORT_SYMBOL(tcp_memory_pressure);
 
 void tcp_enter_memory_pressure(struct sock *sk)
@@ -323,9 +324,45 @@ void tcp_enter_memory_pressure(struct sock *sk)
                tcp_memory_pressure = 1;
        }
 }
-
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+       u8 res = 0;
+
+       if (seconds > 0) {
+               int period = timeout;
+
+               res = 1;
+               while (seconds > period && res < 255) {
+                       res++;
+                       timeout <<= 1;
+                       if (timeout > rto_max)
+                               timeout = rto_max;
+                       period += timeout;
+               }
+       }
+       return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+       int period = 0;
+
+       if (retrans > 0) {
+               period = timeout;
+               while (--retrans) {
+                       timeout <<= 1;
+                       if (timeout > rto_max)
+                               timeout = rto_max;
+                       period += timeout;
+               }
+       }
+       return period;
+}
+
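A worked check of the two helpers above (a standalone userspace sketch; the functions are copied verbatim, and the constants 1 and 120 stand in for TCP_TIMEOUT_INIT/HZ and TCP_RTO_MAX/HZ on 3.x kernels):

#include <assert.h>

typedef unsigned char u8;

static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	/* Exponential backoff periods sum to 1 + 2 + 4 + 8 = 15 >= 10,
	 * so a 10 second budget maps to 4 retransmits... */
	assert(secs_to_retrans(10, 1, 120) == 4);
	/* ...and the reverse mapping rounds up to the full backoff window. */
	assert(retrans_to_secs(4, 1, 120) == 15);
	return 0;
}
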
 /*
  *     Wait for a TCP event.
  *
@@ -337,9 +374,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        unsigned int mask;
        struct sock *sk = sock->sk;
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
 
-       sock_poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -349,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         */
 
        mask = 0;
-       if (sk->sk_err)
-               mask = POLLERR;
 
        /*
         * POLLHUP is certainly not done right. But poll() doesn't
@@ -391,7 +426,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                if (tp->urg_seq == tp->copied_seq &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
-                       target--;
+                       target++;
 
                /* Potential race condition. If read of tp below will
                 * escape above sk->sk_state, we can be illegally awaken
@@ -414,13 +449,20 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
-               }
+               } else
+                       mask |= POLLOUT | POLLWRNORM;
 
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
+       /* This barrier is coupled with smp_wmb() in tcp_reset() */
+       smp_rmb();
+       if (sk->sk_err)
+               mask |= POLLERR;
+
        return mask;
 }
+EXPORT_SYMBOL(tcp_poll);
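The relocated sk_err check above relies on pairing with the write barrier on the reset path. A minimal userspace analogue of that ordering contract (illustrative only: C11 release/acquire stand in for the kernel's smp_wmb()/smp_rmb(), and all names here are invented for the sketch):

#include <errno.h>
#include <stdatomic.h>

static int sk_err;		/* plain field, published by the writer */
static atomic_int sk_state;	/* stands in for the socket state change */

static void writer_reset(void)	/* the tcp_reset()-like side */
{
	sk_err = ECONNRESET;
	/* release ordering plays the role of smp_wmb(): anyone who
	 * observes the new state also observes sk_err */
	atomic_store_explicit(&sk_state, 1, memory_order_release);
}

static int reader_poll(void)	/* the tcp_poll()-like side */
{
	int mask = 0;

	/* acquire ordering plays the role of smp_rmb(); testing sk_err
	 * only after it ensures POLLERR cannot be lost */
	if (atomic_load_explicit(&sk_state, memory_order_acquire) && sk_err)
		mask |= 0x008;	/* POLLERR on Linux */
	return mask;
}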
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -463,20 +505,30 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
+       case SIOCOUTQNSD:
+               if (sk->sk_state == TCP_LISTEN)
+                       return -EINVAL;
+
+               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+                       answ = 0;
+               else
+                       answ = tp->write_seq - tp->snd_nxt;
+               break;
        default:
                return -ENOIOCTLCMD;
        }
 
        return put_user(answ, (int __user *)arg);
 }
+EXPORT_SYMBOL(tcp_ioctl);
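For comparison with SIOCOUTQ, a userspace sketch of the new ioctl (assumes a kernel/libc where <linux/sockios.h> defines SIOCOUTQNSD; fd is a connected TCP socket):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void print_send_queues(int fd)
{
	int queued, unsent;

	if (ioctl(fd, SIOCOUTQ, &queued) == 0 &&	/* write_seq - snd_una */
	    ioctl(fd, SIOCOUTQNSD, &unsent) == 0)	/* write_seq - snd_nxt */
		printf("unacked %d bytes, queued but unsent %d bytes\n",
		       queued, unsent);
}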
 
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+       TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
        tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(struct tcp_sock *tp)
+static inline int forced_push(const struct tcp_sock *tp)
 {
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -488,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 
        skb->csum    = 0;
        tcb->seq     = tcb->end_seq = tp->write_seq;
-       tcb->flags   = TCPCB_FLAG_ACK;
+       tcb->tcp_flags = TCPHDR_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
@@ -498,8 +550,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
 
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
-                               struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 {
        if (flags & MSG_OOB)
                tp->snd_up = tp->write_seq;
@@ -508,13 +559,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
                            int nonagle)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-
        if (tcp_send_head(sk)) {
-               struct sk_buff *skb = tcp_write_queue_tail(sk);
+               struct tcp_sock *tp = tcp_sk(sk);
+
                if (!(flags & MSG_MORE) || forced_push(tp))
-                       tcp_mark_push(tp, skb);
-               tcp_mark_urg(tp, flags, skb);
+                       tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+               tcp_mark_urg(tp, flags);
                __tcp_push_pending_frames(sk, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
@@ -570,6 +621,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
        ssize_t spliced;
        int ret;
 
+       sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
@@ -637,6 +689,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 
        return ret;
 }
+EXPORT_SYMBOL(tcp_splice_read);
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 {
@@ -760,7 +813,7 @@ new_segment:
                        goto wait_for_memory;
 
                if (can_coalesce) {
-                       skb_shinfo(skb)->frags[i - 1].size += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
@@ -777,7 +830,7 @@ new_segment:
                skb_shinfo(skb)->gso_segs = 0;
 
                if (!copied)
-                       TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+                       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
                copied += copy;
                poffset += copy;
@@ -807,7 +860,7 @@ wait_for_memory:
        }
 
 out:
-       if (copied)
+       if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
                tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;
 
@@ -818,36 +871,35 @@ out_err:
        return sk_stream_error(sk, flags, err);
 }
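This hunk is the headline fix: a caller streaming several pages passes MSG_SENDPAGE_NOTLAST on all but the final call, so the tcp_push() above now fires once per batch instead of once per page. A sketch of that caller pattern (illustrative only; send_page_batch is an invented name, not a function in this file):

/* Push nr_pages pages; tcp_push() runs only after the last one. */
static ssize_t send_page_batch(struct sock *sk, struct page **pages,
			       int nr_pages, int flags)
{
	ssize_t ret, copied = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		int fl = flags;

		if (i + 1 < nr_pages)
			fl |= MSG_SENDPAGE_NOTLAST;	/* defer tcp_push() */
		ret = do_tcp_sendpages(sk, &pages[i], 0, PAGE_SIZE, fl);
		if (ret <= 0)
			return copied ? copied : ret;
		copied += ret;
	}
	return copied;
}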
 
-ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
-                    size_t size, int flags)
+int tcp_sendpage(struct sock *sk, struct page *page, int offset,
+                size_t size, int flags)
 {
        ssize_t res;
-       struct sock *sk = sock->sk;
 
        if (!(sk->sk_route_caps & NETIF_F_SG) ||
            !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
-               return sock_no_sendpage(sock, page, offset, size, flags);
+               return sock_no_sendpage(sk->sk_socket, page, offset, size,
+                                       flags);
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
 }
+EXPORT_SYMBOL(tcp_sendpage);
 
-#define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
-#define TCP_OFF(sk)    (sk->sk_sndmsg_off)
-
-static inline int select_size(struct sock *sk)
+static inline int select_size(const struct sock *sk, bool sg)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
-       if (sk->sk_route_caps & NETIF_F_SG) {
-               if (sk_can_gso(sk))
-                       tmp = 0;
-               else {
+       if (sg) {
+               if (sk_can_gso(sk)) {
+                       /* Small frames won't use a full page:
+                        * Payload will immediately follow tcp header.
+                        */
+                       tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+               } else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 
                        if (tmp >= pgbreak &&
@@ -859,20 +911,18 @@ static inline int select_size(struct sock *sk)
        return tmp;
 }
 
-int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t size)
 {
-       struct sock *sk = sock->sk;
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int iovlen, flags;
+       int iovlen, flags, err, copied;
        int mss_now, size_goal;
-       int err, copied;
+       bool sg;
        long timeo;
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
 
        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -896,8 +946,10 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
+       sg = !!(sk->sk_route_caps & NETIF_F_SG);
+
        while (--iovlen >= 0) {
-               int seglen = iov->iov_len;
+               size_t seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
 
                iov++;
@@ -921,8 +973,9 @@ new_segment:
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_skb(sk, select_size(sk),
-                                               sk->sk_allocation);
+                               skb = sk_stream_alloc_skb(sk,
+                                                         select_size(sk, sg),
+                                                         sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;
 
@@ -946,22 +999,26 @@ new_segment:
                                /* We have some space in skb head. Superb! */
                                if (copy > skb_tailroom(skb))
                                        copy = skb_tailroom(skb);
-                               if ((err = skb_add_data(skb, from, copy)) != 0)
+                               err = skb_add_data_nocache(sk, skb, from, copy);
+                               if (err)
                                        goto do_fault;
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = TCP_PAGE(sk);
-                               int off = TCP_OFF(sk);
+                               struct page *page = sk->sk_sndmsg_page;
+                               int off;
+
+                               if (page && page_count(page) == 1)
+                                       sk->sk_sndmsg_off = 0;
+
+                               off = sk->sk_sndmsg_off;
 
                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
                                        /* We can extend the last page
                                         * fragment. */
                                        merge = 1;
-                               } else if (i == MAX_SKB_FRAGS ||
-                                          (!i &&
-                                          !(sk->sk_route_caps & NETIF_F_SG))) {
+                               } else if (i == MAX_SKB_FRAGS || !sg) {
                                        /* Need to add new fragment and cannot
                                         * do this because interface is non-SG,
                                         * or because all the page slots are
@@ -971,7 +1028,7 @@ new_segment:
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
-                                               TCP_PAGE(sk) = page = NULL;
+                                               sk->sk_sndmsg_page = page = NULL;
                                                off = 0;
                                        }
                                } else
@@ -991,38 +1048,37 @@ new_segment:
 
                                /* Time to copy data. We are close to
                                 * the end! */
-                               err = skb_copy_to_page(sk, from, skb, page,
-                                                      off, copy);
+                               err = skb_copy_to_page_nocache(sk, from, skb,
+                                                              page, off, copy);
                                if (err) {
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
-                                       if (!TCP_PAGE(sk)) {
-                                               TCP_PAGE(sk) = page;
-                                               TCP_OFF(sk) = 0;
+                                       if (!sk->sk_sndmsg_page) {
+                                               sk->sk_sndmsg_page = page;
+                                               sk->sk_sndmsg_off = 0;
                                        }
                                        goto do_error;
                                }
 
                                /* Update the skb. */
                                if (merge) {
-                                       skb_shinfo(skb)->frags[i - 1].size +=
-                                                                       copy;
+                                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (TCP_PAGE(sk)) {
+                                       if (sk->sk_sndmsg_page) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
-                                               TCP_PAGE(sk) = page;
+                                               sk->sk_sndmsg_page = page;
                                        }
                                }
 
-                               TCP_OFF(sk) = off + copy;
+                               sk->sk_sndmsg_off = off + copy;
                        }
 
                        if (!copied)
-                               TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+                               TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
                        tp->write_seq += copy;
                        TCP_SKB_CB(skb)->end_seq += copy;
@@ -1059,7 +1115,6 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
@@ -1078,10 +1133,10 @@ do_error:
                goto out;
 out_err:
        err = sk_stream_error(sk, flags, err);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 }
+EXPORT_SYMBOL(tcp_sendmsg);
 
 /*
  *     Handle reading urgent data. BSD has very simple semantics for
@@ -1143,11 +1198,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
        struct tcp_sock *tp = tcp_sk(sk);
        int time_to_ack = 0;
 
-#if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-       WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
-#endif
+       WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+            "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 
        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1214,6 +1269,39 @@ static void tcp_prequeue_process(struct sock *sk)
        tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+       dma_cookie_t done, used;
+       dma_cookie_t last_issued;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (!tp->ucopy.dma_chan)
+               return;
+
+       last_issued = tp->ucopy.dma_cookie;
+       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+       do {
+               if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+                                             last_issued, &done,
+                                             &used) == DMA_SUCCESS) {
+                       /* Safe to free early-copied skbs now */
+                       __skb_queue_purge(&sk->sk_async_wait_queue);
+                       break;
+               } else {
+                       struct sk_buff *skb;
+                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+                              (dma_async_is_complete(skb->dma_cookie, done,
+                                                     used) == DMA_SUCCESS)) {
+                               __skb_dequeue(&sk->sk_async_wait_queue);
+                               kfree_skb(skb);
+                       }
+               }
+       } while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
        struct sk_buff *skb;
@@ -1295,6 +1383,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                sk_eat_skb(sk, skb, 0);
                if (!desc->count)
                        break;
+               tp->copied_seq = seq;
        }
        tp->copied_seq = seq;
 
@@ -1305,6 +1394,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                tcp_cleanup_rbuf(sk, copied);
        return copied;
 }
+EXPORT_SYMBOL(tcp_read_sock);
 
 /*
  *     This routine copies from a sock struct into the user buffer.
@@ -1332,8 +1422,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        lock_sock(sk);
 
-       TCP_CHECK_TIMER(sk);
-
        err = -ENOTCONN;
        if (sk->sk_state == TCP_LISTEN)
                goto out;
@@ -1393,11 +1481,12 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* Now that we have two receive queues this
                         * shouldn't happen.
                         */
-                       if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-                               printk(KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+                       if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                                flags))
                                break;
-                       }
+
                        offset = *seq - TCP_SKB_CB(skb)->seq;
                        if (tcp_hdr(skb)->syn)
                                offset--;
@@ -1405,7 +1494,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
-                       WARN_ON(!(flags & MSG_PEEK));
+                       WARN(!(flags & MSG_PEEK),
+                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
@@ -1501,6 +1592,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* __ Set realtime policy in scheduler __ */
                }
 
+#ifdef CONFIG_NET_DMA
+               if (tp->ucopy.dma_chan)
+                       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
@@ -1509,6 +1604,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+               tcp_service_net_dma(sk, false);  /* Don't block */
                tp->ucopy.wakeup = 0;
 #endif
 
@@ -1581,13 +1677,17 @@ do_prequeue:
 
                                if (tp->ucopy.dma_cookie < 0) {
 
-                                       printk(KERN_ALERT "dma_cookie < 0\n");
+                                       pr_alert("%s: dma_cookie < 0\n",
+                                                __func__);
 
                                        /* Exception. Bailout! */
                                        if (!copied)
                                                copied = -EFAULT;
                                        break;
                                }
+
+                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
                                if ((offset + used) == skb->len)
                                        copied_early = 1;
 
@@ -1657,27 +1757,9 @@ skip_copy:
        }
 
 #ifdef CONFIG_NET_DMA
-       if (tp->ucopy.dma_chan) {
-               dma_cookie_t done, used;
-
-               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-               while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-                                                tp->ucopy.dma_cookie, &done,
-                                                &used) == DMA_IN_PROGRESS) {
-                       /* do partial cleanup of sk_async_wait_queue */
-                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-                              (dma_async_is_complete(skb->dma_cookie, done,
-                                                     used) == DMA_SUCCESS)) {
-                               __skb_dequeue(&sk->sk_async_wait_queue);
-                               kfree_skb(skb);
-                       }
-               }
+       tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
+       tp->ucopy.dma_chan = NULL;
 
-               /* Safe to free early-copied skbs now */
-               __skb_queue_purge(&sk->sk_async_wait_queue);
-               tp->ucopy.dma_chan = NULL;
-       }
        if (tp->ucopy.pinned_list) {
                dma_unpin_iovec_pages(tp->ucopy.pinned_list);
                tp->ucopy.pinned_list = NULL;
@@ -1691,12 +1773,10 @@ skip_copy:
        /* Clean up data we have read: This will do ACK frames. */
        tcp_cleanup_rbuf(sk, copied);
 
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
 out:
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 
@@ -1704,6 +1784,7 @@ recv_urg:
        err = tcp_recv_urg(sk, msg, len, flags);
        goto out;
 }
+EXPORT_SYMBOL(tcp_recvmsg);
 
 void tcp_set_state(struct sock *sk, int state)
 {
@@ -1796,6 +1877,21 @@ void tcp_shutdown(struct sock *sk, int how)
                        tcp_send_fin(sk);
        }
 }
+EXPORT_SYMBOL(tcp_shutdown);
+
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+       bool too_many_orphans, out_of_socket_memory;
+
+       too_many_orphans = tcp_too_many_orphans(sk, shift);
+       out_of_socket_memory = tcp_out_of_memory(sk);
+
+       if (too_many_orphans && net_ratelimit())
+               pr_info("too many orphaned sockets\n");
+       if (out_of_socket_memory && net_ratelimit())
+               pr_info("out of memory -- consider tuning tcp_mem\n");
+       return too_many_orphans || out_of_socket_memory;
+}
 
 void tcp_close(struct sock *sk, long timeout)
 {
@@ -1828,6 +1924,10 @@ void tcp_close(struct sock *sk, long timeout)
 
        sk_mem_reclaim(sk);
 
+       /* If the socket has already been reset (e.g. in tcp_reset()), kill it. */
+       if (sk->sk_state == TCP_CLOSE)
+               goto adjudge_to_death;
+
        /* As outlined in RFC 2525, section 2.17, we send a RST here because
         * data was lost. To witness the awful effects of the old behavior of
         * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
@@ -1931,14 +2031,8 @@ adjudge_to_death:
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
-               int orphan_count = percpu_counter_read_positive(
-                                               sk->sk_prot->orphan_count);
-
                sk_mem_reclaim(sk);
-               if (tcp_too_many_orphans(sk, orphan_count)) {
-                       if (net_ratelimit())
-                               printk(KERN_INFO "TCP: too many of orphaned "
-                                      "sockets\n");
+               if (tcp_check_oom(sk, 0)) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(sock_net(sk),
@@ -1955,6 +2049,7 @@ out:
        local_bh_enable();
        sock_put(sk);
 }
+EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
@@ -1998,7 +2093,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        __skb_queue_purge(&sk->sk_async_wait_queue);
 #endif
 
-       inet->dport = 0;
+       inet->inet_dport = 0;
 
        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);
@@ -2015,6 +2110,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_cnt = 0;
        tp->bytes_acked = 0;
+       tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
@@ -2022,11 +2118,12 @@ int tcp_disconnect(struct sock *sk, int flags)
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
 
-       WARN_ON(inet->num && !icsk->icsk_bind_hash);
+       WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
        sk->sk_error_report(sk);
        return err;
 }
+EXPORT_SYMBOL(tcp_disconnect);
 
 /*
  *     Socket option code for TCP.
@@ -2039,8 +2136,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        int val;
        int err = 0;
 
-       /* This is a string value all the others are int's */
-       if (optname == TCP_CONGESTION) {
+       /* These are data/string values, all the others are ints */
+       switch (optname) {
+       case TCP_CONGESTION: {
                char name[TCP_CA_NAME_MAX];
 
                if (optlen < 1)
@@ -2057,6 +2155,96 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                release_sock(sk);
                return err;
        }
+       case TCP_COOKIE_TRANSACTIONS: {
+               struct tcp_cookie_transactions ctd;
+               struct tcp_cookie_values *cvp = NULL;
+
+               if (sizeof(ctd) > optlen)
+                       return -EINVAL;
+               if (copy_from_user(&ctd, optval, sizeof(ctd)))
+                       return -EFAULT;
+
+               if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
+                   ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
+                       return -EINVAL;
+
+               if (ctd.tcpct_cookie_desired == 0) {
+                       /* default to global value */
+               } else if ((0x1 & ctd.tcpct_cookie_desired) ||
+                          ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
+                          ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
+                       return -EINVAL;
+               }
+
+               if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
+                       /* Supersedes all other values */
+                       lock_sock(sk);
+                       if (tp->cookie_values != NULL) {
+                               kref_put(&tp->cookie_values->kref,
+                                        tcp_cookie_values_release);
+                               tp->cookie_values = NULL;
+                       }
+                       tp->rx_opt.cookie_in_always = 0; /* false */
+                       tp->rx_opt.cookie_out_never = 1; /* true */
+                       release_sock(sk);
+                       return err;
+               }
+
+               /* Allocate ancillary memory before locking.
+                */
+               if (ctd.tcpct_used > 0 ||
+                   (tp->cookie_values == NULL &&
+                    (sysctl_tcp_cookie_size > 0 ||
+                     ctd.tcpct_cookie_desired > 0 ||
+                     ctd.tcpct_s_data_desired > 0))) {
+                       cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
+                                     GFP_KERNEL);
+                       if (cvp == NULL)
+                               return -ENOMEM;
+
+                       kref_init(&cvp->kref);
+               }
+               lock_sock(sk);
+               tp->rx_opt.cookie_in_always =
+                       (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
+               tp->rx_opt.cookie_out_never = 0; /* false */
+
+               if (tp->cookie_values != NULL) {
+                       if (cvp != NULL) {
+                               /* Changed values are recorded by a changed
+                                * pointer, ensuring the cookie will differ,
+                                * without separately hashing each value later.
+                                */
+                               kref_put(&tp->cookie_values->kref,
+                                        tcp_cookie_values_release);
+                       } else {
+                               cvp = tp->cookie_values;
+                       }
+               }
+
+               if (cvp != NULL) {
+                       cvp->cookie_desired = ctd.tcpct_cookie_desired;
+
+                       if (ctd.tcpct_used > 0) {
+                               memcpy(cvp->s_data_payload, ctd.tcpct_value,
+                                      ctd.tcpct_used);
+                               cvp->s_data_desired = ctd.tcpct_used;
+                               cvp->s_data_constant = 1; /* true */
+                       } else {
+                               /* No constant payload data. */
+                               cvp->s_data_desired = ctd.tcpct_s_data_desired;
+                               cvp->s_data_constant = 0; /* false */
+                       }
+
+                       tp->cookie_values = cvp;
+               }
+               release_sock(sk);
+               return err;
+       }
+       default:
+               /* fallthru */
+               break;
+       }
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -2071,7 +2259,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                /* Values greater than interface MTU won't take effect. However
                 * at the point when this call is done we typically don't yet
                 * know which interface is going to be used */
-               if (val < 8 || val > MAX_TCP_WINDOW) {
+               if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
                        err = -EINVAL;
                        break;
                }
@@ -2095,6 +2283,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                }
                break;
 
+       case TCP_THIN_LINEAR_TIMEOUTS:
+               if (val < 0 || val > 1)
+                       err = -EINVAL;
+               else
+                       tp->thin_lto = val;
+               break;
+
+       case TCP_THIN_DUPACK:
+               if (val < 0 || val > 1)
+                       err = -EINVAL;
+               else
+                       tp->thin_dupack = val;
+               break;
+
        case TCP_CORK:
                /* When set indicates to always queue non-full frames.
                 * Later the user clears this option and we transmit
@@ -2125,7 +2327,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        if (sock_flag(sk, SOCK_KEEPOPEN) &&
                            !((1 << sk->sk_state) &
                              (TCPF_CLOSE | TCPF_LISTEN))) {
-                               __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
+                               u32 elapsed = keepalive_time_elapsed(tp);
                                if (tp->keepalive_time > elapsed)
                                        elapsed = tp->keepalive_time - elapsed;
                                else
@@ -2163,16 +2365,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                break;
 
        case TCP_DEFER_ACCEPT:
-               icsk->icsk_accept_queue.rskq_defer_accept = 0;
-               if (val > 0) {
-                       /* Translate value in seconds to number of
-                        * retransmits */
-                       while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-                              val > ((TCP_TIMEOUT_INIT / HZ) <<
-                                      icsk->icsk_accept_queue.rskq_defer_accept))
-                               icsk->icsk_accept_queue.rskq_defer_accept++;
-                       icsk->icsk_accept_queue.rskq_defer_accept++;
-               }
+               /* Translate value in seconds to number of retransmits */
+               icsk->icsk_accept_queue.rskq_defer_accept =
+                       secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+                                       TCP_RTO_MAX / HZ);
                break;
 
        case TCP_WINDOW_CLAMP:
@@ -2209,7 +2405,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                err = tp->af_specific->md5_parse(sk, optval, optlen);
                break;
 #endif
-
+       case TCP_USER_TIMEOUT:
+               /* Cap the maximum time (in ms) TCP will retry/retransmit
+                * before giving up and aborting (ETIMEDOUT) a connection.
+                */
+               icsk->icsk_user_timeout = msecs_to_jiffies(val);
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
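A userspace sketch of the new TCP_USER_TIMEOUT option (assumes a libc that exposes the constant in <netinet/tcp.h>; the value is in milliseconds):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Abort the connection (ETIMEDOUT) if transmitted data stays
 * unacknowledged for more than 30 seconds. */
static int set_user_timeout(int fd)
{
	unsigned int tmo_ms = 30 * 1000;

	return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
			  &tmo_ms, sizeof(tmo_ms));
}
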
@@ -2222,13 +2423,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
                   unsigned int optlen)
 {
-       struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (level != SOL_TCP)
                return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                                     optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
 }
+EXPORT_SYMBOL(tcp_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -2239,14 +2441,13 @@ int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                                                  optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(compat_tcp_setsockopt);
 #endif
 
 /* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
+void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
 
@@ -2268,8 +2469,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
                info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
        }
 
-       if (tp->ecn_flags&TCP_ECN_OK)
+       if (tp->ecn_flags & TCP_ECN_OK)
                info->tcpi_options |= TCPI_OPT_ECN;
+       if (tp->ecn_flags & TCP_ECN_SEEN)
+               info->tcpi_options |= TCPI_OPT_ECN_SEEN;
 
        info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
        info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2305,7 +2508,6 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 
        info->tcpi_total_retrans = tp->total_retrans;
 }
-
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
 static int do_tcp_getsockopt(struct sock *sk, int level,
@@ -2353,8 +2555,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                        val = (val ? : sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
-               val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-                       ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+               val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+                                     TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
                break;
        case TCP_WINDOW_CLAMP:
                val = tp->window_clamp;
@@ -2387,6 +2589,52 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
                        return -EFAULT;
                return 0;
+
+       case TCP_COOKIE_TRANSACTIONS: {
+               struct tcp_cookie_transactions ctd;
+               struct tcp_cookie_values *cvp = tp->cookie_values;
+
+               if (get_user(len, optlen))
+                       return -EFAULT;
+               if (len < sizeof(ctd))
+                       return -EINVAL;
+
+               memset(&ctd, 0, sizeof(ctd));
+               ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
+                                  TCP_COOKIE_IN_ALWAYS : 0)
+                               | (tp->rx_opt.cookie_out_never ?
+                                  TCP_COOKIE_OUT_NEVER : 0);
+
+               if (cvp != NULL) {
+                       ctd.tcpct_flags |= (cvp->s_data_in ?
+                                           TCP_S_DATA_IN : 0)
+                                        | (cvp->s_data_out ?
+                                           TCP_S_DATA_OUT : 0);
+
+                       ctd.tcpct_cookie_desired = cvp->cookie_desired;
+                       ctd.tcpct_s_data_desired = cvp->s_data_desired;
+
+                       memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
+                              cvp->cookie_pair_size);
+                       ctd.tcpct_used = cvp->cookie_pair_size;
+               }
+
+               if (put_user(sizeof(ctd), optlen))
+                       return -EFAULT;
+               if (copy_to_user(optval, &ctd, sizeof(ctd)))
+                       return -EFAULT;
+               return 0;
+       }
+       case TCP_THIN_LINEAR_TIMEOUTS:
+               val = tp->thin_lto;
+               break;
+       case TCP_THIN_DUPACK:
+               val = tp->thin_dupack;
+               break;
+
+       case TCP_USER_TIMEOUT:
+               val = jiffies_to_msecs(icsk->icsk_user_timeout);
+               break;
        default:
                return -ENOPROTOOPT;
        }
@@ -2408,6 +2656,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                                                     optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
 }
+EXPORT_SYMBOL(tcp_getsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -2418,11 +2667,11 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                                                  optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
@@ -2518,7 +2767,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
-       unsigned int flags;
+       __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
@@ -2568,10 +2817,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 found:
        flush = NAPI_GRO_CB(p)->flush;
-       flush |= flags & TCP_FLAG_CWR;
-       flush |= (flags ^ tcp_flag_word(th2)) &
-                 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
-       flush |= th->ack_seq ^ th2->ack_seq;
+       flush |= (__force int)(flags & TCP_FLAG_CWR);
+       flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+                 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+       flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
@@ -2592,8 +2841,9 @@ found:
 
 out_check_final:
        flush = len < mss;
-       flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
-                         TCP_FLAG_SYN | TCP_FLAG_FIN);
+       flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+                                       TCP_FLAG_RST | TCP_FLAG_SYN |
+                                       TCP_FLAG_FIN));
 
        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = head;
@@ -2624,27 +2874,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
 {
        int cpu;
+
        for_each_possible_cpu(cpu) {
-               struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
-               if (p) {
-                       if (p->md5_desc.tfm)
-                               crypto_free_hash(p->md5_desc.tfm);
-                       kfree(p);
-                       p = NULL;
-               }
+               struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+
+               if (p->md5_desc.tfm)
+                       crypto_free_hash(p->md5_desc.tfm);
        }
        free_percpu(pool);
 }
 
 void tcp_free_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool **pool = NULL;
+       struct tcp_md5sig_pool __percpu *pool = NULL;
 
        spin_lock_bh(&tcp_md5sig_pool_lock);
        if (--tcp_md5sig_users == 0) {
@@ -2655,32 +2903,26 @@ void tcp_free_md5sig_pool(void)
        if (pool)
                __tcp_free_md5sig_pool(pool);
 }
-
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
+static struct tcp_md5sig_pool __percpu *
+__tcp_alloc_md5sig_pool(struct sock *sk)
 {
        int cpu;
-       struct tcp_md5sig_pool **pool;
+       struct tcp_md5sig_pool __percpu *pool;
 
-       pool = alloc_percpu(struct tcp_md5sig_pool *);
+       pool = alloc_percpu(struct tcp_md5sig_pool);
        if (!pool)
                return NULL;
 
        for_each_possible_cpu(cpu) {
-               struct tcp_md5sig_pool *p;
                struct crypto_hash *hash;
 
-               p = kzalloc(sizeof(*p), sk->sk_allocation);
-               if (!p)
-                       goto out_free;
-               *per_cpu_ptr(pool, cpu) = p;
-
                hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
                if (!hash || IS_ERR(hash))
                        goto out_free;
 
-               p->md5_desc.tfm = hash;
+               per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
        }
        return pool;
 out_free:
@@ -2688,9 +2930,9 @@ out_free:
        return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
-       struct tcp_md5sig_pool **pool;
+       struct tcp_md5sig_pool __percpu *pool;
        int alloc = 0;
 
 retry:
@@ -2709,7 +2951,9 @@ retry:
 
        if (alloc) {
                /* we cannot hold spinlock here because this may sleep. */
-               struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
+               struct tcp_md5sig_pool __percpu *p;
+
+               p = __tcp_alloc_md5sig_pool(sk);
                spin_lock_bh(&tcp_md5sig_pool_lock);
                if (!p) {
                        tcp_md5sig_users--;
@@ -2728,48 +2972,63 @@ retry:
        }
        return pool;
 }
-
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ *     tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ *     We use percpu structure, so if we succeed, we exit with preemption
+ *     and BH disabled, to make sure another thread or softirq handling
+ *     won't try to get the same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool **p;
-       spin_lock_bh(&tcp_md5sig_pool_lock);
+       struct tcp_md5sig_pool __percpu *p;
+
+       local_bh_disable();
+
+       spin_lock(&tcp_md5sig_pool_lock);
        p = tcp_md5sig_pool;
        if (p)
                tcp_md5sig_users++;
-       spin_unlock_bh(&tcp_md5sig_pool_lock);
-       return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+       spin_unlock(&tcp_md5sig_pool_lock);
 
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+       if (p)
+               return this_cpu_ptr(p);
+
+       local_bh_enable();
+       return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
 {
+       local_bh_enable();
        tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
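The percpu conversion changes the calling convention: a successful tcp_get_md5sig_pool() returns the local CPU's slot with BH and preemption disabled, and must be paired with tcp_put_md5sig_pool() before anything that may sleep. A sketch of the expected caller shape (illustrative; modeled on the tcp_v4_md5_hash_* style of user, md5_hash_example is an invented name):

static int md5_hash_example(const struct tcphdr *th,
			    const struct tcp_md5sig_key *key)
{
	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();	/* BH + preemption now disabled */
	if (!hp)
		return 1;

	if (crypto_hash_init(&hp->md5_desc))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th) || tcp_md5_hash_key(hp, key))
		goto clear_hash;

	tcp_put_md5sig_pool();		/* re-enables BH */
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
	return 1;
}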
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
-                       struct tcphdr *th)
+                       const struct tcphdr *th)
 {
        struct scatterlist sg;
+       struct tcphdr hdr;
        int err;
 
-       __sum16 old_checksum = th->check;
-       th->check = 0;
+       /* We are not allowed to change tcphdr, make a local copy */
+       memcpy(&hdr, th, sizeof(hdr));
+       hdr.check = 0;
+
        /* options aren't included in the hash */
-       sg_init_one(&sg, th, sizeof(struct tcphdr));
-       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
-       th->check = old_checksum;
+       sg_init_one(&sg, &hdr, sizeof(hdr));
+       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
        return err;
 }
-
 EXPORT_SYMBOL(tcp_md5_hash_header);
 
 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
-                         struct sk_buff *skb, unsigned header_len)
+                         const struct sk_buff *skb, unsigned int header_len)
 {
        struct scatterlist sg;
        const struct tcphdr *tp = tcp_hdr(skb);
@@ -2778,6 +3037,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
        const unsigned head_data_len = skb_headlen(skb) > header_len ?
                                       skb_headlen(skb) - header_len : 0;
        const struct skb_shared_info *shi = skb_shinfo(skb);
+       struct sk_buff *frag_iter;
 
        sg_init_table(&sg, 1);
 
@@ -2787,28 +3047,160 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
        for (i = 0; i < shi->nr_frags; ++i) {
                const struct skb_frag_struct *f = &shi->frags[i];
-               sg_set_page(&sg, f->page, f->size, f->page_offset);
-               if (crypto_hash_update(desc, &sg, f->size))
+               struct page *page = skb_frag_page(f);
+               sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+               if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
                        return 1;
        }
 
+       skb_walk_frags(skb, frag_iter)
+               if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
+                       return 1;
+
        return 0;
 }
-
 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
 
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
        struct scatterlist sg;
 
        sg_init_one(&sg, key->key, key->keylen);
        return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
 }
-
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
 #endif
 
+/**
+ * Each Responder maintains up to two secret values concurrently for
+ * efficient secret rollover.  Each secret value has 4 states:
+ *
+ * Generating.  (tcp_secret_generating != tcp_secret_primary)
+ *    Generates new Responder-Cookies, but not yet used for primary
+ *    verification.  This is a short-term state, typically lasting only
+ *    one round trip time (RTT).
+ *
+ * Primary.  (tcp_secret_generating == tcp_secret_primary)
+ *    Used both for generation and primary verification.
+ *
+ * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
+ *    Used for verification, until the first failure that can be
+ *    verified by the newer Generating secret.  At that time, this
+ *    cookie's state is changed to Secondary, and the Generating
+ *    cookie's state is changed to Primary.  This is a short-term state,
+ *    typically lasting only one round trip time (RTT).
+ *
+ * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
+ *    Used for secondary verification, after primary verification
+ *    failures.  This state lasts no more than twice the Maximum Segment
+ *    Lifetime (2MSL).  Then, the secret is discarded.
+ */
+struct tcp_cookie_secret {
+       /* The secret is divided into two parts.  The digest part is the
+        * equivalent of previously hashing a secret and saving the state,
+        * and serves as an initialization vector (IV).  The message part
+        * serves as the trailing secret.
+        */
+       u32                             secrets[COOKIE_WORKSPACE_WORDS];
+       unsigned long                   expires;
+};
+
+#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
+#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
+#define TCP_SECRET_LIFE (HZ * 600)
+
+static struct tcp_cookie_secret tcp_secret_one;
+static struct tcp_cookie_secret tcp_secret_two;
+
+/* Essentially a circular list, without dynamic allocation. */
+static struct tcp_cookie_secret *tcp_secret_generating;
+static struct tcp_cookie_secret *tcp_secret_primary;
+static struct tcp_cookie_secret *tcp_secret_retiring;
+static struct tcp_cookie_secret *tcp_secret_secondary;
+
+static DEFINE_SPINLOCK(tcp_secret_locker);
+
+/* Select a pseudo-random word in the cookie workspace.
+ */
+static inline u32 tcp_cookie_work(const u32 *ws, const int n)
+{
+       return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
+}
+
+/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
+ * Called in softirq context.
+ * Returns: 0 for success.
+ */
+int tcp_cookie_generator(u32 *bakery)
+{
+       unsigned long jiffy = jiffies;
+
+       if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
+               spin_lock_bh(&tcp_secret_locker);
+               if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
+                       /* refreshed by another */
+                       memcpy(bakery,
+                              &tcp_secret_generating->secrets[0],
+                              COOKIE_WORKSPACE_WORDS);
+               } else {
+                       /* still needs refreshing */
+                       get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
+
+                       /* The first time, paranoia assumes that the
+                        * randomization function isn't as strong.  But,
+                        * this secret initialization is delayed until
+                        * the last possible moment (packet arrival).
+                        * Although that time is observable, it is
+                        * unpredictably variable.  Mash in the most
+                        * volatile clock bits available, and expire the
+                        * secret extra quickly.
+                        */
+                       if (unlikely(tcp_secret_primary->expires ==
+                                    tcp_secret_secondary->expires)) {
+                               struct timespec tv;
+
+                               getnstimeofday(&tv);
+                               bakery[COOKIE_DIGEST_WORDS+0] ^=
+                                       (u32)tv.tv_nsec;
+
+                               tcp_secret_secondary->expires = jiffy
+                                       + TCP_SECRET_1MSL
+                                       + (0x0f & tcp_cookie_work(bakery, 0));
+                       } else {
+                               tcp_secret_secondary->expires = jiffy
+                                       + TCP_SECRET_LIFE
+                                       + (0xff & tcp_cookie_work(bakery, 1));
+                               tcp_secret_primary->expires = jiffy
+                                       + TCP_SECRET_2MSL
+                                       + (0x1f & tcp_cookie_work(bakery, 2));
+                       }
+                       memcpy(&tcp_secret_secondary->secrets[0],
+                              bakery, COOKIE_WORKSPACE_WORDS);
+
+                       rcu_assign_pointer(tcp_secret_generating,
+                                          tcp_secret_secondary);
+                       rcu_assign_pointer(tcp_secret_retiring,
+                                          tcp_secret_primary);
+                       /*
+                        * Neither call_rcu() nor synchronize_rcu() needed.
+                        * Retiring data is not freed.  It is replaced after
+                        * further (locked) pointer updates, and a quiet time
+                        * (minimum 1MSL, maximum LIFE - 2MSL).
+                        */
+               }
+               spin_unlock_bh(&tcp_secret_locker);
+       } else {
+               rcu_read_lock_bh();
+               memcpy(bakery,
+                      &rcu_dereference(tcp_secret_generating)->secrets[0],
+                      COOKIE_WORKSPACE_WORDS);
+               rcu_read_unlock_bh();
+       }
+       return 0;
+}
+EXPORT_SYMBOL(tcp_cookie_generator);
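In round numbers (TCP_PAWS_MSL == 60, so 1MSL is 60 seconds, and ignoring the jiffy-granularity jitter added above): a refreshed secret generates and primarily verifies for TCP_SECRET_LIFE = 600 seconds; once displaced, it keeps verifying as Retiring/Secondary for at most TCP_SECRET_2MSL = 120 seconds and is then discarded. The very first secret after boot is deliberately short-lived (TCP_SECRET_1MSL = 60 seconds) because, as the first-use branch explains, its randomness is trusted less.
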
+
 void tcp_done(struct sock *sk)
 {
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
@@ -2838,11 +3230,22 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
+void tcp_init_mem(struct net *net)
+{
+       unsigned long limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
+       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+       net->ipv4.sysctl_tcp_mem[1] = limit;
+       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+}
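Worked numbers for the new per-netns sizing (an example, assuming 4 KiB pages and nr_free_buffer_pages() == 262144, i.e. roughly 1 GiB eligible for buffers):

/* limit      = 262144 / 8      = 32768 pages  (above the 128-page floor)
 * tcp_mem[0] = 32768 / 4 * 3   = 24576 pages  (~ 96 MiB)  low threshold
 * tcp_mem[1] = 32768 pages                    (~128 MiB)  pressure threshold
 * tcp_mem[2] = 24576 * 2       = 49152 pages  (~192 MiB)  hard limit
 */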
+
 void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
-       unsigned long nr_pages, limit;
-       int order, i, max_share;
+       unsigned long limit;
+       int max_share, cnt;
+       unsigned int i;
+       unsigned long jiffy = jiffies;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
 
@@ -2884,43 +3287,23 @@ void __init tcp_init(void)
                                        &tcp_hashinfo.bhash_size,
                                        NULL,
                                        64 * 1024);
-       tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+       tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
        for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
                spin_lock_init(&tcp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }
 
-       /* Try to be a bit smarter and adjust defaults depending
-        * on available memory.
-        */
-       for (order = 0; ((1 << order) << PAGE_SHIFT) <
-                       (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-                       order++)
-               ;
-       if (order >= 4) {
-               tcp_death_row.sysctl_max_tw_buckets = 180000;
-               sysctl_tcp_max_orphans = 4096 << (order - 4);
-               sysctl_max_syn_backlog = 1024;
-       } else if (order < 3) {
-               tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-               sysctl_tcp_max_orphans >>= (3 - order);
-               sysctl_max_syn_backlog = 128;
-       }
 
-       /* Set the pressure threshold to be a fraction of global memory that
-        * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-        * memory, with a floor of 128 pages.
-        */
-       nr_pages = totalram_pages - totalhigh_pages;
-       limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-       limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
-       limit = max(limit, 128UL);
-       sysctl_tcp_mem[0] = limit / 4 * 3;
-       sysctl_tcp_mem[1] = limit;
-       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
+       cnt = tcp_hashinfo.ehash_mask + 1;
 
+       tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+       sysctl_tcp_max_orphans = cnt / 2;
+       sysctl_max_syn_backlog = max(128, cnt / 256);
+
+       tcp_init_mem(&init_net);
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
-       limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+       limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
+       limit = max(limit, 128UL);
        max_share = min(4UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
@@ -2931,22 +3314,17 @@ void __init tcp_init(void)
        sysctl_tcp_rmem[1] = 87380;
        sysctl_tcp_rmem[2] = max(87380, max_share);
 
-       printk(KERN_INFO "TCP: Hash tables configured "
-              "(established %u bind %u)\n",
-              tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
+       pr_info("Hash tables configured (established %u bind %u)\n",
+               tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
 
        tcp_register_congestion_control(&tcp_reno);
-}
 
-EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_disconnect);
-EXPORT_SYMBOL(tcp_getsockopt);
-EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_poll);
-EXPORT_SYMBOL(tcp_read_sock);
-EXPORT_SYMBOL(tcp_recvmsg);
-EXPORT_SYMBOL(tcp_sendmsg);
-EXPORT_SYMBOL(tcp_splice_read);
-EXPORT_SYMBOL(tcp_sendpage);
-EXPORT_SYMBOL(tcp_setsockopt);
-EXPORT_SYMBOL(tcp_shutdown);
+       memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
+       memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
+       tcp_secret_one.expires = jiffy; /* past due */
+       tcp_secret_two.expires = jiffy; /* past due */
+       tcp_secret_generating = &tcp_secret_one;
+       tcp_secret_primary = &tcp_secret_one;
+       tcp_secret_retiring = &tcp_secret_two;
+       tcp_secret_secondary = &tcp_secret_two;
+}