unsigned char tw_rcv_wscale;
__u16 tw_sport;
/* Socket demultiplex comparisons on incoming packets. */
- /* these five are in inet_opt */
+ /* these five are in inet_sock */
__u32 tw_daddr
__attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
__u32 tw_rcv_saddr;
extern atomic_t tcp_orphan_count;
extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
-extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
TCP_ACK_PUSHED = 4
};
-static inline void tcp_schedule_ack(struct tcp_opt *tp)
+static inline void tcp_schedule_ack(struct tcp_sock *tp)
{
tp->ack.pending |= TCP_ACK_SCHED;
}
-static inline int tcp_ack_scheduled(struct tcp_opt *tp)
+static inline int tcp_ack_scheduled(struct tcp_sock *tp)
{
return tp->ack.pending&TCP_ACK_SCHED;
}
-static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
+static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
{
if (tp->ack.quick && --tp->ack.quick == 0) {
/* Leaving quickack mode we deflate ATO. */
}
}
-extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
+extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
-static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+static __inline__ void tcp_delack_init(struct tcp_sock *tp)
{
memset(&tp->ack, 0, sizeof(tp->ack));
}
-static inline void tcp_clear_options(struct tcp_opt *tp)
+static inline void tcp_clear_options(struct tcp_sock *tp)
{
tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
}
struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_opt *tp);
+extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk,
extern int tcp_listen_start(struct sock *sk);
extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_opt *tp,
+ struct tcp_sock *tp,
int estab);
/*
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
switch (what) {
case TCP_TIME_RETRANS:
*/
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (when > TCP_RTO_MAX) {
#ifdef TCP_DEBUG
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min(tp->advmss, tp->mss_cache_std);
hint = min(hint, tp->rcv_wnd/2);
tp->ack.rcv_mss = hint;
}
-static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
}
-static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{
__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
}
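Header prediction compares the third 32-bit word of each incoming TCP header against pred_flags, so __tcp_fast_path_on() pre-builds that word from the expected data offset, the ACK bit, and the already down-scaled send window. A hypothetical user-space sketch of the same packing (the 0x00100000 ACK constant and the sample header length and window are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t tcp_header_len = 32;         /* 20-byte header + 12 bytes of options (e.g. timestamps) */
	uint32_t snd_wnd        = 0x1234;     /* send window, already shifted down by the window scale  */
	uint32_t ack_flag       = 0x00100000; /* ACK bit of that header word in host order (assumption)  */

	/* header_len << 26 equals (header_len / 4) << 28, i.e. the doff field */
	uint32_t pred_flags = htonl((tcp_header_len << 26) | ack_flag | snd_wnd);

	printf("pred_flags = 0x%08x (host order 0x%08x)\n",
	       (unsigned)pred_flags, (unsigned)ntohl(pred_flags));
	return 0;
}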
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
+static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
tp->rcv_wnd &&
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
*/
-static __inline__ u32 tcp_receive_window(const struct tcp_opt *tp)
+static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
{
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
}
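The subtraction above can go negative exactly in the case the comment describes; the elided tail of the function clamps it to zero before returning. A hypothetical stand-alone rendering of the arithmetic with invented sequence numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rcv_wup = 1000;  /* left edge when the window was last advertised  */
	uint32_t rcv_wnd = 500;   /* window advertised at that point                */
	uint32_t rcv_nxt = 1600;  /* peer pushed 100 bytes past the offered window  */

	int32_t win = rcv_wup + rcv_wnd - rcv_nxt;  /* -100 in this scenario */
	if (win < 0)
		win = 0;                            /* nothing left to offer */

	printf("remaining receive window: %d\n", (int)win);
	return 0;
}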
static inline void tcp_packets_out_inc(struct sock *sk,
- struct tcp_opt *tp,
+ struct tcp_sock *tp,
const struct sk_buff *skb)
{
int orig = tcp_get_pcount(&tp->packets_out);
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}
-static inline void tcp_packets_out_dec(struct tcp_opt *tp,
+static inline void tcp_packets_out_dec(struct tcp_sock *tp,
const struct sk_buff *skb)
{
tcp_dec_pcount(&tp->packets_out, skb);
* "Packets left network, but not honestly ACKed yet" PLUS
* "Packets fast retransmitted"
*/
-static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_opt *tp)
+static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
return (tcp_get_pcount(&tp->packets_out) -
tcp_get_pcount(&tp->left_out) +
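The return value above is truncated by the excerpt; the idea, matching the comment, is packets_out minus left_out (SACKed plus presumed lost) plus the fast-retransmitted count. A hypothetical stand-alone version with made-up counters:

#include <stdio.h>

int main(void)
{
	/* invented counters for one recovery episode */
	unsigned int packets_out = 20;  /* sent but not yet fully acknowledged */
	unsigned int left_out    = 6;   /* SACKed plus presumed-lost segments  */
	unsigned int retrans_out = 2;   /* fast-retransmitted segments         */

	/* segments still believed to occupy network capacity */
	unsigned int in_flight = packets_out - left_out + retrans_out;

	printf("packets in flight: %u\n", in_flight);  /* prints 16 */
	return 0;
}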
* behave like Reno until low_window is reached,
* then increase congestion window slowly
*/
-static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
+static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
{
if (tcp_is_bic(tp)) {
if (sysctl_tcp_bic_fast_convergence &&
/* Stop taking Vegas samples for now. */
#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
-static inline void tcp_vegas_enable(struct tcp_opt *tp)
+static inline void tcp_vegas_enable(struct tcp_sock *tp)
{
/* There are several situations when we must "re-start" Vegas:
*
/* Should we be taking Vegas samples right now? */
#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
-extern void tcp_ca_init(struct tcp_opt *tp);
+extern void tcp_ca_init(struct tcp_sock *tp);
-static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
+static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
{
if (tcp_is_vegas(tp)) {
if (ca_state == TCP_CA_Open)
* The exception is rate halving phase, when cwnd is decreasing towards
* ssthresh.
*/
-static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
+static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
{
if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
return tp->snd_ssthresh;
(tp->snd_cwnd >> 2)));
}
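The visible tail of the return combines a halved and a quartered cwnd, i.e. three quarters of the current congestion window. A throwaway stand-alone check with an invented cwnd value:

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 40;                      /* invented cwnd */
	unsigned int three_quarters = (snd_cwnd >> 1) +  /* cwnd / 2      */
				      (snd_cwnd >> 2);   /* + cwnd / 4    */

	printf("3/4 of %u is %u\n", snd_cwnd, three_quarters);  /* prints 30 */
	return 0;
}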
-static inline void tcp_sync_left_out(struct tcp_opt *tp)
+static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
if (tp->sack_ok &&
(tcp_get_pcount(&tp->sacked_out) >=
/* Congestion window validation. (RFC2861) */
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
+static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
__u32 packets_out = tcp_get_pcount(&tp->packets_out);
}
/* Set slow start threshold and cwnd without falling back into slow start */
-static inline void __tcp_enter_cwr(struct tcp_opt *tp)
+static inline void __tcp_enter_cwr(struct tcp_sock *tp)
{
tp->undo_marker = 0;
tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
TCP_ECN_queue_cwr(tp);
}
-static inline void tcp_enter_cwr(struct tcp_opt *tp)
+static inline void tcp_enter_cwr(struct tcp_sock *tp)
{
tp->prior_ssthresh = 0;
if (tp->ca_state < TCP_CA_CWR) {
}
}
-extern __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delayed ACKs produces a burst of 3 packets, so it
 * is safe "de facto".
*/
-static __inline__ __u32 tcp_max_burst(const struct tcp_opt *tp)
+static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
return 3;
}
-static __inline__ int tcp_minshall_check(const struct tcp_opt *tp)
+static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
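tcp_minshall_check() answers "is a small segment still in flight?": snd_sml must lie after the oldest unacknowledged byte and not beyond snd_nxt. A hypothetical stand-alone recreation (the local after() helper mirrors the kernel's wrap-safe macro; the sequence numbers are invented):

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "seq2 comes after seq1", mirroring the kernel's after() macro */
static int after(uint32_t seq2, uint32_t seq1)
{
	return (int32_t)(seq2 - seq1) > 0;
}

int main(void)
{
	uint32_t snd_una = 1000;  /* oldest unacknowledged byte          */
	uint32_t snd_nxt = 1300;  /* next sequence number to be sent     */
	uint32_t snd_sml = 1100;  /* end of the last small segment sent  */

	/* a small segment sent after snd_una and not beyond snd_nxt is still
	 * outstanding, so the next sub-MSS write should be held back */
	int defer = after(snd_sml, snd_una) && !after(snd_sml, snd_nxt);

	printf("defer small segment: %s\n", defer ? "yes" : "no");
	return 0;
}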
-static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss,
+static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
const struct sk_buff *skb)
{
if (skb->len < mss)
*/
static __inline__ int
-tcp_nagle_check(const struct tcp_opt *tp, const struct sk_buff *skb,
+tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned mss_now, int nonagle)
{
return (skb->len < mss_now &&
/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
* should be put on the wire right now.
*/
-static __inline__ int tcp_snd_test(const struct tcp_opt *tp,
+static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
struct sk_buff *skb,
unsigned cur_mss, int nonagle)
{
!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
+static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
if (!tcp_get_pcount(&tp->packets_out) && !tp->pending)
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
* The socket must be locked by the caller.
*/
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp,
+ struct tcp_sock *tp,
unsigned cur_mss,
int nonagle)
{
}
static __inline__ void tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp)
+ struct tcp_sock *tp)
{
__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
+static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
struct sk_buff *skb = sk->sk_send_head;
tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
}
-static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
/* Prequeue for VJ style copy to user, combined with checksumming. */
-static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
+static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
tp->ucopy.task = NULL;
tp->ucopy.len = 0;
*/
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (!sysctl_tcp_low_latency && tp->ucopy.task) {
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tcp_destroy_sock(sk);
}
-static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
+static __inline__ void tcp_sack_reset(struct tcp_sock *tp)
{
tp->dsack = 0;
tp->eff_sacks = 0;
tp->num_sacks = 0;
}
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
+static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
if (tp->tstamp_ok) {
*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
struct sock *child)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
req->sk = child;
sk_acceptq_added(sk);
return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
}
-static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
struct open_request **prev)
{
write_lock(&tp->syn_wait_lock);
}
static __inline__ void tcp_openreq_init(struct open_request *req,
- struct tcp_opt *tp,
+ struct tcp_sock *tp,
struct sk_buff *skb)
{
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
wake_up(&tcp_lhash_wait);
}
-static inline int keepalive_intvl_when(const struct tcp_opt *tp)
+static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}
-static inline int keepalive_time_when(const struct tcp_opt *tp)
+static inline int keepalive_time_when(const struct tcp_sock *tp)
{
return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
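Both helpers prefer the per-socket value and fall back to the sysctl default only when it is zero; from user space the per-socket values are the ones the TCP keepalive socket options are meant to set. A hypothetical user-space example (the idle and interval figures are arbitrary):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int on = 1, idle = 120, intvl = 10;

	/* enable keepalive, then override the sysctl defaults for this socket */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));    /* per-socket keepalive time     */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)); /* per-socket keepalive interval */

	printf("keepalive configured on fd %d\n", fd);
	close(fd);
	return 0;
}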
-static inline int tcp_fin_time(const struct tcp_opt *tp)
+static inline int tcp_fin_time(const struct tcp_sock *tp)
{
int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
return fin_timeout;
}
-static inline int tcp_paws_check(const struct tcp_opt *tp, int rst)
+static inline int tcp_paws_check(const struct tcp_sock *tp, int rst)
{
if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
return 0;
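The test above relies on a signed 32-bit difference so that "tsval is not older than ts_recent" stays correct across timestamp wrap. A small stand-alone demonstration with invented timestamps straddling the wrap point:

#include <stdio.h>
#include <stdint.h>

/* same idea as the (s32)(rcv_tsval - ts_recent) >= 0 test above */
static int tsval_not_older(uint32_t tsval, uint32_t ts_recent)
{
	return (int32_t)(tsval - ts_recent) >= 0;
}

int main(void)
{
	uint32_t ts_recent = 0xfffffff0u;  /* just below the 32-bit wrap */
	uint32_t tsval     = 0x00000010u;  /* just after the wrap        */

	/* a plain unsigned compare would call tsval "older"; the signed delta does not */
	printf("naive compare : %s\n", tsval >= ts_recent ? "newer" : "older");
	printf("signed delta  : %s\n", tsval_not_older(tsval, ts_recent) ? "newer" : "older");
	return 0;
}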
static inline int tcp_use_frto(const struct sock *sk)
{
- const struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* F-RTO must be activated in sysctl and there must be some
* unsent new data, and the advertised window should allow
#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
-static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
+static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
{
if (tcp_is_westwood(tp))
tp->westwood.rtt = rtt_seq;
}
-void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
-void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
-
-static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (tcp_is_westwood(tcp_sk(sk)))
- __tcp_westwood_fast_bw(sk, skb);
-}
-
-static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (tcp_is_westwood(tcp_sk(sk)))
- __tcp_westwood_slow_bw(sk, skb);
-}
-
-static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
+static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
(__u32) (tp->mss_cache_std),
2U);
}
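The Westwood window is the bandwidth estimate multiplied by the minimum RTT, expressed in MSS-sized segments and never allowed below 2. A hypothetical stand-alone rendering of that arithmetic (the numbers are invented; no claim is made about the kernel's internal units):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* invented sample values; bw_est * rtt_min should come out in bytes */
	uint32_t bw_est        = 125000;  /* estimated bandwidth              */
	uint32_t rtt_min       = 1;       /* minimum observed round-trip time */
	uint32_t mss_cache_std = 1460;    /* cached MSS in bytes              */

	uint32_t segs = bw_est * rtt_min / mss_cache_std;
	if (segs < 2U)
		segs = 2U;   /* never let the window drop below two segments */

	printf("Westwood window: %u segments\n", (unsigned)segs);  /* prints 85 */
	return 0;
}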
-static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
+static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}
-static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
+static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
{
__u32 ssthresh = 0;
return (ssthresh != 0);
}
-static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
+static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
{
__u32 cwnd = 0;