X-Git-Url: http://git.alex.org.uk

diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 3c513ce..824e5f1 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -3,8 +3,6 @@
  *
  *  This source is covered by the GNU GPL, the same as all kernel sources.
  *
- *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
- *
  *  Authors:	Andrey V. Savochkin
@@ -14,13 +12,14 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
+#include
 
 /*
  * Theory of operations.
@@ -53,58 +52,73 @@
  *		lookups performed with disabled BHs.
  *
  *  Serialisation issues.
- *  1.  Nodes may appear in the tree only with the pool write lock held.
- *  2.  Nodes may disappear from the tree only with the pool write lock held
+ *  1.  Nodes may appear in the tree only with the pool lock held.
+ *  2.  Nodes may disappear from the tree only with the pool lock held
  *	AND reference count being 0.
  *  3.  Nodes appears and disappears from unused node list only under
  *	"inet_peer_unused_lock".
  *  4.  Global variable peer_total is modified under the pool lock.
  *  5.  struct inet_peer fields modification:
  *	avl_left, avl_right, avl_parent, avl_height: pool lock
- *	unused_next, unused_prevp: unused node list lock
+ *	unused: unused node list lock
  *	refcnt: atomically against modifications on other CPU;
  *	   usually under some other lock to prevent node disappearing
  *	dtime: unused node list lock
- *	v4daddr: unchangeable
- *	ip_id_count: idlock
+ *	daddr: unchangeable
+ *	ip_id_count: atomic value (no lock needed)
  */
 
-/* Exported for inet_getid inline function.  */
-DEFINE_SPINLOCK(inet_peer_idlock);
-
-static kmem_cache_t *peer_cachep;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 
-static struct inet_peer peer_fake_node = {
-	.avl_left	= &peer_fake_node,
-	.avl_right	= &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+	.avl_left	= peer_avl_empty_rcu,
+	.avl_right	= peer_avl_empty_rcu,
 	.avl_height	= 0
 };
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+struct inet_peer_base {
+	struct inet_peer __rcu	*root;
+	spinlock_t		lock;
+	int			total;
+};
+
+static struct inet_peer_base v4_peers = {
+	.root		= peer_avl_empty_rcu,
+	.lock		= __SPIN_LOCK_UNLOCKED(v4_peers.lock),
+	.total		= 0,
+};
+
+static struct inet_peer_base v6_peers = {
+	.root		= peer_avl_empty_rcu,
+	.lock		= __SPIN_LOCK_UNLOCKED(v6_peers.lock),
+	.total		= 0,
+};
+
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
 /* Exported for sysctl_net_ipv4. */
-int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
+int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
-int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
-int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */
-
-static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function. */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
+int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
+int inet_peer_gc_mintime __read_mostly = 10 * HZ;
+int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
+
+static struct {
+	struct list_head	list;
+	spinlock_t		lock;
+} unused_peers = {
+	.list	= LIST_HEAD_INIT(unused_peers.list),
+	.lock	= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
 
 static void peer_check_expire(unsigned long dummy);
-static struct timer_list peer_periodic_timer =
-	TIMER_INITIALIZER(peer_check_expire, 0, 0);
+static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
-/* Exported for sysctl_net_ipv4. */
-int inet_peer_gc_mintime = 10 * HZ,
-	inet_peer_gc_maxtime = 120 * HZ;
 
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
@@ -126,11 +140,8 @@ void __init inet_initpeers(void)
 	peer_cachep = kmem_cache_create("inet_peer_cache",
 			sizeof(struct inet_peer),
-			0, SLAB_HWCACHE_ALIGN,
-			NULL, NULL);
-
-	if (!peer_cachep)
-		panic("cannot create inet_peer_cache");
+			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+			NULL);
 
 	/* All the timers, started at system startup tend
 	 * to synchronize. Perturb it a bit.
@@ -144,123 +155,194 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL; /* mark it as removed */
+	if (!list_empty(&p->unused)) {
+		spin_lock_bh(&unused_peers.lock);
+		list_del_init(&p->unused);
+		spin_unlock_bh(&unused_peers.lock);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
-/* Called with local BH disabled and the pool lock held. */
-#define lookup(daddr) \
+static int addr_compare(const struct inetpeer_addr *a,
+			const struct inetpeer_addr *b)
+{
+	int i, n = (a->family == AF_INET ? 1 : 4);
+
+	for (i = 0; i < n; i++) {
+		if (a->a6[i] == b->a6[i])
+			continue;
+		if (a->a6[i] < b->a6[i])
+			return -1;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Called with local BH disabled and the pool lock held.
+ */
+#define lookup(_daddr, _stack, _base) \
 ({ \
-	struct inet_peer *u, **v; \
-	stackptr = stack; \
-	*stackptr++ = &peer_root; \
-	for (u = peer_root; u != peer_avl_empty; ) { \
-		if (daddr == u->v4daddr) \
+	struct inet_peer *u; \
+	struct inet_peer __rcu **v; \
+ \
+	stackptr = _stack; \
+	*stackptr++ = &_base->root; \
+	for (u = rcu_dereference_protected(_base->root, \
+			lockdep_is_held(&_base->lock)); \
+	     u != peer_avl_empty; ) { \
+		int cmp = addr_compare(_daddr, &u->daddr); \
+		if (cmp == 0) \
 			break; \
-		if (daddr < u->v4daddr) \
+		if (cmp == -1) \
 			v = &u->avl_left; \
 		else \
 			v = &u->avl_right; \
 		*stackptr++ = v; \
-		u = *v; \
+		u = rcu_dereference_protected(*v, \
+			lockdep_is_held(&_base->lock)); \
 	} \
 	u; \
 })
 
-/* Called with local BH disabled and the pool write lock held. */
-#define lookup_rightempty(start) \
+/*
+ * Called with rcu_read_lock_bh()
+ * Because we hold no lock against a writer, its quite possible we fall
+ * in an endless loop.
+ * But every pointer we follow is guaranteed to be valid thanks to RCU.
+ * We exit from this function if number of links exceeds PEER_MAXDEPTH
+ */
+static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
+				       struct inet_peer_base *base)
+{
+	struct inet_peer *u = rcu_dereference_bh(base->root);
+	int count = 0;
+
+	while (u != peer_avl_empty) {
+		int cmp = addr_compare(daddr, &u->daddr);
+		if (cmp == 0) {
+			/* Before taking a reference, check if this entry was
+			 * deleted, unlink_from_pool() sets refcnt=-1 to make
+			 * distinction between an unused entry (refcnt=0) and
+			 * a freed one.
+			 */
+			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+				u = NULL;
+			return u;
+		}
+		if (cmp == -1)
+			u = rcu_dereference_bh(u->avl_left);
+		else
+			u = rcu_dereference_bh(u->avl_right);
+		if (unlikely(++count == PEER_MAXDEPTH))
+			break;
+	}
+	return NULL;
+}
+
+/* Called with local BH disabled and the pool lock held. */
+#define lookup_rightempty(start, base) \
 ({ \
-	struct inet_peer *u, **v; \
+	struct inet_peer *u; \
+	struct inet_peer __rcu **v; \
 	*stackptr++ = &start->avl_left; \
 	v = &start->avl_left; \
-	for (u = *v; u->avl_right != peer_avl_empty; ) { \
+	for (u = rcu_dereference_protected(*v, \
+			lockdep_is_held(&base->lock)); \
+	     u->avl_right != peer_avl_empty_rcu; ) { \
 		v = &u->avl_right; \
 		*stackptr++ = v; \
-		u = *v; \
+		u = rcu_dereference_protected(*v, \
+			lockdep_is_held(&base->lock)); \
 	} \
 	u; \
 })
 
-/* Called with local BH disabled and the pool write lock held.
+/* Called with local BH disabled and the pool lock held.
  * Variable names are the proof of operation correctness.
- * Look into mm/map_avl.c for more detail description of the ideas. */
-static void peer_avl_rebalance(struct inet_peer **stack[],
-		struct inet_peer ***stackend)
+ * Look into mm/map_avl.c for more detail description of the ideas.
+ */
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+		struct inet_peer __rcu ***stackend,
+		struct inet_peer_base *base)
 {
-	struct inet_peer **nodep, *node, *l, *r;
+	struct inet_peer __rcu **nodep;
+	struct inet_peer *node, *l, *r;
 	int lh, rh;
 
 	while (stackend > stack) {
 		nodep = *--stackend;
-		node = *nodep;
-		l = node->avl_left;
-		r = node->avl_right;
+		node = rcu_dereference_protected(*nodep,
+				lockdep_is_held(&base->lock));
+		l = rcu_dereference_protected(node->avl_left,
+				lockdep_is_held(&base->lock));
+		r = rcu_dereference_protected(node->avl_right,
+				lockdep_is_held(&base->lock));
 		lh = node_height(l);
 		rh = node_height(r);
 		if (lh > rh + 1) { /* l: RH+2 */
 			struct inet_peer *ll, *lr, *lrl, *lrr;
 			int lrh;
-			ll = l->avl_left;
-			lr = l->avl_right;
+			ll = rcu_dereference_protected(l->avl_left,
+					lockdep_is_held(&base->lock));
+			lr = rcu_dereference_protected(l->avl_right,
+					lockdep_is_held(&base->lock));
 			lrh = node_height(lr);
 			if (lrh <= node_height(ll)) { /* ll: RH+1 */
-				node->avl_left = lr;	/* lr: RH or RH+1 */
-				node->avl_right = r;	/* r: RH */
+				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-				l->avl_left = ll;	/* ll: RH+1 */
-				l->avl_right = node;	/* node: RH+1 or RH+2 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
+				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
 				l->avl_height = node->avl_height + 1;
-				*nodep = l;
+				RCU_INIT_POINTER(*nodep, l);
 			} else { /* ll: RH, lr: RH+1 */
-				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
-				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
-				node->avl_left = lrr;	/* lrr: RH or RH-1 */
-				node->avl_right = r;	/* r: RH */
+				lrl = rcu_dereference_protected(lr->avl_left,
+						lockdep_is_held(&base->lock));	/* lrl: RH or RH-1 */
+				lrr = rcu_dereference_protected(lr->avl_right,
+						lockdep_is_held(&base->lock));	/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = rh + 1; /* node: RH+1 */
-				l->avl_left = ll;	/* ll: RH */
-				l->avl_right = lrl;	/* lrl: RH or RH-1 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
+				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
 				l->avl_height = rh + 1;	/* l: RH+1 */
-				lr->avl_left = l;	/* l: RH+1 */
-				lr->avl_right = node;	/* node: RH+1 */
+				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
+				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
 				lr->avl_height = rh + 2;
-				*nodep = lr;
+				RCU_INIT_POINTER(*nodep, lr);
 			}
 		} else if (rh > lh + 1) { /* r: LH+2 */
 			struct inet_peer *rr, *rl, *rlr, *rll;
 			int rlh;
-			rr = r->avl_right;
-			rl = r->avl_left;
+			rr = rcu_dereference_protected(r->avl_right,
+					lockdep_is_held(&base->lock));
+			rl = rcu_dereference_protected(r->avl_left,
+					lockdep_is_held(&base->lock));
 			rlh = node_height(rl);
 			if (rlh <= node_height(rr)) { /* rr: LH+1 */
-				node->avl_right = rl;	/* rl: LH or LH+1 */
-				node->avl_left = l;	/* l: LH */
+				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-				r->avl_right = rr;	/* rr: LH+1 */
-				r->avl_left = node;	/* node: LH+1 or LH+2 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
+				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
 				r->avl_height = node->avl_height + 1;
-				*nodep = r;
+				RCU_INIT_POINTER(*nodep, r);
 			} else { /* rr: RH, rl: RH+1 */
-				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
-				rll = rl->avl_left;	/* rll: LH or LH-1 */
-				node->avl_right = rll;	/* rll: LH or LH-1 */
-				node->avl_left = l;	/* l: LH */
+				rlr = rcu_dereference_protected(rl->avl_right,
+						lockdep_is_held(&base->lock));	/* rlr: LH or LH-1 */
+				rll = rcu_dereference_protected(rl->avl_left,
+						lockdep_is_held(&base->lock));	/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = lh + 1; /* node: LH+1 */
-				r->avl_right = rr;	/* rr: LH */
-				r->avl_left = rlr;	/* rlr: LH or LH-1 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
+				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
 				r->avl_height = lh + 1;	/* r: LH+1 */
-				rl->avl_right = r;	/* r: LH+1 */
-				rl->avl_left = node;	/* node: LH+1 */
+				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
+				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
 				rl->avl_height = lh + 2;
-				*nodep = rl;
+				RCU_INIT_POINTER(*nodep, rl);
 			}
 		} else {
 			node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -268,99 +350,117 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
 	}
 }
 
-/* Called with local BH disabled and the pool write lock held. */
-#define link_to_pool(n) \
+/* Called with local BH disabled and the pool lock held. */
+#define link_to_pool(n, base) \
 do { \
 	n->avl_height = 1; \
-	n->avl_left = peer_avl_empty; \
-	n->avl_right = peer_avl_empty; \
-	**--stackptr = n; \
-	peer_avl_rebalance(stack, stackptr); \
-} while(0)
+	n->avl_left = peer_avl_empty_rcu; \
+	n->avl_right = peer_avl_empty_rcu; \
+	/* lockless readers can catch us now */ \
+	rcu_assign_pointer(**--stackptr, n); \
+	peer_avl_rebalance(stack, stackptr, base); \
+} while (0)
+
+static void inetpeer_free_rcu(struct rcu_head *head)
+{
+	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
+}
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+		struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	int do_free;
 
 	do_free = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	spin_lock_bh(&base->lock);
 	/* Check the reference counter.  It was artificially incremented by 1
-	 * in cleanup() function to prevent sudden disappearing.  If the
-	 * reference count is still 1 then the node is referenced only as `p'
-	 * here and from the pool.  So under the exclusive pool lock it's safe
-	 * to remove the node and free it later. */
-	if (atomic_read(&p->refcnt) == 1) {
-		struct inet_peer **stack[PEER_MAXDEPTH];
-		struct inet_peer ***stackptr, ***delp;
-		if (lookup(p->v4daddr) != p)
+	 * in cleanup() function to prevent sudden disappearing.  If we can
+	 * atomically (because of lockless readers) take this last reference,
+	 * it's safe to remove the node and free it later.
+	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
+	 */
+	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
+		struct inet_peer __rcu ***stackptr, ***delp;
+		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
-		if (p->avl_left == peer_avl_empty) {
+		if (p->avl_left == peer_avl_empty_rcu) {
 			*delp[0] = p->avl_right;
 			--stackptr;
 		} else {
 			/* look for a node to insert instead of p */
 			struct inet_peer *t;
-			t = lookup_rightempty(p);
-			if (*stackptr[-1] != t)
-				BUG();
+			t = lookup_rightempty(p, base);
+			BUG_ON(rcu_dereference_protected(*stackptr[-1],
+					lockdep_is_held(&base->lock)) != t);
 			**--stackptr = t->avl_left;
-			/* t is removed, t->v4daddr > x->v4daddr for any
+			/* t is removed, t->daddr > x->daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
-			*delp[0] = t;
+			RCU_INIT_POINTER(*delp[0], t);
 			t->avl_left = p->avl_left;
 			t->avl_right = p->avl_right;
 			t->avl_height = p->avl_height;
-			if (delp[1] != &p->avl_left)
-				BUG();
+			BUG_ON(delp[1] != &p->avl_left);
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
-		peer_avl_rebalance(stack, stackptr);
-		peer_total--;
+		peer_avl_rebalance(stack, stackptr, base);
+		base->total--;
 		do_free = 1;
 	}
-	write_unlock_bh(&peer_pool_lock);
+	spin_unlock_bh(&base->lock);
 
 	if (do_free)
-		kmem_cache_free(peer_cachep, p);
+		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
 	else
 		/* The node is used again.  Decrease the reference counter
 		 * back.  The loop "cleanup -> unlink_from_unused
 		 *   -> unlink_from_pool -> putpeer -> link_to_unused
 		 *   -> cleanup (for the same node)"
 		 * doesn't really exist because the entry will have a
-		 * recent deletion time and will not be cleaned again soon. */
+		 * recent deletion time and will not be cleaned again soon.
+		 */
 		inet_putpeer(p);
 }
 
+static struct inet_peer_base *family_to_base(int family)
+{
+	return (family == AF_INET ? &v4_peers : &v6_peers);
+}
+
+static struct inet_peer_base *peer_to_base(struct inet_peer *p)
+{
+	return family_to_base(p->daddr.family);
+}
+
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
-	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		if (time_after(p->dtime + ttl, jiffies)) {
+	spin_lock_bh(&unused_peers.lock);
+	if (!list_empty(&unused_peers.list)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
+		if (delta < ttl) {
 			/* Do not prune fresh entries. */
-			spin_unlock_bh(&inet_peer_unused_lock);
+			spin_unlock_bh(&unused_peers.lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL; /* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+	spin_unlock_bh(&unused_peers.lock);
 
 	if (p == NULL)
 		/* It means that the total number of USED entries has
@@ -368,94 +468,116 @@ static int cleanup_once(unsigned long ttl)
 		 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p);
+	unlink_from_pool(p, peer_to_base(p), stack);
 	return 0;
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__u32 daddr, int create)
+struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
-	struct inet_peer *p, *n;
-	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
+	struct inet_peer_base *base = family_to_base(daddr->family);
+	struct inet_peer *p;
 
-	/* Look up for the address quickly. */
-	read_lock_bh(&peer_pool_lock);
-	p = lookup(daddr);
-	if (p != peer_avl_empty)
-		atomic_inc(&p->refcnt);
-	read_unlock_bh(&peer_pool_lock);
+	/* Look up for the address quickly, lockless.
+	 * Because of a concurrent writer, we might not find an existing entry.
+	 */
+	rcu_read_lock_bh();
+	p = lookup_rcu_bh(daddr, base);
+	rcu_read_unlock_bh();
+
+	if (p) {
+		/* The existing node has been found.
+		 * Remove the entry from unused list if it was there.
+		 */
+		unlink_from_unused(p);
+		return p;
+	}
 
+	/* retry an exact lookup, taking the lock before.
+	 * At least, nodes should be hot in our cache.
+	 */
+	spin_lock_bh(&base->lock);
+	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		/* The existing node has been found. */
+		atomic_inc(&p->refcnt);
+		spin_unlock_bh(&base->lock);
 		/* Remove the entry from unused list if it was there. */
 		unlink_from_unused(p);
 		return p;
 	}
+	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
+	if (p) {
+		p->daddr = *daddr;
+		atomic_set(&p->refcnt, 1);
+		atomic_set(&p->rid, 0);
+		atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+		p->tcp_ts_stamp = 0;
+		INIT_LIST_HEAD(&p->unused);
+
+
+		/* Link the node. */
+		link_to_pool(p, base);
+		base->total++;
+	}
+	spin_unlock_bh(&base->lock);
 
-	if (!create)
-		return NULL;
-
-	/* Allocate the space outside the locked region. */
-	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
-	if (n == NULL)
-		return NULL;
-	n->v4daddr = daddr;
-	atomic_set(&n->refcnt, 1);
-	n->ip_id_count = secure_ip_id(daddr);
-	n->tcp_ts_stamp = 0;
-
-	write_lock_bh(&peer_pool_lock);
-	/* Check if an entry has suddenly appeared. */
-	p = lookup(daddr);
-	if (p != peer_avl_empty)
-		goto out_free;
-
-	/* Link the node. */
-	link_to_pool(n);
-	n->unused_prevp = NULL; /* not on the list */
-	peer_total++;
-	write_unlock_bh(&peer_pool_lock);
-
-	if (peer_total >= inet_peer_threshold)
+	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
-		cleanup_once(0);
-
-	return n;
-
-out_free:
-	/* The appropriate node is already in the pool. */
-	atomic_inc(&p->refcnt);
-	write_unlock_bh(&peer_pool_lock);
-	/* Remove the entry from unused list if it was there. */
-	unlink_from_unused(p);
-	/* Free preallocated the preallocated node. */
-	kmem_cache_free(peer_cachep, n);
+		cleanup_once(0, stack);
+
 	return p;
 }
 
+static int compute_total(void)
+{
+	return v4_peers.total + v6_peers.total;
+}
+EXPORT_SYMBOL_GPL(inet_getpeer);
+
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-	int i;
-	int ttl;
+	unsigned long now = jiffies;
+	int ttl, total;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
-	if (peer_total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peer_total / inet_peer_threshold * HZ;
-	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+					total / inet_peer_threshold * HZ;
+	while (!cleanup_once(ttl, stack)) {
+		if (jiffies != now)
+			break;
+	}
 
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peer_total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peer_total / inet_peer_threshold * HZ;
+				total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+	local_bh_disable();
+
+	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+		list_add_tail(&p->unused, &unused_peers.list);
+		p->dtime = (__u32)jiffies;
+		spin_unlock(&unused_peers.lock);
+	}
+
+	local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(inet_putpeer);
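
Editor's note: the subtlest part of the patch above is the refcnt convention that lets lockless readers coexist with deletion. lookup_rcu_bh() only takes a reference if refcnt is not -1 (atomic_add_unless), and unlink_from_pool() only marks a node deleted if it can move refcnt from 1 to -1 (atomic_cmpxchg). Below is a minimal user-space sketch of that convention, using C11 atomics in place of the kernel's atomic_t; peer_try_get() and peer_try_unlink() are illustrative names, not functions from the kernel source.

#include <stdatomic.h>
#include <stdio.h>

struct peer {
	atomic_int refcnt;	/* 1 = held only by the pool, -1 = deleted */
};

/* Lockless reader: take a reference unless the node is already deleted.
 * Stands in for atomic_add_unless(&u->refcnt, 1, -1) in lookup_rcu_bh(). */
static int peer_try_get(struct peer *p)
{
	int old = atomic_load(&p->refcnt);

	while (old != -1) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(&p->refcnt, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* lost the race against deletion */
}

/* Deleter: claim the node only if the pool holds the sole reference.
 * Stands in for atomic_cmpxchg(&p->refcnt, 1, -1) in unlink_from_pool(). */
static int peer_try_unlink(struct peer *p)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&p->refcnt, &expected, -1);
}

int main(void)
{
	struct peer p = { .refcnt = 1 };

	printf("reader gets ref:         %d\n", peer_try_get(&p));    /* 1 */
	printf("unlink while referenced: %d\n", peer_try_unlink(&p)); /* 0 */
	atomic_fetch_sub(&p.refcnt, 1);	/* reader drops its reference */
	printf("unlink when idle:        %d\n", peer_try_unlink(&p)); /* 1 */
	printf("late lockless reader:    %d\n", peer_try_get(&p));    /* 0 */
	return 0;
}

In the kernel, the losing reader simply falls back to the locked lookup; the sketch only shows why a freed-but-still-visible node can never gain a new reference.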
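For reference, the tree ordering introduced by addr_compare() treats the key as one 32-bit word for IPv4 and four words for IPv6. Here is a small stand-alone sketch of the same comparison logic; struct addr_key is a hypothetical stand-in for the kernel's struct inetpeer_addr, and note the kernel compares raw network-byte-order words, so this only illustrates the mechanics, not the exact ordering on a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>	/* AF_INET, AF_INET6 */

struct addr_key {
	uint32_t a6[4];		/* a6[0] doubles as the IPv4 address */
	uint16_t family;	/* AF_INET or AF_INET6 */
};

static int addr_key_compare(const struct addr_key *a, const struct addr_key *b)
{
	/* IPv4 keys compare on one word, IPv6 keys on four. */
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->a6[i] == b->a6[i])
			continue;
		return a->a6[i] < b->a6[i] ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	struct addr_key x = { .a6 = { 0xc0a80001 }, .family = AF_INET };
	struct addr_key y = { .a6 = { 0xc0a80002 }, .family = AF_INET };

	printf("%d\n", addr_key_compare(&x, &y));	/* -1: x sorts left of y */
	return 0;
}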
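Finally, the TTL chosen in peer_check_expire() scales linearly with how full the pool is: a full pool prunes entries older than inet_peer_minttl, an empty one only entries older than inet_peer_maxttl. A quick user-space sketch of that arithmetic, with HZ and the sysctl defaults hard-coded purely for illustration:

#include <stdio.h>

#define HZ 100	/* example value; the real HZ is a kernel config choice */

static const int inet_peer_threshold = 65536 + 128;
static const int inet_peer_minttl = 120 * HZ;
static const int inet_peer_maxttl = 10 * 60 * HZ;

/* Same expression as in peer_check_expire(), with 'total' standing in
 * for compute_total(). */
static int peer_ttl(int total)
{
	if (total >= inet_peer_threshold)
		return inet_peer_minttl;
	return inet_peer_maxttl
			- (inet_peer_maxttl - inet_peer_minttl) / HZ *
				total / inet_peer_threshold * HZ;
}

int main(void)
{
	int totals[] = { 0, 16384, 32768, 65536 + 128 };
	unsigned int i;

	for (i = 0; i < sizeof(totals) / sizeof(totals[0]); i++)
		printf("total=%6d  ttl=%d jiffies\n", totals[i], peer_ttl(totals[i]));
	return 0;
}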