/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Version:     $Id: ip_vs_lblc.c,v 1.10 2002/09/15 08:14:08 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                   *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added doing full expiration check to
 *                                   collect stale entries of 24+ hours when
 *                                   no partial expire check in a half hour
 *     Julian Anastasov        :    replaced del_timer call with del_timer_sync
 *                                   to avoid the possible race between timer
 *                                   handler and del_timer thread in SMP
 *
 */
25
26 /*
27  * The lblc algorithm is as follows (pseudo code):
28  *
29  *       if cachenode[dest_ip] is null then
30  *               n, cachenode[dest_ip] <- {weighted least-conn node};
31  *       else
32  *               n <- cachenode[dest_ip];
33  *               if (n is dead) OR
34  *                  (n.conns>n.weight AND
35  *                   there is a node m with m.conns<m.weight/2) then
36  *                 n, cachenode[dest_ip] <- {weighted least-conn node};
37  *
38  *       return n;
39  *
40  * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
41  * me to write this module.
42  */
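
/*
 * Reassignment example (illustrative numbers): with weights
 * {n: 4, m: 8}, a cached node n serving 5 connections
 * (n.conns > n.weight) is dropped from the cache as soon as some
 * node m serves only 3 (m.conns < m.weight/2), and dest_ip is
 * remapped to the current weighted least-connection node.
 */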

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>


/*
 *    Garbage collection of stale IPVS lblc entries, triggered when
 *    the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 *    Full expiration check: when there has been no partial expiration
 *    check (garbage collection) within half an hour, do a full
 *    expiration check to collect stale entries that haven't been
 *    touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
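
/*
 * The expiration time is kept in jiffies; proc_dointvec_jiffies lets
 * userspace read and write it in seconds.  With the standard IPVS
 * sysctl path registered below (net_vs_ctl_path), tuning it should
 * look something like:
 *
 *      sysctl -w net.ipv4.vs.lblc_expiration=86400
 */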


/*
 *     for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
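/*
 * With the default of 10 bits this is a 1024-bucket table; combined
 * with the limit set in ip_vs_lblc_init_svc() (IP_VS_LBLC_TAB_SIZE*16)
 * a service caches at most 16384 destination addresses, i.e. 16 per
 * bucket on average.
 */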


/*
 *      IPVS lblc entry represents an association between a destination
 *      IP address and its destination server
 */
struct ip_vs_lblc_entry {
        struct list_head        list;
        __be32                  addr;           /* destination IP address */
        struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
};


/*
 *      IPVS lblc hash table
 */
struct ip_vs_lblc_table {
        rwlock_t                lock;           /* lock for this table */
        struct list_head        bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum number of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};


/*
 *      IPVS LBLC sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
                .data           = &sysctl_ip_vs_lblc_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header * sysctl_header;

/*
 *      new/free an ip_vs_lblc_entry, which is a mapping of a destination
 *      IP address to a server.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
        if (en == NULL) {
                IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
                return NULL;
        }

        INIT_LIST_HEAD(&en->list);
        en->addr = daddr;
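        /* en->lastuse is left unset here; the caller
         * (ip_vs_lblc_schedule) stamps it with jiffies. */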

        atomic_inc(&dest->refcnt);
        en->dest = dest;

        return en;
}


static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
        list_del(&en->list);
        /*
         * We don't kfree dest because it is referred to either by its
         * service or the trash dest list.
         */
        atomic_dec(&en->dest->refcnt);
        kfree(en);
}


/*
 *      Returns hash value for IPVS LBLC entry
 */
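/*
 * 2654435761 is a prime close to 2^32/phi, the classic Knuth
 * multiplicative hash constant: it spreads nearby destination
 * addresses evenly over the table before the mask cuts the result
 * down to IP_VS_LBLC_TAB_BITS bits.
 */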
static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
{
        return (ntohl(addr)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}


/*
 *      Hash an entry in the ip_vs_lblc_table.
 *      returns bool success.
 */
static int
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned hash;

        if (!list_empty(&en->list)) {
                IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
                          "called from %p\n", __builtin_return_address(0));
                return 0;
        }

        /*
         *      Hash by destination IP address
         */
        hash = ip_vs_lblc_hashkey(en->addr);

        write_lock(&tbl->lock);
        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
        write_unlock(&tbl->lock);

        return 1;
}


/*
 *  Get ip_vs_lblc_entry associated with supplied parameters.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
{
        unsigned hash;
        struct ip_vs_lblc_entry *en;

        hash = ip_vs_lblc_hashkey(addr);

        read_lock(&tbl->lock);

        list_for_each_entry(en, &tbl->bucket[hash], list) {
                if (en->addr == addr) {
                        /* HIT */
                        read_unlock(&tbl->lock);
                        return en;
                }
        }

        read_unlock(&tbl->lock);

        return NULL;
}


/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
        int i;
        struct ip_vs_lblc_entry *en, *nxt;

        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
}

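/*
 * Full expiration pass: sweep the entire table starting at the saved
 * rover position and drop every entry that hasn't been used within
 * sysctl_ip_vs_lblc_expiration (24 hours by default).
 */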
static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
{
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblc_entry *en, *nxt;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse + sysctl_ip_vs_lblc_expiration))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
        tbl->rover = j;
}


/*
 *      Periodic timer handler for the IPVS lblc table.
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check serves this purpose for now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
        struct ip_vs_lblc_table *tbl;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblc_entry *en, *nxt;

        tbl = (struct ip_vs_lblc_table *)data;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblc_full_check(tbl);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

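        /*
         * Aim to free a third more than the current excess over
         * max_size, but never more than half of max_size in one pass.
         */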
        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&tbl->lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}


static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;

        /*
         *    Allocate the ip_vs_lblc_table for this service
         */
        tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
                  "current service\n",
                  sizeof(struct ip_vs_lblc_table));

        /*
         *    Initialize the hash buckets
         */
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        rwlock_init(&tbl->lock);
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
                        (unsigned long)tbl);
        tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
        add_timer(&tbl->periodic_timer);

        return 0;
}


static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblc_flush(tbl);

        /* release the table itself */
        kfree(svc->sched_data);
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
                  sizeof(struct ip_vs_lblc_table));

        return 0;
}

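/*
 * Nothing to do on service updates: unusable destinations are caught
 * lazily in ip_vs_lblc_schedule() through the IP_VS_DEST_F_AVAILABLE,
 * weight and overload checks, and stale entries age out via the timer.
 */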
static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
{
        return 0;
}


static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is
         * fifty times higher than that of inactive connections on
         * average.  (This fifty times might not be accurate; we will
         * change it later.)  We use the following formula to estimate
         * the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * A server with weight=0 is quiesced and will not receive any
         * new connection.
         */
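        /*
         * Worked example: a server with 2 active and 10 inactive
         * connections and weight 3 has overhead 2*50+10 = 110; a
         * weight-1 server with overhead 60 loses to it, since the
         * cross-multiplied comparison 110*1 < 60*3 means 110/3 < 60/1.
         */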
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG(6, "LBLC: server %u.%u.%u.%u:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);

        return least;
}


/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
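/*
 * (This is the reassignment condition from the pseudo code at the top
 * of the file, evaluated on active connections only.)
 */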
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}


/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_dest *dest;
        struct ip_vs_lblc_table *tbl;
        struct ip_vs_lblc_entry *en;
        struct iphdr *iph = ip_hdr(skb);

        IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");

        tbl = (struct ip_vs_lblc_table *)svc->sched_data;
        en = ip_vs_lblc_get(tbl, iph->daddr);
        if (en == NULL) {
                dest = __ip_vs_wlc_schedule(svc, iph);
                if (dest == NULL) {
                        IP_VS_DBG(1, "no destination available\n");
                        return NULL;
                }
                en = ip_vs_lblc_new(iph->daddr, dest);
                if (en == NULL) {
                        return NULL;
                }
                ip_vs_lblc_hash(tbl, en);
        } else {
                dest = en->dest;
                if (!(dest->flags & IP_VS_DEST_F_AVAILABLE)
                    || atomic_read(&dest->weight) <= 0
                    || is_overloaded(dest, svc)) {
                        dest = __ip_vs_wlc_schedule(svc, iph);
                        if (dest == NULL) {
                                IP_VS_DBG(1, "no destination available\n");
                                return NULL;
                        }
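                        /* move the cache entry's reference from the
                         * old server to the newly chosen one */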
                        atomic_dec(&en->dest->refcnt);
                        atomic_inc(&dest->refcnt);
                        en->dest = dest;
                }
        }
        en->lastuse = jiffies;

        IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
                  "--> server %u.%u.%u.%u:%d\n",
                  NIPQUAD(en->addr),
                  NIPQUAD(dest->addr),
                  ntohs(dest->port));

        return dest;
}


/*
 *      IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
        .name =                 "lblc",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .init_service =         ip_vs_lblc_init_svc,
        .done_service =         ip_vs_lblc_done_svc,
        .update_service =       ip_vs_lblc_update_svc,
        .schedule =             ip_vs_lblc_schedule,
};


static int __init ip_vs_lblc_init(void)
{
        int ret;

        INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}


static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");