block cfq: don't use atomic_t for cfq_queue
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and on ideas from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "cfq.h"
18
19 /*
20  * tunables
21  */
22 /* max queue in one round of service */
23 static const int cfq_quantum = 8;
24 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25 /* maximum backwards seek, in KiB */
26 static const int cfq_back_max = 16 * 1024;
27 /* penalty of a backwards seek */
28 static const int cfq_back_penalty = 2;
29 static const int cfq_slice_sync = HZ / 10;
30 static int cfq_slice_async = HZ / 25;
31 static const int cfq_slice_async_rq = 2;
32 static int cfq_slice_idle = HZ / 125;
33 static int cfq_group_idle = HZ / 125;
34 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
35 static const int cfq_hist_divisor = 4;
36
37 /*
38  * offset from end of service tree
39  */
40 #define CFQ_IDLE_DELAY          (HZ / 5)
41
42 /*
43  * below this threshold, we consider thinktime immediate
44  */
45 #define CFQ_MIN_TT              (2)
46
47 #define CFQ_SLICE_SCALE         (5)
48 #define CFQ_HW_QUEUE_MIN        (5)
49 #define CFQ_SERVICE_SHIFT       12
50
51 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
52 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
53 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
54 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
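/*
 * For illustration: seek_history is a 32-bit sliding window (updated per
 * request elsewhere in this file) in which each bit records whether that
 * request was "seeky" with respect to the previous one. hweight32() counts
 * the set bits, so CFQQ_SEEKY() flags a queue as seeky once more than
 * 32/8 = 4 of its last 32 requests were seeks.
 */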
55
56 #define RQ_CIC(rq)              \
57         ((struct cfq_io_context *) (rq)->elevator_private)
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private3)
60
61 static struct kmem_cache *cfq_pool;
62 static struct kmem_cache *cfq_ioc_pool;
63
64 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
65 static struct completion *ioc_gone;
66 static DEFINE_SPINLOCK(ioc_gone_lock);
67
68 static DEFINE_SPINLOCK(cic_index_lock);
69 static DEFINE_IDA(cic_index_ida);
70
71 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
72 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
73 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
74
75 #define sample_valid(samples)   ((samples) > 80)
76 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
77
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85         struct rb_root rb;
86         struct rb_node *left;
87         unsigned count;
88         unsigned total_weight;
89         u64 min_vdisktime;
90 };
91 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
92                         .count = 0, .min_vdisktime = 0, }
93
94 /*
95  * Per process-grouping structure
96  */
97 struct cfq_queue {
98         /* reference count */
99         int ref;
100         /* various state flags, see below */
101         unsigned int flags;
102         /* parent cfq_data */
103         struct cfq_data *cfqd;
104         /* service_tree member */
105         struct rb_node rb_node;
106         /* service_tree key */
107         unsigned long rb_key;
108         /* prio tree member */
109         struct rb_node p_node;
110         /* prio tree root we belong to, if any */
111         struct rb_root *p_root;
112         /* sorted list of pending requests */
113         struct rb_root sort_list;
114         /* if fifo isn't expired, next request to serve */
115         struct request *next_rq;
116         /* requests queued in sort_list */
117         int queued[2];
118         /* currently allocated requests */
119         int allocated[2];
120         /* fifo list of requests in sort_list */
121         struct list_head fifo;
122
123         /* time when queue got scheduled in to dispatch first request. */
124         unsigned long dispatch_start;
125         unsigned int allocated_slice;
126         unsigned int slice_dispatch;
127         /* time when first request from queue completed and slice started. */
128         unsigned long slice_start;
129         unsigned long slice_end;
130         long slice_resid;
131
132         /* pending metadata requests */
133         int meta_pending;
134         /* number of requests that are on the dispatch list or inside driver */
135         int dispatched;
136
137         /* io prio of this group */
138         unsigned short ioprio, org_ioprio;
139         unsigned short ioprio_class, org_ioprio_class;
140
141         pid_t pid;
142
143         u32 seek_history;
144         sector_t last_request_pos;
145
146         struct cfq_rb_root *service_tree;
147         struct cfq_queue *new_cfqq;
148         struct cfq_group *cfqg;
149         struct cfq_group *orig_cfqg;
150         /* Number of sectors dispatched from queue in single dispatch round */
151         unsigned long nr_sectors;
152 };
153
154 /*
155  * First index in the service_trees.
156  * IDLE is handled separately, via its own service_tree_idle, so it does not index service_trees[]
157  */
158 enum wl_prio_t {
159         BE_WORKLOAD = 0,
160         RT_WORKLOAD = 1,
161         IDLE_WORKLOAD = 2,
162         CFQ_PRIO_NR,
163 };
164
165 /*
166  * Second index in the service_trees.
167  */
168 enum wl_type_t {
169         ASYNC_WORKLOAD = 0,
170         SYNC_NOIDLE_WORKLOAD = 1,
171         SYNC_WORKLOAD = 2
172 };
173
174 /* This is per cgroup per device grouping structure */
175 struct cfq_group {
176         /* group service_tree member */
177         struct rb_node rb_node;
178
179         /* group service_tree key */
180         u64 vdisktime;
181         unsigned int weight;
182
183         /* number of cfqq currently on this group */
184         int nr_cfqq;
185
186         /*
187          * Per group busy queues average. Useful for workload slice calc. We
188          * create the array for each prio class but at run time it is used
189          * only for the RT and BE classes; the slot for the IDLE class remains unused.
190          * This is primarily done to avoid confusion and a gcc warning.
191          */
192         unsigned int busy_queues_avg[CFQ_PRIO_NR];
193         /*
194          * rr lists of queues with requests. We maintain service trees for
195          * RT and BE classes. These trees are subdivided into subclasses
196          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For the IDLE
197          * class there is no subclassification and all the cfq queues go on
198          * a single tree service_tree_idle.
199          * Counts are embedded in the cfq_rb_root.
200          */
201         struct cfq_rb_root service_trees[2][3];
202         struct cfq_rb_root service_tree_idle;
203
204         unsigned long saved_workload_slice;
205         enum wl_type_t saved_workload;
206         enum wl_prio_t saved_serving_prio;
207         struct blkio_group blkg;
208 #ifdef CONFIG_CFQ_GROUP_IOSCHED
209         struct hlist_node cfqd_node;
210         atomic_t ref;
211 #endif
212         /* number of requests that are on the dispatch list or inside driver */
213         int dispatched;
214 };
215
216 /*
217  * Per block device queue structure
218  */
219 struct cfq_data {
220         struct request_queue *queue;
221         /* Root service tree for cfq_groups */
222         struct cfq_rb_root grp_service_tree;
223         struct cfq_group root_group;
224
225         /*
226          * The priority currently being served
227          */
228         enum wl_prio_t serving_prio;
229         enum wl_type_t serving_type;
230         unsigned long workload_expires;
231         struct cfq_group *serving_group;
232
233         /*
234          * Each priority tree is sorted by next_request position.  These
235          * trees are used when determining if two or more queues are
236          * interleaving requests (see cfq_close_cooperator).
237          */
238         struct rb_root prio_trees[CFQ_PRIO_LISTS];
239
240         unsigned int busy_queues;
241
242         int rq_in_driver;
243         int rq_in_flight[2];
244
245         /*
246          * queue-depth detection
247          */
248         int rq_queued;
249         int hw_tag;
250         /*
251          * hw_tag can be
252          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
253          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
254          *  0 => no NCQ
255          */
256         int hw_tag_est_depth;
257         unsigned int hw_tag_samples;
258
259         /*
260          * idle window management
261          */
262         struct timer_list idle_slice_timer;
263         struct work_struct unplug_work;
264
265         struct cfq_queue *active_queue;
266         struct cfq_io_context *active_cic;
267
268         /*
269          * async queue for each priority case
270          */
271         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
272         struct cfq_queue *async_idle_cfqq;
273
274         sector_t last_position;
275
276         /*
277          * tunables, see top of file
278          */
279         unsigned int cfq_quantum;
280         unsigned int cfq_fifo_expire[2];
281         unsigned int cfq_back_penalty;
282         unsigned int cfq_back_max;
283         unsigned int cfq_slice[2];
284         unsigned int cfq_slice_async_rq;
285         unsigned int cfq_slice_idle;
286         unsigned int cfq_group_idle;
287         unsigned int cfq_latency;
288         unsigned int cfq_group_isolation;
289
290         unsigned int cic_index;
291         struct list_head cic_list;
292
293         /*
294          * Fallback dummy cfqq for extreme OOM conditions
295          */
296         struct cfq_queue oom_cfqq;
297
298         unsigned long last_delayed_sync;
299
300         /* List of cfq groups being managed on this device */
301         struct hlist_head cfqg_list;
302         struct rcu_head rcu;
303 };
304
305 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
306
307 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
308                                             enum wl_prio_t prio,
309                                             enum wl_type_t type)
310 {
311         if (!cfqg)
312                 return NULL;
313
314         if (prio == IDLE_WORKLOAD)
315                 return &cfqg->service_tree_idle;
316
317         return &cfqg->service_trees[prio][type];
318 }
319
320 enum cfqq_state_flags {
321         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
322         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
323         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
324         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
325         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
326         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
327         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
328         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
329         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
330         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
331         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
332         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
333         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
334 };
335
336 #define CFQ_CFQQ_FNS(name)                                              \
337 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
338 {                                                                       \
339         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
340 }                                                                       \
341 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
342 {                                                                       \
343         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
344 }                                                                       \
345 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
346 {                                                                       \
347         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
348 }
349
350 CFQ_CFQQ_FNS(on_rr);
351 CFQ_CFQQ_FNS(wait_request);
352 CFQ_CFQQ_FNS(must_dispatch);
353 CFQ_CFQQ_FNS(must_alloc_slice);
354 CFQ_CFQQ_FNS(fifo_expire);
355 CFQ_CFQQ_FNS(idle_window);
356 CFQ_CFQQ_FNS(prio_changed);
357 CFQ_CFQQ_FNS(slice_new);
358 CFQ_CFQQ_FNS(sync);
359 CFQ_CFQQ_FNS(coop);
360 CFQ_CFQQ_FNS(split_coop);
361 CFQ_CFQQ_FNS(deep);
362 CFQ_CFQQ_FNS(wait_busy);
363 #undef CFQ_CFQQ_FNS
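/*
 * For reference, a sketch of what one CFQ_CFQQ_FNS() invocation above expands
 * to, taking "on_rr" as the example (the other flags follow the same pattern):
 *
 *   static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *   {
 *           cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *   }
 *   static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *   {
 *           cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *   }
 *   static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *   {
 *           return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *   }
 */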
364
365 #ifdef CONFIG_CFQ_GROUP_IOSCHED
366 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
367         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
368                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
369                         blkg_path(&(cfqq)->cfqg->blkg), ##args);
370
371 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
372         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
373                                 blkg_path(&(cfqg)->blkg), ##args);      \
374
375 #else
376 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
377         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
378 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
379 #endif
380 #define cfq_log(cfqd, fmt, args...)     \
381         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
382
383 /* Traverses through cfq group service trees */
384 #define for_each_cfqg_st(cfqg, i, j, st) \
385         for (i = 0; i <= IDLE_WORKLOAD; i++) \
386                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
387                         : &cfqg->service_tree_idle; \
388                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
389                         (i == IDLE_WORKLOAD && j == 0); \
390                         j++, st = i < IDLE_WORKLOAD ? \
391                         &cfqg->service_trees[i][j]: NULL) \
392
393
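/*
 * In plain terms, the iterator above visits every service tree of a group:
 * for i in {BE_WORKLOAD, RT_WORKLOAD} it yields &cfqg->service_trees[i][j]
 * for each j in {ASYNC, SYNC_NOIDLE, SYNC}, and for i == IDLE_WORKLOAD it
 * yields &cfqg->service_tree_idle exactly once - seven trees in total.
 */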
394 static inline bool iops_mode(struct cfq_data *cfqd)
395 {
396         /*
397          * If we are not idling on queues and it is an NCQ drive, requests
398          * execute in parallel and measuring time is not possible in most
399          * cases unless we drive shallower queue depths, which itself becomes
400          * a performance bottleneck. In such cases, switch to providing
401          * fairness in terms of number of IOs.
402          */
403         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
404                 return true;
405         else
406                 return false;
407 }
408
409 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
410 {
411         if (cfq_class_idle(cfqq))
412                 return IDLE_WORKLOAD;
413         if (cfq_class_rt(cfqq))
414                 return RT_WORKLOAD;
415         return BE_WORKLOAD;
416 }
417
418
419 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
420 {
421         if (!cfq_cfqq_sync(cfqq))
422                 return ASYNC_WORKLOAD;
423         if (!cfq_cfqq_idle_window(cfqq))
424                 return SYNC_NOIDLE_WORKLOAD;
425         return SYNC_WORKLOAD;
426 }
427
428 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
429                                         struct cfq_data *cfqd,
430                                         struct cfq_group *cfqg)
431 {
432         if (wl == IDLE_WORKLOAD)
433                 return cfqg->service_tree_idle.count;
434
435         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
436                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
437                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
438 }
439
440 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
441                                         struct cfq_group *cfqg)
442 {
443         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
444                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
445 }
446
447 static void cfq_dispatch_insert(struct request_queue *, struct request *);
448 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
449                                        struct io_context *, gfp_t);
450 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
451                                                 struct io_context *);
452
453 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
454                                             bool is_sync)
455 {
456         return cic->cfqq[is_sync];
457 }
458
459 static inline void cic_set_cfqq(struct cfq_io_context *cic,
460                                 struct cfq_queue *cfqq, bool is_sync)
461 {
462         cic->cfqq[is_sync] = cfqq;
463 }
464
465 #define CIC_DEAD_KEY    1ul
466 #define CIC_DEAD_INDEX_SHIFT    1
467
468 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
469 {
470         return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
471 }
472
473 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
474 {
475         struct cfq_data *cfqd = cic->key;
476
477         if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
478                 return NULL;
479
480         return cfqd;
481 }
482
483 /*
484  * We regard a request as SYNC if it's either a read or has the SYNC bit
485  * set (in which case it could also be a direct WRITE).
486  */
487 static inline bool cfq_bio_sync(struct bio *bio)
488 {
489         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
490 }
491
492 /*
493  * Schedule a run of the queue if there are requests pending and no one in the
494  * driver that will restart queueing.
495  */
496 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
497 {
498         if (cfqd->busy_queues) {
499                 cfq_log(cfqd, "schedule dispatch");
500                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
501         }
502 }
503
504 static int cfq_queue_empty(struct request_queue *q)
505 {
506         struct cfq_data *cfqd = q->elevator->elevator_data;
507
508         return !cfqd->rq_queued;
509 }
510
511 /*
512  * Scale schedule slice based on io priority. Use the sync time slice only
513  * if a queue is marked sync and has sync io queued. A sync queue with async
514  * io only should not get the full sync slice length.
515  */
516 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
517                                  unsigned short prio)
518 {
519         const int base_slice = cfqd->cfq_slice[sync];
520
521         WARN_ON(prio >= IOPRIO_BE_NR);
522
523         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
524 }
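/*
 * A worked example, assuming HZ == 1000 so that the default sync base slice
 * cfq_slice_sync = HZ / 10 = 100ms, and CFQ_SLICE_SCALE = 5 gives a 20ms step:
 *
 *   prio 0 (highest BE): 100 + 20 * (4 - 0) = 180ms
 *   prio 4 (default):    100 + 20 * (4 - 4) = 100ms
 *   prio 7 (lowest BE):  100 + 20 * (4 - 7) =  40ms
 *
 * i.e. each ioprio level above or below the default adds or removes one fifth
 * of the base slice.
 */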
525
526 static inline int
527 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
528 {
529         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
530 }
531
532 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
533 {
534         u64 d = delta << CFQ_SERVICE_SHIFT;
535
536         d = d * BLKIO_WEIGHT_DEFAULT;
537         do_div(d, cfqg->weight);
538         return d;
539 }
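/*
 * The scaling above turns a real service charge into vdisktime: the charge is
 * shifted up by CFQ_SERVICE_SHIFT for precision and scaled inversely with the
 * group's weight. Relative to a group at BLKIO_WEIGHT_DEFAULT, whose vdisktime
 * advances by exactly charge << CFQ_SERVICE_SHIFT, a group with twice the
 * default weight advances half as fast and so accumulates roughly twice the
 * service before its key catches up on the group service tree.
 */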
540
541 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
542 {
543         s64 delta = (s64)(vdisktime - min_vdisktime);
544         if (delta > 0)
545                 min_vdisktime = vdisktime;
546
547         return min_vdisktime;
548 }
549
550 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
551 {
552         s64 delta = (s64)(vdisktime - min_vdisktime);
553         if (delta < 0)
554                 min_vdisktime = vdisktime;
555
556         return min_vdisktime;
557 }
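/*
 * Note on the two helpers above: vdisktime is a monotonically increasing u64
 * that may eventually wrap, so they compare via a signed difference (the same
 * trick used by time_after()) rather than comparing the raw values directly.
 */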
558
559 static void update_min_vdisktime(struct cfq_rb_root *st)
560 {
561         u64 vdisktime = st->min_vdisktime;
562         struct cfq_group *cfqg;
563
564         if (st->left) {
565                 cfqg = rb_entry_cfqg(st->left);
566                 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
567         }
568
569         st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
570 }
571
572 /*
573  * Get the averaged number of queues of RT/BE priority.
574  * The average is updated with a formula that gives more weight to higher numbers,
575  * so that it follows sudden increases quickly and decreases slowly.
576  */
577
578 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
579                                         struct cfq_group *cfqg, bool rt)
580 {
581         unsigned min_q, max_q;
582         unsigned mult  = cfq_hist_divisor - 1;
583         unsigned round = cfq_hist_divisor / 2;
584         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
585
586         min_q = min(cfqg->busy_queues_avg[rt], busy);
587         max_q = max(cfqg->busy_queues_avg[rt], busy);
588         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
589                 cfq_hist_divisor;
590         return cfqg->busy_queues_avg[rt];
591 }
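/*
 * A worked example of the weighted average above, with cfq_hist_divisor = 4
 * (so mult = 3 and round = 2), i.e. new_avg = (3 * max_q + min_q + 2) / 4:
 *
 *   avg = 1, busy jumps to 5:  (3 * 5 + 1 + 2) / 4 = 4   (rises quickly)
 *   avg = 4, busy drops to 1:  (3 * 4 + 1 + 2) / 4 = 3   (decays slowly)
 *
 * so bursts of busy queues are tracked promptly and forgotten gradually.
 */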
592
593 static inline unsigned
594 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
595 {
596         struct cfq_rb_root *st = &cfqd->grp_service_tree;
597
598         return cfq_target_latency * cfqg->weight / st->total_weight;
599 }
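/*
 * A worked example: with cfq_target_latency = 300ms and two backlogged groups
 * of weights 500 and 1000 (total_weight = 1500), the groups get workload
 * slices of 300 * 500 / 1500 = 100ms and 300 * 1000 / 1500 = 200ms, i.e. the
 * target latency is split in proportion to group weight.
 */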
600
601 static inline void
602 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
603 {
604         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
605         if (cfqd->cfq_latency) {
606                 /*
607                  * interested queues (we consider only the ones with the same
608                  * priority class in the cfq group)
609                  */
610                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
611                                                 cfq_class_rt(cfqq));
612                 unsigned sync_slice = cfqd->cfq_slice[1];
613                 unsigned expect_latency = sync_slice * iq;
614                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
615
616                 if (expect_latency > group_slice) {
617                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
618                         /* scale low_slice according to IO priority
619                          * and sync vs async */
620                         unsigned low_slice =
621                                 min(slice, base_low_slice * slice / sync_slice);
622                         /* the adapted slice value is scaled to fit all iqs
623                          * into the target latency */
624                         slice = max(slice * group_slice / expect_latency,
625                                     low_slice);
626                 }
627         }
628         cfqq->slice_start = jiffies;
629         cfqq->slice_end = jiffies + slice;
630         cfqq->allocated_slice = slice;
631         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
632 }
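/*
 * A worked example of the low_latency scaling above, assuming HZ == 1000 and
 * the default tunables: with a 100ms sync slice and iq = 4 queues of the same
 * class in the group, expect_latency = 400ms. If the group's share of the
 * target latency (group_slice) is only 300ms, the slice is scaled by 300/400,
 * so a 100ms slice becomes 75ms - but never less than low_slice, which is
 * derived from 2 * cfq_slice_idle.
 */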
633
634 /*
635  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
636  * isn't valid until the first request from the dispatch is activated
637  * and the slice time set.
638  */
639 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
640 {
641         if (cfq_cfqq_slice_new(cfqq))
642                 return false;
643         if (time_before(jiffies, cfqq->slice_end))
644                 return false;
645
646         return true;
647 }
648
649 /*
650  * Lifted from AS - choose which of rq1 and rq2 is best served now.
651  * We choose the request that is closest to the head right now. Distance
652  * behind the head is penalized and only allowed to a certain extent.
653  */
654 static struct request *
655 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
656 {
657         sector_t s1, s2, d1 = 0, d2 = 0;
658         unsigned long back_max;
659 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
660 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
661         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
662
663         if (rq1 == NULL || rq1 == rq2)
664                 return rq2;
665         if (rq2 == NULL)
666                 return rq1;
667
668         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
669                 return rq1;
670         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
671                 return rq2;
672         if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
673                 return rq1;
674         else if ((rq2->cmd_flags & REQ_META) &&
675                  !(rq1->cmd_flags & REQ_META))
676                 return rq2;
677
678         s1 = blk_rq_pos(rq1);
679         s2 = blk_rq_pos(rq2);
680
681         /*
682          * by definition, 1KiB is 2 sectors
683          */
684         back_max = cfqd->cfq_back_max * 2;
685
686         /*
687          * Strict one way elevator _except_ in the case where we allow
688          * short backward seeks which are biased as twice the cost of a
689          * similar forward seek.
690          */
691         if (s1 >= last)
692                 d1 = s1 - last;
693         else if (s1 + back_max >= last)
694                 d1 = (last - s1) * cfqd->cfq_back_penalty;
695         else
696                 wrap |= CFQ_RQ1_WRAP;
697
698         if (s2 >= last)
699                 d2 = s2 - last;
700         else if (s2 + back_max >= last)
701                 d2 = (last - s2) * cfqd->cfq_back_penalty;
702         else
703                 wrap |= CFQ_RQ2_WRAP;
704
705         /* Found required data */
706
707         /*
708          * By doing switch() on the bit mask "wrap" we avoid having to
709          * check two variables for all permutations: --> faster!
710          */
711         switch (wrap) {
712         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
713                 if (d1 < d2)
714                         return rq1;
715                 else if (d2 < d1)
716                         return rq2;
717                 else {
718                         if (s1 >= s2)
719                                 return rq1;
720                         else
721                                 return rq2;
722                 }
723
724         case CFQ_RQ2_WRAP:
725                 return rq1;
726         case CFQ_RQ1_WRAP:
727                 return rq2;
728         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
729         default:
730                 /*
731                  * Since both rqs are wrapped,
732                  * start with the one that's further behind head
733                  * (--> only *one* back seek required),
734                  * since back seek takes more time than forward.
735                  */
736                 if (s1 <= s2)
737                         return rq1;
738                 else
739                         return rq2;
740         }
741 }
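/*
 * A small numeric illustration of the distance logic above, assuming the
 * default cfq_back_max (16384 KiB) and cfq_back_penalty (2): with the head at
 * sector 10000, rq1 at 10100 and rq2 at 9900, d1 = 100 while the backward
 * candidate is charged d2 = (10000 - 9900) * 2 = 200, so rq1 wins even though
 * both requests are equally far from the head.
 */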
742
743 /*
744  * The below is the leftmost-node cache rbtree addon
745  */
746 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
747 {
748         /* Service tree is empty */
749         if (!root->count)
750                 return NULL;
751
752         if (!root->left)
753                 root->left = rb_first(&root->rb);
754
755         if (root->left)
756                 return rb_entry(root->left, struct cfq_queue, rb_node);
757
758         return NULL;
759 }
760
761 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
762 {
763         if (!root->left)
764                 root->left = rb_first(&root->rb);
765
766         if (root->left)
767                 return rb_entry_cfqg(root->left);
768
769         return NULL;
770 }
771
772 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
773 {
774         rb_erase(n, root);
775         RB_CLEAR_NODE(n);
776 }
777
778 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
779 {
780         if (root->left == n)
781                 root->left = NULL;
782         rb_erase_init(n, &root->rb);
783         --root->count;
784 }
785
786 /*
787  * would be nice to take fifo expire time into account as well
788  */
789 static struct request *
790 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
791                   struct request *last)
792 {
793         struct rb_node *rbnext = rb_next(&last->rb_node);
794         struct rb_node *rbprev = rb_prev(&last->rb_node);
795         struct request *next = NULL, *prev = NULL;
796
797         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
798
799         if (rbprev)
800                 prev = rb_entry_rq(rbprev);
801
802         if (rbnext)
803                 next = rb_entry_rq(rbnext);
804         else {
805                 rbnext = rb_first(&cfqq->sort_list);
806                 if (rbnext && rbnext != &last->rb_node)
807                         next = rb_entry_rq(rbnext);
808         }
809
810         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
811 }
812
813 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
814                                       struct cfq_queue *cfqq)
815 {
816         /*
817          * just an approximation, should be ok.
818          */
819         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
820                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
821 }
822
823 static inline s64
824 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
825 {
826         return cfqg->vdisktime - st->min_vdisktime;
827 }
828
829 static void
830 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
831 {
832         struct rb_node **node = &st->rb.rb_node;
833         struct rb_node *parent = NULL;
834         struct cfq_group *__cfqg;
835         s64 key = cfqg_key(st, cfqg);
836         int left = 1;
837
838         while (*node != NULL) {
839                 parent = *node;
840                 __cfqg = rb_entry_cfqg(parent);
841
842                 if (key < cfqg_key(st, __cfqg))
843                         node = &parent->rb_left;
844                 else {
845                         node = &parent->rb_right;
846                         left = 0;
847                 }
848         }
849
850         if (left)
851                 st->left = &cfqg->rb_node;
852
853         rb_link_node(&cfqg->rb_node, parent, node);
854         rb_insert_color(&cfqg->rb_node, &st->rb);
855 }
856
857 static void
858 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
859 {
860         struct cfq_rb_root *st = &cfqd->grp_service_tree;
861         struct cfq_group *__cfqg;
862         struct rb_node *n;
863
864         cfqg->nr_cfqq++;
865         if (!RB_EMPTY_NODE(&cfqg->rb_node))
866                 return;
867
868         /*
869          * Currently put the group at the end. Later, implement something
870          * so that groups get a lower vtime based on their weights, so that
871          * a group does not lose everything if it was not continuously backlogged.
872          */
873         n = rb_last(&st->rb);
874         if (n) {
875                 __cfqg = rb_entry_cfqg(n);
876                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
877         } else
878                 cfqg->vdisktime = st->min_vdisktime;
879
880         __cfq_group_service_tree_add(st, cfqg);
881         st->total_weight += cfqg->weight;
882 }
883
884 static void
885 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
886 {
887         struct cfq_rb_root *st = &cfqd->grp_service_tree;
888
889         BUG_ON(cfqg->nr_cfqq < 1);
890         cfqg->nr_cfqq--;
891
892         /* If there are other cfq queues under this group, don't delete it */
893         if (cfqg->nr_cfqq)
894                 return;
895
896         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
897         st->total_weight -= cfqg->weight;
898         if (!RB_EMPTY_NODE(&cfqg->rb_node))
899                 cfq_rb_erase(&cfqg->rb_node, st);
900         cfqg->saved_workload_slice = 0;
901         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
902 }
903
904 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
905 {
906         unsigned int slice_used;
907
908         /*
909          * Queue got expired before even a single request completed or
910          * got expired immediately after first request completion.
911          */
912         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
913                 /*
914                  * Also charge the seek time incurred to the group, otherwise
915                  * if there are multiple queues in the group, each can dispatch
916                  * a single request on seeky media and cause lots of seek time
917                  * and the group will never know it.
918                  */
919                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
920                                         1);
921         } else {
922                 slice_used = jiffies - cfqq->slice_start;
923                 if (slice_used > cfqq->allocated_slice)
924                         slice_used = cfqq->allocated_slice;
925         }
926
927         return slice_used;
928 }
929
930 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
931                                 struct cfq_queue *cfqq)
932 {
933         struct cfq_rb_root *st = &cfqd->grp_service_tree;
934         unsigned int used_sl, charge;
935         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
936                         - cfqg->service_tree_idle.count;
937
938         BUG_ON(nr_sync < 0);
939         used_sl = charge = cfq_cfqq_slice_usage(cfqq);
940
941         if (iops_mode(cfqd))
942                 charge = cfqq->slice_dispatch;
943         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
944                 charge = cfqq->allocated_slice;
945
946         /* Can't update vdisktime while group is on service tree */
947         cfq_rb_erase(&cfqg->rb_node, st);
948         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
949         __cfq_group_service_tree_add(st, cfqg);
950
951         /* This group is being expired. Save the context */
952         if (time_after(cfqd->workload_expires, jiffies)) {
953                 cfqg->saved_workload_slice = cfqd->workload_expires
954                                                 - jiffies;
955                 cfqg->saved_workload = cfqd->serving_type;
956                 cfqg->saved_serving_prio = cfqd->serving_prio;
957         } else
958                 cfqg->saved_workload_slice = 0;
959
960         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
961                                         st->min_vdisktime);
962         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
963                         " sect=%u", used_sl, cfqq->slice_dispatch, charge,
964                         iops_mode(cfqd), cfqq->nr_sectors);
965         cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
966         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
967 }
968
969 #ifdef CONFIG_CFQ_GROUP_IOSCHED
970 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
971 {
972         if (blkg)
973                 return container_of(blkg, struct cfq_group, blkg);
974         return NULL;
975 }
976
977 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
978                                         unsigned int weight)
979 {
980         cfqg_of_blkg(blkg)->weight = weight;
981 }
982
983 static struct cfq_group *
984 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
985 {
986         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
987         struct cfq_group *cfqg = NULL;
988         void *key = cfqd;
989         int i, j;
990         struct cfq_rb_root *st;
991         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
992         unsigned int major, minor;
993
994         cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
995         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
996                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
997                 cfqg->blkg.dev = MKDEV(major, minor);
998                 goto done;
999         }
1000         if (cfqg || !create)
1001                 goto done;
1002
1003         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1004         if (!cfqg)
1005                 goto done;
1006
1007         for_each_cfqg_st(cfqg, i, j, st)
1008                 *st = CFQ_RB_ROOT;
1009         RB_CLEAR_NODE(&cfqg->rb_node);
1010
1011         /*
1012          * Take the initial reference that will be released on destroy.
1013          * This can be thought of as a joint reference by cgroup and
1014          * elevator which will be dropped by either elevator exit
1015          * or cgroup deletion path depending on who is exiting first.
1016          */
1017         atomic_set(&cfqg->ref, 1);
1018
1019         /*
1020          * Add group onto cgroup list. It might happen that bdi->dev is
1021          * not initialized yet. Initialize this new group without major
1022          * and minor info and this info will be filled in once a new thread
1023          * comes for IO. See code above.
1024          */
1025         if (bdi->dev) {
1026                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1027                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1028                                         MKDEV(major, minor));
1029         } else
1030                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1031                                         0);
1032
1033         cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1034
1035         /* Add group on cfqd list */
1036         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1037
1038 done:
1039         return cfqg;
1040 }
1041
1042 /*
1043  * Search for the cfq group the current task belongs to. If create = 1, then also
1044  * create the cfq group if it does not exist. request_queue lock must be held.
1045  */
1046 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1047 {
1048         struct cgroup *cgroup;
1049         struct cfq_group *cfqg = NULL;
1050
1051         rcu_read_lock();
1052         cgroup = task_cgroup(current, blkio_subsys_id);
1053         cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1054         if (!cfqg && create)
1055                 cfqg = &cfqd->root_group;
1056         rcu_read_unlock();
1057         return cfqg;
1058 }
1059
1060 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1061 {
1062         atomic_inc(&cfqg->ref);
1063         return cfqg;
1064 }
1065
1066 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1067 {
1068         /* Currently, all async queues are mapped to root group */
1069         if (!cfq_cfqq_sync(cfqq))
1070                 cfqg = &cfqq->cfqd->root_group;
1071
1072         cfqq->cfqg = cfqg;
1073         /* cfqq reference on cfqg */
1074         atomic_inc(&cfqq->cfqg->ref);
1075 }
1076
1077 static void cfq_put_cfqg(struct cfq_group *cfqg)
1078 {
1079         struct cfq_rb_root *st;
1080         int i, j;
1081
1082         BUG_ON(atomic_read(&cfqg->ref) <= 0);
1083         if (!atomic_dec_and_test(&cfqg->ref))
1084                 return;
1085         for_each_cfqg_st(cfqg, i, j, st)
1086                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1087         kfree(cfqg);
1088 }
1089
1090 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1091 {
1092         /* Something is wrong if we are trying to remove the same group twice */
1093         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1094
1095         hlist_del_init(&cfqg->cfqd_node);
1096
1097         /*
1098          * Put the reference taken at the time of creation so that when all
1099          * queues are gone, group can be destroyed.
1100          */
1101         cfq_put_cfqg(cfqg);
1102 }
1103
1104 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1105 {
1106         struct hlist_node *pos, *n;
1107         struct cfq_group *cfqg;
1108
1109         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1110                 /*
1111                  * If cgroup removal path got to blk_group first and removed
1112                  * it from cgroup list, then it will take care of destroying
1113                  * cfqg also.
1114                  */
1115                 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1116                         cfq_destroy_cfqg(cfqd, cfqg);
1117         }
1118 }
1119
1120 /*
1121  * Blk cgroup controller notification saying that blkio_group object is being
1122  * delinked as associated cgroup object is going away. That also means that
1123  * no new IO will come in this group. So get rid of this group as soon as
1124  * any pending IO in the group is finished.
1125  *
1126  * This function is called under rcu_read_lock(). key is the rcu protected
1127  * pointer. That means "key" is a valid cfq_data pointer as long as we hold the
1128  * rcu read lock.
1129  *
1130  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1131  * it should not be NULL as even if the elevator was exiting, the cgroup deletion
1132  * path got to it first.
1133  */
1134 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1135 {
1136         unsigned long  flags;
1137         struct cfq_data *cfqd = key;
1138
1139         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1140         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1141         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1142 }
1143
1144 #else /* GROUP_IOSCHED */
1145 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1146 {
1147         return &cfqd->root_group;
1148 }
1149
1150 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1151 {
1152         return cfqg;
1153 }
1154
1155 static inline void
1156 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1157         cfqq->cfqg = cfqg;
1158 }
1159
1160 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1161 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1162
1163 #endif /* GROUP_IOSCHED */
1164
1165 /*
1166  * The cfqd->service_trees holds all pending cfq_queue's that have
1167  * requests waiting to be processed. It is sorted in the order that
1168  * we will service the queues.
1169  */
1170 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1171                                  bool add_front)
1172 {
1173         struct rb_node **p, *parent;
1174         struct cfq_queue *__cfqq;
1175         unsigned long rb_key;
1176         struct cfq_rb_root *service_tree;
1177         int left;
1178         int new_cfqq = 1;
1179         int group_changed = 0;
1180
1181 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1182         if (!cfqd->cfq_group_isolation
1183             && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1184             && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1185                 /* Move this cfq to root group */
1186                 cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1187                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1188                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1189                 cfqq->orig_cfqg = cfqq->cfqg;
1190                 cfqq->cfqg = &cfqd->root_group;
1191                 atomic_inc(&cfqd->root_group.ref);
1192                 group_changed = 1;
1193         } else if (!cfqd->cfq_group_isolation
1194                    && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1195                 /* cfqq is sequential now, needs to go to its original group */
1196                 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1197                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1198                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1199                 cfq_put_cfqg(cfqq->cfqg);
1200                 cfqq->cfqg = cfqq->orig_cfqg;
1201                 cfqq->orig_cfqg = NULL;
1202                 group_changed = 1;
1203                 cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1204         }
1205 #endif
1206
1207         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1208                                                 cfqq_type(cfqq));
1209         if (cfq_class_idle(cfqq)) {
1210                 rb_key = CFQ_IDLE_DELAY;
1211                 parent = rb_last(&service_tree->rb);
1212                 if (parent && parent != &cfqq->rb_node) {
1213                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1214                         rb_key += __cfqq->rb_key;
1215                 } else
1216                         rb_key += jiffies;
1217         } else if (!add_front) {
1218                 /*
1219                  * Get our rb key offset. Subtract any residual slice
1220                  * value carried from last service. A negative resid
1221                  * count indicates slice overrun, and this should position
1222                  * the next service time further away in the tree.
1223                  */
1224                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1225                 rb_key -= cfqq->slice_resid;
1226                 cfqq->slice_resid = 0;
1227         } else {
1228                 rb_key = -HZ;
1229                 __cfqq = cfq_rb_first(service_tree);
1230                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1231         }
1232
1233         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1234                 new_cfqq = 0;
1235                 /*
1236                  * same position, nothing more to do
1237                  */
1238                 if (rb_key == cfqq->rb_key &&
1239                     cfqq->service_tree == service_tree)
1240                         return;
1241
1242                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1243                 cfqq->service_tree = NULL;
1244         }
1245
1246         left = 1;
1247         parent = NULL;
1248         cfqq->service_tree = service_tree;
1249         p = &service_tree->rb.rb_node;
1250         while (*p) {
1251                 struct rb_node **n;
1252
1253                 parent = *p;
1254                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1255
1256                 /*
1257                  * sort by key, that represents service time.
1258                  */
1259                 if (time_before(rb_key, __cfqq->rb_key))
1260                         n = &(*p)->rb_left;
1261                 else {
1262                         n = &(*p)->rb_right;
1263                         left = 0;
1264                 }
1265
1266                 p = n;
1267         }
1268
1269         if (left)
1270                 service_tree->left = &cfqq->rb_node;
1271
1272         cfqq->rb_key = rb_key;
1273         rb_link_node(&cfqq->rb_node, parent, p);
1274         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1275         service_tree->count++;
1276         if ((add_front || !new_cfqq) && !group_changed)
1277                 return;
1278         cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1279 }
1280
1281 static struct cfq_queue *
1282 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1283                      sector_t sector, struct rb_node **ret_parent,
1284                      struct rb_node ***rb_link)
1285 {
1286         struct rb_node **p, *parent;
1287         struct cfq_queue *cfqq = NULL;
1288
1289         parent = NULL;
1290         p = &root->rb_node;
1291         while (*p) {
1292                 struct rb_node **n;
1293
1294                 parent = *p;
1295                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1296
1297                 /*
1298                  * Sort strictly based on sector.  Smallest to the left,
1299                  * largest to the right.
1300                  */
1301                 if (sector > blk_rq_pos(cfqq->next_rq))
1302                         n = &(*p)->rb_right;
1303                 else if (sector < blk_rq_pos(cfqq->next_rq))
1304                         n = &(*p)->rb_left;
1305                 else
1306                         break;
1307                 p = n;
1308                 cfqq = NULL;
1309         }
1310
1311         *ret_parent = parent;
1312         if (rb_link)
1313                 *rb_link = p;
1314         return cfqq;
1315 }
1316
1317 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1318 {
1319         struct rb_node **p, *parent;
1320         struct cfq_queue *__cfqq;
1321
1322         if (cfqq->p_root) {
1323                 rb_erase(&cfqq->p_node, cfqq->p_root);
1324                 cfqq->p_root = NULL;
1325         }
1326
1327         if (cfq_class_idle(cfqq))
1328                 return;
1329         if (!cfqq->next_rq)
1330                 return;
1331
1332         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1333         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1334                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1335         if (!__cfqq) {
1336                 rb_link_node(&cfqq->p_node, parent, p);
1337                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1338         } else
1339                 cfqq->p_root = NULL;
1340 }
1341
1342 /*
1343  * Update cfqq's position in the service tree.
1344  */
1345 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1346 {
1347         /*
1348          * Resorting requires the cfqq to be on the RR list already.
1349          */
1350         if (cfq_cfqq_on_rr(cfqq)) {
1351                 cfq_service_tree_add(cfqd, cfqq, 0);
1352                 cfq_prio_tree_add(cfqd, cfqq);
1353         }
1354 }
1355
1356 /*
1357  * add to busy list of queues for service, trying to be fair in ordering
1358  * the pending list according to last request service
1359  */
1360 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1361 {
1362         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1363         BUG_ON(cfq_cfqq_on_rr(cfqq));
1364         cfq_mark_cfqq_on_rr(cfqq);
1365         cfqd->busy_queues++;
1366
1367         cfq_resort_rr_list(cfqd, cfqq);
1368 }
1369
1370 /*
1371  * Called when the cfqq no longer has requests pending, remove it from
1372  * the service tree.
1373  */
1374 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1375 {
1376         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1377         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1378         cfq_clear_cfqq_on_rr(cfqq);
1379
1380         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1381                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1382                 cfqq->service_tree = NULL;
1383         }
1384         if (cfqq->p_root) {
1385                 rb_erase(&cfqq->p_node, cfqq->p_root);
1386                 cfqq->p_root = NULL;
1387         }
1388
1389         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1390         BUG_ON(!cfqd->busy_queues);
1391         cfqd->busy_queues--;
1392 }
1393
1394 /*
1395  * rb tree support functions
1396  */
1397 static void cfq_del_rq_rb(struct request *rq)
1398 {
1399         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1400         const int sync = rq_is_sync(rq);
1401
1402         BUG_ON(!cfqq->queued[sync]);
1403         cfqq->queued[sync]--;
1404
1405         elv_rb_del(&cfqq->sort_list, rq);
1406
1407         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1408                 /*
1409                  * Queue will be deleted from service tree when we actually
1410                  * expire it later. Right now just remove it from prio tree
1411                  * as it is empty.
1412                  */
1413                 if (cfqq->p_root) {
1414                         rb_erase(&cfqq->p_node, cfqq->p_root);
1415                         cfqq->p_root = NULL;
1416                 }
1417         }
1418 }
1419
1420 static void cfq_add_rq_rb(struct request *rq)
1421 {
1422         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1423         struct cfq_data *cfqd = cfqq->cfqd;
1424         struct request *__alias, *prev;
1425
1426         cfqq->queued[rq_is_sync(rq)]++;
1427
1428         /*
1429          * looks a little odd, but the first insert might return an alias.
1430          * if that happens, put the alias on the dispatch list
1431          */
1432         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1433                 cfq_dispatch_insert(cfqd->queue, __alias);
1434
1435         if (!cfq_cfqq_on_rr(cfqq))
1436                 cfq_add_cfqq_rr(cfqd, cfqq);
1437
1438         /*
1439          * check if this request is a better next-serve candidate
1440          */
1441         prev = cfqq->next_rq;
1442         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1443
1444         /*
1445          * adjust priority tree position, if ->next_rq changes
1446          */
1447         if (prev != cfqq->next_rq)
1448                 cfq_prio_tree_add(cfqd, cfqq);
1449
1450         BUG_ON(!cfqq->next_rq);
1451 }
1452
1453 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1454 {
1455         elv_rb_del(&cfqq->sort_list, rq);
1456         cfqq->queued[rq_is_sync(rq)]--;
1457         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1458                                         rq_data_dir(rq), rq_is_sync(rq));
1459         cfq_add_rq_rb(rq);
1460         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1461                         &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1462                         rq_is_sync(rq));
1463 }
1464
1465 static struct request *
1466 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1467 {
1468         struct task_struct *tsk = current;
1469         struct cfq_io_context *cic;
1470         struct cfq_queue *cfqq;
1471
1472         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1473         if (!cic)
1474                 return NULL;
1475
1476         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1477         if (cfqq) {
1478                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1479
1480                 return elv_rb_find(&cfqq->sort_list, sector);
1481         }
1482
1483         return NULL;
1484 }
1485
1486 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1487 {
1488         struct cfq_data *cfqd = q->elevator->elevator_data;
1489
1490         cfqd->rq_in_driver++;
1491         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1492                                                 cfqd->rq_in_driver);
1493
1494         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1495 }
1496
1497 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1498 {
1499         struct cfq_data *cfqd = q->elevator->elevator_data;
1500
1501         WARN_ON(!cfqd->rq_in_driver);
1502         cfqd->rq_in_driver--;
1503         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1504                                                 cfqd->rq_in_driver);
1505 }
1506
1507 static void cfq_remove_request(struct request *rq)
1508 {
1509         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1510
1511         if (cfqq->next_rq == rq)
1512                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1513
1514         list_del_init(&rq->queuelist);
1515         cfq_del_rq_rb(rq);
1516
1517         cfqq->cfqd->rq_queued--;
1518         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1519                                         rq_data_dir(rq), rq_is_sync(rq));
1520         if (rq->cmd_flags & REQ_META) {
1521                 WARN_ON(!cfqq->meta_pending);
1522                 cfqq->meta_pending--;
1523         }
1524 }
1525
1526 static int cfq_merge(struct request_queue *q, struct request **req,
1527                      struct bio *bio)
1528 {
1529         struct cfq_data *cfqd = q->elevator->elevator_data;
1530         struct request *__rq;
1531
1532         __rq = cfq_find_rq_fmerge(cfqd, bio);
1533         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1534                 *req = __rq;
1535                 return ELEVATOR_FRONT_MERGE;
1536         }
1537
1538         return ELEVATOR_NO_MERGE;
1539 }
1540
1541 static void cfq_merged_request(struct request_queue *q, struct request *req,
1542                                int type)
1543 {
1544         if (type == ELEVATOR_FRONT_MERGE) {
1545                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1546
1547                 cfq_reposition_rq_rb(cfqq, req);
1548         }
1549 }
1550
1551 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1552                                 struct bio *bio)
1553 {
1554         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1555                                         bio_data_dir(bio), cfq_bio_sync(bio));
1556 }
1557
1558 static void
1559 cfq_merged_requests(struct request_queue *q, struct request *rq,
1560                     struct request *next)
1561 {
1562         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1563         /*
1564          * reposition in fifo if next is older than rq
1565          */
1566         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1567             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1568                 list_move(&rq->queuelist, &next->queuelist);
1569                 rq_set_fifo_time(rq, rq_fifo_time(next));
1570         }
1571
1572         if (cfqq->next_rq == next)
1573                 cfqq->next_rq = rq;
1574         cfq_remove_request(next);
1575         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1576                                         rq_data_dir(next), rq_is_sync(next));
1577 }
1578
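/*
 * Decide whether @bio may be merged into @rq at all: disallow merging a
 * sync bio into an async request, and only allow the merge if the bio
 * would be queued on the same cfqq as @rq.
 */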
1579 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1580                            struct bio *bio)
1581 {
1582         struct cfq_data *cfqd = q->elevator->elevator_data;
1583         struct cfq_io_context *cic;
1584         struct cfq_queue *cfqq;
1585
1586         /*
1587          * Disallow merge of a sync bio into an async request.
1588          */
1589         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1590                 return false;
1591
1592         /*
1593          * Lookup the cfqq that this bio will be queued with. Allow
1594          * merge only if rq is queued there.
1595          */
1596         cic = cfq_cic_lookup(cfqd, current->io_context);
1597         if (!cic)
1598                 return false;
1599
1600         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1601         return cfqq == RQ_CFQQ(rq);
1602 }
1603
1604 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1605 {
1606         del_timer(&cfqd->idle_slice_timer);
1607         cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1608 }
1609
1610 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1611                                    struct cfq_queue *cfqq)
1612 {
1613         if (cfqq) {
1614                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1615                                 cfqd->serving_prio, cfqd->serving_type);
1616                 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1617                 cfqq->slice_start = 0;
1618                 cfqq->dispatch_start = jiffies;
1619                 cfqq->allocated_slice = 0;
1620                 cfqq->slice_end = 0;
1621                 cfqq->slice_dispatch = 0;
1622                 cfqq->nr_sectors = 0;
1623
1624                 cfq_clear_cfqq_wait_request(cfqq);
1625                 cfq_clear_cfqq_must_dispatch(cfqq);
1626                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1627                 cfq_clear_cfqq_fifo_expire(cfqq);
1628                 cfq_mark_cfqq_slice_new(cfqq);
1629
1630                 cfq_del_timer(cfqd, cfqq);
1631         }
1632
1633         cfqd->active_queue = cfqq;
1634 }
1635
1636 /*
1637  * current cfqq expired its slice (or was too idle), select new one
1638  */
1639 static void
1640 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1641                     bool timed_out)
1642 {
1643         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1644
1645         if (cfq_cfqq_wait_request(cfqq))
1646                 cfq_del_timer(cfqd, cfqq);
1647
1648         cfq_clear_cfqq_wait_request(cfqq);
1649         cfq_clear_cfqq_wait_busy(cfqq);
1650
1651         /*
1652          * If this cfqq is shared between multiple processes, check to
1653          * make sure that those processes are still issuing I/Os within
1654          * the mean seek distance.  If not, it may be time to break the
1655          * queues apart again.
1656          */
1657         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1658                 cfq_mark_cfqq_split_coop(cfqq);
1659
1660         /*
1661          * store what was left of this slice, if the queue idled/timed out
1662          */
1663         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1664                 cfqq->slice_resid = cfqq->slice_end - jiffies;
1665                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1666         }
1667
1668         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1669
1670         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1671                 cfq_del_cfqq_rr(cfqd, cfqq);
1672
1673         cfq_resort_rr_list(cfqd, cfqq);
1674
1675         if (cfqq == cfqd->active_queue)
1676                 cfqd->active_queue = NULL;
1677
1678         if (cfqd->active_cic) {
1679                 put_io_context(cfqd->active_cic->ioc);
1680                 cfqd->active_cic = NULL;
1681         }
1682 }
1683
1684 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1685 {
1686         struct cfq_queue *cfqq = cfqd->active_queue;
1687
1688         if (cfqq)
1689                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1690 }
1691
1692 /*
1693  * Get next queue for service. Unless we have a queue preemption,
1694  * we'll simply select the first cfqq in the service tree.
1695  */
1696 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1697 {
1698         struct cfq_rb_root *service_tree =
1699                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1700                                         cfqd->serving_type);
1701
1702         if (!cfqd->rq_queued)
1703                 return NULL;
1704
1705         /* There is nothing to dispatch */
1706         if (!service_tree)
1707                 return NULL;
1708         if (RB_EMPTY_ROOT(&service_tree->rb))
1709                 return NULL;
1710         return cfq_rb_first(service_tree);
1711 }
1712
1713 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1714 {
1715         struct cfq_group *cfqg;
1716         struct cfq_queue *cfqq;
1717         int i, j;
1718         struct cfq_rb_root *st;
1719
1720         if (!cfqd->rq_queued)
1721                 return NULL;
1722
1723         cfqg = cfq_get_next_cfqg(cfqd);
1724         if (!cfqg)
1725                 return NULL;
1726
1727         for_each_cfqg_st(cfqg, i, j, st)
1728                 if ((cfqq = cfq_rb_first(st)) != NULL)
1729                         return cfqq;
1730         return NULL;
1731 }
1732
1733 /*
1734  * Get and set a new active queue for service.
1735  */
1736 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1737                                               struct cfq_queue *cfqq)
1738 {
1739         if (!cfqq)
1740                 cfqq = cfq_get_next_queue(cfqd);
1741
1742         __cfq_set_active_queue(cfqd, cfqq);
1743         return cfqq;
1744 }
1745
1746 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1747                                           struct request *rq)
1748 {
1749         if (blk_rq_pos(rq) >= cfqd->last_position)
1750                 return blk_rq_pos(rq) - cfqd->last_position;
1751         else
1752                 return cfqd->last_position - blk_rq_pos(rq);
1753 }
1754
1755 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1756                                struct request *rq)
1757 {
1758         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1759 }
1760
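/*
 * Search the priority tree for a queue whose next request is close to
 * cfqd->last_position: first an exact match, then the closest queues on
 * either side, as long as they are within CFQQ_CLOSE_THR.
 */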
1761 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1762                                     struct cfq_queue *cur_cfqq)
1763 {
1764         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1765         struct rb_node *parent, *node;
1766         struct cfq_queue *__cfqq;
1767         sector_t sector = cfqd->last_position;
1768
1769         if (RB_EMPTY_ROOT(root))
1770                 return NULL;
1771
1772         /*
1773          * First, if we find a request starting at the end of the last
1774          * request, choose it.
1775          */
1776         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1777         if (__cfqq)
1778                 return __cfqq;
1779
1780         /*
1781          * If the exact sector wasn't found, the parent of the NULL leaf
1782          * will contain the closest sector.
1783          */
1784         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1785         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1786                 return __cfqq;
1787
1788         if (blk_rq_pos(__cfqq->next_rq) < sector)
1789                 node = rb_next(&__cfqq->p_node);
1790         else
1791                 node = rb_prev(&__cfqq->p_node);
1792         if (!node)
1793                 return NULL;
1794
1795         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1796         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1797                 return __cfqq;
1798
1799         return NULL;
1800 }
1801
1802 /*
1803  * cfqd - obvious
1804  * cur_cfqq - passed in so that we don't decide that the current queue is
1805  *            closely cooperating with itself.
1806  *
1807  * So, basically we're assuming that cur_cfqq has dispatched at least
1808  * one request, and that cfqd->last_position reflects a position on the disk
1809  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1810  * assumption.
1811  */
1812 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1813                                               struct cfq_queue *cur_cfqq)
1814 {
1815         struct cfq_queue *cfqq;
1816
1817         if (cfq_class_idle(cur_cfqq))
1818                 return NULL;
1819         if (!cfq_cfqq_sync(cur_cfqq))
1820                 return NULL;
1821         if (CFQQ_SEEKY(cur_cfqq))
1822                 return NULL;
1823
1824         /*
1825          * Don't search priority tree if it's the only queue in the group.
1826          */
1827         if (cur_cfqq->cfqg->nr_cfqq == 1)
1828                 return NULL;
1829
1830         /*
1831          * We should notice if some of the queues are cooperating, e.g.
1832          * working closely on the same area of the disk. In that case,
1833          * we can group them together and not waste time idling.
1834          */
1835         cfqq = cfqq_close(cfqd, cur_cfqq);
1836         if (!cfqq)
1837                 return NULL;
1838
1839         /* If new queue belongs to different cfq_group, don't choose it */
1840         if (cur_cfqq->cfqg != cfqq->cfqg)
1841                 return NULL;
1842
1843         /*
1844          * It only makes sense to merge sync queues.
1845          */
1846         if (!cfq_cfqq_sync(cfqq))
1847                 return NULL;
1848         if (CFQQ_SEEKY(cfqq))
1849                 return NULL;
1850
1851         /*
1852          * Do not merge queues of different priority classes
1853          */
1854         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1855                 return NULL;
1856
1857         return cfqq;
1858 }
1859
1860 /*
1861  * Determine whether we should enforce idle window for this queue.
1862  */
1863
1864 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1865 {
1866         enum wl_prio_t prio = cfqq_prio(cfqq);
1867         struct cfq_rb_root *service_tree = cfqq->service_tree;
1868
1869         BUG_ON(!service_tree);
1870         BUG_ON(!service_tree->count);
1871
1872         if (!cfqd->cfq_slice_idle)
1873                 return false;
1874
1875         /* We never do for idle class queues. */
1876         if (prio == IDLE_WORKLOAD)
1877                 return false;
1878
1879         /* We do for queues that were marked with idle window flag. */
1880         if (cfq_cfqq_idle_window(cfqq) &&
1881            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1882                 return true;
1883
1884         /*
1885          * Otherwise, we do only if they are the last ones
1886          * in their service tree.
1887          */
1888         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1889                 return true;
1890         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1891                         service_tree->count);
1892         return false;
1893 }
1894
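/*
 * Arm the idle timer for the active queue (or for group idle). Idling is
 * skipped on non-rotational devices with queueing, when the task has
 * exited, or when the mean think time exceeds the remaining slice.
 */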
1895 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1896 {
1897         struct cfq_queue *cfqq = cfqd->active_queue;
1898         struct cfq_io_context *cic;
1899         unsigned long sl, group_idle = 0;
1900
1901         /*
1902          * An SSD has no seek penalty, so disable idling. But only do so
1903          * for devices that support queueing; otherwise we still have a problem
1904          * with sync vs async workloads.
1905          */
1906         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1907                 return;
1908
1909         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1910         WARN_ON(cfq_cfqq_slice_new(cfqq));
1911
1912         /*
1913          * idle is disabled, either manually or by past process history
1914          */
1915         if (!cfq_should_idle(cfqd, cfqq)) {
1916                 /* no queue idling. Check for group idling */
1917                 if (cfqd->cfq_group_idle)
1918                         group_idle = cfqd->cfq_group_idle;
1919                 else
1920                         return;
1921         }
1922
1923         /*
1924          * still active requests from this queue, don't idle
1925          */
1926         if (cfqq->dispatched)
1927                 return;
1928
1929         /*
1930          * task has exited, don't wait
1931          */
1932         cic = cfqd->active_cic;
1933         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1934                 return;
1935
1936         /*
1937          * If our average think time is larger than the remaining time
1938          * slice, then don't idle. This avoids overrunning the allotted
1939          * time slice.
1940          */
1941         if (sample_valid(cic->ttime_samples) &&
1942             (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1943                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1944                                 cic->ttime_mean);
1945                 return;
1946         }
1947
1948         /* There are other queues in the group, don't do group idle */
1949         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1950                 return;
1951
1952         cfq_mark_cfqq_wait_request(cfqq);
1953
1954         if (group_idle)
1955                 sl = cfqd->cfq_group_idle;
1956         else
1957                 sl = cfqd->cfq_slice_idle;
1958
1959         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1960         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1961         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1962                         group_idle ? 1 : 0);
1963 }
1964
1965 /*
1966  * Move request from internal lists to the request queue dispatch list.
1967  */
1968 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1969 {
1970         struct cfq_data *cfqd = q->elevator->elevator_data;
1971         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1972
1973         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1974
1975         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1976         cfq_remove_request(rq);
1977         cfqq->dispatched++;
1978         (RQ_CFQG(rq))->dispatched++;
1979         elv_dispatch_sort(q, rq);
1980
1981         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1982         cfqq->nr_sectors += blk_rq_sectors(rq);
1983         cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1984                                         rq_data_dir(rq), rq_is_sync(rq));
1985 }
1986
1987 /*
1988  * return expired entry, or NULL to just start from scratch in rbtree
1989  */
1990 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1991 {
1992         struct request *rq = NULL;
1993
1994         if (cfq_cfqq_fifo_expire(cfqq))
1995                 return NULL;
1996
1997         cfq_mark_cfqq_fifo_expire(cfqq);
1998
1999         if (list_empty(&cfqq->fifo))
2000                 return NULL;
2001
2002         rq = rq_entry_fifo(cfqq->fifo.next);
2003         if (time_before(jiffies, rq_fifo_time(rq)))
2004                 rq = NULL;
2005
2006         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2007         return rq;
2008 }
2009
2010 static inline int
2011 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2012 {
2013         const int base_rq = cfqd->cfq_slice_async_rq;
2014
2015         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2016
2017         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
2018 }
2019
2020 /*
2021  * Must be called with the queue_lock held.
2022  */
2023 static int cfqq_process_refs(struct cfq_queue *cfqq)
2024 {
2025         int process_refs, io_refs;
2026
2027         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2028         process_refs = cfqq->ref - io_refs;
2029         BUG_ON(process_refs < 0);
2030         return process_refs;
2031 }
2032
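/*
 * Schedule a merge of two cooperating queues by linking ->new_cfqq, so
 * that the queue with fewer process references is merged into the one
 * with more. Interim queues in an existing chain are skipped.
 */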
2033 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2034 {
2035         int process_refs, new_process_refs;
2036         struct cfq_queue *__cfqq;
2037
2038         /*
2039          * If there are no process references on the new_cfqq, then it is
2040          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2041          * chain may have dropped their last reference (not just their
2042          * last process reference).
2043          */
2044         if (!cfqq_process_refs(new_cfqq))
2045                 return;
2046
2047         /* Avoid a circular list and skip interim queue merges */
2048         while ((__cfqq = new_cfqq->new_cfqq)) {
2049                 if (__cfqq == cfqq)
2050                         return;
2051                 new_cfqq = __cfqq;
2052         }
2053
2054         process_refs = cfqq_process_refs(cfqq);
2055         new_process_refs = cfqq_process_refs(new_cfqq);
2056         /*
2057          * If the process for the cfqq has gone away, there is no
2058          * sense in merging the queues.
2059          */
2060         if (process_refs == 0 || new_process_refs == 0)
2061                 return;
2062
2063         /*
2064          * Merge in the direction of the lesser amount of work.
2065          */
2066         if (new_process_refs >= process_refs) {
2067                 cfqq->new_cfqq = new_cfqq;
2068                 new_cfqq->ref += process_refs;
2069         } else {
2070                 new_cfqq->new_cfqq = cfqq;
2071                 cfqq->ref += new_process_refs;
2072         }
2073 }
2074
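/*
 * Pick the workload type to serve next within the given priority class:
 * the service tree whose first queue has the lowest rb_key.
 */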
2075 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2076                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2077 {
2078         struct cfq_queue *queue;
2079         int i;
2080         bool key_valid = false;
2081         unsigned long lowest_key = 0;
2082         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2083
2084         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2085                 /* select the one with lowest rb_key */
2086                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2087                 if (queue &&
2088                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2089                         lowest_key = queue->rb_key;
2090                         cur_best = i;
2091                         key_valid = true;
2092                 }
2093         }
2094
2095         return cur_best;
2096 }
2097
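/*
 * Choose the priority class (RT > BE > IDLE) and workload type to serve
 * next for this group, and compute how long that workload may run before
 * it expires.
 */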
2098 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2099 {
2100         unsigned slice;
2101         unsigned count;
2102         struct cfq_rb_root *st;
2103         unsigned group_slice;
2104         enum wl_prio_t original_prio = cfqd->serving_prio;
2105
2106         /* Choose next priority. RT > BE > IDLE */
2107         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2108                 cfqd->serving_prio = RT_WORKLOAD;
2109         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2110                 cfqd->serving_prio = BE_WORKLOAD;
2111         else {
2112                 cfqd->serving_prio = IDLE_WORKLOAD;
2113                 cfqd->workload_expires = jiffies + 1;
2114                 return;
2115         }
2116
2117         if (original_prio != cfqd->serving_prio)
2118                 goto new_workload;
2119
2120         /*
2121          * For RT and BE, we also have to choose the type
2122          * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
2123          * expiration time
2124          */
2125         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2126         count = st->count;
2127
2128         /*
2129          * check workload expiration, and that we still have other queues ready
2130          */
2131         if (count && !time_after(jiffies, cfqd->workload_expires))
2132                 return;
2133
2134 new_workload:
2135         /* otherwise select new workload type */
2136         cfqd->serving_type =
2137                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2138         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2139         count = st->count;
2140
2141         /*
2142          * the workload slice is computed as a fraction of target latency
2143          * proportional to the number of queues in that workload, over
2144          * all the queues in the same priority class
2145          */
2146         group_slice = cfq_group_slice(cfqd, cfqg);
2147
2148         slice = group_slice * count /
2149                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2150                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2151
2152         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2153                 unsigned int tmp;
2154
2155                 /*
2156                  * Async queues are currently system wide. Just taking the
2157                  * proportion of queues within the same group will lead to a
2158                  * higher async ratio system wide, as the root group generally
2159                  * has a higher weight. A more accurate approach would be to
2160                  * calculate a system wide async/sync ratio.
2161                  */
2162                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2163                 tmp = tmp/cfqd->busy_queues;
2164                 slice = min_t(unsigned, slice, tmp);
2165
2166                 /* async workload slice is scaled down according to
2167                  * the sync/async slice ratio. */
2168                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2169         } else
2170                 /* sync workload slice is at least 2 * cfq_slice_idle */
2171                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2172
2173         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2174         cfq_log(cfqd, "workload slice:%d", slice);
2175         cfqd->workload_expires = jiffies + slice;
2176 }
2177
2178 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2179 {
2180         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2181         struct cfq_group *cfqg;
2182
2183         if (RB_EMPTY_ROOT(&st->rb))
2184                 return NULL;
2185         cfqg = cfq_rb_first_group(st);
2186         update_min_vdisktime(st);
2187         return cfqg;
2188 }
2189
2190 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2191 {
2192         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2193
2194         cfqd->serving_group = cfqg;
2195
2196         /* Restore the workload type data */
2197         if (cfqg->saved_workload_slice) {
2198                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2199                 cfqd->serving_type = cfqg->saved_workload;
2200                 cfqd->serving_prio = cfqg->saved_serving_prio;
2201         } else
2202                 cfqd->workload_expires = jiffies - 1;
2203
2204         choose_service_tree(cfqd, cfqg);
2205 }
2206
2207 /*
2208  * Select a queue for service. If we have a current active queue,
2209  * check whether to continue servicing it, or retrieve and set a new one.
2210  */
2211 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2212 {
2213         struct cfq_queue *cfqq, *new_cfqq = NULL;
2214
2215         cfqq = cfqd->active_queue;
2216         if (!cfqq)
2217                 goto new_queue;
2218
2219         if (!cfqd->rq_queued)
2220                 return NULL;
2221
2222         /*
2223          * We were waiting for group to get backlogged. Expire the queue
2224          */
2225         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2226                 goto expire;
2227
2228         /*
2229          * The active queue has run out of time, expire it and select new.
2230          */
2231         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2232                 /*
2233                  * If slice had not expired at the completion of last request
2234                  * we might not have turned on wait_busy flag. Don't expire
2235                  * the queue yet. Allow the group to get backlogged.
2236                  *
2237                  * The very fact that we have used the slice means we have
2238                  * been idling all along on this queue, and it should be
2239                  * ok to wait for this request to complete.
2240                  */
2241                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2242                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2243                         cfqq = NULL;
2244                         goto keep_queue;
2245                 } else
2246                         goto check_group_idle;
2247         }
2248
2249         /*
2250          * The active queue has requests and isn't expired, allow it to
2251          * dispatch.
2252          */
2253         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2254                 goto keep_queue;
2255
2256         /*
2257          * If another queue has a request waiting within our mean seek
2258          * distance, let it run.  The expire code will check for close
2259          * cooperators and put the close queue at the front of the service
2260          * tree.  If possible, merge the expiring queue with the new cfqq.
2261          */
2262         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2263         if (new_cfqq) {
2264                 if (!cfqq->new_cfqq)
2265                         cfq_setup_merge(cfqq, new_cfqq);
2266                 goto expire;
2267         }
2268
2269         /*
2270          * No requests pending. If the active queue still has requests in
2271          * flight or is idling for a new request, allow either of these
2272          * conditions to happen (or time out) before selecting a new queue.
2273          */
2274         if (timer_pending(&cfqd->idle_slice_timer)) {
2275                 cfqq = NULL;
2276                 goto keep_queue;
2277         }
2278
2279         /*
2280          * This is a deep seek queue, but the device is much faster than
2281          * the queue can deliver, so don't idle.
2282          */
2283         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2284             (cfq_cfqq_slice_new(cfqq) ||
2285             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2286                 cfq_clear_cfqq_deep(cfqq);
2287                 cfq_clear_cfqq_idle_window(cfqq);
2288         }
2289
2290         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2291                 cfqq = NULL;
2292                 goto keep_queue;
2293         }
2294
2295         /*
2296          * If group idle is enabled and there are requests dispatched from
2297          * this group, wait for requests to complete.
2298          */
2299 check_group_idle:
2300         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2301             && cfqq->cfqg->dispatched) {
2302                 cfqq = NULL;
2303                 goto keep_queue;
2304         }
2305
2306 expire:
2307         cfq_slice_expired(cfqd, 0);
2308 new_queue:
2309         /*
2310          * Current queue expired. Check if we have to switch to a new
2311          * service tree
2312          */
2313         if (!new_cfqq)
2314                 cfq_choose_cfqg(cfqd);
2315
2316         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2317 keep_queue:
2318         return cfqq;
2319 }
2320
2321 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2322 {
2323         int dispatched = 0;
2324
2325         while (cfqq->next_rq) {
2326                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2327                 dispatched++;
2328         }
2329
2330         BUG_ON(!list_empty(&cfqq->fifo));
2331
2332         /* By default cfqq is not expired if it is empty. Do it explicitly */
2333         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2334         return dispatched;
2335 }
2336
2337 /*
2338  * Drain our current requests. Used for barriers and when switching
2339  * io schedulers on-the-fly.
2340  */
2341 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2342 {
2343         struct cfq_queue *cfqq;
2344         int dispatched = 0;
2345
2346         /* Expire the timeslice of the current active queue first */
2347         cfq_slice_expired(cfqd, 0);
2348         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2349                 __cfq_set_active_queue(cfqd, cfqq);
2350                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2351         }
2352
2353         BUG_ON(cfqd->busy_queues);
2354
2355         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2356         return dispatched;
2357 }
2358
2359 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2360         struct cfq_queue *cfqq)
2361 {
2362         /* the queue hasn't finished any request, can't estimate */
2363         if (cfq_cfqq_slice_new(cfqq))
2364                 return true;
2365         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2366                 cfqq->slice_end))
2367                 return true;
2368
2369         return false;
2370 }
2371
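/*
 * Decide whether cfqq may dispatch another request right now, based on
 * sync/async draining rules and a per-queue dispatch depth limit.
 */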
2372 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2373 {
2374         unsigned int max_dispatch;
2375
2376         /*
2377          * Drain async requests before we start sync IO
2378          */
2379         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2380                 return false;
2381
2382         /*
2383          * If this is an async queue and we have sync IO in flight, let it wait
2384          */
2385         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2386                 return false;
2387
2388         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2389         if (cfq_class_idle(cfqq))
2390                 max_dispatch = 1;
2391
2392         /*
2393          * Does this cfqq already have too much IO in flight?
2394          */
2395         if (cfqq->dispatched >= max_dispatch) {
2396                 /*
2397                  * idle queue must always only have a single IO in flight
2398                  */
2399                 if (cfq_class_idle(cfqq))
2400                         return false;
2401
2402                 /*
2403                  * We have other queues, don't allow more IO from this one
2404                  */
2405                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
2406                         return false;
2407
2408                 /*
2409                  * Sole queue user, no limit
2410                  */
2411                 if (cfqd->busy_queues == 1)
2412                         max_dispatch = -1;
2413                 else
2414                         /*
2415                          * Normally we start throttling cfqq when cfq_quantum/2
2416                          * requests have been dispatched. But we can drive
2417                          * deeper queue depths at the beginning of the slice,
2418                          * subject to the upper limit of cfq_quantum.
2419                          */
2420                         max_dispatch = cfqd->cfq_quantum;
2421         }
2422
2423         /*
2424          * Async queues must wait a bit before being allowed to dispatch.
2425          * We also ramp up the dispatch depth gradually for async IO,
2426          * based on the last sync IO we serviced
2427          */
2428         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2429                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2430                 unsigned int depth;
2431
2432                 depth = last_sync / cfqd->cfq_slice[1];
2433                 if (!depth && !cfqq->dispatched)
2434                         depth = 1;
2435                 if (depth < max_dispatch)
2436                         max_dispatch = depth;
2437         }
2438
2439         /*
2440          * If we're below the current max, allow a dispatch
2441          */
2442         return cfqq->dispatched < max_dispatch;
2443 }
2444
2445 /*
2446  * Dispatch a request from cfqq, moving it to the request queue
2447  * dispatch list.
2448  */
2449 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2450 {
2451         struct request *rq;
2452
2453         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2454
2455         if (!cfq_may_dispatch(cfqd, cfqq))
2456                 return false;
2457
2458         /*
2459          * follow expired path, else get first next available
2460          */
2461         rq = cfq_check_fifo(cfqq);
2462         if (!rq)
2463                 rq = cfqq->next_rq;
2464
2465         /*
2466          * insert request into driver dispatch list
2467          */
2468         cfq_dispatch_insert(cfqd->queue, rq);
2469
2470         if (!cfqd->active_cic) {
2471                 struct cfq_io_context *cic = RQ_CIC(rq);
2472
2473                 atomic_long_inc(&cic->ioc->refcount);
2474                 cfqd->active_cic = cic;
2475         }
2476
2477         return true;
2478 }
2479
2480 /*
2481  * Find the cfqq that we need to service and move a request from that to the
2482  * dispatch list
2483  */
2484 static int cfq_dispatch_requests(struct request_queue *q, int force)
2485 {
2486         struct cfq_data *cfqd = q->elevator->elevator_data;
2487         struct cfq_queue *cfqq;
2488
2489         if (!cfqd->busy_queues)
2490                 return 0;
2491
2492         if (unlikely(force))
2493                 return cfq_forced_dispatch(cfqd);
2494
2495         cfqq = cfq_select_queue(cfqd);
2496         if (!cfqq)
2497                 return 0;
2498
2499         /*
2500          * Dispatch a request from this cfqq, if it is allowed
2501          */
2502         if (!cfq_dispatch_request(cfqd, cfqq))
2503                 return 0;
2504
2505         cfqq->slice_dispatch++;
2506         cfq_clear_cfqq_must_dispatch(cfqq);
2507
2508         /*
2509          * expire an async queue immediately if it has used up its slice. An
2510          * idle queue always expires after one dispatch round.
2511          */
2512         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2513             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2514             cfq_class_idle(cfqq))) {
2515                 cfqq->slice_end = jiffies + 1;
2516                 cfq_slice_expired(cfqd, 0);
2517         }
2518
2519         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2520         return 1;
2521 }
2522
2523 /*
2524  * task holds one reference to the queue, dropped when task exits. each rq
2525  * in-flight on this queue also holds a reference, dropped when rq is freed.
2526  *
2527  * Each cfq queue took a reference on the parent group. Drop it now.
2528  * queue lock must be held here.
2529  */
2530 static void cfq_put_queue(struct cfq_queue *cfqq)
2531 {
2532         struct cfq_data *cfqd = cfqq->cfqd;
2533         struct cfq_group *cfqg, *orig_cfqg;
2534
2535         BUG_ON(cfqq->ref <= 0);
2536
2537         cfqq->ref--;
2538         if (cfqq->ref)
2539                 return;
2540
2541         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2542         BUG_ON(rb_first(&cfqq->sort_list));
2543         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2544         cfqg = cfqq->cfqg;
2545         orig_cfqg = cfqq->orig_cfqg;
2546
2547         if (unlikely(cfqd->active_queue == cfqq)) {
2548                 __cfq_slice_expired(cfqd, cfqq, 0);
2549                 cfq_schedule_dispatch(cfqd);
2550         }
2551
2552         BUG_ON(cfq_cfqq_on_rr(cfqq));
2553         kmem_cache_free(cfq_pool, cfqq);
2554         cfq_put_cfqg(cfqg);
2555         if (orig_cfqg)
2556                 cfq_put_cfqg(orig_cfqg);
2557 }
2558
2559 /*
2560  * Must always be called with the rcu_read_lock() held
2561  */
2562 static void
2563 __call_for_each_cic(struct io_context *ioc,
2564                     void (*func)(struct io_context *, struct cfq_io_context *))
2565 {
2566         struct cfq_io_context *cic;
2567         struct hlist_node *n;
2568
2569         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2570                 func(ioc, cic);
2571 }
2572
2573 /*
2574  * Call func for each cic attached to this ioc.
2575  */
2576 static void
2577 call_for_each_cic(struct io_context *ioc,
2578                   void (*func)(struct io_context *, struct cfq_io_context *))
2579 {
2580         rcu_read_lock();
2581         __call_for_each_cic(ioc, func);
2582         rcu_read_unlock();
2583 }
2584
2585 static void cfq_cic_free_rcu(struct rcu_head *head)
2586 {
2587         struct cfq_io_context *cic;
2588
2589         cic = container_of(head, struct cfq_io_context, rcu_head);
2590
2591         kmem_cache_free(cfq_ioc_pool, cic);
2592         elv_ioc_count_dec(cfq_ioc_count);
2593
2594         if (ioc_gone) {
2595                 /*
2596                  * CFQ scheduler is exiting, grab exit lock and check
2597                  * the pending io context count. If it hits zero,
2598                  * complete ioc_gone and set it back to NULL
2599                  */
2600                 spin_lock(&ioc_gone_lock);
2601                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2602                         complete(ioc_gone);
2603                         ioc_gone = NULL;
2604                 }
2605                 spin_unlock(&ioc_gone_lock);
2606         }
2607 }
2608
2609 static void cfq_cic_free(struct cfq_io_context *cic)
2610 {
2611         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2612 }
2613
2614 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2615 {
2616         unsigned long flags;
2617         unsigned long dead_key = (unsigned long) cic->key;
2618
2619         BUG_ON(!(dead_key & CIC_DEAD_KEY));
2620
2621         spin_lock_irqsave(&ioc->lock, flags);
2622         radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2623         hlist_del_rcu(&cic->cic_list);
2624         spin_unlock_irqrestore(&ioc->lock, flags);
2625
2626         cfq_cic_free(cic);
2627 }
2628
2629 /*
2630  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2631  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
2632  * and ->trim() which is called with the task lock held
2633  */
2634 static void cfq_free_io_context(struct io_context *ioc)
2635 {
2636         /*
2637          * ioc->refcount is zero here, or we are called from elv_unregister(),
2638          * so no more cic's are allowed to be linked into this ioc.  So it
2639          * should be ok to iterate over the known list; we will see all cic's
2640          * since no new ones are added.
2641          */
2642         __call_for_each_cic(ioc, cic_free_func);
2643 }
2644
2645 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2646 {
2647         struct cfq_queue *__cfqq, *next;
2648
2649         /*
2650          * If this queue was scheduled to merge with another queue, be
2651          * sure to drop the reference taken on that queue (and others in
2652          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2653          */
2654         __cfqq = cfqq->new_cfqq;
2655         while (__cfqq) {
2656                 if (__cfqq == cfqq) {
2657                         WARN(1, "cfqq->new_cfqq loop detected\n");
2658                         break;
2659                 }
2660                 next = __cfqq->new_cfqq;
2661                 cfq_put_queue(__cfqq);
2662                 __cfqq = next;
2663         }
2664 }
2665
2666 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2667 {
2668         if (unlikely(cfqq == cfqd->active_queue)) {
2669                 __cfq_slice_expired(cfqd, cfqq, 0);
2670                 cfq_schedule_dispatch(cfqd);
2671         }
2672
2673         cfq_put_cooperator(cfqq);
2674
2675         cfq_put_queue(cfqq);
2676 }
2677
2678 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2679                                          struct cfq_io_context *cic)
2680 {
2681         struct io_context *ioc = cic->ioc;
2682
2683         list_del_init(&cic->queue_list);
2684
2685         /*
2686          * Make sure dead mark is seen for dead queues
2687          */
2688         smp_wmb();
2689         cic->key = cfqd_dead_key(cfqd);
2690
2691         if (ioc->ioc_data == cic)
2692                 rcu_assign_pointer(ioc->ioc_data, NULL);
2693
2694         if (cic->cfqq[BLK_RW_ASYNC]) {
2695                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2696                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2697         }
2698
2699         if (cic->cfqq[BLK_RW_SYNC]) {
2700                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2701                 cic->cfqq[BLK_RW_SYNC] = NULL;
2702         }
2703 }
2704
2705 static void cfq_exit_single_io_context(struct io_context *ioc,
2706                                        struct cfq_io_context *cic)
2707 {
2708         struct cfq_data *cfqd = cic_to_cfqd(cic);
2709
2710         if (cfqd) {
2711                 struct request_queue *q = cfqd->queue;
2712                 unsigned long flags;
2713
2714                 spin_lock_irqsave(q->queue_lock, flags);
2715
2716                 /*
2717                  * Ensure we get a fresh copy of the ->key to prevent
2718                  * race between exiting task and queue
2719                  */
2720                 smp_read_barrier_depends();
2721                 if (cic->key == cfqd)
2722                         __cfq_exit_single_io_context(cfqd, cic);
2723
2724                 spin_unlock_irqrestore(q->queue_lock, flags);
2725         }
2726 }
2727
2728 /*
2729  * The process that ioc belongs to has exited; we need to clean up
2730  * and put the internal structures we have that belong to that process.
2731  */
2732 static void cfq_exit_io_context(struct io_context *ioc)
2733 {
2734         call_for_each_cic(ioc, cfq_exit_single_io_context);
2735 }
2736
2737 static struct cfq_io_context *
2738 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2739 {
2740         struct cfq_io_context *cic;
2741
2742         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2743                                                         cfqd->queue->node);
2744         if (cic) {
2745                 cic->last_end_request = jiffies;
2746                 INIT_LIST_HEAD(&cic->queue_list);
2747                 INIT_HLIST_NODE(&cic->cic_list);
2748                 cic->dtor = cfq_free_io_context;
2749                 cic->exit = cfq_exit_io_context;
2750                 elv_ioc_count_inc(cfq_ioc_count);
2751         }
2752
2753         return cic;
2754 }
2755
2756 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2757 {
2758         struct task_struct *tsk = current;
2759         int ioprio_class;
2760
2761         if (!cfq_cfqq_prio_changed(cfqq))
2762                 return;
2763
2764         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2765         switch (ioprio_class) {
2766         default:
2767                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2768         case IOPRIO_CLASS_NONE:
2769                 /*
2770                  * no prio set, inherit CPU scheduling settings
2771                  */
2772                 cfqq->ioprio = task_nice_ioprio(tsk);
2773                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2774                 break;
2775         case IOPRIO_CLASS_RT:
2776                 cfqq->ioprio = task_ioprio(ioc);
2777                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2778                 break;
2779         case IOPRIO_CLASS_BE:
2780                 cfqq->ioprio = task_ioprio(ioc);
2781                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2782                 break;
2783         case IOPRIO_CLASS_IDLE:
2784                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2785                 cfqq->ioprio = 7;
2786                 cfq_clear_cfqq_idle_window(cfqq);
2787                 break;
2788         }
2789
2790         /*
2791          * keep track of original prio settings in case we have to temporarily
2792          * elevate the priority of this queue
2793          */
2794         cfqq->org_ioprio = cfqq->ioprio;
2795         cfqq->org_ioprio_class = cfqq->ioprio_class;
2796         cfq_clear_cfqq_prio_changed(cfqq);
2797 }
2798
2799 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2800 {
2801         struct cfq_data *cfqd = cic_to_cfqd(cic);
2802         struct cfq_queue *cfqq;
2803         unsigned long flags;
2804
2805         if (unlikely(!cfqd))
2806                 return;
2807
2808         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2809
2810         cfqq = cic->cfqq[BLK_RW_ASYNC];
2811         if (cfqq) {
2812                 struct cfq_queue *new_cfqq;
2813                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2814                                                 GFP_ATOMIC);
2815                 if (new_cfqq) {
2816                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2817                         cfq_put_queue(cfqq);
2818                 }
2819         }
2820
2821         cfqq = cic->cfqq[BLK_RW_SYNC];
2822         if (cfqq)
2823                 cfq_mark_cfqq_prio_changed(cfqq);
2824
2825         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2826 }
2827
2828 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2829 {
2830         call_for_each_cic(ioc, changed_ioprio);
2831         ioc->ioprio_changed = 0;
2832 }
2833
2834 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2835                           pid_t pid, bool is_sync)
2836 {
2837         RB_CLEAR_NODE(&cfqq->rb_node);
2838         RB_CLEAR_NODE(&cfqq->p_node);
2839         INIT_LIST_HEAD(&cfqq->fifo);
2840
2841         cfqq->ref = 0;
2842         cfqq->cfqd = cfqd;
2843
2844         cfq_mark_cfqq_prio_changed(cfqq);
2845
2846         if (is_sync) {
2847                 if (!cfq_class_idle(cfqq))
2848                         cfq_mark_cfqq_idle_window(cfqq);
2849                 cfq_mark_cfqq_sync(cfqq);
2850         }
2851         cfqq->pid = pid;
2852 }
2853
2854 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2855 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2856 {
2857         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2858         struct cfq_data *cfqd = cic_to_cfqd(cic);
2859         unsigned long flags;
2860         struct request_queue *q;
2861
2862         if (unlikely(!cfqd))
2863                 return;
2864
2865         q = cfqd->queue;
2866
2867         spin_lock_irqsave(q->queue_lock, flags);
2868
2869         if (sync_cfqq) {
2870                 /*
2871                  * Drop reference to sync queue. A new sync queue will be
2872                  * assigned in new group upon arrival of a fresh request.
2873                  */
2874                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2875                 cic_set_cfqq(cic, NULL, 1);
2876                 cfq_put_queue(sync_cfqq);
2877         }
2878
2879         spin_unlock_irqrestore(q->queue_lock, flags);
2880 }
2881
2882 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2883 {
2884         call_for_each_cic(ioc, changed_cgroup);
2885         ioc->cgroup_changed = 0;
2886 }
2887 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2888
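/*
 * Find the cfqq for this io context, or allocate a new one if needed,
 * falling back to the per-device oom_cfqq when the allocation fails.
 */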
2889 static struct cfq_queue *
2890 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2891                      struct io_context *ioc, gfp_t gfp_mask)
2892 {
2893         struct cfq_queue *cfqq, *new_cfqq = NULL;
2894         struct cfq_io_context *cic;
2895         struct cfq_group *cfqg;
2896
2897 retry:
2898         cfqg = cfq_get_cfqg(cfqd, 1);
2899         cic = cfq_cic_lookup(cfqd, ioc);
2900         /* cic always exists here */
2901         cfqq = cic_to_cfqq(cic, is_sync);
2902
2903         /*
2904          * Always try a new alloc if we fell back to the OOM cfqq
2905          * originally, since it should just be a temporary situation.
2906          */
2907         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2908                 cfqq = NULL;
2909                 if (new_cfqq) {
2910                         cfqq = new_cfqq;
2911                         new_cfqq = NULL;
2912                 } else if (gfp_mask & __GFP_WAIT) {
2913                         spin_unlock_irq(cfqd->queue->queue_lock);
2914                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2915                                         gfp_mask | __GFP_ZERO,
2916                                         cfqd->queue->node);
2917                         spin_lock_irq(cfqd->queue->queue_lock);
2918                         if (new_cfqq)
2919                                 goto retry;
2920                 } else {
2921                         cfqq = kmem_cache_alloc_node(cfq_pool,
2922                                         gfp_mask | __GFP_ZERO,
2923                                         cfqd->queue->node);
2924                 }
2925
2926                 if (cfqq) {
2927                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2928                         cfq_init_prio_data(cfqq, ioc);
2929                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2930                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2931                 } else
2932                         cfqq = &cfqd->oom_cfqq;
2933         }
2934
2935         if (new_cfqq)
2936                 kmem_cache_free(cfq_pool, new_cfqq);
2937
2938         return cfqq;
2939 }
2940
2941 static struct cfq_queue **
2942 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2943 {
2944         switch (ioprio_class) {
2945         case IOPRIO_CLASS_RT:
2946                 return &cfqd->async_cfqq[0][ioprio];
2947         case IOPRIO_CLASS_BE:
2948                 return &cfqd->async_cfqq[1][ioprio];
2949         case IOPRIO_CLASS_IDLE:
2950                 return &cfqd->async_idle_cfqq;
2951         default:
2952                 BUG();
2953         }
2954 }
2955
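/*
 * Get a cfqq for this io context and take a reference on it. Async
 * queues are shared per priority class and level, and stay pinned until
 * the scheduler exits.
 */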
2956 static struct cfq_queue *
2957 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2958               gfp_t gfp_mask)
2959 {
2960         const int ioprio = task_ioprio(ioc);
2961         const int ioprio_class = task_ioprio_class(ioc);
2962         struct cfq_queue **async_cfqq = NULL;
2963         struct cfq_queue *cfqq = NULL;
2964
2965         if (!is_sync) {
2966                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2967                 cfqq = *async_cfqq;
2968         }
2969
2970         if (!cfqq)
2971                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2972
2973         /*
2974          * pin the queue now that it's allocated, scheduler exit will prune it
2975          */
2976         if (!is_sync && !(*async_cfqq)) {
2977                 cfqq->ref++;
2978                 *async_cfqq = cfqq;
2979         }
2980
2981         cfqq->ref++;
2982         return cfqq;
2983 }
2984
2985 /*
2986  * We drop cfq io contexts lazily, so we may find a dead one.
2987  */
2988 static void
2989 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2990                   struct cfq_io_context *cic)
2991 {
2992         unsigned long flags;
2993
2994         WARN_ON(!list_empty(&cic->queue_list));
2995         BUG_ON(cic->key != cfqd_dead_key(cfqd));
2996
2997         spin_lock_irqsave(&ioc->lock, flags);
2998
2999         BUG_ON(ioc->ioc_data == cic);
3000
3001         radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3002         hlist_del_rcu(&cic->cic_list);
3003         spin_unlock_irqrestore(&ioc->lock, flags);
3004
3005         cfq_cic_free(cic);
3006 }
3007
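/*
 * Look up the cfq_io_context for this (ioc, cfqd) pair, trying the
 * last-hit cache first and dropping any dead cic found in the radix tree.
 */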
3008 static struct cfq_io_context *
3009 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3010 {
3011         struct cfq_io_context *cic;
3012         unsigned long flags;
3013
3014         if (unlikely(!ioc))
3015                 return NULL;
3016
3017         rcu_read_lock();
3018
3019         /*
3020          * we maintain a last-hit cache, to avoid browsing over the tree
3021          */
3022         cic = rcu_dereference(ioc->ioc_data);
3023         if (cic && cic->key == cfqd) {
3024                 rcu_read_unlock();
3025                 return cic;
3026         }
3027
3028         do {
3029                 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3030                 rcu_read_unlock();
3031                 if (!cic)
3032                         break;
3033                 if (unlikely(cic->key != cfqd)) {
3034                         cfq_drop_dead_cic(cfqd, ioc, cic);
3035                         rcu_read_lock();
3036                         continue;
3037                 }
3038
3039                 spin_lock_irqsave(&ioc->lock, flags);
3040                 rcu_assign_pointer(ioc->ioc_data, cic);
3041                 spin_unlock_irqrestore(&ioc->lock, flags);
3042                 break;
3043         } while (1);
3044
3045         return cic;
3046 }
3047
3048 /*
3049  * Add cic into ioc, using cfqd as the search key. This enables us to look up
3050  * the process specific cfq io context when entered from the block layer.
3051  * Also adds the cic to a per-cfqd list, used when this queue is removed.
3052  */
3053 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3054                         struct cfq_io_context *cic, gfp_t gfp_mask)
3055 {
3056         unsigned long flags;
3057         int ret;
3058
3059         ret = radix_tree_preload(gfp_mask);
3060         if (!ret) {
3061                 cic->ioc = ioc;
3062                 cic->key = cfqd;
3063
3064                 spin_lock_irqsave(&ioc->lock, flags);
3065                 ret = radix_tree_insert(&ioc->radix_root,
3066                                                 cfqd->cic_index, cic);
3067                 if (!ret)
3068                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3069                 spin_unlock_irqrestore(&ioc->lock, flags);
3070
3071                 radix_tree_preload_end();
3072
3073                 if (!ret) {
3074                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3075                         list_add(&cic->queue_list, &cfqd->cic_list);
3076                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3077                 }
3078         }
3079
3080         if (ret)
3081                 printk(KERN_ERR "cfq: cic link failed!\n");
3082
3083         return ret;
3084 }
3085
3086 /*
3087  * Set up general io context and cfq io context. There can be several cfq
3088  * io contexts per general io context, if this process is doing io to more
3089  * than one device managed by cfq.
3090  */
3091 static struct cfq_io_context *
3092 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3093 {
3094         struct io_context *ioc = NULL;
3095         struct cfq_io_context *cic;
3096
3097         might_sleep_if(gfp_mask & __GFP_WAIT);
3098
3099         ioc = get_io_context(gfp_mask, cfqd->queue->node);
3100         if (!ioc)
3101                 return NULL;
3102
3103         cic = cfq_cic_lookup(cfqd, ioc);
3104         if (cic)
3105                 goto out;
3106
3107         cic = cfq_alloc_io_context(cfqd, gfp_mask);
3108         if (cic == NULL)
3109                 goto err;
3110
3111         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3112                 goto err_free;
3113
3114 out:
3115         smp_read_barrier_depends();
3116         if (unlikely(ioc->ioprio_changed))
3117                 cfq_ioc_set_ioprio(ioc);
3118
3119 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3120         if (unlikely(ioc->cgroup_changed))
3121                 cfq_ioc_set_cgroup(ioc);
3122 #endif
3123         return cic;
3124 err_free:
3125         cfq_cic_free(cic);
3126 err:
3127         put_io_context(ioc);
3128         return NULL;
3129 }
3130
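/*
 * Think time is tracked as a fixed-point exponential moving average: each
 * sample keeps 7/8 of the old value and adds 1/8 of the new one, scaled by
 * 256 so integer math retains some precision (the +128 rounds the mean).
 * In steady state ttime_samples converges towards 256 and ttime_mean
 * towards the recent average gap between the last completion and the next
 * request, capped at twice cfq_slice_idle.
 */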
3131 static void
3132 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3133 {
3134         unsigned long elapsed = jiffies - cic->last_end_request;
3135         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3136
3137         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3138         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3139         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3140 }
3141
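/*
 * seek_history is a 32-bit shift register: each request shifts in one bit,
 * set when the request looks "seeky" (seek distance above CFQQ_SEEK_THR on
 * rotational storage, or a small request on non-rotational storage, where
 * request size matters more than distance). The population count of this
 * history is later used to classify the queue as seeky or sequential.
 */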
3142 static void
3143 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3144                        struct request *rq)
3145 {
3146         sector_t sdist = 0;
3147         sector_t n_sec = blk_rq_sectors(rq);
3148         if (cfqq->last_request_pos) {
3149                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3150                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3151                 else
3152                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3153         }
3154
3155         cfqq->seek_history <<= 1;
3156         if (blk_queue_nonrot(cfqd->queue))
3157                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3158         else
3159                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3160 }
3161
3162 /*
3163  * Disable idle window if the process thinks too long or seeks so much that
3164  * it doesn't matter
3165  */
3166 static void
3167 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3168                        struct cfq_io_context *cic)
3169 {
3170         int old_idle, enable_idle;
3171
3172         /*
3173          * Don't idle for async or idle io prio class
3174          */
3175         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3176                 return;
3177
3178         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3179
3180         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3181                 cfq_mark_cfqq_deep(cfqq);
3182
3183         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3184                 enable_idle = 0;
3185         else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3186             (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3187                 enable_idle = 0;
3188         else if (sample_valid(cic->ttime_samples)) {
3189                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3190                         enable_idle = 0;
3191                 else
3192                         enable_idle = 1;
3193         }
3194
3195         if (old_idle != enable_idle) {
3196                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3197                 if (enable_idle)
3198                         cfq_mark_cfqq_idle_window(cfqq);
3199                 else
3200                         cfq_clear_cfqq_idle_window(cfqq);
3201         }
3202 }
3203
3204 /*
3205  * Check if new_cfqq should preempt the currently active queue. Returns
3206  * false for no (or if we aren't sure); returning true causes a preempt.
3207  */
3208 static bool
3209 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3210                    struct request *rq)
3211 {
3212         struct cfq_queue *cfqq;
3213
3214         cfqq = cfqd->active_queue;
3215         if (!cfqq)
3216                 return false;
3217
3218         if (cfq_class_idle(new_cfqq))
3219                 return false;
3220
3221         if (cfq_class_idle(cfqq))
3222                 return true;
3223
3224         /*
3225          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3226          */
3227         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3228                 return false;
3229
3230         /*
3231          * if the new request is sync, but the currently running queue is
3232          * not, let the sync request have priority.
3233          */
3234         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3235                 return true;
3236
3237         if (new_cfqq->cfqg != cfqq->cfqg)
3238                 return false;
3239
3240         if (cfq_slice_used(cfqq))
3241                 return true;
3242
3243         /* Allow preemption only if we are idling on sync-noidle tree */
3244         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3245             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3246             new_cfqq->service_tree->count == 2 &&
3247             RB_EMPTY_ROOT(&cfqq->sort_list))
3248                 return true;
3249
3250         /*
3251          * So both queues are sync. Let the new request get disk time if
3252          * it's a metadata request and the current queue is doing regular IO.
3253          */
3254         if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3255                 return true;
3256
3257         /*
3258          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3259          */
3260         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3261                 return true;
3262
3263         /* An idle queue should not be idle now for some reason */
3264         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3265                 return true;
3266
3267         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3268                 return false;
3269
3270         /*
3271          * if this request is as-good as one we would expect from the
3272          * current cfqq, let it preempt
3273          */
3274         if (cfq_rq_close(cfqd, cfqq, rq))
3275                 return true;
3276
3277         return false;
3278 }
3279
3280 /*
3281  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3282  * let it have half of its nominal slice.
3283  */
3284 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3285 {
3286         cfq_log_cfqq(cfqd, cfqq, "preempt");
3287         cfq_slice_expired(cfqd, 1);
3288
3289         /*
3290          * Put the new queue at the front of the current list,
3291          * so we know that it will be selected next.
3292          */
3293         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3294
3295         cfq_service_tree_add(cfqd, cfqq, 1);
3296
3297         cfqq->slice_end = 0;
3298         cfq_mark_cfqq_slice_new(cfqq);
3299 }
3300
3301 /*
3302  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3303  * something we should do about it
3304  */
3305 static void
3306 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3307                 struct request *rq)
3308 {
3309         struct cfq_io_context *cic = RQ_CIC(rq);
3310
3311         cfqd->rq_queued++;
3312         if (rq->cmd_flags & REQ_META)
3313                 cfqq->meta_pending++;
3314
3315         cfq_update_io_thinktime(cfqd, cic);
3316         cfq_update_io_seektime(cfqd, cfqq, rq);
3317         cfq_update_idle_window(cfqd, cfqq, cic);
3318
3319         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3320
3321         if (cfqq == cfqd->active_queue) {
3322                 /*
3323                  * Remember that we saw a request from this process, but
3324                  * don't start queuing just yet. Otherwise we risk seeing lots
3325                  * of tiny requests, because we disrupt the normal plugging
3326                  * and merging. If the request is already larger than a single
3327                  * page, let it rip immediately. For that case we assume that
3328                  * merging is already done. Ditto for a busy system that
3329                  * has other work pending: don't risk delaying until the
3330                  * idle timer fires to unplug the queue and continue working.
3331                  */
3332                 if (cfq_cfqq_wait_request(cfqq)) {
3333                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3334                             cfqd->busy_queues > 1) {
3335                                 cfq_del_timer(cfqd, cfqq);
3336                                 cfq_clear_cfqq_wait_request(cfqq);
3337                                 __blk_run_queue(cfqd->queue);
3338                         } else {
3339                                 cfq_blkiocg_update_idle_time_stats(
3340                                                 &cfqq->cfqg->blkg);
3341                                 cfq_mark_cfqq_must_dispatch(cfqq);
3342                         }
3343                 }
3344         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3345                 /*
3346                  * not the active queue - expire current slice if it is
3347                  * idle and has expired its mean thinktime or this new queue
3348                  * has some old slice time left and is of higher priority or
3349                  * this new queue is RT and the current one is BE
3350                  */
3351                 cfq_preempt_queue(cfqd, cfqq);
3352                 __blk_run_queue(cfqd->queue);
3353         }
3354 }
3355
3356 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3357 {
3358         struct cfq_data *cfqd = q->elevator->elevator_data;
3359         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3360
3361         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3362         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3363
3364         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3365         list_add_tail(&rq->queuelist, &cfqq->fifo);
3366         cfq_add_rq_rb(rq);
3367         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3368                         &cfqd->serving_group->blkg, rq_data_dir(rq),
3369                         rq_is_sync(rq));
3370         cfq_rq_enqueued(cfqd, cfqq, rq);
3371 }
3372
3373 /*
3374  * Update hw_tag based on peak queue depth over 50 samples under
3375  * sufficient load.
3376  */
3377 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3378 {
3379         struct cfq_queue *cfqq = cfqd->active_queue;
3380
3381         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3382                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3383
3384         if (cfqd->hw_tag == 1)
3385                 return;
3386
3387         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3388             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3389                 return;
3390
3391         /*
3392          * If the active queue doesn't have enough requests and can idle,
3393          * cfq might not dispatch sufficient requests to hardware. Don't
3394          * zero hw_tag in this case.
3395          */
3396         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3397             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3398             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3399                 return;
3400
3401         if (cfqd->hw_tag_samples++ < 50)
3402                 return;
3403
3404         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3405                 cfqd->hw_tag = 1;
3406         else
3407                 cfqd->hw_tag = 0;
3408 }
3409
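/*
 * Decide whether to keep the queue in "wait busy" state once its last
 * request completes: the slice is briefly extended in the hope that a new
 * request arrives, so that a group whose only queue momentarily goes empty
 * is not expired right away and does not lose its share of the disk.
 */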
3410 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3411 {
3412         struct cfq_io_context *cic = cfqd->active_cic;
3413
3414         /* If there are other queues in the group, don't wait */
3415         if (cfqq->cfqg->nr_cfqq > 1)
3416                 return false;
3417
3418         if (cfq_slice_used(cfqq))
3419                 return true;
3420
3421         /* if slice left is less than think time, wait busy */
3422         if (cic && sample_valid(cic->ttime_samples)
3423             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3424                 return true;
3425
3426         /*
3427          * If the think time is less than a jiffy, then ttime_mean=0 and the
3428          * check above will not be true. It might happen that the slice has
3429          * not expired yet but will expire soon (4-5 ns) during select_queue().
3430          * To cover the case where the think time is less than a jiffy, mark
3431          * the queue wait busy if only 1 jiffy is left in the slice.
3432          */
3433         if (cfqq->slice_end - jiffies == 1)
3434                 return true;
3435
3436         return false;
3437 }
3438
3439 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3440 {
3441         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3442         struct cfq_data *cfqd = cfqq->cfqd;
3443         const int sync = rq_is_sync(rq);
3444         unsigned long now;
3445
3446         now = jiffies;
3447         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3448                      !!(rq->cmd_flags & REQ_NOIDLE));
3449
3450         cfq_update_hw_tag(cfqd);
3451
3452         WARN_ON(!cfqd->rq_in_driver);
3453         WARN_ON(!cfqq->dispatched);
3454         cfqd->rq_in_driver--;
3455         cfqq->dispatched--;
3456         (RQ_CFQG(rq))->dispatched--;
3457         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3458                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3459                         rq_data_dir(rq), rq_is_sync(rq));
3460
3461         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3462
3463         if (sync) {
3464                 RQ_CIC(rq)->last_end_request = now;
3465                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3466                         cfqd->last_delayed_sync = now;
3467         }
3468
3469         /*
3470          * If this is the active queue, check if it needs to be expired,
3471          * or if we want to idle in case it has no pending requests.
3472          */
3473         if (cfqd->active_queue == cfqq) {
3474                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3475
3476                 if (cfq_cfqq_slice_new(cfqq)) {
3477                         cfq_set_prio_slice(cfqd, cfqq);
3478                         cfq_clear_cfqq_slice_new(cfqq);
3479                 }
3480
3481                 /*
3482                  * Should we wait for the next request to come in before we
3483                  * expire the queue?
3484                  */
3485                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3486                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3487                         if (!cfqd->cfq_slice_idle)
3488                                 extend_sl = cfqd->cfq_group_idle;
3489                         cfqq->slice_end = jiffies + extend_sl;
3490                         cfq_mark_cfqq_wait_busy(cfqq);
3491                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3492                 }
3493
3494                 /*
3495                  * Idling is not enabled on:
3496                  * - expired queues
3497                  * - idle-priority queues
3498                  * - async queues
3499                  * - queues with still some requests queued
3500                  * - when there is a close cooperator
3501                  */
3502                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3503                         cfq_slice_expired(cfqd, 1);
3504                 else if (sync && cfqq_empty &&
3505                          !cfq_close_cooperator(cfqd, cfqq)) {
3506                         cfq_arm_slice_timer(cfqd);
3507                 }
3508         }
3509
3510         if (!cfqd->rq_in_driver)
3511                 cfq_schedule_dispatch(cfqd);
3512 }
3513
3514 /*
3515  * we temporarily boost lower priority queues if they are holding fs exclusive
3516  * resources. they are boosted to normal prio (CLASS_BE/4)
3517  */
3518 static void cfq_prio_boost(struct cfq_queue *cfqq)
3519 {
3520         if (has_fs_excl()) {
3521                 /*
3522                  * boost idle prio on transactions that would lock out other
3523                  * users of the filesystem
3524                  */
3525                 if (cfq_class_idle(cfqq))
3526                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3527                 if (cfqq->ioprio > IOPRIO_NORM)
3528                         cfqq->ioprio = IOPRIO_NORM;
3529         } else {
3530                 /*
3531                  * unboost the queue (if needed)
3532                  */
3533                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3534                 cfqq->ioprio = cfqq->org_ioprio;
3535         }
3536 }
3537
3538 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3539 {
3540         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3541                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3542                 return ELV_MQUEUE_MUST;
3543         }
3544
3545         return ELV_MQUEUE_MAY;
3546 }
3547
3548 static int cfq_may_queue(struct request_queue *q, int rw)
3549 {
3550         struct cfq_data *cfqd = q->elevator->elevator_data;
3551         struct task_struct *tsk = current;
3552         struct cfq_io_context *cic;
3553         struct cfq_queue *cfqq;
3554
3555         /*
3556          * don't force setup of a queue from here, as a call to may_queue
3557          * does not necessarily imply that a request actually will be queued.
3558          * So just look up a possibly existing queue, or return 'may queue'
3559          * if that fails.
3560          */
3561         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3562         if (!cic)
3563                 return ELV_MQUEUE_MAY;
3564
3565         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3566         if (cfqq) {
3567                 cfq_init_prio_data(cfqq, cic->ioc);
3568                 cfq_prio_boost(cfqq);
3569
3570                 return __cfq_may_queue(cfqq);
3571         }
3572
3573         return ELV_MQUEUE_MAY;
3574 }
3575
3576 /*
3577  * queue lock held here
3578  */
3579 static void cfq_put_request(struct request *rq)
3580 {
3581         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3582
3583         if (cfqq) {
3584                 const int rw = rq_data_dir(rq);
3585
3586                 BUG_ON(!cfqq->allocated[rw]);
3587                 cfqq->allocated[rw]--;
3588
3589                 put_io_context(RQ_CIC(rq)->ioc);
3590
3591                 rq->elevator_private = NULL;
3592                 rq->elevator_private2 = NULL;
3593
3594                 /* Put down rq reference on cfqg */
3595                 cfq_put_cfqg(RQ_CFQG(rq));
3596                 rq->elevator_private3 = NULL;
3597
3598                 cfq_put_queue(cfqq);
3599         }
3600 }
3601
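/*
 * Redirect the cic to the queue it is scheduled to merge with: point the
 * cic's sync queue at new_cfqq, mark new_cfqq as a cooperator and drop
 * this process' reference on the old queue.
 */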
3602 static struct cfq_queue *
3603 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3604                 struct cfq_queue *cfqq)
3605 {
3606         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3607         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3608         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3609         cfq_put_queue(cfqq);
3610         return cic_to_cfqq(cic, 1);
3611 }
3612
3613 /*
3614  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3615  * was the last process referring to said cfqq.
3616  */
3617 static struct cfq_queue *
3618 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3619 {
3620         if (cfqq_process_refs(cfqq) == 1) {
3621                 cfqq->pid = current->pid;
3622                 cfq_clear_cfqq_coop(cfqq);
3623                 cfq_clear_cfqq_split_coop(cfqq);
3624                 return cfqq;
3625         }
3626
3627         cic_set_cfqq(cic, NULL, 1);
3628
3629         cfq_put_cooperator(cfqq);
3630
3631         cfq_put_queue(cfqq);
3632         return NULL;
3633 }
3634 /*
3635  * Allocate cfq data structures associated with this request.
3636  */
3637 static int
3638 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3639 {
3640         struct cfq_data *cfqd = q->elevator->elevator_data;
3641         struct cfq_io_context *cic;
3642         const int rw = rq_data_dir(rq);
3643         const bool is_sync = rq_is_sync(rq);
3644         struct cfq_queue *cfqq;
3645         unsigned long flags;
3646
3647         might_sleep_if(gfp_mask & __GFP_WAIT);
3648
3649         cic = cfq_get_io_context(cfqd, gfp_mask);
3650
3651         spin_lock_irqsave(q->queue_lock, flags);
3652
3653         if (!cic)
3654                 goto queue_fail;
3655
3656 new_queue:
3657         cfqq = cic_to_cfqq(cic, is_sync);
3658         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3659                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3660                 cic_set_cfqq(cic, cfqq, is_sync);
3661         } else {
3662                 /*
3663                  * If the queue was seeky for too long, break it apart.
3664                  */
3665                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3666                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3667                         cfqq = split_cfqq(cic, cfqq);
3668                         if (!cfqq)
3669                                 goto new_queue;
3670                 }
3671
3672                 /*
3673                  * Check to see if this queue is scheduled to merge with
3674                  * another, closely cooperating queue.  The merging of
3675                  * queues happens here as it must be done in process context.
3676                  * The reference on new_cfqq was taken in merge_cfqqs.
3677                  */
3678                 if (cfqq->new_cfqq)
3679                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3680         }
3681
3682         cfqq->allocated[rw]++;
3683         cfqq->ref++;
3684
3685         spin_unlock_irqrestore(q->queue_lock, flags);
3686
3687         rq->elevator_private = cic;
3688         rq->elevator_private2 = cfqq;
3689         rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
3690         return 0;
3691
3692 queue_fail:
3693         if (cic)
3694                 put_io_context(cic->ioc);
3695
3696         cfq_schedule_dispatch(cfqd);
3697         spin_unlock_irqrestore(q->queue_lock, flags);
3698         cfq_log(cfqd, "set_request fail");
3699         return 1;
3700 }
3701
3702 static void cfq_kick_queue(struct work_struct *work)
3703 {
3704         struct cfq_data *cfqd =
3705                 container_of(work, struct cfq_data, unplug_work);
3706         struct request_queue *q = cfqd->queue;
3707
3708         spin_lock_irq(q->queue_lock);
3709         __blk_run_queue(cfqd->queue);
3710         spin_unlock_irq(q->queue_lock);
3711 }
3712
3713 /*
3714  * Timer running if the active_queue is currently idling inside its time slice
3715  */
3716 static void cfq_idle_slice_timer(unsigned long data)
3717 {
3718         struct cfq_data *cfqd = (struct cfq_data *) data;
3719         struct cfq_queue *cfqq;
3720         unsigned long flags;
3721         int timed_out = 1;
3722
3723         cfq_log(cfqd, "idle timer fired");
3724
3725         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3726
3727         cfqq = cfqd->active_queue;
3728         if (cfqq) {
3729                 timed_out = 0;
3730
3731                 /*
3732                  * We saw a request before the queue expired, let it through
3733                  */
3734                 if (cfq_cfqq_must_dispatch(cfqq))
3735                         goto out_kick;
3736
3737                 /*
3738                  * expired
3739                  */
3740                 if (cfq_slice_used(cfqq))
3741                         goto expire;
3742
3743                 /*
3744                  * only expire and reinvoke request handler, if there are
3745                  * other queues with pending requests
3746                  */
3747                 if (!cfqd->busy_queues)
3748                         goto out_cont;
3749
3750                 /*
3751                  * not expired and it has a request pending, let it dispatch
3752                  */
3753                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3754                         goto out_kick;
3755
3756                 /*
3757                  * Queue depth flag is reset only when idling didn't succeed
3758                  */
3759                 cfq_clear_cfqq_deep(cfqq);
3760         }
3761 expire:
3762         cfq_slice_expired(cfqd, timed_out);
3763 out_kick:
3764         cfq_schedule_dispatch(cfqd);
3765 out_cont:
3766         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3767 }
3768
3769 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3770 {
3771         del_timer_sync(&cfqd->idle_slice_timer);
3772         cancel_work_sync(&cfqd->unplug_work);
3773 }
3774
3775 static void cfq_put_async_queues(struct cfq_data *cfqd)
3776 {
3777         int i;
3778
3779         for (i = 0; i < IOPRIO_BE_NR; i++) {
3780                 if (cfqd->async_cfqq[0][i])
3781                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3782                 if (cfqd->async_cfqq[1][i])
3783                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3784         }
3785
3786         if (cfqd->async_idle_cfqq)
3787                 cfq_put_queue(cfqd->async_idle_cfqq);
3788 }
3789
3790 static void cfq_cfqd_free(struct rcu_head *head)
3791 {
3792         kfree(container_of(head, struct cfq_data, rcu));
3793 }
3794
3795 static void cfq_exit_queue(struct elevator_queue *e)
3796 {
3797         struct cfq_data *cfqd = e->elevator_data;
3798         struct request_queue *q = cfqd->queue;
3799
3800         cfq_shutdown_timer_wq(cfqd);
3801
3802         spin_lock_irq(q->queue_lock);
3803
3804         if (cfqd->active_queue)
3805                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3806
3807         while (!list_empty(&cfqd->cic_list)) {
3808                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3809                                                         struct cfq_io_context,
3810                                                         queue_list);
3811
3812                 __cfq_exit_single_io_context(cfqd, cic);
3813         }
3814
3815         cfq_put_async_queues(cfqd);
3816         cfq_release_cfq_groups(cfqd);
3817         cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3818
3819         spin_unlock_irq(q->queue_lock);
3820
3821         cfq_shutdown_timer_wq(cfqd);
3822
3823         spin_lock(&cic_index_lock);
3824         ida_remove(&cic_index_ida, cfqd->cic_index);
3825         spin_unlock(&cic_index_lock);
3826
3827         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3828         call_rcu(&cfqd->rcu, cfq_cfqd_free);
3829 }
3830
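/*
 * Allocate a per-cfqd index from the cic ida: ida_pre_get() preallocates
 * memory outside the lock, and ida_get_new() may still return -EAGAIN if
 * more preallocated memory is needed, in which case the loop retries.
 */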
3831 static int cfq_alloc_cic_index(void)
3832 {
3833         int index, error;
3834
3835         do {
3836                 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3837                         return -ENOMEM;
3838
3839                 spin_lock(&cic_index_lock);
3840                 error = ida_get_new(&cic_index_ida, &index);
3841                 spin_unlock(&cic_index_lock);
3842                 if (error && error != -EAGAIN)
3843                         return error;
3844         } while (error);
3845
3846         return index;
3847 }
3848
3849 static void *cfq_init_queue(struct request_queue *q)
3850 {
3851         struct cfq_data *cfqd;
3852         int i, j;
3853         struct cfq_group *cfqg;
3854         struct cfq_rb_root *st;
3855
3856         i = cfq_alloc_cic_index();
3857         if (i < 0)
3858                 return NULL;
3859
3860         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3861         if (!cfqd)
3862                 return NULL;
3863
3864         /*
3865          * No need to take queue_lock in this routine, since we are
3866          * initializing the ioscheduler and nobody else is using cfqd.
3867          */
3868         cfqd->cic_index = i;
3869
3870         /* Init root service tree */
3871         cfqd->grp_service_tree = CFQ_RB_ROOT;
3872
3873         /* Init root group */
3874         cfqg = &cfqd->root_group;
3875         for_each_cfqg_st(cfqg, i, j, st)
3876                 *st = CFQ_RB_ROOT;
3877         RB_CLEAR_NODE(&cfqg->rb_node);
3878
3879         /* Give preference to root group over other groups */
3880         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3881
3882 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3883         /*
3884          * Take a reference to root group which we never drop. This is just
3885          * to make sure that cfq_put_cfqg() does not try to kfree root group
3886          */
3887         atomic_set(&cfqg->ref, 1);
3888         rcu_read_lock();
3889         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3890                                         (void *)cfqd, 0);
3891         rcu_read_unlock();
3892 #endif
3893         /*
3894          * Not strictly needed (since RB_ROOT just clears the node and we
3895          * zeroed cfqd on alloc), but better be safe in case someone decides
3896          * to add magic to the rb code
3897          */
3898         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3899                 cfqd->prio_trees[i] = RB_ROOT;
3900
3901         /*
3902          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3903          * Grab a permanent reference to it, so that the normal code flow
3904          * will not attempt to free it.
3905          */
3906         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3907         cfqd->oom_cfqq.ref++;
3908         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3909
3910         INIT_LIST_HEAD(&cfqd->cic_list);
3911
3912         cfqd->queue = q;
3913
3914         init_timer(&cfqd->idle_slice_timer);
3915         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3916         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3917
3918         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3919
3920         cfqd->cfq_quantum = cfq_quantum;
3921         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3922         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3923         cfqd->cfq_back_max = cfq_back_max;
3924         cfqd->cfq_back_penalty = cfq_back_penalty;
3925         cfqd->cfq_slice[0] = cfq_slice_async;
3926         cfqd->cfq_slice[1] = cfq_slice_sync;
3927         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3928         cfqd->cfq_slice_idle = cfq_slice_idle;
3929         cfqd->cfq_group_idle = cfq_group_idle;
3930         cfqd->cfq_latency = 1;
3931         cfqd->cfq_group_isolation = 0;
3932         cfqd->hw_tag = -1;
3933         /*
3934          * we optimistically start assuming sync ops weren't delayed in the
3935          * last second, in order to have larger depth for async operations.
3936          */
3937         cfqd->last_delayed_sync = jiffies - HZ;
3938         return cfqd;
3939 }
3940
3941 static void cfq_slab_kill(void)
3942 {
3943         /*
3944          * Caller already ensured that pending RCU callbacks are completed,
3945          * so we should have no busy allocations at this point.
3946          */
3947         if (cfq_pool)
3948                 kmem_cache_destroy(cfq_pool);
3949         if (cfq_ioc_pool)
3950                 kmem_cache_destroy(cfq_ioc_pool);
3951 }
3952
3953 static int __init cfq_slab_setup(void)
3954 {
3955         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3956         if (!cfq_pool)
3957                 goto fail;
3958
3959         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3960         if (!cfq_ioc_pool)
3961                 goto fail;
3962
3963         return 0;
3964 fail:
3965         cfq_slab_kill();
3966         return -ENOMEM;
3967 }
3968
3969 /*
3970  * sysfs parts below -->
3971  */
3972 static ssize_t
3973 cfq_var_show(unsigned int var, char *page)
3974 {
3975         return sprintf(page, "%d\n", var);
3976 }
3977
3978 static ssize_t
3979 cfq_var_store(unsigned int *var, const char *page, size_t count)
3980 {
3981         char *p = (char *) page;
3982
3983         *var = simple_strtoul(p, &p, 10);
3984         return count;
3985 }
3986
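/*
 * SHOW_FUNCTION() stamps out one sysfs read handler per tunable: it reads
 * the given cfqd field and, when __CONV is set, converts the stored
 * jiffies value to milliseconds before printing it.
 */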
3987 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3988 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3989 {                                                                       \
3990         struct cfq_data *cfqd = e->elevator_data;                       \
3991         unsigned int __data = __VAR;                                    \
3992         if (__CONV)                                                     \
3993                 __data = jiffies_to_msecs(__data);                      \
3994         return cfq_var_show(__data, (page));                            \
3995 }
3996 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3997 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3998 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3999 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4000 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4001 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4002 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4003 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4004 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4005 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4006 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4007 SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
4008 #undef SHOW_FUNCTION
4009
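/*
 * STORE_FUNCTION() is the write-side counterpart: it parses the user
 * supplied value, clamps it to [MIN, MAX] and stores it, converting from
 * milliseconds back to jiffies when __CONV is set.
 */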
4010 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4011 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4012 {                                                                       \
4013         struct cfq_data *cfqd = e->elevator_data;                       \
4014         unsigned int __data;                                            \
4015         int ret = cfq_var_store(&__data, (page), count);                \
4016         if (__data < (MIN))                                             \
4017                 __data = (MIN);                                         \
4018         else if (__data > (MAX))                                        \
4019                 __data = (MAX);                                         \
4020         if (__CONV)                                                     \
4021                 *(__PTR) = msecs_to_jiffies(__data);                    \
4022         else                                                            \
4023                 *(__PTR) = __data;                                      \
4024         return ret;                                                     \
4025 }
4026 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4027 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4028                 UINT_MAX, 1);
4029 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4030                 UINT_MAX, 1);
4031 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4032 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4033                 UINT_MAX, 0);
4034 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4035 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4036 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4037 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4038 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4039                 UINT_MAX, 0);
4040 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4041 STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
4042 #undef STORE_FUNCTION
4043
4044 #define CFQ_ATTR(name) \
4045         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4046
4047 static struct elv_fs_entry cfq_attrs[] = {
4048         CFQ_ATTR(quantum),
4049         CFQ_ATTR(fifo_expire_sync),
4050         CFQ_ATTR(fifo_expire_async),
4051         CFQ_ATTR(back_seek_max),
4052         CFQ_ATTR(back_seek_penalty),
4053         CFQ_ATTR(slice_sync),
4054         CFQ_ATTR(slice_async),
4055         CFQ_ATTR(slice_async_rq),
4056         CFQ_ATTR(slice_idle),
4057         CFQ_ATTR(group_idle),
4058         CFQ_ATTR(low_latency),
4059         CFQ_ATTR(group_isolation),
4060         __ATTR_NULL
4061 };
4062
4063 static struct elevator_type iosched_cfq = {
4064         .ops = {
4065                 .elevator_merge_fn =            cfq_merge,
4066                 .elevator_merged_fn =           cfq_merged_request,
4067                 .elevator_merge_req_fn =        cfq_merged_requests,
4068                 .elevator_allow_merge_fn =      cfq_allow_merge,
4069                 .elevator_bio_merged_fn =       cfq_bio_merged,
4070                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4071                 .elevator_add_req_fn =          cfq_insert_request,
4072                 .elevator_activate_req_fn =     cfq_activate_request,
4073                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4074                 .elevator_queue_empty_fn =      cfq_queue_empty,
4075                 .elevator_completed_req_fn =    cfq_completed_request,
4076                 .elevator_former_req_fn =       elv_rb_former_request,
4077                 .elevator_latter_req_fn =       elv_rb_latter_request,
4078                 .elevator_set_req_fn =          cfq_set_request,
4079                 .elevator_put_req_fn =          cfq_put_request,
4080                 .elevator_may_queue_fn =        cfq_may_queue,
4081                 .elevator_init_fn =             cfq_init_queue,
4082                 .elevator_exit_fn =             cfq_exit_queue,
4083                 .trim =                         cfq_free_io_context,
4084         },
4085         .elevator_attrs =       cfq_attrs,
4086         .elevator_name =        "cfq",
4087         .elevator_owner =       THIS_MODULE,
4088 };
4089
4090 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4091 static struct blkio_policy_type blkio_policy_cfq = {
4092         .ops = {
4093                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
4094                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4095         },
4096         .plid = BLKIO_POLICY_PROP,
4097 };
4098 #else
4099 static struct blkio_policy_type blkio_policy_cfq;
4100 #endif
4101
4102 static int __init cfq_init(void)
4103 {
4104         /*
4105          * could be 0 on HZ < 1000 setups
4106          */
4107         if (!cfq_slice_async)
4108                 cfq_slice_async = 1;
4109         if (!cfq_slice_idle)
4110                 cfq_slice_idle = 1;
4111
4112 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4113         if (!cfq_group_idle)
4114                 cfq_group_idle = 1;
4115 #else
4116                 cfq_group_idle = 0;
4117 #endif
4118         if (cfq_slab_setup())
4119                 return -ENOMEM;
4120
4121         elv_register(&iosched_cfq);
4122         blkio_policy_register(&blkio_policy_cfq);
4123
4124         return 0;
4125 }
4126
4127 static void __exit cfq_exit(void)
4128 {
4129         DECLARE_COMPLETION_ONSTACK(all_gone);
4130         blkio_policy_unregister(&blkio_policy_cfq);
4131         elv_unregister(&iosched_cfq);
4132         ioc_gone = &all_gone;
4133         /* ioc_gone's update must be visible before reading ioc_count */
4134         smp_wmb();
4135
4136         /*
4137          * this also protects us from entering cfq_slab_kill() with
4138          * pending RCU callbacks
4139          */
4140         if (elv_ioc_count_read(cfq_ioc_count))
4141                 wait_for_completion(&all_gone);
4142         ida_destroy(&cic_index_ida);
4143         cfq_slab_kill();
4144 }
4145
4146 module_init(cfq_init);
4147 module_exit(cfq_exit);
4148
4149 MODULE_AUTHOR("Jens Axboe");
4150 MODULE_LICENSE("GPL");
4151 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");