UBUNTU: Ubuntu-2.6.38-12.51
[linux-flexiantxendom0-natty.git]
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cb4cf7..ea83a4f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -207,7 +207,7 @@ struct cfq_group {
        struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        struct hlist_node cfqd_node;
-       atomic_t ref;
+       int ref;
 #endif
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
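
This atomic_t -> int conversion (and the matching get/put hunks below) works because every cfq_group reference-count manipulation happens with the request queue's spinlock held, so atomic operations were pure overhead. A minimal userspace sketch of the pattern, with a pthread mutex standing in for queue_lock; the names here are illustrative, not kernel API:

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct group {
            pthread_mutex_t *lock;  /* stands in for q->queue_lock */
            int ref;                /* plain int is safe under the lock */
    };

    static void group_get(struct group *g)
    {
            /* caller must hold g->lock, as CFQ holds queue_lock */
            g->ref++;
    }

    static void group_put(struct group *g)
    {
            assert(g->ref > 0);     /* mirrors BUG_ON(cfqg->ref <= 0) */
            if (--g->ref)
                    return;
            free(g);                /* last reference dropped */
    }

    int main(void)
    {
            static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
            struct group *g = calloc(1, sizeof(*g));

            if (!g)
                    return 1;
            g->lock = &lock;
            g->ref = 1;             /* initial ref, as in cfq_find_alloc_cfqg() */

            pthread_mutex_lock(&lock);
            group_get(g);
            group_put(g);
            group_put(g);           /* drops the last reference, frees g */
            pthread_mutex_unlock(&lock);
            return 0;
    }
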
@@ -598,8 +598,8 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
        return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
@@ -625,6 +625,14 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                                    low_slice);
                }
        }
+       return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
        cfqq->allocated_slice = slice;
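
Factoring the latency scaling out of cfq_set_prio_slice() into cfq_scaled_cfqq_slice() lets the expiry path in the __cfq_slice_expired hunk below price a slice for a queue that never got to run. A miniature of the refactor's shape, with made-up names and a placeholder scaling rule:

    #include <stdio.h>

    static unsigned scaled_slice(unsigned base, unsigned weight)
    {
            return base * weight / 100;     /* placeholder scaling rule */
    }

    /* like cfq_set_prio_slice(): start a slice using the shared helper */
    static unsigned start_slice(unsigned now, unsigned base, unsigned weight)
    {
            return now + scaled_slice(base, weight);
    }

    int main(void)
    {
            /* the same helper also prices a slice that never started */
            printf("end=%u resid=%u\n",
                   start_slice(1000, 100, 80), scaled_slice(100, 80));
            return 0;
    }
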
@@ -1014,11 +1022,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
         * elevator which will be dropped by either elevator exit
         * or cgroup deletion path depending on who is exiting first.
         */
-       atomic_set(&cfqg->ref, 1);
+       cfqg->ref = 1;
 
        /*
         * Add group onto cgroup list. It might happen that bdi->dev is
-        * not initiliazed yet. Initialize this new group without major
+        * not initialized yet. Initialize this new group without major
         * and minor info and this info will be filled in once a new thread
         * comes for IO. See code above.
         */
@@ -1059,7 +1067,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-       atomic_inc(&cfqg->ref);
+       cfqg->ref++;
        return cfqg;
 }
 
@@ -1071,7 +1079,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
        cfqq->cfqg = cfqg;
        /* cfqq reference on cfqg */
-       atomic_inc(&cfqq->cfqg->ref);
+       cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1079,8 +1087,9 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
        struct cfq_rb_root *st;
        int i, j;
 
-       BUG_ON(atomic_read(&cfqg->ref) <= 0);
-       if (!atomic_dec_and_test(&cfqg->ref))
+       BUG_ON(cfqg->ref <= 0);
+       cfqg->ref--;
+       if (cfqg->ref)
                return;
        for_each_cfqg_st(cfqg, i, j, st)
                BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1188,7 +1197,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        cfq_group_service_tree_del(cfqd, cfqq->cfqg);
                cfqq->orig_cfqg = cfqq->cfqg;
                cfqq->cfqg = &cfqd->root_group;
-               atomic_inc(&cfqd->root_group.ref);
+               cfqd->root_group.ref++;
                group_changed = 1;
        } else if (!cfqd->cfq_group_isolation
                   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -1660,8 +1669,11 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        /*
         * store what was left of this slice, if the queue idled/timed out
         */
-       if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-               cfqq->slice_resid = cfqq->slice_end - jiffies;
+       if (timed_out) {
+               if (cfq_cfqq_slice_new(cfqq))
+                       cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+               else
+                       cfqq->slice_resid = cfqq->slice_end - jiffies;
                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
        }
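
Previously a queue that timed out while its slice was still marked new (it never actually ran) received no residual credit; it is now credited a full scaled slice, so a preempted-but-never-run queue is compensated on its next round. A worked example with assumed numbers, not values from the source:

    #include <stdio.h>

    int main(void)
    {
            long slice_end = 300, jiffies = 280;    /* assumed values */
            long scaled = 100;      /* assumed cfq_scaled_cfqq_slice() result */
            int slice_new = 1;      /* timed out before ever running */

            long resid = slice_new ? scaled : slice_end - jiffies;
            printf("resid=%ld\n", resid);   /* 100 here; 20 had it run; 0 before */
            return 0;
    }
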
 
@@ -3283,10 +3295,19 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+       struct cfq_queue *old_cfqq = cfqd->active_queue;
+
        cfq_log_cfqq(cfqd, cfqq, "preempt");
        cfq_slice_expired(cfqd, 1);
 
        /*
+        * The workload type has changed: don't save the slice, otherwise
+        * the preemption won't take effect.
+        */
+       if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+               cfqq->cfqg->saved_workload_slice = 0;
+
+       /*
         * Put the new queue at the front of the current list,
         * so we know that it will be selected next.
         */
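
On expiry, CFQ saves the group's remaining workload slice in saved_workload_slice and resumes that workload type when the group is scheduled again; if the preempting queue belongs to a different workload type, resuming the saved slice would keep selecting the old workload and the preemption would never take effect. A rough sketch of that resume decision, with illustrative types and names rather than the kernel's:

    enum wl_type { BE_WORKLOAD, RT_WORKLOAD };

    struct grp {
            long saved_workload_slice;
            enum wl_type saved_workload;
    };

    static enum wl_type choose_workload(struct grp *g, enum wl_type preempting)
    {
            /* a nonzero saved slice resumes the old workload type, so a
             * preempting queue of a different type would never be picked */
            if (g->saved_workload_slice)
                    return g->saved_workload;
            return preempting;      /* cleared slice: re-select the workload */
    }

    int main(void)
    {
            struct grp g = { .saved_workload_slice = 50,
                             .saved_workload = BE_WORKLOAD };

            g.saved_workload_slice = 0;     /* what cfq_preempt_queue now does */
            return choose_workload(&g, RT_WORKLOAD) == RT_WORKLOAD ? 0 : 1;
    }
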
@@ -3334,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue);
+                               __blk_run_queue(cfqd->queue, false);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3349,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue);
+               __blk_run_queue(cfqd->queue, false);
        }
 }
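
These call sites track an API change in this series: __blk_run_queue() gained a second boolean argument which, when true, forces the queue run to be punted to the kblockd workqueue rather than running in the caller's context, so passing false preserves the old direct-run behaviour. A userspace model of what such a flag selects; this is illustrative, not the block layer's implementation:

    #include <stdbool.h>
    #include <stdio.h>

    static void dispatch_requests(void)
    {
            puts("dispatching in caller context");
    }

    static void run_queue(bool force_defer)
    {
            if (force_defer) {
                    /* the real code queues work for kblockd here */
                    puts("deferred to worker thread");
                    return;
            }
            dispatch_requests();
    }

    int main(void)
    {
            run_queue(false);       /* like __blk_run_queue(q, false) above */
            run_queue(true);
            return 0;
    }
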
 
@@ -3411,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        struct cfq_io_context *cic = cfqd->active_cic;
 
+       /* If the queue already has requests, don't wait */
+       if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+               return false;
+
        /* If there are other queues in the group, don't wait */
        if (cfqq->cfqg->nr_cfqq > 1)
                return false;
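
cfq_should_wait_busy() decides whether to keep idling on a group whose last queue is finishing its slice, hoping it becomes backlogged again; if the queue's sort_list already holds requests there is nothing to wait for, and the new test is the cheapest disqualifier so it runs first. A guess at the guard-clause shape, with simplified, assumed predicates:

    #include <stdbool.h>

    static bool should_wait_busy(bool has_requests, int queues_in_group,
                                 bool slice_nearly_done)
    {
            if (has_requests)               /* already has work: don't idle */
                    return false;
            if (queues_in_group > 1)        /* others can use the time */
                    return false;
            return slice_nearly_done;       /* only worth it near slice end */
    }

    int main(void)
    {
            return should_wait_busy(true, 1, true) ? 1 : 0;
    }
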
@@ -3681,12 +3706,12 @@ new_queue:
 
        cfqq->allocated[rw]++;
        cfqq->ref++;
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
        rq->elevator_private = cic;
        rq->elevator_private2 = cfqq;
        rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
        return 0;
 
 queue_fail:
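
Moving the rq->elevator_private assignments above spin_unlock_irqrestore() makes sure the request is fully initialized while q->queue_lock is still held; setting the fields after the unlock would leave a window where a path that finds the request under the lock sees them unset. The general publish-before-unlock pattern, sketched with pthreads and illustrative names:

    #include <pthread.h>

    struct req {
            void *priv;             /* like rq->elevator_private */
    };

    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static struct req *visible;     /* readers look here under qlock */

    static void set_request(struct req *rq, void *ctx)
    {
            pthread_mutex_lock(&qlock);
            rq->priv = ctx;         /* initialize before the lock is dropped */
            visible = rq;
            pthread_mutex_unlock(&qlock);
            /* touching rq->priv out here would race lock-holding readers */
    }

    int main(void)
    {
            static struct req r;
            static int ctx;

            set_request(&r, &ctx);
            return visible == &r ? 0 : 1;
    }
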
@@ -3706,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue);
+       __blk_run_queue(cfqd->queue, false);
        spin_unlock_irq(q->queue_lock);
 }
 
@@ -3884,7 +3909,7 @@ static void *cfq_init_queue(struct request_queue *q)
         * Take a reference to root group which we never drop. This is just
         * to make sure that cfq_put_cfqg() does not try to kfree root group
         */
-       atomic_set(&cfqg->ref, 1);
+       cfqg->ref = 1;
        rcu_read_lock();
        cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
                                        (void *)cfqd, 0);