Update to 3.4-final.
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f3b44c3..3c38536 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
        unsigned int cfq_slice_idle;
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;
+       unsigned int cfq_target_latency;
 
        /*
         * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-       return cfq_target_latency * cfqg->weight / st->total_weight;
+       return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
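Note on the hunk above: cfq_group_slice() now reads the latency target from the per-device cfqd rather than the old compile-time constant, so each group's slice is simply target_latency scaled by that group's share of the total weight. A minimal user-space sketch of the arithmetic, assuming the usual 300 ms default and two groups of weight 500 and 1000 (these numbers are illustrative, not taken from this patch):

#include <stdio.h>

int main(void)
{
	/* assumed defaults: target_latency 300 ms, cgroup weights 500 and 1000 */
	unsigned int target_latency = 300;
	unsigned int weight[] = { 500, 1000 };
	unsigned int total_weight = weight[0] + weight[1];

	for (int i = 0; i < 2; i++)
		printf("group %d slice = %u ms\n", i,
		       target_latency * weight[i] / total_weight);
	/* prints 100 ms and 200 ms: disk time is split in proportion to weight */
	return 0;
}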
@@ -1656,6 +1657,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
        /*
         * reposition in fifo if next is older than rq
         */
@@ -1670,6 +1673,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        cfq_remove_request(next);
        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(next), rq_is_sync(next));
+
+       cfqq = RQ_CFQQ(next);
+       /*
+        * All requests of this queue have been merged into other queues;
+        * delete it from the service tree. If it is the active_queue,
+        * cfq_dispatch_requests() will choose whether to expire it or idle.
+        */
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+           cfqq != cfqd->active_queue)
+               cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1687,18 +1700,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
        /*
         * Lookup the cfqq that this bio will be queued with and allow
-        * merge only if rq is queued there.  This function can be called
-        * from plug merge without queue_lock.  In such cases, ioc of @rq
-        * and %current are guaranteed to be equal.  Avoid lookup which
-        * requires queue_lock by using @rq's cic.
+        * merge only if rq is queued there.
         */
-       if (current->io_context == RQ_CIC(rq)->icq.ioc) {
-               cic = RQ_CIC(rq);
-       } else {
-               cic = cfq_cic_lookup(cfqd, current->io_context);
-               if (!cic)
-                       return false;
-       }
+       cic = cfq_cic_lookup(cfqd, current->io_context);
+       if (!cic)
+               return false;
 
        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        return cfqq == RQ_CFQQ(rq);
@@ -1782,7 +1788,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                cfqd->active_queue = NULL;
 
        if (cfqd->active_cic) {
-               put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+               put_io_context(cfqd->active_cic->icq.ioc);
                cfqd->active_cic = NULL;
        }
 }
@@ -2266,7 +2272,8 @@ new_workload:
                 * to have higher weight. A more accurate thing would be to
                 * calculate system wide async/sync ratio.
                 */
-               tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+               tmp = cfqd->cfq_target_latency *
+                       cfqg_busy_async_queues(cfqd, cfqg);
                tmp = tmp/cfqd->busy_queues;
                slice = min_t(unsigned, slice, tmp);
 
@@ -3105,7 +3112,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       struct cfq_queue *old_cfqq = cfqd->active_queue;
+       enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
 
        cfq_log_cfqq(cfqd, cfqq, "preempt");
        cfq_slice_expired(cfqd, 1);
@@ -3114,7 +3121,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * workload type is changed, don't save slice, otherwise preempt
         * doesn't happen
         */
-       if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+       if (old_type != cfqq_type(cfqq))
                cfqq->cfqg->saved_workload_slice = 0;
 
        /*
@@ -3465,20 +3472,20 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
        const int rw = rq_data_dir(rq);
        const bool is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
+       unsigned int changed;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
        spin_lock_irq(q->queue_lock);
 
        /* handle changed notifications */
-       if (unlikely(cic->icq.changed)) {
-               if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
-                       changed_ioprio(cic);
+       changed = icq_get_changed(&cic->icq);
+       if (unlikely(changed & ICQ_IOPRIO_CHANGED))
+               changed_ioprio(cic);
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-               if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
-                       changed_cgroup(cic);
+       if (unlikely(changed & ICQ_CGROUP_CHANGED))
+               changed_cgroup(cic);
 #endif
-       }
 
 new_queue:
        cfqq = cic_to_cfqq(cic, is_sync);
@@ -3732,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_back_penalty = cfq_back_penalty;
        cfqd->cfq_slice[0] = cfq_slice_async;
        cfqd->cfq_slice[1] = cfq_slice_sync;
+       cfqd->cfq_target_latency = cfq_target_latency;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
        cfqd->cfq_group_idle = cfq_group_idle;
@@ -3783,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -3816,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -3833,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_idle),
        CFQ_ATTR(group_idle),
        CFQ_ATTR(low_latency),
+       CFQ_ATTR(target_latency),
        __ATTR_NULL
 };
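The last three hunks expose the new field as a tunable: SHOW_FUNCTION/STORE_FUNCTION are generated with a conversion flag of 1, so the value is translated between internal jiffies and milliseconds, and CFQ_ATTR(target_latency) publishes it next to the existing CFQ knobs under queue/iosched/. A hedged user-space sketch of reading it back, assuming a device named sda that is using the cfq scheduler (the path and device name are illustrative):

#include <stdio.h>

int main(void)
{
	/* assumed path; the attribute only exists while the device uses cfq */
	const char *path = "/sys/block/sda/queue/iosched/target_latency";
	FILE *f = fopen(path, "r");
	unsigned int ms;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &ms) == 1)
		printf("cfq target_latency = %u ms\n", ms);  /* default: 300 */
	fclose(f);
	return 0;
}

Writes go through cfq_target_latency_store(), which clamps the input to the [1, UINT_MAX] range before converting it back to jiffies, matching the STORE_FUNCTION line added above.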