static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 100;
+static int cfq_slice_idle = HZ / 70;
#define CFQ_IDLE_GRACE (HZ / 10)
#define CFQ_SLICE_SCALE (5)
*/
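For scale: at HZ=1000 the old default idle window of HZ/100 is 10 jiffies (10ms), while HZ/70 works out to 14 jiffies (roughly 14ms), so cfq now waits a little longer for a sync task to issue its next request before giving up the slice.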
struct hlist_head *crq_hash;
+ unsigned int max_queued;
+
mempool_t *crq_pool;
int rq_in_driver;
{
struct cfq_queue *cfqq = cfqd->active_queue;
- if (cfqq) {
- if (cfq_cfqq_on_rr(cfqq) && cfqd->busy_queues == 1)
- return;
+ if (cfqq)
__cfq_slice_expired(cfqd, cfqq, preempted);
- }
}
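With the busy_queues special case dropped, the resulting function collapses to the following (reconstructed from the hunk's context lines, assuming the usual cfq_slice_expired() wrapper):

	static void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
	{
		struct cfq_queue *cfqq = cfqd->active_queue;

		if (cfqq)
			__cfq_slice_expired(cfqd, cfqq, preempted);
	}

That is, an active queue is now always expired, even when it is the only busy queue on the round-robin list.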
/*
n = rb_first(&ioc->cic_root);
while (n != NULL) {
cic = rb_entry(n, struct cfq_io_context, rb_node);
-
+
changed_ioprio(cic);
n = rb_next(n);
}
int enable_idle = cfq_cfqq_idle_window(cfqq);
if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
- enable_idle = 0;
+ enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
enable_idle = 0;
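(The heuristic reads: never idle when there is no task context to wait for, when idling is administratively off, or when the device does its own command queueing (hw_tag); and once enough think-time samples have accumulated, don't idle for a task whose mean think time already exceeds the idle window, since the wait would most likely expire unused.)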
*/
if (!cfq_crq_is_sync(crq)) {
/*
- * sync process issued async request, if it's waiting
+ * sync process issued an async request; if it's waiting
* then expire it and kick rq handling.
*/
if (cic == cfqd->active_cic &&
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct task_struct *task, int rw)
{
+#if 1
if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
!cfq_cfqq_must_alloc_slice(cfqq)) {
cfq_mark_cfqq_must_alloc_slice(cfqq);
}
return ELV_MQUEUE_MAY;
+#else
+ if (!cfqq || task->flags & PF_MEMALLOC)
+ return ELV_MQUEUE_MAY;
+ if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
+ if (cfq_cfqq_wait_request(cfqq))
+ return ELV_MQUEUE_MUST;
+
+ /*
+ * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
+ * can quickly flood the queue with writes from a single task
+ */
+ if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
+ cfq_mark_cfqq_must_alloc_slice(cfqq);
+ return ELV_MQUEUE_MUST;
+ }
+
+ return ELV_MQUEUE_MAY;
+ }
+ if (cfq_class_idle(cfqq))
+ return ELV_MQUEUE_NO;
+ if (cfqq->allocated[rw] >= cfqd->max_queued) {
+ struct io_context *ioc = get_io_context(GFP_ATOMIC);
+ int ret = ELV_MQUEUE_NO;
+
+ if (ioc && ioc->nr_batch_requests)
+ ret = ELV_MQUEUE_MAY;
+
+ put_io_context(ioc);
+ return ret;
+ }
+
+ return ELV_MQUEUE_MAY;
+#endif
}
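For context on the three return values: the block layer's request allocator consults elv_may_queue() before handing out a request. A rough sketch of the consumer side (not verbatim ll_rw_blk.c; the helper name is made up here):

	/*
	 * Sketch: how the allocator acts on the elevator's verdict.
	 * ELV_MQUEUE_NO refuses outright, ELV_MQUEUE_MUST overrides a
	 * full queue, ELV_MQUEUE_MAY defers to the normal limits.
	 */
	static int may_allocate(request_queue_t *q, struct request_list *rl,
				int rw, struct bio *bio)
	{
		int verdict = elv_may_queue(q, rw, bio);

		if (verdict == ELV_MQUEUE_NO)
			return 0;
		if (rl->count[rw] >= q->nr_requests && verdict != ELV_MQUEUE_MUST)
			return 0;
		return 1;
	}

The "#if 1" branch above therefore makes cfq permissive again (always ELV_MQUEUE_MAY), while the stricter per-slice ELV_MQUEUE_MUST accounting stays compiled out under "#else".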
static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct request_list *rl = &q->rq;
- if (unlikely(cfqd->rq_starved)) {
- struct request_list *rl = &q->rq;
-
+ if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
smp_mb();
if (waitqueue_active(&rl->wait[READ]))
wake_up(&rl->wait[READ]);
+ }
+
+ if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
+ smp_mb();
if (waitqueue_active(&rl->wait[WRITE]))
wake_up(&rl->wait[WRITE]);
}
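Both directions now share one pattern: wake sleepers whenever the queue is back under its per-queue cap, or unconditionally when rq_starved is set. Condensed into a hypothetical helper (a sketch, not part of the patch):

	/*
	 * Sketch: the per-direction wakeup restored above. The smp_mb()
	 * orders our state update against the lockless waitqueue_active()
	 * test, pairing with the sleeping side.
	 */
	static void cfq_wake_waiters(struct request_list *rl, int dir,
				     unsigned int allocated,
				     unsigned int max_queued, int starved)
	{
		if (allocated <= max_queued || starved) {
			smp_mb();
			if (waitqueue_active(&rl->wait[dir]))
				wake_up(&rl->wait[dir]);
		}
	}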
if (!cfqd->cfq_hash)
goto out_cfqhash;
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+ cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
cfqd->queue = q;
+ cfqd->max_queued = q->nr_requests / 4;
+ q->nr_batching = cfq_queued;
+
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
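A worked example for the restored cap, assuming the stock BLKDEV_MAX_RQ default of 128 for q->nr_requests: max_queued = 128 / 4 = 32, so under the compiled-out "#else" logic a single cfqq could have at most 32 requests allocated per data direction before __cfq_may_queue() starts answering ELV_MQUEUE_NO (unless the task still has batching quota left in its io_context), and cfq_check_waiters() applies the same threshold when deciding whether to wake sleepers. Setting q->nr_batching to cfq_queued likewise restores the old cfq-specific batching quota handed out to tasks in ioc_set_batching().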