block: simplify ioc_release_fn()
author	Tejun Heo <tj@kernel.org>
	Wed, 15 Feb 2012 08:45:52 +0000 (09:45 +0100)
committer	Jens Axboe <axboe@kernel.dk>
	Wed, 15 Feb 2012 08:45:52 +0000 (09:45 +0100)
The reverse double-lock dancing in ioc_release_fn() can be simplified
by using trylock on the queue_lock and backing out of the ioc lock on
trylock failure.  Simplify it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
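
For readers unfamiliar with the pattern, below is a minimal userspace
sketch of the trylock-and-back-out loop the new code uses.  It is
illustrative only: the names outer, inner[] and release_all() are
hypothetical, pthread mutexes replace spinlocks, sched_yield() stands in
for cpu_relax(), and the kernel's irq disabling and lockdep subclassing
are omitted.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_ITEMS 4

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* role of ioc->lock */
static pthread_mutex_t inner[NR_ITEMS];                   /* role of each queue_lock */

static void release_all(void)
{
	int i = 0;

	pthread_mutex_lock(&outer);
	while (i < NR_ITEMS) {
		if (pthread_mutex_trylock(&inner[i]) == 0) {
			/* both locks held: safe to tear down item i */
			printf("released item %d\n", i);
			pthread_mutex_unlock(&inner[i]);
			i++;
			continue;
		}
		/*
		 * Contended: back out of the outer lock so whoever
		 * holds inner[i] (and may want outer next) can make
		 * progress, then re-acquire and retry the same item.
		 */
		pthread_mutex_unlock(&outer);
		sched_yield();	/* userspace stand-in for cpu_relax() */
		pthread_mutex_lock(&outer);
	}
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	for (int i = 0; i < NR_ITEMS; i++)
		pthread_mutex_init(&inner[i], NULL);
	release_all();
	return 0;
}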

block/blk-ioc.c

index 811879c..f53c80e 100644
@@ -79,7 +79,6 @@ static void ioc_release_fn(struct work_struct *work)
 {
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
-       struct request_queue *last_q = NULL;
        unsigned long flags;
 
        /*
@@ -93,44 +92,19 @@ static void ioc_release_fn(struct work_struct *work)
        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
-               struct request_queue *this_q = icq->q;
-
-               if (this_q != last_q) {
-                       /*
-                        * Need to switch to @this_q.  Once we release
-                        * @ioc->lock, it can go away along with @cic.
-                        * Hold on to it.
-                        */
-                       __blk_get_queue(this_q);
-
-                       /*
-                        * blk_put_queue() might sleep thanks to kobject
-                        * idiocy.  Always release both locks, put and
-                        * restart.
-                        */
-                       if (last_q) {
-                               spin_unlock(last_q->queue_lock);
-                               spin_unlock_irqrestore(&ioc->lock, flags);
-                               blk_put_queue(last_q);
-                       } else {
-                               spin_unlock_irqrestore(&ioc->lock, flags);
-                       }
-
-                       last_q = this_q;
-                       spin_lock_irqsave(this_q->queue_lock, flags);
-                       spin_lock_nested(&ioc->lock, 1);
-                       continue;
+               struct request_queue *q = icq->q;
+
+               if (spin_trylock(q->queue_lock)) {
+                       ioc_exit_icq(icq);
+                       spin_unlock(q->queue_lock);
+               } else {
+                       spin_unlock_irqrestore(&ioc->lock, flags);
+                       cpu_relax();
+                       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
                }
-               ioc_exit_icq(icq);
        }
 
-       if (last_q) {
-               spin_unlock(last_q->queue_lock);
-               spin_unlock_irqrestore(&ioc->lock, flags);
-               blk_put_queue(last_q);
-       } else {
-               spin_unlock_irqrestore(&ioc->lock, flags);
-       }
+       spin_unlock_irqrestore(&ioc->lock, flags);
 
        kmem_cache_free(iocontext_cachep, ioc);
 }
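
Why trylock is safe here: as the _nested annotations suggest, the usual
nesting order in this code is queue_lock first, then ioc->lock, while
ioc_release_fn() has to start from ioc->lock.  A blocking
spin_lock(q->queue_lock) taken in that reversed order could deadlock
against a path that holds queue_lock and is waiting for ioc->lock.  With
trylock the function never blocks while holding ioc->lock out of order;
on contention it drops ioc->lock entirely so the other side can finish,
relaxes the cpu, and retries.  The __blk_get_queue()/blk_put_queue()
dance also disappears: the queue pointer is now only dereferenced under
ioc->lock while the icq still exists, so no queue reference needs to be
carried across a lock drop.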