- patches.suse/dm-mpath-requeue-for-stopped-queue: Handle I/O
  on stopped queues correctly (bnc#458393).

author    Hannes Reinecke <hare@suse.de>
          Fri, 23 Jan 2009 14:59:52 +0000 (15:59 +0100)
committer Hannes Reinecke <hare@suse.de>
          Fri, 23 Jan 2009 14:59:52 +0000 (15:59 +0100)

suse-commit: e6fd06af79fe4db63cea6b5aeace2b366808881f
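
The fix hinges on two checks visible in the diff below: process_queued_ios()
must not dispatch queued I/O while the underlying request queue of the
current path is stopped, and dm_make_request() must not fail bios with -EIO
when the mapped device has no table but its queue is merely stopped during
reconfiguration.  A minimal sketch of the first check, using a hypothetical
helper name and assuming the dm-mpath structures in this tree (illustration
only, not part of the patch):

    /*
     * Hypothetical helper, for illustration only: report whether the
     * block queue backing this path is currently stopped, so queued I/O
     * can be left on the multipath queue instead of being dispatched.
     */
    static bool pgpath_queue_stopped(struct pgpath *pgpath)
    {
            struct block_device *bdev = pgpath->path.dev->bdev;

            return blk_queue_stopped(bdev_get_queue(bdev));
    }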

drivers/md/dm-mpath.c
drivers/md/dm.c

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3926859..8e241ab 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -159,9 +159,7 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
-       unsigned long flags;
        struct pgpath *pgpath, *tmp;
-       struct multipath *m = ti->private;
 
        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
@@ -436,8 +434,8 @@ static void process_queued_ios(struct work_struct *work)
 {
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
-       struct pgpath *pgpath = NULL, *tmp;
-       unsigned must_queue = 1;
+       struct pgpath *pgpath = NULL, *tmp;
+       unsigned must_queue = 1;
        unsigned long flags;
 
        spin_lock_irqsave(&m->lock, flags);
@@ -450,6 +448,12 @@ static void process_queued_ios(struct work_struct *work)
 
        pgpath = m->current_pgpath;
 
+       if (pgpath) {
+               struct block_device *bdev = pgpath->path.dev->bdev;
+               if (unlikely(blk_queue_stopped(bdev_get_queue(bdev))))
+                       goto out;
+       }
+
        if ((pgpath && !m->queue_io) ||
            (!pgpath && !m->queue_if_no_path))
                must_queue = 0;
@@ -619,22 +623,27 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
                memcpy(p->path.pdev, p->path.dev->name, 16);
        }
 
-       if (m->hw_handler_name && p->path.dev) {
+       if (p->path.dev) {
                struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
 
-               r = scsi_dh_attach(q, m->hw_handler_name);
-               if (r == -EBUSY) {
-                       /*
-                        * Already attached to different hw_handler,
-                        * try to reattach with correct one.
-                        */
-                       scsi_dh_detach(q);
+               if (m->hw_handler_name) {
                        r = scsi_dh_attach(q, m->hw_handler_name);
-               }
-               if (r < 0) {
-                       ti->error = "error attaching hardware handler";
-                       dm_put_device(ti, p->path.dev);
-                       goto bad;
+                       if (r == -EBUSY) {
+                               /*
+                                * Already attached to different hw_handler,
+                                * try to reattach with correct one.
+                                */
+                               scsi_dh_detach(q);
+                               r = scsi_dh_attach(q, m->hw_handler_name);
+                       }
+                       if (r < 0) {
+                               ti->error = "error attaching hardware handler";
+                               dm_put_device(ti, p->path.dev);
+                               goto bad;
+                       }
+               } else {
+                       /* Play safe and detach hardware handler */
+                       scsi_dh_detach(q);
                }
        }
 
@@ -644,6 +653,11 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
                goto bad;
        }
 
+       if (!p->is_active) {
+               ps->type->fail_path(ps, &p->path);
+               p->fail_count++;
+               m->nr_valid_paths--;
+       }
        return p;
 
  bad:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c57693e..8cb452f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1304,7 +1304,11 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
                return 0;
        }
 
-       if (unlikely(!md->map)) {
+       /*
+        * Submitting to a stopped queue with no map is okay;
+        * might happen during reconfiguration.
+        */
+       if (unlikely(!md->map) && !blk_queue_stopped(q)) {
                bio_endio(bio, -EIO);
                return 0;
        }
@@ -1503,9 +1507,6 @@ static void map_request(struct dm_target *ti, struct request *rq,
        tio->ti = ti;
        atomic_inc(&md->pending);
 
-#if 0
-       /* This might trigger accidentally */
-
        /*
         * Although submitted requests to the md->queue are checked against
         * the table/queue limitations at the submission time, the limitations
@@ -1525,10 +1526,12 @@ static void map_request(struct dm_target *ti, struct request *rq,
        if (unlikely(r)) {
                DMWARN("violating the queue limitation. the limitation may be"
                       " shrunk while there are some requests in the queue.");
+#if 0
+               /* This might trigger accidentally */
                dm_kill_request(clone, r);
                return;
-       }
 #endif
+       }
 
        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {