- patches.fixes/blk-dont-invoke-request_fn-on-stopped-queue:
author     Hannes Reinecke <hare@suse.de>
           Tue, 16 Dec 2008 15:23:20 +0000 (16:23 +0100)
committer  Hannes Reinecke <hare@suse.de>
           Tue, 16 Dec 2008 15:23:20 +0000 (16:23 +0100)
  block: only call ->request_fn when the queue is not stopped
  (bnc#457041).
- patches.fixes/blk-get-extra-reference-before-unmap: block:
  hold extra reference to bio in blk_rq_map_user_iov().
- patches.fixes/blk-move-unplug_work-init: block: move
  q->unplug_work initialization.
- patches.fixes/blk-set-segment-boundary-mask: block: fix setting
  of max_segment_size and seg_boundary mask.
- patches.fixes/dm-mpath-send-activate-to-every-path: Handle
  multiple paths in a path group properly during pg_init.
- patches.fixes/scsi_dh-add-lsi-ids-to-rdac: Add LSI vendor
  and product IDs to the RDAC device handler.
- patches.suse/dm-mpath-accept-failed-paths: Accept failed paths
  for multipath maps (bnc#458037,bnc#458393).
- patches.suse/scsi-check-removed-device-for-offline: Check if
  device is removed in scsi_device_online() (bnc#456747).

suse-commit: b0f0790a5267f439df8d367f69cad10cca23e2dc

block/blk-core.c
block/blk-map.c
block/blk-settings.c
block/elevator.c
drivers/md/dm-mpath.c
drivers/md/dm-mpath.h
drivers/md/dm-table.c
drivers/md/dm.c
drivers/scsi/device_handler/scsi_dh_rdac.c
include/linux/blkdev.h
include/scsi/scsi_device.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 88b71ae..f3bc8a5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -324,6 +324,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
@@ -399,8 +402,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -418,6 +426,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    See @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -501,6 +515,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        init_timer(&q->unplug_timer);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -578,7 +593,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                                   1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -884,7 +899,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
diff --git a/block/blk-map.c b/block/blk-map.c
index ea1bf53..e6620e9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -202,6 +202,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                return PTR_ERR(bio);
 
        if (bio->bi_size != len) {
+               /*
+                * Grab an extra reference to this bio, as bio_unmap_user()
+                * expects to be able to drop it twice as it happens on the
+                * normal IO completion path
+                */
+               bio_get(bio);
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5edd818..a1d0888 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->nr_requests = BLKDEV_MAX_RQ;
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
        q->make_request_fn = mfn;
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -141,8 +144,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;
 
-       INIT_WORK(&q->unplug_work, blk_unplug_work);
-
        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
 
@@ -316,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
diff --git a/block/elevator.c b/block/elevator.c
index 3c6973b..f70e963 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -612,7 +612,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   processing.
                 */
                blk_remove_plug(q);
-               q->request_fn(q);
+               blk_start_queueing(q);
                break;
 
        case ELEVATOR_INSERT_SORT:
@@ -943,7 +943,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-                       q->request_fn(q);
+                       blk_start_queueing(q);
                }
        }
 }
@@ -1102,8 +1102,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        elv_drain_elevator(q);
 
        while (q->rq.elvpriv) {
-               blk_remove_plug(q);
-               q->request_fn(q);
+               blk_start_queueing(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7786afe..0f28805 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -33,6 +33,7 @@ struct pgpath {
 
        struct dm_path path;
        struct work_struct deactivate_path;
+       struct work_struct activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -62,8 +63,6 @@ struct multipath {
        spinlock_t lock;
 
        const char *hw_handler_name;
-       struct work_struct activate_path;
-       struct pgpath *pgpath_to_activate;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
@@ -126,6 +125,7 @@ static struct pgpath *alloc_pgpath(void)
        if (pgpath) {
                pgpath->is_active = 1;
                INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+               INIT_WORK(&pgpath->activate_path, activate_path);
        }
 
        return pgpath;
@@ -166,10 +166,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                dm_put_device(ti, pgpath->path.dev);
-               spin_lock_irqsave(&m->lock, flags);
-               if (m->pgpath_to_activate == pgpath)
-                       m->pgpath_to_activate = NULL;
-               spin_unlock_irqrestore(&m->lock, flags);
                free_pgpath(pgpath);
        }
 }
@@ -200,7 +196,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                m->queue_io = 1;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
-               INIT_WORK(&m->activate_path, activate_path);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
@@ -259,6 +254,11 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 
        m->current_pgpath = path_to_pgpath(path);
 
+       if (!m->current_pgpath->path.dev) {
+               m->current_pgpath = NULL;
+               return -ENODEV;
+       }
+
        if (m->current_pg != pg)
                __switch_pg(m, m->current_pgpath);
 
@@ -436,8 +436,8 @@ static void process_queued_ios(struct work_struct *work)
 {
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
-       struct pgpath *pgpath = NULL;
-       unsigned init_required = 0, must_queue = 1;
+       struct pgpath *pgpath = NULL, *tmp;
+       unsigned must_queue = 1;
        unsigned long flags;
 
        spin_lock_irqsave(&m->lock, flags);
@@ -455,19 +455,15 @@ static void process_queued_ios(struct work_struct *work)
                must_queue = 0;
 
        if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-               m->pgpath_to_activate = pgpath;
                m->pg_init_count++;
                m->pg_init_required = 0;
-               m->pg_init_in_progress = 1;
-               init_required = 1;
+               list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+                       queue_work(kmpath_handlerd, &tmp->activate_path);
+                       m->pg_init_in_progress++;
+               }
        }
-
 out:
        spin_unlock_irqrestore(&m->lock, flags);
-
-       if (init_required)
-               queue_work(kmpath_handlerd, &m->activate_path);
-
        if (!must_queue)
                dispatch_queued_ios(m);
 }
@@ -601,10 +597,11 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 
                /* Try to add a failed device */
                if (sscanf(path, "%u:%u", &major, &minor) == 2) {
+                       dev_t dev;
+
                        /* Extract the major/minor numbers */
-                       p->path.pdev = MKDEV(major, minor);
-                       if (MAJOR(p->path.pdev) != major ||
-                           MINOR(p->path.pdev) != minor) {
+                       dev = MKDEV(major, minor);
+                       if (MAJOR(dev) != major || MINOR(dev) != minor) {
                                /* Nice try, didn't work */
                                DMWARN("Invalid device path %s", path);
                                ti->error = "error converting devnum";
@@ -612,11 +609,14 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
                        }
                        DMWARN("adding disabled device %d:%d", major, minor);
                        p->path.dev = NULL;
+                       format_dev_t(p->path.pdev, dev);
                        p->is_active = 0;
                } else {
                        ti->error = "error getting device";
                        goto bad;
                }
+       } else {
+               memcpy(p->path.pdev, p->path.dev->name, 16);
        }
 
        if (m->hw_handler_name && p->path.dev) {
@@ -910,7 +910,7 @@ static int fail_path(struct pgpath *pgpath)
        if (!pgpath->is_active)
                goto out;
 
-       DMWARN("Failing path %s.", pgpath->path.dev->name);
+       DMWARN("Failing path %s.", pgpath->path.pdev);
 
        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->is_active = 0;
@@ -921,9 +921,8 @@ static int fail_path(struct pgpath *pgpath)
        if (pgpath == m->current_pgpath)
                m->current_pgpath = NULL;
 
-       if (pgpath->path.dev)
-               dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-                              pgpath->path.dev->name, m->nr_valid_paths);
+       dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
+                      pgpath->path.pdev, m->nr_valid_paths);
 
        queue_work(kmultipathd, &m->trigger_event);
        queue_work(kmultipathd, &pgpath->deactivate_path);
@@ -949,8 +948,7 @@ static int reinstate_path(struct pgpath *pgpath)
                goto out;
 
        if (!pgpath->path.dev) {
-               DMWARN("Cannot reinstate disabled path %d:%d",
-                      MAJOR(pgpath->path.pdev), MINOR(pgpath->path.pdev));
+               DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
                r = -ENODEV;
                goto out;
        }
@@ -973,7 +971,7 @@ static int reinstate_path(struct pgpath *pgpath)
                queue_work(kmultipathd, &m->process_queued_ios);
 
        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-                     pgpath->path.dev->name, m->nr_valid_paths);
+                      pgpath->path.pdev, m->nr_valid_paths);
 
        queue_work(kmultipathd, &m->trigger_event);
 
@@ -1157,27 +1155,21 @@ static void pg_init_done(struct dm_path *path, int errors)
                pg->bypassed = 0;
        }
 
-       m->pg_init_in_progress = 0;
-       queue_work(kmultipathd, &m->process_queued_ios);
-       spin_unlock_irqrestore(&m->lock, flags);
+       m->pg_init_in_progress--;
+       if (!m->pg_init_in_progress)
+              queue_work(kmultipathd, &m->process_queued_ios);
+       spin_unlock_irqrestore(&m->lock, flags);
 }
 
 static void activate_path(struct work_struct *work)
 {
-       int ret;
-       struct multipath *m =
-               container_of(work, struct multipath, activate_path);
-       struct dm_path *path;
-       unsigned long flags;
+       int ret = SCSI_DH_DEV_OFFLINED;
+       struct pgpath *pgpath =
+               container_of(work, struct pgpath, activate_path);
 
-       spin_lock_irqsave(&m->lock, flags);
-       path = &m->pgpath_to_activate->path;
-       m->pgpath_to_activate = NULL;
-       spin_unlock_irqrestore(&m->lock, flags);
-       if (!path)
-               return;
-       ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-       pg_init_done(path, ret);
+       if (pgpath->path.dev)
+               ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+       pg_init_done(&pgpath->path, ret);
 }
 
 /*
@@ -1343,16 +1335,9 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                               pg->ps.type->info_args);
 
                        list_for_each_entry(p, &pg->pgpaths, list) {
-                               if (p->path.dev) {
-                                       DMEMIT("%s %s %u ", p->path.dev->name,
-                                              p->is_active ? "A" : "F",
-                                              p->fail_count);
-                               } else {
-                                       DMEMIT("%d:%d F %u ",
-                                              MAJOR(p->path.pdev),
-                                              MINOR(p->path.pdev),
-                                              p->fail_count);
-                               }
+                               DMEMIT("%s %s %u ", p->path.pdev,
+                                      p->is_active ? "A" : "F",
+                                      p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
@@ -1376,12 +1361,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                               pg->ps.type->table_args);
 
                        list_for_each_entry(p, &pg->pgpaths, list) {
-                               if (p->path.dev)
-                                       DMEMIT("%s ", p->path.dev->name);
-                               else
-                                       DMEMIT("%d:%d ",
-                                              MAJOR(p->path.pdev),
-                                              MINOR(p->path.pdev));
+                               DMEMIT("%s ", p->path.pdev);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index 8c0a252..f97388d 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -12,7 +12,7 @@
 struct dm_dev;
 
 struct dm_path {
-       dev_t pdev;             /* Requested physical device */
+       char pdev[16];          /* Requested physical device */
        struct dm_dev *dev;     /* Read-only */
        void *pscontext;        /* For path-selector use */
 };
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ee61b82..413765b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -690,7 +690,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
        if (!rs->max_segment_size)
                rs->max_segment_size = MAX_SEGMENT_SIZE;
        if (!rs->seg_boundary_mask)
-               rs->seg_boundary_mask = -1;
+               rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (!rs->bounce_pfn)
                rs->bounce_pfn = -1;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6783f91..134995e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1503,6 +1503,9 @@ static void map_request(struct dm_target *ti, struct request *rq,
        tio->ti = ti;
        atomic_inc(&md->pending);
 
+#if 0
+       /* This might trigger accidentally */
+
        /*
         * Although submitted requests to the md->queue are checked against
         * the table/queue limitations at the submission time, the limitations
@@ -1525,6 +1528,7 @@ static void map_request(struct dm_target *ti, struct request *rq,
                dm_kill_request(clone, r);
                return;
        }
+#endif
 
        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 67e5e52..f25a03e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -602,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
        {"SUN", "LCSM100_F", 0},
        {"DELL", "MD3000", 0},
        {"DELL", "MD3000i", 0},
+       {"LSI", "INF-01-00"},
+       {"ENGENIO", "INF-01-00"},
        {NULL, NULL, 0},
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a6e0f7d..0ace19d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -908,6 +908,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_SEGMENT_SIZE       65536
 
+#define BLK_SEG_BOUNDARY_MASK  0xFFFFFFFFUL
+
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
 static inline int queue_hardsect_size(struct request_queue *q)
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index e96daf6..63b58d2 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -401,7 +401,9 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
  */
 static inline int scsi_device_online(struct scsi_device *sdev)
 {
-       return sdev->sdev_state != SDEV_OFFLINE;
+       return (sdev->sdev_state != SDEV_OFFLINE &&
+               sdev->sdev_state != SDEV_CANCEL &&
+               sdev->sdev_state != SDEV_DEL);
 }
 static inline int scsi_device_blocked(struct scsi_device *sdev)
 {