- Update to 3.4-rc7.
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index cb7f9a0..6195bff 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -226,6 +226,27 @@ static void free_multipath(struct multipath *m)
        kfree(m);
 }
 
+static int set_mapinfo(struct multipath *m, union map_info *info)
+{
+       struct dm_mpath_io *mpio;
+
+       mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+       if (!mpio)
+               return -ENOMEM;
+
+       memset(mpio, 0, sizeof(*mpio));
+       info->ptr = mpio;
+
+       return 0;
+}
+
+static void clear_mapinfo(struct multipath *m, union map_info *info)
+{
+       struct dm_mpath_io *mpio = info->ptr;
+
+       info->ptr = NULL;
+       mempool_free(mpio, m->mpio_pool);
+}
 
 /*-----------------------------------------------
  * Path selection
@@ -278,6 +299,11 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 
        m->current_pgpath = path_to_pgpath(path);
 
+       if (!m->current_pgpath->path.dev) {
+               m->current_pgpath = NULL;
+               return -ENODEV;
+       }
+
        if (m->current_pg != pg)
                __switch_pg(m, m->current_pgpath);
 
@@ -341,13 +367,14 @@ static int __must_push_back(struct multipath *m)
 }
 
 static int map_io(struct multipath *m, struct request *clone,
-                 struct dm_mpath_io *mpio, unsigned was_queued)
+                 union map_info *map_context, unsigned was_queued)
 {
        int r = DM_MAPIO_REMAPPED;
        size_t nr_bytes = blk_rq_bytes(clone);
        unsigned long flags;
        struct pgpath *pgpath;
        struct block_device *bdev;
+       struct dm_mpath_io *mpio = map_context->ptr;
 
        spin_lock_irqsave(&m->lock, flags);
 
@@ -423,7 +450,6 @@ static void dispatch_queued_ios(struct multipath *m)
 {
        int r;
        unsigned long flags;
-       struct dm_mpath_io *mpio;
        union map_info *info;
        struct request *clone, *n;
        LIST_HEAD(cl);
@@ -436,16 +462,15 @@ static void dispatch_queued_ios(struct multipath *m)
                list_del_init(&clone->queuelist);
 
                info = dm_get_rq_mapinfo(clone);
-               mpio = info->ptr;
 
-               r = map_io(m, clone, mpio, 1);
+               r = map_io(m, clone, info, 1);
                if (r < 0) {
-                       mempool_free(mpio, m->mpio_pool);
+                       clear_mapinfo(m, info);
                        dm_kill_unmapped_request(clone, r);
                } else if (r == DM_MAPIO_REMAPPED)
                        dm_dispatch_request(clone);
                else if (r == DM_MAPIO_REQUEUE) {
-                       mempool_free(mpio, m->mpio_pool);
+                       clear_mapinfo(m, info);
                        dm_requeue_unmapped_request(clone);
                }
        }
@@ -545,6 +570,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 {
        int r;
        struct pgpath *p;
+       char *path;
        struct multipath *m = ti->private;
 
        /* we need at least a path arg */
@@ -557,30 +583,57 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        if (!p)
                return ERR_PTR(-ENOMEM);
 
-       r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+       path = dm_shift_arg(as);
+       r = dm_get_device(ti, path, dm_table_get_mode(ti->table),
                          &p->path.dev);
        if (r) {
-               ti->error = "error getting device";
-               goto bad;
+               unsigned major, minor;
+
+               /* Try to add a failed device */
+               if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+                       dev_t dev;
+
+                       /* Extract the major/minor numbers */
+                       dev = MKDEV(major, minor);
+                       if (MAJOR(dev) != major || MINOR(dev) != minor) {
+                               /* Nice try, didn't work */
+                               DMWARN("Invalid device path %s", path);
+                               ti->error = "error converting devnum";
+                               goto bad;
+                       }
+                       DMWARN("adding disabled device %d:%d", major, minor);
+                       p->path.dev = NULL;
+                       format_dev_t(p->path.pdev, dev);
+                       p->is_active = 0;
+               } else {
+                       ti->error = "error getting device";
+                       goto bad;
+               }
+       } else {
+               memcpy(p->path.pdev, p->path.dev->name, 16);
        }
 
-       if (m->hw_handler_name) {
+       if (p->path.dev) {
                struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
 
-               r = scsi_dh_attach(q, m->hw_handler_name);
-               if (r == -EBUSY) {
-                       /*
-                        * Already attached to different hw_handler,
-                        * try to reattach with correct one.
-                        */
-                       scsi_dh_detach(q);
+               if (m->hw_handler_name) {
                        r = scsi_dh_attach(q, m->hw_handler_name);
-               }
-
-               if (r < 0) {
-                       ti->error = "error attaching hardware handler";
-                       dm_put_device(ti, p->path.dev);
-                       goto bad;
+                       if (r == -EBUSY) {
+                               /*
+                                * Already attached to different hw_handler,
+                                * try to reattach with correct one.
+                                */
+                               scsi_dh_detach(q);
+                               r = scsi_dh_attach(q, m->hw_handler_name);
+                       }
+                       if (r < 0) {
+                               ti->error = "error attaching hardware handler";
+                               dm_put_device(ti, p->path.dev);
+                               goto bad;
+                       }
+               } else {
+                       /* Play safe and detach hardware handler */
+                       scsi_dh_detach(q);
                }
 
                if (m->hw_handler_params) {
@@ -601,6 +654,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
                goto bad;
        }
 
+       if (!p->is_active) {
+               ps->type->fail_path(ps, &p->path);
+               p->fail_count++;
+               m->nr_valid_paths--;
+       }
        return p;
 
  bad:
@@ -698,8 +756,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
                return 0;
 
        m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
-       request_module("scsi_dh_%s", m->hw_handler_name);
-       if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+       if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+                                    "scsi_dh_%s", m->hw_handler_name)) {
                ti->error = "unknown hardware handler type";
                ret = -EINVAL;
                goto fail;
@@ -912,20 +970,16 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
                         union map_info *map_context)
 {
        int r;
-       struct dm_mpath_io *mpio;
        struct multipath *m = (struct multipath *) ti->private;
 
-       mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
-       if (!mpio)
+       if (set_mapinfo(m, map_context) < 0)
                /* ENOMEM, requeue */
                return DM_MAPIO_REQUEUE;
-       memset(mpio, 0, sizeof(*mpio));
 
-       map_context->ptr = mpio;
        clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-       r = map_io(m, clone, mpio, 0);
+       r = map_io(m, clone, map_context, 0);
        if (r < 0 || r == DM_MAPIO_REQUEUE)
-               mempool_free(mpio, m->mpio_pool);
+               clear_mapinfo(m, map_context);
 
        return r;
 }
@@ -943,7 +997,7 @@ static int fail_path(struct pgpath *pgpath)
        if (!pgpath->is_active)
                goto out;
 
-       DMWARN("Failing path %s.", pgpath->path.dev->name);
+       DMWARN("Failing path %s.", pgpath->path.pdev);
 
        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->is_active = 0;
@@ -955,7 +1009,7 @@ static int fail_path(struct pgpath *pgpath)
                m->current_pgpath = NULL;
 
        dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-                     pgpath->path.dev->name, m->nr_valid_paths);
+                      pgpath->path.pdev, m->nr_valid_paths);
 
        schedule_work(&m->trigger_event);
 
@@ -979,6 +1033,12 @@ static int reinstate_path(struct pgpath *pgpath)
        if (pgpath->is_active)
                goto out;
 
+       if (!pgpath->path.dev) {
+               DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+               r = -ENODEV;
+               goto out;
+       }
+
        if (!pgpath->pg->ps.type->reinstate_path) {
                DMWARN("Reinstate path not supported by path selector %s",
                       pgpath->pg->ps.type->name);
@@ -1001,7 +1061,7 @@ static int reinstate_path(struct pgpath *pgpath)
        }
 
        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-                     pgpath->path.dev->name, m->nr_valid_paths);
+                      pgpath->path.pdev, m->nr_valid_paths);
 
        schedule_work(&m->trigger_event);
 
@@ -1021,6 +1081,9 @@ static int action_dev(struct multipath *m, struct dm_dev *dev,
        struct pgpath *pgpath;
        struct priority_group *pg;
 
+       if (!dev)
+               return 0;
+
        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(pgpath, &pg->pgpaths, list) {
                        if (pgpath->path.dev == dev)
@@ -1058,8 +1121,9 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
        struct priority_group *pg;
        unsigned pgnum;
        unsigned long flags;
+       char dummy;
 
-       if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+       if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to switch_pg_num");
                return -EINVAL;
@@ -1089,8 +1153,9 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
 {
        struct priority_group *pg;
        unsigned pgnum;
+       char dummy;
 
-       if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+       if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to bypass_pg");
                return -EINVAL;
@@ -1214,8 +1279,9 @@ static void activate_path(struct work_struct *work)
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);
 
-       scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-                               pg_init_done, pgpath);
+       if (pgpath->path.dev)
+               scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+                                pg_init_done, pgpath);
 }
 
 /*
@@ -1271,13 +1337,15 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
        struct path_selector *ps;
        int r;
 
+       BUG_ON(!mpio);
+
        r  = do_end_io(m, clone, error, mpio);
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
-       mempool_free(mpio, m->mpio_pool);
+       clear_mapinfo(m, map_context);
 
        return r;
 }
@@ -1403,7 +1471,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                               pg->ps.type->info_args);
 
                        list_for_each_entry(p, &pg->pgpaths, list) {
-                               DMEMIT("%s %s %u ", p->path.dev->name,
+                               DMEMIT("%s %s %u ", p->path.pdev,
                                       p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
@@ -1429,7 +1497,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                               pg->ps.type->table_args);
 
                        list_for_each_entry(p, &pg->pgpaths, list) {
-                               DMEMIT("%s ", p->path.dev->name);
+                               DMEMIT("%s ", p->path.pdev);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
@@ -1521,7 +1589,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
        if (!m->current_pgpath)
                __choose_pgpath(m, 0);
 
-       if (m->current_pgpath) {
+       if (m->current_pgpath && m->current_pgpath->path.dev) {
                bdev = m->current_pgpath->path.dev->bdev;
                mode = m->current_pgpath->path.dev->mode;
        }
@@ -1533,6 +1601,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 
        spin_unlock_irqrestore(&m->lock, flags);
 
+       /*
+        * Only pass ioctls through if the device sizes match exactly.
+        */
+       if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+               r = scsi_verify_blk_ioctl(NULL, cmd);
+
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }