kfree(m);
}
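+/*
+ * Allocate a dm_mpath_io from the multipath mempool and attach it to
+ * the request's map_info; clear_mapinfo() detaches and frees it again.
+ */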
+static int set_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio;
+
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ return -ENOMEM;
+
+ memset(mpio, 0, sizeof(*mpio));
+ info->ptr = mpio;
+
+ return 0;
+}
+
+static void clear_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio = info->ptr;
+
+ info->ptr = NULL;
+ mempool_free(mpio, m->mpio_pool);
+}
/*-----------------------------------------------
* Path selection
m->current_pgpath = path_to_pgpath(path);
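+ /* Paths added in a failed state have no underlying device attached */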
+ if (!m->current_pgpath->path.dev) {
+ m->current_pgpath = NULL;
+ return -ENODEV;
+ }
+
if (m->current_pg != pg)
__switch_pg(m, m->current_pgpath);
}
static int map_io(struct multipath *m, struct request *clone,
- struct dm_mpath_io *mpio, unsigned was_queued)
+ union map_info *map_context, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
struct block_device *bdev;
+ struct dm_mpath_io *mpio = map_context->ptr;
spin_lock_irqsave(&m->lock, flags);
{
int r;
unsigned long flags;
- struct dm_mpath_io *mpio;
union map_info *info;
struct request *clone, *n;
LIST_HEAD(cl);
list_del_init(&clone->queuelist);
info = dm_get_rq_mapinfo(clone);
- mpio = info->ptr;
- r = map_io(m, clone, mpio, 1);
+ r = map_io(m, clone, info, 1);
if (r < 0) {
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, info);
dm_kill_unmapped_request(clone, r);
} else if (r == DM_MAPIO_REMAPPED)
dm_dispatch_request(clone);
else if (r == DM_MAPIO_REQUEUE) {
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, info);
dm_requeue_unmapped_request(clone);
}
}
{
int r;
struct pgpath *p;
+ char *path;
struct multipath *m = ti->private;
/* we need at least a path arg */
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ path = dm_shift_arg(as);
+ r = dm_get_device(ti, path, dm_table_get_mode(ti->table),
&p->path.dev);
if (r) {
- ti->error = "error getting device";
- goto bad;
+ unsigned major, minor;
+
+ /* Try to add a failed device */
+ if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+ dev_t dev;
+
+ /* Verify that the major/minor numbers fit into a dev_t */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor) {
+ /* Major/minor numbers out of range, reject the path */
+ DMWARN("Invalid device path %s", path);
+ ti->error = "error converting devnum";
+ goto bad;
+ }
+ DMWARN("adding disabled device %d:%d", major, minor);
+ p->path.dev = NULL;
+ format_dev_t(p->path.pdev, dev);
+ p->is_active = 0;
+ } else {
+ ti->error = "error getting device";
+ goto bad;
+ }
+ } else {
+ memcpy(p->path.pdev, p->path.dev->name, 16);
}
- if (m->hw_handler_name) {
+ if (p->path.dev) {
struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
- r = scsi_dh_attach(q, m->hw_handler_name);
- if (r == -EBUSY) {
- /*
- * Already attached to different hw_handler,
- * try to reattach with correct one.
- */
- scsi_dh_detach(q);
+ if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
- }
-
- if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ if (r == -EBUSY) {
+ /*
+ * Already attached to different hw_handler,
+ * try to reattach with correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+ if (r < 0) {
+ ti->error = "error attaching hardware handler";
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ } else {
+ /* No hardware handler wanted: play it safe and detach any attached one */
+ scsi_dh_detach(q);
}
if (m->hw_handler_params) {
goto bad;
}
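+ /* Paths added in a failed state must be reported to the path selector */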
+ if (!p->is_active) {
+ ps->type->fail_path(ps, &p->path);
+ p->fail_count++;
+ m->nr_valid_paths--;
+ }
return p;
bad:
return 0;
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
- request_module("scsi_dh_%s", m->hw_handler_name);
- if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+ if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+ "scsi_dh_%s", m->hw_handler_name)) {
ti->error = "unknown hardware handler type";
ret = -EINVAL;
goto fail;
union map_info *map_context)
{
int r;
- struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
- if (!mpio)
+ if (set_mapinfo(m, map_context) < 0)
/* ENOMEM, requeue */
return DM_MAPIO_REQUEUE;
- memset(mpio, 0, sizeof(*mpio));
- map_context->ptr = mpio;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- r = map_io(m, clone, mpio, 0);
+ r = map_io(m, clone, map_context, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("Failing path %s.", pgpath->path.pdev);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = 0;
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
schedule_work(&m->trigger_event);
if (pgpath->is_active)
goto out;
+ if (!pgpath->path.dev) {
+ DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+ r = -ENODEV;
+ goto out;
+ }
+
if (!pgpath->pg->ps.type->reinstate_path) {
DMWARN("Reinstate path not supported by path selector %s",
pgpath->pg->ps.type->name);
}
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
schedule_work(&m->trigger_event);
struct pgpath *pgpath;
struct priority_group *pg;
+ if (!dev)
+ return 0;
+
list_for_each_entry(pg, &m->priority_groups, list) {
list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->path.dev == dev)
struct priority_group *pg;
unsigned pgnum;
unsigned long flags;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
{
struct priority_group *pg;
unsigned pgnum;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
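+ /* Skip hardware handler activation for paths without an attached device */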
+ if (pgpath->path.dev)
+ scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+ pg_init_done, pgpath);
}
/*
struct path_selector *ps;
int r;
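+ /* set_mapinfo() attached the mpio in multipath_map(), so it must be set here */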
+ BUG_ON(!mpio);
+
r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s %s %u ", p->path.dev->name,
+ DMEMIT("%s %s %u ", p->path.pdev,
p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s ", p->path.dev->name);
+ DMEMIT("%s ", p->path.pdev);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
if (!m->current_pgpath)
__choose_pgpath(m, 0);
- if (m->current_pgpath) {
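+ /* Only use the current path for the ioctl if it has a device attached */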
+ if (m->current_pgpath && m->current_pgpath->path.dev) {
bdev = m->current_pgpath->path.dev->bdev;
mode = m->current_pgpath->path.dev->mode;
}
spin_unlock_irqrestore(&m->lock, flags);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}