* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
-#include "dm-bio-list.h"
-#include "dm-bio-record.h"
#include "dm-uevent.h"
#include <linux/ctype.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath"
-#define MESG_STR(x) x, sizeof(x)
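+/*
+ * Delay between pg_init retries: 2000ms by default. The "default"
+ * sentinel ((unsigned) -1) means no pg_init_delay_msecs was given
+ * on the table line.
+ */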
+#define DM_PG_INIT_DELAY_MSECS 2000
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
/* Path properties */
struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
+ unsigned is_active; /* Path status */
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
+ struct delayed_work activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
struct list_head pgpaths;
};
+#define FEATURE_NO_PARTITIONS 1
+
/* Multipath context */
struct multipath {
struct list_head list;
spinlock_t lock;
const char *hw_handler_name;
- struct work_struct activate_path;
+ char *hw_handler_params;
+
unsigned nr_priority_groups;
struct list_head priority_groups;
+
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+
unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
+ unsigned pg_init_delay_retry; /* Delay pg_init retry? */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
unsigned saved_queue_if_no_path;/* Saved state during suspension */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
+ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned features; /* Additional selected features */
struct work_struct process_queued_ios;
- struct bio_list queued_bios;
- struct list_head queued_reqs;
+ struct list_head queued_ios;
unsigned queue_size;
struct work_struct trigger_event;
* can resubmit bios on error.
*/
mempool_t *mpio_pool;
+
+ struct mutex work_mutex;
};
/*
*/
struct dm_mpath_io {
struct pgpath *pgpath;
- struct dm_bio_details details;
+ size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath)
- pgpath->path.is_active = 1;
+ if (pgpath) {
+ pgpath->is_active = 1;
+ INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+ }
return pgpath;
}
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
- struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
- if (m->hw_handler_name)
- scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
- INIT_LIST_HEAD(&m->queued_reqs);
+ INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
- INIT_WORK(&m->activate_path, activate_path);
+ init_waitqueue_head(&m->pg_init_wait);
+ mutex_init(&m->work_mutex);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
}
kfree(m->hw_handler_name);
+ kfree(m->hw_handler_params);
mempool_destroy(m->mpio_pool);
kfree(m);
}
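+/*
+ * set_mapinfo()/clear_mapinfo() manage the per-request struct
+ * dm_mpath_io, carried in map_info->ptr and backed by the target's
+ * mempool.
+ */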
+static int set_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio;
+
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ return -ENOMEM;
+
+ memset(mpio, 0, sizeof(*mpio));
+ info->ptr = mpio;
+
+ return 0;
+}
+
+static void clear_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio = info->ptr;
+
+ info->ptr = NULL;
+ mempool_free(mpio, m->mpio_pool);
+}
/*-----------------------------------------------
* Path selection
*-----------------------------------------------*/
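+/*
+ * Kick off pg_init on every active path in the current PG.
+ * Per this file's __-prefix convention, the caller holds m->lock.
+ */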
+static void __pg_init_all_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+ unsigned long pg_init_delay = 0;
+
+ m->pg_init_count++;
+ m->pg_init_required = 0;
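+ /* On a delayed retry, use the table-supplied delay, else the 2s default */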
+ if (m->pg_init_delay_retry)
+ pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
+ m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
+ list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+ /* Skip failed paths */
+ if (!pgpath->is_active)
+ continue;
+ if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
+ pg_init_delay))
+ m->pg_init_in_progress++;
+ }
+}
+
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
m->current_pg = pgpath->pg;
m->pg_init_count = 0;
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
+static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
+ size_t nr_bytes)
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
+ path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
if (!path)
return -ENXIO;
m->current_pgpath = path_to_pgpath(path);
+ if (!m->current_pgpath->path.dev) {
+ m->current_pgpath = NULL;
+ return -ENODEV;
+ }
+
if (m->current_pg != pg)
__switch_pg(m, m->current_pgpath);
return 0;
}
-static void __choose_pgpath(struct multipath *m)
+static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
unsigned bypassed = 1;
if (m->next_pg) {
pg = m->next_pg;
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
+ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
return;
/*
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
} while (bypassed--);
dm_noflush_suspending(m->ti));
}
-static int map_bio(struct multipath *m, struct bio *bio,
- struct dm_mpath_io *mpio, unsigned was_queued)
-{
- int r = DM_MAPIO_REMAPPED;
- unsigned long flags;
- struct pgpath *pgpath;
-
- spin_lock_irqsave(&m->lock, flags);
-
- /* Do we need to select a new pgpath? */
- if (!m->current_pgpath ||
- (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
- __choose_pgpath(m);
-
- pgpath = m->current_pgpath;
-
- if (was_queued)
- m->queue_size--;
-
- if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path)) {
- /* Queue for the daemon to resubmit */
- bio_list_add(&m->queued_bios, bio);
- m->queue_size++;
- if ((m->pg_init_required && !m->pg_init_in_progress) ||
- !m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
- pgpath = NULL;
- r = DM_MAPIO_SUBMITTED;
- } else if (pgpath)
- bio->bi_bdev = pgpath->path.dev->bdev;
- else if (__must_push_back(m))
- r = DM_MAPIO_REQUEUE;
- else
- r = -EIO; /* Failed */
-
- mpio->pgpath = pgpath;
-
- spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
-}
-
-static int map_req(struct multipath *m, struct request *clone,
- struct dm_mpath_io *mpio, unsigned was_queued)
+static int map_io(struct multipath *m, struct request *clone,
+ union map_info *map_context, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
+ size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
struct block_device *bdev;
+ struct dm_mpath_io *mpio = map_context->ptr;
spin_lock_irqsave(&m->lock, flags);
/* Do we need to select a new pgpath? */
if (!m->current_pgpath ||
(!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
- __choose_pgpath(m);
+ __choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
if ((pgpath && m->queue_io) ||
(!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
- list_add_tail(&clone->queuelist, &m->queued_reqs);
+ list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
if ((m->pg_init_required && !m->pg_init_in_progress) ||
!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
- clone->q = NULL;
- clone->rq_disk = NULL;
r = DM_MAPIO_SUBMITTED;
} else if (pgpath) {
bdev = pgpath->path.dev->bdev;
clone->q = bdev_get_queue(bdev);
clone->rq_disk = bdev->bd_disk;
- } else if (__must_push_back(m)) {
- clone->q = NULL;
- clone->rq_disk = NULL;
+ } else if (__must_push_back(m))
r = DM_MAPIO_REQUEUE;
- } else {
- clone->q = NULL;
- clone->rq_disk = NULL;
+ else
r = -EIO; /* Failed */
- }
mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
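+ /*
+ * Notify the path selector that I/O is about to be dispatched, so
+ * selectors that track outstanding load (e.g. service-time) can
+ * account for nr_bytes.
+ */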
+ if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
+ nr_bytes);
spin_unlock_irqrestore(&m->lock, flags);
* The multipath daemon is responsible for resubmitting queued ios.
*---------------------------------------------------------------*/
-static void dispatch_queued_bios(struct multipath *m)
-{
- int r;
- unsigned long flags;
- struct bio *bio = NULL, *next;
- struct dm_mpath_io *mpio;
- union map_info *info;
-
- spin_lock_irqsave(&m->lock, flags);
- bio = bio_list_get(&m->queued_bios);
- spin_unlock_irqrestore(&m->lock, flags);
-
- while (bio) {
- next = bio->bi_next;
- bio->bi_next = NULL;
-
- info = dm_get_mapinfo(bio);
- mpio = info->ptr;
-
- r = map_bio(m, bio, mpio, 1);
- if (r < 0)
- bio_endio(bio, r);
- else if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
- else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, -EIO);
-
- bio = next;
- }
-}
-
-static void dispatch_queued_reqs(struct multipath *m)
+static void dispatch_queued_ios(struct multipath *m)
{
int r;
unsigned long flags;
- struct dm_mpath_io *mpio;
union map_info *info;
struct request *clone, *n;
LIST_HEAD(cl);
spin_lock_irqsave(&m->lock, flags);
- list_splice_init(&m->queued_reqs, &cl);
+ list_splice_init(&m->queued_ios, &cl);
spin_unlock_irqrestore(&m->lock, flags);
list_for_each_entry_safe(clone, n, &cl, queuelist) {
list_del_init(&clone->queuelist);
info = dm_get_rq_mapinfo(clone);
- mpio = info->ptr;
-
- r = map_req(m, clone, mpio, 1);
- if (r < 0 || r == DM_MAPIO_REQUEUE) {
- mempool_free(mpio, m->mpio_pool);
- if (r == DM_MAPIO_REQUEUE)
- r = DM_ENDIO_REQUEUE;
- dm_end_request(clone, r);
+
+ r = map_io(m, clone, info, 1);
+ if (r < 0) {
+ clear_mapinfo(m, info);
+ dm_kill_unmapped_request(clone, r);
} else if (r == DM_MAPIO_REMAPPED)
dm_dispatch_request(clone);
+ else if (r == DM_MAPIO_REQUEUE) {
+ clear_mapinfo(m, info);
+ dm_requeue_unmapped_request(clone);
+ }
}
}
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
struct pgpath *pgpath = NULL;
- unsigned init_required = 0, must_queue = 1;
+ unsigned must_queue = 1;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
goto out;
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
pgpath = m->current_pgpath;
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress) {
- m->pg_init_count++;
- m->pg_init_required = 0;
- m->pg_init_in_progress = 1;
- init_required = 1;
- }
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ __pg_init_all_paths(m);
out:
spin_unlock_irqrestore(&m->lock, flags);
-
- if (init_required)
- queue_work(kmpath_handlerd, &m->activate_path);
-
- if (!must_queue) {
- if (dm_table_request_based(m->ti->table))
- dispatch_queued_reqs(m);
- else
- dispatch_queued_bios(m);
- }
+ if (!must_queue)
+ dispatch_queued_ios(m);
}
/*
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
-struct param {
- unsigned min;
- unsigned max;
- char *error;
-};
-
-static int read_param(struct param *param, char *str, unsigned *v, char **error)
-{
- if (!str ||
- (sscanf(str, "%u", v) != 1) ||
- (*v < param->min) ||
- (*v > param->max)) {
- *error = param->error;
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct arg_set {
- unsigned argc;
- char **argv;
-};
-
-static char *shift(struct arg_set *as)
-{
- char *r;
-
- if (as->argc) {
- as->argc--;
- r = *as->argv;
- as->argv++;
- return r;
- }
-
- return NULL;
-}
-
-static void consume(struct arg_set *as, unsigned n)
-{
- BUG_ON (as->argc < n);
- as->argc -= n;
- as->argv += n;
-}
-
-static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
+static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
unsigned ps_argc;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
};
- pst = dm_get_path_selector(shift(as));
+ pst = dm_get_path_selector(dm_shift_arg(as));
if (!pst) {
ti->error = "unknown path selector type";
return -EINVAL;
}
- r = read_param(_params, shift(as), &ps_argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
if (r) {
dm_put_path_selector(pst);
return -EINVAL;
}
pg->ps.type = pst;
- consume(as, ps_argc);
+ dm_consume_args(as, ps_argc);
return 0;
}
-static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
struct dm_target *ti)
{
int r;
struct pgpath *p;
+ char *path;
struct multipath *m = ti->private;
/* we need at least a path arg */
if (as->argc < 1) {
ti->error = "no device given";
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = alloc_pgpath();
if (!p)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), ti->begin, ti->len,
- dm_table_get_mode(ti->table), &p->path.dev);
+ path = dm_shift_arg(as);
+ r = dm_get_device(ti, path, dm_table_get_mode(ti->table),
+ &p->path.dev);
if (r) {
- ti->error = "error getting device";
- goto bad;
+ unsigned major, minor;
+
+ /* Try to add a failed device */
+ if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+ dev_t dev;
+
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor) {
+ /* Nice try, didn't work */
+ DMWARN("Invalid device path %s", path);
+ ti->error = "error converting devnum";
+ goto bad;
+ }
+ DMWARN("adding disabled device %d:%d", major, minor);
+ p->path.dev = NULL;
+ format_dev_t(p->path.pdev, dev);
+ p->is_active = 0;
+ } else {
+ ti->error = "error getting device";
+ goto bad;
+ }
+ } else {
+ memcpy(p->path.pdev, p->path.dev->name, 16);
}
- if (m->hw_handler_name) {
- r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
- m->hw_handler_name);
- if (r < 0) {
- dm_put_device(ti, p->path.dev);
- goto bad;
+ if (p->path.dev) {
+ struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+
+ if (m->hw_handler_name) {
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ if (r == -EBUSY) {
+ /*
+ * Already attached to different hw_handler,
+ * try to reattach with correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+ if (r < 0) {
+ ti->error = "error attaching hardware handler";
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ } else {
+ /* Play safe and detach hardware handler */
+ scsi_dh_detach(q);
+ }
+
+ if (m->hw_handler_params) {
+ r = scsi_dh_set_params(q, m->hw_handler_params);
+ if (r < 0) {
+ ti->error = "unable to set hardware "
+ "handler parameters";
+ scsi_dh_detach(q);
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
}
}
goto bad;
}
+ if (!p->is_active) {
+ ps->type->fail_path(ps, &p->path);
+ p->fail_count++;
+ m->nr_valid_paths--;
+ }
return p;
bad:
free_pgpath(p);
- return NULL;
+ return ERR_PTR(r);
}
-static struct priority_group *parse_priority_group(struct arg_set *as,
+static struct priority_group *parse_priority_group(struct dm_arg_set *as,
struct multipath *m)
{
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{1, 1024, "invalid number of paths"},
{0, 1024, "invalid number of selector args"}
};
int r;
- unsigned i, nr_selector_args, nr_params;
+ unsigned i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
if (as->argc < 2) {
as->argc = 0;
- ti->error = "not enough priority group aruments";
- return NULL;
+ ti->error = "not enough priority group arguments";
+ return ERR_PTR(-EINVAL);
}
pg = alloc_priority_group();
if (!pg) {
ti->error = "couldn't allocate priority group";
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pg->m = m;
/*
* read the paths
*/
- r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
+ r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
+ r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
if (r)
goto bad;
- nr_params = 1 + nr_selector_args;
+ nr_args = 1 + nr_selector_args;
for (i = 0; i < pg->nr_pgpaths; i++) {
struct pgpath *pgpath;
- struct arg_set path_args;
+ struct dm_arg_set path_args;
- if (as->argc < nr_params) {
+ if (as->argc < nr_args) {
ti->error = "not enough path parameters";
+ r = -EINVAL;
goto bad;
}
- path_args.argc = nr_params;
+ path_args.argc = nr_args;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
- if (!pgpath)
+ if (IS_ERR(pgpath)) {
+ r = PTR_ERR(pgpath);
goto bad;
+ }
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
- consume(as, nr_params);
+ dm_consume_args(as, nr_args);
}
return pg;
bad:
free_priority_group(pg, ti);
- return NULL;
+ return ERR_PTR(r);
}
-static int parse_hw_handler(struct arg_set *as, struct multipath *m)
+static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
+ int ret;
struct dm_target *ti = m->ti;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
};
- if (read_param(_params, shift(as), &hw_argc, &ti->error))
+ if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
- m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
- request_module("scsi_dh_%s", m->hw_handler_name);
- if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+ m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+ if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+ "scsi_dh_%s", m->hw_handler_name)) {
ti->error = "unknown hardware handler type";
- kfree(m->hw_handler_name);
- m->hw_handler_name = NULL;
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail;
+ }
+
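+ /*
+ * Flatten any extra handler arguments into hw_handler_params as a
+ * decimal count followed by NUL-separated strings; e.g. args "A B"
+ * would be stored as "2\0A\0B". len starts at 4 to cover the count
+ * prefix.
+ */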
+ if (hw_argc > 1) {
+ char *p;
+ int i, j, len = 4;
+
+ for (i = 0; i <= hw_argc - 2; i++)
+ len += strlen(as->argv[i]) + 1;
+ p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
+ if (!p) {
+ ti->error = "memory allocation failed";
+ ret = -ENOMEM;
+ goto fail;
+ }
+ j = sprintf(p, "%d", hw_argc - 1);
+ for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
+ j = sprintf(p, "%s", as->argv[i]);
}
- consume(as, hw_argc - 1);
+ dm_consume_args(as, hw_argc - 1);
return 0;
+fail:
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = NULL;
+ return ret;
}
-static int parse_features(struct arg_set *as, struct multipath *m)
+static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
unsigned argc;
struct dm_target *ti = m->ti;
- const char *param_name;
+ const char *arg_name;
- static struct param _params[] = {
- {0, 4, "invalid number of feature args"},
+ static struct dm_arg _args[] = {
+ {0, 5, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
+ {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
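+ /*
+ * Features arrive as "<count> <arg>...", e.g. (hypothetically)
+ * "3 queue_if_no_path pg_init_retries 5" or
+ * "2 pg_init_delay_msecs 2000".
+ */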
- r = read_param(_params, shift(as), &argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
if (r)
return -EINVAL;
return 0;
do {
- param_name = shift(as);
+ arg_name = dm_shift_arg(as);
argc--;
- if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(arg_name, "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
+ if (!strcasecmp(arg_name, "no_partitions")) {
+ m->features |= FEATURE_NO_PARTITIONS;
+ continue;
+ }
+ if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
- r = read_param(_params + 1, shift(as),
- &m->pg_init_retries, &ti->error);
+ r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
argc--;
continue;
}
- if (!strnicmp(param_name, MESG_STR("rq_based"))) {
- dm_table_set_request_based(ti->table);
+ if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
+ (argc >= 1)) {
+ r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
+ argc--;
continue;
}
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
char **argv)
{
- /* target parameters */
- static struct param _params[] = {
- {1, 1024, "invalid number of priority groups"},
- {1, 1024, "invalid initial priority group number"},
+ /* target arguments */
+ static struct dm_arg _args[] = {
+ {0, 1024, "invalid number of priority groups"},
+ {0, 1024, "invalid initial priority group number"},
};
int r;
struct multipath *m;
- struct arg_set as;
+ struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
if (r)
goto bad;
- r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
+ r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
if (r)
goto bad;
+ if ((!m->nr_priority_groups && next_pg_num) ||
+ (m->nr_priority_groups && !next_pg_num)) {
+ ti->error = "invalid initial priority group";
+ r = -EINVAL;
+ goto bad;
+ }
+
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
pg = parse_priority_group(&as, m);
- if (!pg) {
- r = -EINVAL;
+ if (IS_ERR(pg)) {
+ r = PTR_ERR(pg);
goto bad;
}
goto bad;
}
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
+
return 0;
bad:
return r;
}
-static void multipath_dtr(struct dm_target *ti)
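+/*
+ * Wait for any in-flight pg_init to finish. Open-coded rather than
+ * wait_event() so the pg_init_in_progress check can be made under
+ * m->lock.
+ */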
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
- struct multipath *m = (struct multipath *) ti->private;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+
+ add_wait_queue(&m->pg_init_wait, &wait);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&m->lock, flags);
+ if (!m->pg_init_in_progress) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ io_schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
+{
flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
- free_multipath(m);
+ flush_work_sync(&m->trigger_event);
}
-/*
- * Map cloned requests
- */
-static int multipath_map_bio(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static void multipath_dtr(struct dm_target *ti)
{
- int r;
- struct dm_mpath_io *mpio;
- struct multipath *m = (struct multipath *) ti->private;
-
- mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
- dm_bio_record(&mpio->details, bio);
-
- map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
- r = map_bio(m, bio, mpio, 0);
- if (r < 0 || r == DM_MAPIO_REQUEUE)
- mempool_free(mpio, m->mpio_pool);
+ struct multipath *m = ti->private;
- return r;
+ flush_multipath_work(m);
+ free_multipath(m);
}
-static int multipath_map_req(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
+/*
+ * Map cloned requests
+ */
+static int multipath_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
{
int r;
- struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
- if (!mpio)
+ if (set_mapinfo(m, map_context) < 0)
/* ENOMEM, requeue */
return DM_MAPIO_REQUEUE;
- memset(mpio, 0, sizeof(*mpio));
-
- map_context->ptr = mpio;
- clone->cmd_flags |= REQ_FAILFAST;
- r = map_req(m, clone, mpio, 0);
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ r = map_io(m, clone, map_context, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
spin_lock_irqsave(&m->lock, flags);
- if (!pgpath->path.is_active)
+ if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("Failing path %s.", pgpath->path.pdev);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
- pgpath->path.is_active = 0;
+ pgpath->is_active = 0;
pgpath->fail_count++;
m->nr_valid_paths--;
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
- queue_work(kmultipathd, &m->trigger_event);
+ schedule_work(&m->trigger_event);
out:
spin_unlock_irqrestore(&m->lock, flags);
spin_lock_irqsave(&m->lock, flags);
- if (pgpath->path.is_active)
+ if (pgpath->is_active)
goto out;
+ if (!pgpath->path.dev) {
+ DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+ r = -ENODEV;
+ goto out;
+ }
+
if (!pgpath->pg->ps.type->reinstate_path) {
DMWARN("Reinstate path not supported by path selector %s",
pgpath->pg->ps.type->name);
if (r)
goto out;
- pgpath->path.is_active = 1;
+ pgpath->is_active = 1;
- m->current_pgpath = NULL;
- if (!m->nr_valid_paths++ && m->queue_size)
+ if (!m->nr_valid_paths++ && m->queue_size) {
+ m->current_pgpath = NULL;
queue_work(kmultipathd, &m->process_queued_ios);
+ } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
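+ /* Reinstated path is in the active PG: start pg_init on it right away */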
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
+ m->pg_init_in_progress++;
+ }
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
- queue_work(kmultipathd, &m->trigger_event);
+ schedule_work(&m->trigger_event);
out:
spin_unlock_irqrestore(&m->lock, flags);
static int action_dev(struct multipath *m, struct dm_dev *dev,
action_fn action)
{
- int r = 0;
+ int r = -EINVAL;
struct pgpath *pgpath;
struct priority_group *pg;
+ if (!dev)
+ return 0;
+
list_for_each_entry(pg, &m->priority_groups, list) {
list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->path.dev == dev)
spin_unlock_irqrestore(&m->lock, flags);
- queue_work(kmultipathd, &m->trigger_event);
+ schedule_work(&m->trigger_event);
}
/*
struct priority_group *pg;
unsigned pgnum;
unsigned long flags;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
}
spin_unlock_irqrestore(&m->lock, flags);
- queue_work(kmultipathd, &m->trigger_event);
+ schedule_work(&m->trigger_event);
return 0;
}
{
struct priority_group *pg;
unsigned pgnum;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
return limit_reached;
}
-static void pg_init_done(struct dm_path *path, int errors)
+static void pg_init_done(void *data, int errors)
{
- struct pgpath *pgpath = path_to_pgpath(path);
+ struct pgpath *pgpath = data;
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
+ unsigned delay_retry = 0;
/* device or driver problems */
switch (errors) {
errors = 0;
break;
}
- DMERR("Cannot failover device because scsi_dh_%s was not "
- "loaded.", m->hw_handler_name);
+ DMERR("Count not failover device %s: Handler scsi_dh_%s "
+ "was not loaded.", pgpath->path.dev->name,
+ m->hw_handler_name);
/*
* Fail path for now, so we do not ping pong
*/
*/
bypass_pg(m, pg, 1);
break;
- /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
+ case SCSI_DH_DEV_OFFLINED:
+ DMWARN("Device %s offlined.", pgpath->path.dev->name);
+ errors = 0;
+ break;
case SCSI_DH_RETRY:
+ /* Wait before retrying. */
+ delay_retry = 1;
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
spin_lock_irqsave(&m->lock, flags);
if (errors) {
- DMERR("Could not failover device. Error %d.", errors);
- m->current_pgpath = NULL;
- m->current_pg = NULL;
- } else if (!m->pg_init_required) {
- m->queue_io = 0;
+ if (pgpath == m->current_pgpath) {
+ DMERR("Could not failover device %s, error %d.",
+ pgpath->path.dev->name, errors);
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
+ } else if (!m->pg_init_required)
pg->bypassed = 0;
- }
- m->pg_init_in_progress = 0;
+ if (--m->pg_init_in_progress)
+ /* Activations of other paths are still ongoing */
+ goto out;
+
+ if (!m->pg_init_required)
+ m->queue_io = 0;
+
+ m->pg_init_delay_retry = delay_retry;
queue_work(kmultipathd, &m->process_queued_ios);
+
+ /*
+ * Wake up any thread waiting to suspend.
+ */
+ wake_up(&m->pg_init_wait);
+
+out:
spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
- int ret;
- struct multipath *m =
- container_of(work, struct multipath, activate_path);
- struct dm_path *path = &m->current_pgpath->path;
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path.work);
- ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
- pg_init_done(path, ret);
+ if (pgpath->path.dev)
+ scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+ pg_init_done, pgpath);
}
/*
* end_io handling
*/
-static int do_end_io(struct multipath *m, struct bio *bio,
+static int do_end_io(struct multipath *m, struct request *clone,
int error, struct dm_mpath_io *mpio)
{
+ /*
+ * We don't queue any clone request inside the multipath target
+ * during end I/O handling, since those clone requests don't have
+ * bio clones. If we queue them inside the multipath target,
+ * we need to make bio clones, which requires memory allocation.
+ * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * don't have bio clones.)
+ * Instead of queueing the clone request here, we queue the original
+ * request into dm core, which will remake a clone request and
+ * clone bios for it and resubmit it later.
+ */
+ int r = DM_ENDIO_REQUEUE;
unsigned long flags;
- if (!error)
+ if (!error && !clone->errors)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
- return error;
-
- if (error == -EOPNOTSUPP)
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
return error;
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (__must_push_back(m)) {
- spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_REQUEUE;
- } else if (!m->queue_if_no_path) {
- spin_unlock_irqrestore(&m->lock, flags);
- return -EIO;
- } else {
- spin_unlock_irqrestore(&m->lock, flags);
- goto requeue;
- }
- }
- spin_unlock_irqrestore(&m->lock, flags);
-
if (mpio->pgpath)
fail_path(mpio->pgpath);
-requeue:
- dm_bio_restore(&mpio->details, bio);
-
- /* queue for the daemon to resubmit or fail */
- spin_lock_irqsave(&m->lock, flags);
- bio_list_add(&m->queued_bios, bio);
- m->queue_size++;
- if (!m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
- spin_unlock_irqrestore(&m->lock, flags);
-
- return DM_ENDIO_INCOMPLETE; /* io not complete */
-}
-
-static int do_end_req(struct multipath *m, struct request *clone,
- int error, struct dm_mpath_io *mpio)
-{
- unsigned long flags;
- int r;
-
- if (!error && !clone->errors)
- return 0; /* I/O complete */
-
- if (error == -EOPNOTSUPP)
- return error;
-
spin_lock_irqsave(&m->lock, flags);
if (!m->nr_valid_paths) {
- if (__must_push_back(m) || m->queue_if_no_path)
- r = DM_ENDIO_REQUEUE;
- else
- r = -EIO;
-
- spin_unlock_irqrestore(&m->lock, flags);
- return r;
+ if (!m->queue_if_no_path) {
+ if (!__must_push_back(m))
+ r = -EIO;
+ } else {
+ if (error == -EBADE)
+ r = error;
+ }
}
spin_unlock_irqrestore(&m->lock, flags);
- if (mpio->pgpath)
- fail_path(mpio->pgpath);
-
- return DM_ENDIO_REQUEUE;
-}
-
-static int multipath_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
-{
- struct multipath *m = ti->private;
- struct dm_mpath_io *mpio = map_context->ptr;
- struct pgpath *pgpath = mpio->pgpath;
- struct path_selector *ps;
- int r;
-
- r = do_end_io(m, bio, error, mpio);
- if (pgpath) {
- ps = &pgpath->pg->ps;
- if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path);
- }
- if (r != DM_ENDIO_INCOMPLETE)
- mempool_free(mpio, m->mpio_pool);
-
return r;
}
-static int multipath_end_req(struct dm_target *ti, struct request *clone,
+static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
struct path_selector *ps;
int r;
- r = do_end_req(m, clone, error, mpio);
+ BUG_ON(!mpio);
+
+ r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
queue_if_no_path(m, 0, 1);
}
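+/*
+ * By the time postsuspend runs, I/O is quiesced; work_mutex also
+ * serializes this flush against multipath_message().
+ */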
+static void multipath_postsuspend(struct dm_target *ti)
+{
+ struct multipath *m = ti->private;
+
+ mutex_lock(&m->work_mutex);
+ flush_multipath_work(m);
+ mutex_unlock(&m->work_mutex);
+}
+
/*
* Restore the queue_if_no_path setting.
*/
if (type == STATUSTYPE_INFO)
DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
else {
- int rq_based = dm_table_request_based(ti->table);
-
- DMEMIT("%u ", m->queue_if_no_path + rq_based +
- (m->pg_init_retries > 0) * 2);
+ DMEMIT("%u ", m->queue_if_no_path +
+ (m->pg_init_retries > 0) * 2 +
+ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
+ (m->features & FEATURE_NO_PARTITIONS));
if (m->queue_if_no_path)
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
- if (rq_based)
- DMEMIT("rq_based ");
+ if (m->features & FEATURE_NO_PARTITIONS)
+ DMEMIT("no_partitions ");
+ if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
+ DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
}
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
else if (m->current_pg)
pg_num = m->current_pg->pg_num;
else
- pg_num = 1;
+ pg_num = (m->nr_priority_groups ? 1 : 0);
DMEMIT("%u ", pg_num);
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s %s %u ", p->path.dev->name,
- p->path.is_active ? "A" : "F",
+ DMEMIT("%s %s %u ", p->path.pdev,
+ p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s ", p->path.dev->name);
+ DMEMIT("%s ", p->path.pdev);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
- int r;
+ int r = -EINVAL;
struct dm_dev *dev;
struct multipath *m = (struct multipath *) ti->private;
action_fn action;
+ mutex_lock(&m->work_mutex);
+
+ if (dm_suspended(ti)) {
+ r = -EBUSY;
+ goto out;
+ }
+
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
- return queue_if_no_path(m, 1, 0);
- else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
- return queue_if_no_path(m, 0, 0);
+ if (!strcasecmp(argv[0], "queue_if_no_path")) {
+ r = queue_if_no_path(m, 1, 0);
+ goto out;
+ } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
+ r = queue_if_no_path(m, 0, 0);
+ goto out;
+ }
}
- if (argc != 2)
- goto error;
+ if (argc != 2) {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
- if (!strnicmp(argv[0], MESG_STR("disable_group")))
- return bypass_pg_num(m, argv[1], 1);
- else if (!strnicmp(argv[0], MESG_STR("enable_group")))
- return bypass_pg_num(m, argv[1], 0);
- else if (!strnicmp(argv[0], MESG_STR("switch_group")))
- return switch_pg_num(m, argv[1]);
- else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ if (!strcasecmp(argv[0], "disable_group")) {
+ r = bypass_pg_num(m, argv[1], 1);
+ goto out;
+ } else if (!strcasecmp(argv[0], "enable_group")) {
+ r = bypass_pg_num(m, argv[1], 0);
+ goto out;
+ } else if (!strcasecmp(argv[0], "switch_group")) {
+ r = switch_pg_num(m, argv[1]);
+ goto out;
+ } else if (!strcasecmp(argv[0], "reinstate_path"))
action = reinstate_path;
- else if (!strnicmp(argv[0], MESG_STR("fail_path")))
+ else if (!strcasecmp(argv[0], "fail_path"))
action = fail_path;
- else
- goto error;
+ else {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
- r = dm_get_device(ti, argv[1], ti->begin, ti->len,
- dm_table_get_mode(ti->table), &dev);
+ r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
if (r) {
DMWARN("message: error getting device %s",
argv[1]);
- return -EINVAL;
+ goto out;
}
r = action_dev(m, dev, action);
dm_put_device(ti, dev);
+out:
+ mutex_unlock(&m->work_mutex);
return r;
-
-error:
- DMWARN("Unrecognised multipath message received.");
- return -EINVAL;
}
-static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct multipath *m = (struct multipath *) ti->private;
struct block_device *bdev = NULL;
+ fmode_t mode = 0;
unsigned long flags;
- struct file fake_file = {};
- struct dentry fake_dentry = {};
int r = 0;
- fake_file.f_path.dentry = &fake_dentry;
-
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
- if (m->current_pgpath) {
+ if (m->current_pgpath && m->current_pgpath->path.dev) {
bdev = m->current_pgpath->path.dev->bdev;
- fake_dentry.d_inode = bdev->bd_inode;
- fake_file.f_mode = m->current_pgpath->path.dev->mode;
+ mode = m->current_pgpath->path.dev->mode;
}
if (m->queue_io)
spin_unlock_irqrestore(&m->lock, flags);
- return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file,
- bdev->bd_disk, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
-static int __pgpath_congested(struct pgpath *pgpath)
+static int multipath_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
{
- struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *p;
+ int ret = 0;
- if (dm_underlying_device_congested(q))
- return 1;
+ list_for_each_entry(pg, &m->priority_groups, list) {
+ list_for_each_entry(p, &pg->pgpaths, list) {
+ ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
+ if (ret)
+ goto out;
+ }
+ }
- return 0;
+out:
+ return ret;
}
-static int multipath_congested(struct dm_target *ti)
+static int __pgpath_busy(struct pgpath *pgpath)
{
- int congested = 0;
- struct multipath *m = (struct multipath *) ti->private;
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+ return dm_underlying_device_busy(q);
+}
+
+/*
+ * We return "busy", only when we can map I/Os but underlying devices
+ * are busy (so even if we map I/Os now, the I/Os will wait on
+ * the underlying queue).
+ * In other words, if we want to kill I/Os or queue them inside us
+ * due to map unavailability, we don't return "busy". Otherwise,
+ * dm core won't give us the I/Os and we can't do what we want.
+ */
+static int multipath_busy(struct dm_target *ti)
+{
+ int busy = 0, has_active = 0;
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *pgpath;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- if (m->current_pgpath && m->repeat_count > 1) {
- /* m->current_pgpath is surely used at next mapping time. */
- if (__pgpath_congested(m->current_pgpath))
- congested = 1;
-
+ /* Guess which priority_group will be used at next mapping time */
+ if (unlikely(!m->current_pgpath && m->next_pg))
+ pg = m->next_pg;
+ else if (likely(m->current_pg))
+ pg = m->current_pg;
+ else
+ /*
+ * We don't know which pg will be used at the next mapping time.
+ * We don't call __choose_pgpath() here to avoid triggering
+ * pg_init just by a busy check.
+ * So we don't know whether the underlying devices we will use
+ * at the next mapping time are busy or not. Just try mapping.
+ */
goto out;
- }
/*
- * We are here means that path selection will be executed
- * at next mapping time.
- * We run the path selection here and check congestion status
- * of the next path.
- * And increment repeat_count to avoid path selection again
- * in map_io().
+ * If there is at least one non-busy active path, the path selector
+ * will be able to select it, so we consider such a pg as not busy.
*/
- __choose_pgpath(m);
- if (m->current_pgpath) {
- if (__pgpath_congested(m->current_pgpath))
- congested = 1;
+ busy = 1;
+ list_for_each_entry(pgpath, &pg->pgpaths, list)
+ if (pgpath->is_active) {
+ has_active = 1;
+
+ if (!__pgpath_busy(pgpath)) {
+ busy = 0;
+ break;
+ }
+ }
- m->repeat_count++;
- }
+ if (!has_active)
+ /*
+ * No active path in this pg, so this pg won't be used and
+ * the current_pg will be changed at next mapping time.
+ * We need to try mapping to determine it.
+ */
+ busy = 0;
out:
spin_unlock_irqrestore(&m->lock, flags);
- return congested;
+ return busy;
}
/*-----------------------------------------------------------------
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 0, 5},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map = multipath_map_bio,
- .end_io = multipath_end_io,
- .map_rq = multipath_map_req,
- .rq_end_io = multipath_end_req,
+ .map_rq = multipath_map,
+ .rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
+ .postsuspend = multipath_postsuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
.ioctl = multipath_ioctl,
- .congested = multipath_congested,
+ .iterate_devices = multipath_iterate_devices,
+ .busy = multipath_busy,
};
static int __init dm_multipath_init(void)
return -EINVAL;
}
- kmultipathd = create_workqueue("kmpathd");
+ kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
dm_unregister_target(&multipath_target);
* old workqueue would also create a bottleneck in the
* path of the storage hardware device activation.
*/
- kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+ kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
+ WQ_MEM_RECLAIM);
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
destroy_workqueue(kmultipathd);
static void __exit dm_multipath_exit(void)
{
- int r;
-
destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
- r = dm_unregister_target(&multipath_target);
- if (r < 0)
- DMERR("target unregister failed %d", r);
+ dm_unregister_target(&multipath_target);
kmem_cache_destroy(_mpio_cache);
}