static void blk_invoke_request_fn(struct request_queue *q)
{
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
EXPORT_SYMBOL(blk_sync_queue);
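/*
 * Hedged illustration, not part of the patch: the new blk_queue_stopped()
 * check above is what lets a driver trust blk_stop_queue(). A driver that
 * stops its queue while the hardware is busy can rely on ->request_fn()
 * not being re-invoked until it restarts the queue. The "mydev_*" names
 * are hypothetical.
 */
static void mydev_hw_busy(struct request_queue *q)
{
	/* called from ->request_fn(), so q->queue_lock is already held */
	blk_stop_queue(q);		/* sets QUEUE_FLAG_STOPPED */
}

static void mydev_hw_idle(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);		/* clears the flag, reruns the queue */
	spin_unlock_irqrestore(q->queue_lock, flags);
}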
/**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * See blk_run_queue(). This variant must be called with the queue lock
+ * held and interrupts disabled.
+ *
*/
void __blk_run_queue(struct request_queue *q)
{
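/*
 * Hedged usage sketch, not part of the patch: __blk_run_queue() leaves
 * locking to the caller, so a typical call site wraps it like this.
 */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}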
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on this queue, if it has pending work to do.
+ * May be used to restart queueing when a request has completed. Also
+ * see blk_start_queueing().
+ *
*/
void blk_run_queue(struct request_queue *q)
{
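/*
 * Hedged sketch, not part of the patch: blk_run_queue() takes the queue
 * lock itself, so a completion or error-recovery path can restart
 * dispatch without any locking of its own. "mydev_requeue_done" is
 * hypothetical.
 */
static void mydev_requeue_done(struct request_queue *q)
{
	blk_run_queue(q);	/* dispatch pending requests, if any */
}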
init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
kobject_init(&q->kobj, &blk_queue_ktype);
1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;
- blk_queue_segment_boundary(q, 0xffffffff);
+ blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
blk_queue_make_request(q, __make_request);
blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
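/*
 * Hedged sketch, not part of the patch: BLK_SEG_BOUNDARY_MASK
 * (0xffffffff) set above is only the default; a driver whose DMA engine
 * cannot cross 64K boundaries would narrow it after queue allocation.
 * "mydev_init_queue" is hypothetical.
 */
static void mydev_init_queue(struct request_queue *q)
{
	blk_queue_segment_boundary(q, 0xffff);	/* keep segments inside 64K */
	blk_queue_max_segment_size(q, 0x10000);
}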
*
* This is basically a helper to remove the need to know whether a queue
* is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see blk_run_queue().
*
* The queue lock must be held with interrupts disabled.
*/
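/*
 * Hedged sketch, not part of the patch: blk_start_queueing() as used
 * from outside ->request_fn() context, honouring the locking contract
 * stated above. "example_unblock" is hypothetical.
 */
static void example_unblock(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);	/* dispatches whether plugged or not */
	spin_unlock_irqrestore(q->queue_lock, flags);
}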
struct dm_path path;
struct work_struct deactivate_path;
+ struct work_struct activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
spinlock_t lock;
const char *hw_handler_name;
- struct work_struct activate_path;
- struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
if (pgpath) {
pgpath->is_active = 1;
INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ INIT_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
dm_put_device(ti, pgpath->path.dev);
- spin_lock_irqsave(&m->lock, flags);
- if (m->pgpath_to_activate == pgpath)
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
- INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
m->current_pgpath = path_to_pgpath(path);
+ if (!m->current_pgpath->path.dev) {
+ m->current_pgpath = NULL;
+ return -ENODEV;
+ }
+
if (m->current_pg != pg)
__switch_pg(m, m->current_pgpath);
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL;
- unsigned init_required = 0, must_queue = 1;
+ struct pgpath *pgpath = NULL, *tmp;
+ unsigned must_queue = 1;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
must_queue = 0;
if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pgpath_to_activate = pgpath;
m->pg_init_count++;
m->pg_init_required = 0;
- m->pg_init_in_progress = 1;
- init_required = 1;
+ list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+ queue_work(kmpath_handlerd, &tmp->activate_path);
+ m->pg_init_in_progress++;
+ }
}
-
out:
spin_unlock_irqrestore(&m->lock, flags);
-
- if (init_required)
- queue_work(kmpath_handlerd, &m->activate_path);
-
if (!must_queue)
dispatch_queued_ios(m);
}
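/*
 * Hedged standalone model, not kernel code: the loop above queues one
 * activate_path work per path in the PG and bumps pg_init_in_progress
 * once per queued work, so pg_init_done() (below) can requeue
 * process_queued_ios exactly once, after the last path reports in.
 * Compile with `cc demo.c` to watch the follow-up fire only on the
 * final completion.
 */
#include <stdio.h>

static unsigned pg_init_in_progress;

static void pg_init_done_model(void)
{
	if (!--pg_init_in_progress)
		printf("last path done: requeue process_queued_ios\n");
}

int main(void)
{
	unsigned npaths = 3, i;

	for (i = 0; i < npaths; i++)	/* one work item per path in the PG */
		pg_init_in_progress++;
	for (i = 0; i < npaths; i++)	/* each activate_path completes once */
		pg_init_done_model();
	return 0;
}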
/* Try to add a failed device */
if (sscanf(path, "%u:%u", &major, &minor) == 2) {
+ dev_t dev;
+
/* Extract the major/minor numbers */
- p->path.pdev = MKDEV(major, minor);
- if (MAJOR(p->path.pdev) != major ||
- MINOR(p->path.pdev) != minor) {
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor) {
/* Nice try, didn't work */
DMWARN("Invalid device path %s", path);
ti->error = "error converting devnum";
}
DMWARN("adding disabled device %d:%d", major, minor);
p->path.dev = NULL;
+ format_dev_t(p->path.pdev, dev);
p->is_active = 0;
} else {
ti->error = "error getting device";
goto bad;
}
+ } else {
+ memcpy(p->path.pdev, p->path.dev->name, 16);
}
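/*
 * Hedged userspace equivalent, not kernel code: format_dev_t() above
 * leaves p->path.pdev holding a "major:minor" string, which the status
 * and warning paths later print directly.
 */
#include <stdio.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(void)
{
	char pdev[16];
	dev_t dev = makedev(8, 16);	/* e.g. /dev/sdb */

	snprintf(pdev, sizeof(pdev), "%u:%u", major(dev), minor(dev));
	printf("%s\n", pdev);		/* prints "8:16" */
	return 0;
}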
if (m->hw_handler_name && p->path.dev) {
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("Failing path %s.", pgpath->path.pdev);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = 0;
if (pgpath == m->current_pgpath)
m->current_pgpath = NULL;
- if (pgpath->path.dev)
- dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
+ pgpath->path.pdev, m->nr_valid_paths);
queue_work(kmultipathd, &m->trigger_event);
queue_work(kmultipathd, &pgpath->deactivate_path);
goto out;
if (!pgpath->path.dev) {
- DMWARN("Cannot reinstate disabled path %d:%d",
- MAJOR(pgpath->path.pdev), MINOR(pgpath->path.pdev));
+ DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
r = -ENODEV;
goto out;
}
queue_work(kmultipathd, &m->process_queued_ios);
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
queue_work(kmultipathd, &m->trigger_event);
pg->bypassed = 0;
}
- m->pg_init_in_progress = 0;
- queue_work(kmultipathd, &m->process_queued_ios);
- spin_unlock_irqrestore(&m->lock, flags);
+ m->pg_init_in_progress--;
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
+ spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
- int ret;
- struct multipath *m =
- container_of(work, struct multipath, activate_path);
- struct dm_path *path;
- unsigned long flags;
+ int ret = SCSI_DH_DEV_OFFLINED;
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path);
- spin_lock_irqsave(&m->lock, flags);
- path = &m->pgpath_to_activate->path;
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
- if (!path)
- return;
- ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
- pg_init_done(path, ret);
+ if (pgpath->path.dev)
+ ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+ pg_init_done(&pgpath->path, ret);
}
/*
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- if (p->path.dev) {
- DMEMIT("%s %s %u ", p->path.dev->name,
- p->is_active ? "A" : "F",
- p->fail_count);
- } else {
- DMEMIT("%d:%d F %u ",
- MAJOR(p->path.pdev),
- MINOR(p->path.pdev),
- p->fail_count);
- }
+ DMEMIT("%s %s %u ", p->path.pdev,
+ p->is_active ? "A" : "F",
+ p->fail_count);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- if (p->path.dev)
- DMEMIT("%s ", p->path.dev->name);
- else
- DMEMIT("%d:%d ",
- MAJOR(p->path.pdev),
- MINOR(p->path.pdev));
+ DMEMIT("%s ", p->path.pdev);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,