/*
 * $Id: mtd_blkdevs.c,v 1.12 2003/05/21 01:00:59 dwmw2 Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <linux/devfs_fs_kernel.h>
static LIST_HEAD(blktrans_majors);

extern struct semaphore mtd_table_mutex;
extern struct mtd_info *mtd_table[];
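
/*
 * Per-registration core state, one instance per translation layer
 * (i.e. per major number): the shared request queue and its lock,
 * plus the wait queue and exit handshake for the worker thread.
 */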
struct mtd_blkcore_priv {
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
	struct request_queue rq;
	spinlock_t queue_lock;
};
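
/*
 * Handle one request synchronously. The return value feeds
 * end_request(): 1 means the sectors were transferred, 0 fails the
 * request. Transfers are always done in 512-byte sectors, whatever
 * the underlying MTD device's geometry is.
 */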
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector;
	nsect = req->current_nr_sectors;
	buf = req->buffer;

	/* Note the parentheses: "!req->flags & REQ_CMD" would negate
	   the flags before masking them. */
	if (!(req->flags & REQ_CMD))
		return 0;

	if (block + nsect > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;
	case WRITE:
		if (!tr->writesect)
			return 0;
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;
	default:
		printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
		return 0;
	}
}
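
/*
 * The worker thread. The block layer calls our request function with
 * the queue lock held and in a context that may not sleep, but MTD
 * reads and writes can sleep; so mtd_blktrans_request() below only
 * wakes this thread, and the actual I/O happens here in process
 * context.
 */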
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = &tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(rq->queue_lock);

		req = elv_next_request(rq);

		if (!req) {
			/* Nothing queued: sleep until the request
			   function wakes us. */
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
		spin_unlock_irq(rq->queue_lock);
	}
	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
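
/*
 * Called by the block layer when requests are added to our queue;
 * just kick the worker thread.
 */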
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up(&tr->blkcore_priv->thread_wq);
}
int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev, i, f))) {
		/* The layer rejected the open: back out both references */
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}
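
/*
 * Drop the references taken in blktrans_open(): the usecount and the
 * MTD driver's module ref, then the translation layer's module ref,
 * provided the layer's own release hook (if any) succeeds.
 */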
int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev, i, f);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}
static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENOTTY;

	dev = inode->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->ioctl)
		ret = tr->ioctl(dev, inode, file, cmd, arg);

	if (ret == -ENOTTY && (cmd == BLKROSET || cmd == BLKFLSBUF)) {
		/* The core code did the work, we had nothing to do. */
		ret = 0;
	}
	return ret;
}
struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
};
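
/*
 * Add one device. If new->devnum is -1 the first free number is
 * used; otherwise the requested number is taken if still free. The
 * list in tr->devs is kept sorted by devnum, so a single walk finds
 * both gaps and collisions. Caller must hold mtd_table_mutex.
 */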
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (!down_trylock(&mtd_table_mutex)) {
		/* If we could take the mutex here, the caller wasn't
		   holding it as required. */
		up(&mtd_table_mutex);
		BUG();
	}
	/* Initialise before insertion, so every path below (including
	   the mid-list ones) sees an initialised semaphore. */
	init_MUTEX(&new->sem);

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);

		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Minor numbers are 8 bits wide; devnum << part_bits must fit */
	if ((new->devnum << tr->part_bits) > 256)
		return -EBUSY;

	list_add_tail(&new->list, &tr->devs);
 added:
	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}

	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	snprintf(gd->disk_name, sizeof(gd->disk_name),
		 "%s%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
	snprintf(gd->devfs_name, sizeof(gd->devfs_name),
		 "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);

	/* set_capacity() counts in 512-byte sectors */
	set_capacity(gd, new->size);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = &tr->blkcore_priv->rq;

	add_disk(gd);

	return 0;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (!down_trylock(&mtd_table_mutex)) {
		up(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}
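
/*
 * MTD notifier callbacks: fan each add/remove event out to every
 * registered translation layer so it can create or destroy its block
 * devices for the MTD device in question.
 */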
void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}
void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;
	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
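
/*
 * Register a translation layer: allocate the core state, grab the
 * major, set up the shared request queue and worker thread, then
 * offer every MTD device already in mtd_table to the new layer via
 * tr->add_mtd().
 */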
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);
	tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
	down(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);
	init_completion(&tr->blkcore_priv->thread_dead);
	init_waitqueue_head(&tr->blkcore_priv->thread_wq);

	blk_init_queue(&tr->blkcore_priv->rq, mtd_blktrans_request,
		       &tr->blkcore_priv->queue_lock);
	tr->blkcore_priv->rq.queuedata = tr;
	ret = kernel_thread(mtd_blktrans_thread, tr,
			    CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	if (ret < 0) {
		blk_cleanup_queue(&tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}
	devfs_mk_dir(tr->name);

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);
	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	up(&mtd_table_mutex);

	return 0;
}
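
/*
 * Teardown mirrors registration in reverse: stop the worker thread
 * first so no more requests are serviced, then remove the devices,
 * and only then tear down the queue and release the major.
 */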
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	down(&mtd_table_mutex);

	/* Clean up the kernel thread */
	tr->blkcore_priv->exiting = 1;
	wake_up(&tr->blkcore_priv->thread_wq);
	wait_for_completion(&tr->blkcore_priv->thread_dead);
	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);

		tr->remove_dev(dev);
	}

	devfs_remove(tr->name);
	blk_cleanup_queue(&tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	up(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	if (!list_empty(&tr->devs))
		BUG();

	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}
module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
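
/*
 * Usage sketch (illustrative only, not part of this driver): a
 * minimal translation layer fills in a struct mtd_blktrans_ops and
 * registers it. All of the "simple_*" names and the major number
 * below are hypothetical.
 *
 *	static void simple_add_mtd(struct mtd_blktrans_ops *tr,
 *				   struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev;
 *
 *		dev = kmalloc(sizeof(*dev), GFP_KERNEL);
 *		if (!dev)
 *			return;
 *		memset(dev, 0, sizeof(*dev));
 *		dev->mtd = mtd;
 *		dev->tr = tr;
 *		dev->devnum = -1;		// take first free number
 *		dev->size = mtd->size >> 9;	// in 512-byte sectors
 *		add_mtd_blktrans_dev(dev);
 *	}
 *
 *	// simple_remove_dev() would call del_mtd_blktrans_dev(dev)
 *	// and then free dev.
 *
 *	static struct mtd_blktrans_ops simple_tr = {
 *		.name		= "simple",
 *		.major		= 240,	// hypothetical, local/experimental range
 *		.part_bits	= 0,
 *		.readsect	= simple_readsect,
 *		.writesect	= simple_writesect,
 *		.add_mtd	= simple_add_mtd,
 *		.remove_dev	= simple_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// in the layer's module_init():
 *	register_mtd_blktrans(&simple_tr);
 */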