/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 *  Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 *  permitted under the GNU General Public License.
 *
 *  DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 *  more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 *  Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 *  Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 *  Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 *  Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 *  Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 *  Loadable modules and other fixes by AK, 1998
 *
 *  Make the real block number available to downstream transfer functions;
 *  this enables CBC (and related) mode encryption, which requires a unique
 *  IV per data block.
 *  Reed H. Petty, rhp@draper.net
 *
 *  Maximum number of loop devices is now dynamic via the max_loop module
 *  parameter.
 *  Russell Kroll <rkroll@exploits.org> 19990701
 *
 *  Maximum number of loop devices when compiled in is now selectable by
 *  passing max_loop=<1-255> to the kernel on boot.
 *  Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 *  Completely rewrote request handling to be make_request_fn style and
 *  non-blocking, pushing work to a helper thread.  Lots of fixes from
 *  Jens Axboe <axboe@suse.de>, Nov 2000
 *
 *  Support up to 256 loop devices
 *  Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN.
 *
 * - Passing the block number as IV to the low-level transfer functions is
 *   broken: it passes the underlying device's block number instead of the
 *   offset.  This makes it change for a given block when the file is
 *   moved/restored/copied, and it also doesn't work over NFS.
 *   AV, Feb 12, 2000: we pass the logical block number now.  It fixes the
 *   problem above.  Encryption modules that used to rely on the old scheme
 *   should just call ->i_mapping->bmap() to calculate the physical block
 *   number.
 */
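/*
 * Editor's illustration (not part of the original driver): what "call
 * ->i_mapping->bmap()" means for a transfer module that still wants the
 * physical block number.  This is a minimal sketch; the helper name is
 * hypothetical and it only assumes the a_ops->bmap() method that regular
 * files on block-based filesystems provide (NFS and friends do not):
 *
 *	static unsigned long logical_to_physical(struct inode *inode,
 *						 unsigned long logical_block)
 *	{
 *		struct address_space *mapping = inode->i_mapping;
 *
 *		if (!mapping->a_ops->bmap)
 *			return logical_block;
 *		return mapping->a_ops->bmap(mapping, logical_block);
 *	}
 */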
#include <linux/config.h>
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/loop.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */

#include <asm/uaccess.h>

static int max_loop = 8;
static struct loop_device *loop_dev;
static struct gendisk **disks;
static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
                         char *loop_buf, int size, sector_t real_block)
{
        if (raw_buf != loop_buf) {
                if (cmd == READ)
                        memcpy(loop_buf, raw_buf, size);
                else
                        memcpy(raw_buf, loop_buf, size);
        }
        return 0;
}
static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
                        char *loop_buf, int size, sector_t real_block)
{
        /* READ decrypts raw_buf -> loop_buf, WRITE encrypts loop_buf -> raw_buf */
        char *in = (cmd == READ) ? raw_buf : loop_buf;
        char *out = (cmd == READ) ? loop_buf : raw_buf;
        char *key = lo->lo_encrypt_key;
        int i, keysize = lo->lo_encrypt_key_size;

        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];
        return 0;
}

static int xor_status(struct loop_device *lo, const struct loop_info64 *info)
{
        if (info->lo_encrypt_key_size <= 0)
                return -EINVAL;
        return 0;
}
struct loop_func_table none_funcs = {
        .number = LO_CRYPT_NONE,
        .transfer = transfer_none,
};

struct loop_func_table xor_funcs = {
        .number = LO_CRYPT_XOR,
        .transfer = transfer_xor,
        .init = xor_status,
};

/* xfer_funcs[0] is special - its release function is never called */
struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
        &none_funcs,
        &xor_funcs,
};
static int figure_loop_size(struct loop_device *lo)
{
        loff_t size = lo->lo_backing_file->f_dentry->d_inode->i_mapping->host->i_size;
        sector_t x;

        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
         */
        size = (size - lo->lo_offset) >> 9;
        x = (sector_t)size;
        if ((loff_t)x != size)
                return -EFBIG;

        set_capacity(disks[lo->lo_number], size);
        return 0;
}
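/*
 * Worked example (editor's note, numbers are illustrative only): for a
 * 1 GiB backing file (i_size = 1073741824) with lo_offset = 4096, the
 * usable size is (1073741824 - 4096) >> 9 = 2097144 sectors of 512 bytes,
 * which is what set_capacity() reports for the loop disk.
 */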
static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
                                 char *lbuf, int size, sector_t rblock)
{
        if (!lo->transfer)
                return 0;

        return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
}
do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)

        struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
        struct address_space_operations *aops = mapping->a_ops;
        unsigned size, offset;

        down(&mapping->host->i_sem);
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
        data = kmap(bvec->bv_page) + bvec->bv_offset;

        sector_t IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;

        size = PAGE_CACHE_SIZE - offset;

        page = grab_cache_page(mapping, index);
        if (aops->prepare_write(file, page, offset, offset+size))
        transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
        if (transfer_result) {
                /*
                 * The transfer failed, but we still write the data to
                 * keep prepare/commit calls balanced.
                 */
                printk(KERN_ERR "loop: transfer error block %llu\n",
                       (unsigned long long)index);
                memset(kaddr + offset, 0, size);
        }
        flush_dcache_page(page);
        if (aops->commit_write(file, page, offset, offset+size))

        page_cache_release(page);
        up(&mapping->host->i_sem);

        kunmap(bvec->bv_page);

        page_cache_release(page);
        up(&mapping->host->i_sem);
lo_send(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)

        for (vecnr = 0; vecnr < bio->bi_vcnt; vecnr++) {
                struct bio_vec *bvec = &bio->bi_io_vec[vecnr];

                ret = do_lo_send(lo, bvec, bsize, pos);
struct lo_read_data {
        struct loop_device *lo;
        char *data;
        int bsize;
};
lo_read_actor(read_descriptor_t *desc, struct page *page,
              unsigned long offset, unsigned long size)

        unsigned long count = desc->count;
        struct lo_read_data *p = (struct lo_read_data*)desc->buf;
        struct loop_device *lo = p->lo;
        int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;

        if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) {
                printk(KERN_ERR "loop: transfer error block %ld\n",
                desc->error = -EINVAL;
        }

        desc->count = count - size;
        desc->written += size;
do_lo_receive(struct loop_device *lo,
              struct bio_vec *bvec, int bsize, loff_t pos)

        struct lo_read_data cookie;

        cookie.data = kmap(bvec->bv_page) + bvec->bv_offset;
        cookie.bsize = bsize;
        file = lo->lo_backing_file;
        retval = file->f_op->sendfile(file, &pos, bvec->bv_len,
                                      lo_read_actor, &cookie);
        kunmap(bvec->bv_page);
        return (retval < 0) ? retval : 0;
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)

        for (vecnr = 0; vecnr < bio->bi_vcnt; vecnr++) {
                struct bio_vec *bvec = &bio->bi_io_vec[vecnr];

                ret = do_lo_receive(lo, bvec, bsize, pos);
static inline unsigned long
loop_get_iv(struct loop_device *lo, unsigned long sector)
{
        int bs = lo->lo_blocksize;
        unsigned long offset, IV;

        IV = sector / (bs >> 9) + lo->lo_offset / bs;
        offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
        if (offset >= bs)
                IV++;

        return IV;
}
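/*
 * Worked example (editor's note): with lo_blocksize = 4096 (eight 512-byte
 * sectors per block) and lo_offset = 0, sector 17 gives
 * IV = 17 / 8 = 2 and offset = (17 % 8) << 9 = 512; since 512 < 4096 the
 * IV stays 2, i.e. the third 4 KiB block of the backing store.
 */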
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
{
        loff_t pos;
        int ret;

        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
        if (bio_rw(bio) == WRITE)
                ret = lo_send(lo, bio, lo->lo_blocksize, pos);
        else
                ret = lo_receive(lo, bio, lo->lo_blocksize, pos);

        return ret;
}
static int loop_end_io_transfer(struct bio *, unsigned int, int);

static void loop_put_buffer(struct bio *bio)
{
        /*
         * check bi_end_io, may just be a remapped bio
         */
        if (bio && bio->bi_end_io == loop_end_io_transfer) {
                int i;

                for (i = 0; i < bio->bi_vcnt; i++)
                        __free_page(bio->bi_io_vec[i].bv_page);

                bio_put(bio);
        }
}
/*
 * Add bio to back of pending list
 */
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&lo->lo_lock, flags);
        if (lo->lo_biotail) {
                lo->lo_biotail->bi_next = bio;
                lo->lo_biotail = bio;
        } else
                lo->lo_bio = lo->lo_biotail = bio;
        spin_unlock_irqrestore(&lo->lo_lock, flags);

        up(&lo->lo_bh_mutex);
}
/*
 * Grab first pending buffer
 */
static struct bio *loop_get_bio(struct loop_device *lo)
{
        struct bio *bio;

        spin_lock_irq(&lo->lo_lock);
        if ((bio = lo->lo_bio)) {
                if (bio == lo->lo_biotail)
                        lo->lo_biotail = NULL;
                lo->lo_bio = bio->bi_next;
                bio->bi_next = NULL;
        }
        spin_unlock_irq(&lo->lo_lock);

        return bio;
}
/*
 * if this was a WRITE lo->transfer stuff has already been done. for READs,
 * queue it for the loop thread and let it do the transfer out of
 * bi_end_io context (we don't want to do decrypt of a page with irqs
 * disabled)
 */
static int loop_end_io_transfer(struct bio *bio, unsigned int bytes_done, int err)
{
        struct bio *rbh = bio->bi_private;
        struct loop_device *lo = rbh->bi_bdev->bd_disk->private_data;

        if (bio->bi_size)
                return 1;

        if (err || bio_rw(bio) == WRITE) {
                bio_endio(rbh, rbh->bi_size, err);
                if (atomic_dec_and_test(&lo->lo_pending))
                        up(&lo->lo_bh_mutex);
                loop_put_buffer(bio);
        } else
                loop_add_bio(lo, bio);

        return 0;
}
static struct bio *loop_copy_bio(struct bio *rbh)

        bio = bio_alloc(__GFP_NOWARN, rbh->bi_vcnt);

        /*
         * iterate iovec list and alloc pages
         */
        __bio_for_each_segment(bv, rbh, i, 0) {
                struct bio_vec *bbv = &bio->bi_io_vec[i];

                bbv->bv_page = alloc_page(__GFP_NOWARN|__GFP_HIGHMEM);
                if (bbv->bv_page == NULL)
                bbv->bv_len = bv->bv_len;
                bbv->bv_offset = bv->bv_offset;
        }

        bio->bi_vcnt = rbh->bi_vcnt;
        bio->bi_size = rbh->bi_size;

                __free_page(bio->bi_io_vec[i].bv_page);
static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh)
{
        struct bio *bio;

        /*
         * When called on the page reclaim -> writepage path, this code can
         * trivially consume all memory.  So we drop PF_MEMALLOC to avoid
         * stealing all the page reserves and throttle to the writeout rate.
         * pdflush will have been woken by page reclaim.  Let it do its work.
         */
        do {
                int flags = current->flags;

                current->flags &= ~PF_MEMALLOC;
                bio = loop_copy_bio(rbh);
                if (flags & PF_MEMALLOC)
                        current->flags |= PF_MEMALLOC;

                if (bio == NULL)
                        blk_congestion_wait(WRITE, HZ/10);
        } while (bio == NULL);

        bio->bi_end_io = loop_end_io_transfer;
        bio->bi_private = rbh;
        bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9);
        bio->bi_rw = rbh->bi_rw;
        bio->bi_bdev = lo->lo_device;

        return bio;
}
static int loop_transfer_bio(struct loop_device *lo,
                             struct bio *to_bio, struct bio *from_bio)
{
        unsigned long IV = loop_get_iv(lo, from_bio->bi_sector);
        struct bio_vec *from_bvec, *to_bvec;
        char *vto, *vfrom;
        int ret = 0, i;

        __bio_for_each_segment(from_bvec, from_bio, i, 0) {
                to_bvec = &to_bio->bi_io_vec[i];

                kmap(from_bvec->bv_page);
                kmap(to_bvec->bv_page);
                vfrom = page_address(from_bvec->bv_page) + from_bvec->bv_offset;
                vto = page_address(to_bvec->bv_page) + to_bvec->bv_offset;
                ret |= lo_do_transfer(lo, bio_data_dir(to_bio), vto, vfrom,
                                      from_bvec->bv_len, IV);
                kunmap(from_bvec->bv_page);
                kunmap(to_bvec->bv_page);
        }

        return ret;
}
static int loop_make_request(request_queue_t *q, struct bio *old_bio)

        struct bio *new_bio = NULL;
        struct loop_device *lo = q->queuedata;
        int rw = bio_rw(old_bio);

        spin_lock_irq(&lo->lo_lock);
        if (lo->lo_state != Lo_bound)
        atomic_inc(&lo->lo_pending);
        spin_unlock_irq(&lo->lo_lock);

        if (lo->lo_flags & LO_FLAGS_READ_ONLY)
        } else if (rw == READA) {
        } else if (rw != READ) {
                printk(KERN_ERR "loop: unknown command (%x)\n", rw);

        /*
         * file backed, queue for loop_thread to handle
         */
        if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
                loop_add_bio(lo, old_bio);

        /*
         * piggy old buffer on original, and submit for I/O
         */
        new_bio = loop_get_buffer(lo, old_bio);
        if (loop_transfer_bio(lo, new_bio, old_bio))

        generic_make_request(new_bio);

        if (atomic_dec_and_test(&lo->lo_pending))
                up(&lo->lo_bh_mutex);
        loop_put_buffer(new_bio);
        bio_io_error(old_bio, old_bio->bi_size);
        spin_unlock_irq(&lo->lo_lock);
static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
{
        int ret;

        /*
         * For block backed loop, we know this is a READ
         */
        if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
                ret = do_bio_filebacked(lo, bio);
                bio_endio(bio, bio->bi_size, ret);
        } else {
                struct bio *rbh = bio->bi_private;

                ret = loop_transfer_bio(lo, bio, rbh);

                bio_endio(rbh, rbh->bi_size, ret);
                loop_put_buffer(bio);
        }
}
/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 */
static int loop_thread(void *data)

        struct loop_device *lo = data;

        daemonize("loop%d", lo->lo_number);

        current->flags |= PF_IOTHREAD;  /* loop can be used in an encrypted
                                           device, hence it mustn't be stopped
                                           at all because it could be
                                           indirectly used during suspension */

        set_user_nice(current, -20);

        lo->lo_state = Lo_bound;
        atomic_inc(&lo->lo_pending);

        /*
         * up sem, we are running
         */

                down_interruptible(&lo->lo_bh_mutex);
                /*
                 * could be upped because of tear-down, not because of
                 * pending work
                 */
                if (!atomic_read(&lo->lo_pending))

                bio = loop_get_bio(lo);
                        printk("loop: missing bio\n");
                loop_handle_bio(lo, bio);

                /*
                 * upped both for pending work and tear-down, lo_pending
                 */
                if (atomic_dec_and_test(&lo->lo_pending))
static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
                       struct block_device *bdev, unsigned int arg)

        struct block_device *lo_device = NULL;
        unsigned lo_blocksize;

        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);

        if (lo->lo_state != Lo_unbound)

        inode = file->f_dentry->d_inode;

        if (!(file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        if (S_ISBLK(inode->i_mode)) {
                lo_device = inode->i_bdev;
                if (lo_device == bdev) {
                lo_blocksize = block_size(lo_device);
                if (bdev_read_only(lo_device))
                        lo_flags |= LO_FLAGS_READ_ONLY;
        } else if (S_ISREG(inode->i_mode)) {
                struct address_space_operations *aops = inode->i_mapping->a_ops;
                /*
                 * If we can't read - sorry. If we only can't write - well,
                 * it's going to be read-only.
                 */
                if (!inode->i_fop->sendfile)

                if (!aops->prepare_write || !aops->commit_write)
                        lo_flags |= LO_FLAGS_READ_ONLY;

                lo_blocksize = inode->i_blksize;
                lo_flags |= LO_FLAGS_DO_BMAP;

        if (!(lo_file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

        lo->lo_blocksize = lo_blocksize;
        lo->lo_device = lo_device;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;

        if (figure_loop_size(lo)) {

        lo->old_gfp_mask = inode->i_mapping->gfp_mask;
        inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS);

        set_blocksize(bdev, lo_blocksize);

        lo->lo_bio = lo->lo_biotail = NULL;

        /*
         * set queue make_request_fn, and add limits based on lower level
         * device
         */
        blk_queue_make_request(&lo->lo_queue, loop_make_request);
        lo->lo_queue.queuedata = lo;

        /*
         * we remap to a block device, make sure we correctly stack limits
         */
        if (S_ISBLK(inode->i_mode)) {
                request_queue_t *q = bdev_get_queue(lo_device);

                blk_queue_max_sectors(&lo->lo_queue, q->max_sectors);
                blk_queue_max_phys_segments(&lo->lo_queue, q->max_phys_segments);
                blk_queue_max_hw_segments(&lo->lo_queue, q->max_hw_segments);
                blk_queue_max_segment_size(&lo->lo_queue, q->max_segment_size);
                blk_queue_segment_boundary(&lo->lo_queue, q->seg_boundary_mask);
                blk_queue_merge_bvec(&lo->lo_queue, q->merge_bvec_fn);
        }

        kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);

        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
static int loop_release_xfer(struct loop_device *lo)
{
        int err = 0;

        if (lo->lo_encrypt_type) {
                struct loop_func_table *xfer = xfer_funcs[lo->lo_encrypt_type];

                if (xfer && xfer->release)
                        err = xfer->release(lo);
                if (xfer && xfer->unlock)
                        xfer->unlock(lo);
                lo->lo_encrypt_type = 0;
        }
        return err;
}
loop_init_xfer(struct loop_device *lo, int type, const struct loop_info64 *i)

        struct loop_func_table *xfer = xfer_funcs[type];

        err = xfer->init(lo, i);

        lo->lo_encrypt_type = type;
static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)

        struct file *filp = lo->lo_backing_file;
        int gfp = lo->old_gfp_mask;

        if (lo->lo_state != Lo_bound)

        if (lo->lo_refcnt > 1)  /* we needed one fd for the ioctl */

        spin_lock_irq(&lo->lo_lock);
        lo->lo_state = Lo_rundown;
        if (atomic_dec_and_test(&lo->lo_pending))
                up(&lo->lo_bh_mutex);
        spin_unlock_irq(&lo->lo_lock);

        lo->lo_backing_file = NULL;

        loop_release_xfer(lo);
        lo->lo_device = NULL;
        lo->lo_encrypt_type = 0;
        lo->lo_encrypt_key_size = 0;
        lo->lo_queue.queuedata = NULL;
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_name, 0, LO_NAME_SIZE);
        invalidate_bdev(bdev, 0);
        set_capacity(disks[lo->lo_number], 0);
        filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
        lo->lo_state = Lo_unbound;

        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

        if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
            !capable(CAP_SYS_ADMIN))
        if (lo->lo_state != Lo_bound)
        if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)

        type = info->lo_encrypt_type;
        if (type >= MAX_LO_CRYPT || xfer_funcs[type] == NULL)
        if (type == LO_CRYPT_XOR && info->lo_encrypt_key_size == 0)

        err = loop_release_xfer(lo);
        err = loop_init_xfer(lo, type, info);

        offset = lo->lo_offset;
        if (offset != info->lo_offset) {
                lo->lo_offset = info->lo_offset;
                if (figure_loop_size(lo)) {
                        lo->lo_offset = offset;

        strlcpy(lo->lo_name, info->lo_name, LO_NAME_SIZE);

        lo->transfer = xfer_funcs[type]->transfer;
        lo->ioctl = xfer_funcs[type]->ioctl;
        lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
        lo->lo_init[0] = info->lo_init[0];
        lo->lo_init[1] = info->lo_init[1];
        if (info->lo_encrypt_key_size) {
                memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
                       info->lo_encrypt_key_size);
                lo->lo_key_owner = current->uid;
loop_get_status(struct loop_device *lo, struct loop_info64 *info)

        struct file *file = lo->lo_backing_file;

        if (lo->lo_state != Lo_bound)

        error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);

        memset(info, 0, sizeof(*info));
        info->lo_number = lo->lo_number;
        info->lo_device = stat.dev;
        info->lo_inode = stat.ino;
        info->lo_rdevice = lo->lo_device ? stat.rdev : stat.dev;
        info->lo_offset = lo->lo_offset;
        info->lo_flags = lo->lo_flags;
        strlcpy(info->lo_name, lo->lo_name, LO_NAME_SIZE);
        info->lo_encrypt_type = lo->lo_encrypt_type;
        if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
                info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
                memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
                       lo->lo_encrypt_key_size);
static int
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
        info64->lo_number = info->lo_number;
        info64->lo_device = info->lo_device;
        info64->lo_inode = info->lo_inode;
        info64->lo_rdevice = info->lo_rdevice;
        info64->lo_offset = info->lo_offset;
        info64->lo_encrypt_type = info->lo_encrypt_type;
        info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
        info64->lo_flags = info->lo_flags;
        info64->lo_init[0] = info->lo_init[0];
        info64->lo_init[1] = info->lo_init[1];
        memcpy(info64->lo_name, info->lo_name, LO_NAME_SIZE);
        memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
        return 0;
}
static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
        info->lo_number = info64->lo_number;
        info->lo_device = info64->lo_device;
        info->lo_inode = info64->lo_inode;
        info->lo_rdevice = info64->lo_rdevice;
        info->lo_offset = info64->lo_offset;
        info->lo_encrypt_type = info64->lo_encrypt_type;
        info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
        info->lo_flags = info64->lo_flags;
        info->lo_init[0] = info64->lo_init[0];
        info->lo_init[1] = info64->lo_init[1];
        memcpy(info->lo_name, info64->lo_name, LO_NAME_SIZE);
        memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

        /* error in case values were truncated */
        if (info->lo_device != info64->lo_device ||
            info->lo_rdevice != info64->lo_rdevice ||
            info->lo_inode != info64->lo_inode ||
            info->lo_offset != info64->lo_offset)
                return -EOVERFLOW;

        return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info *arg)
{
        struct loop_info info;
        struct loop_info64 info64;

        if (copy_from_user(&info, arg, sizeof (struct loop_info)))
                return -EFAULT;
        loop_info64_from_old(&info, &info64);
        return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 *arg)
{
        struct loop_info64 info64;

        if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
                return -EFAULT;
        return loop_set_status(lo, &info64);
}
static int
loop_get_status_old(struct loop_device *lo, struct loop_info *arg) {
        struct loop_info info;
        struct loop_info64 info64;
        int err;

        err = loop_get_status(lo, &info64);
        if (!err)
                err = loop_info64_to_old(&info64, &info);
        if (!err && copy_to_user(arg, &info, sizeof(info)))
                err = -EFAULT;

        return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 *arg) {
        struct loop_info64 info64;
        int err;

        err = loop_get_status(lo, &info64);
        if (!err && copy_to_user(arg, &info64, sizeof(info64)))
                err = -EFAULT;

        return err;
}
static int lo_ioctl(struct inode * inode, struct file * file,
        unsigned int cmd, unsigned long arg)
{
        struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
        int err;

        down(&lo->lo_ctl_mutex);
        switch (cmd) {
        case LOOP_SET_FD:
                err = loop_set_fd(lo, file, inode->i_bdev, arg);
                break;
        case LOOP_CLR_FD:
                err = loop_clr_fd(lo, inode->i_bdev);
                break;
        case LOOP_SET_STATUS:
                err = loop_set_status_old(lo, (struct loop_info *) arg);
                break;
        case LOOP_GET_STATUS:
                err = loop_get_status_old(lo, (struct loop_info *) arg);
                break;
        case LOOP_SET_STATUS64:
                err = loop_set_status64(lo, (struct loop_info64 *) arg);
                break;
        case LOOP_GET_STATUS64:
                err = loop_get_status64(lo, (struct loop_info64 *) arg);
                break;
        default:
                err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
        }
        up(&lo->lo_ctl_mutex);
        return err;
}
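/*
 * Editor's illustration (userspace, not part of the driver): the ioctl
 * interface above is typically driven like this.  The file names are
 * examples only and error handling is omitted.
 *
 *	int loop_fd = open("/dev/loop0", O_RDWR);
 *	int back_fd = open("/var/tmp/backing.img", O_RDWR);
 *
 *	ioctl(loop_fd, LOOP_SET_FD, back_fd);         // bind backing file
 *
 *	struct loop_info64 info;
 *	ioctl(loop_fd, LOOP_GET_STATUS64, &info);     // read back status
 *
 *	ioctl(loop_fd, LOOP_CLR_FD, 0);               // detach again
 */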
static int lo_open(struct inode *inode, struct file *file)
{
        struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
        int type;

        down(&lo->lo_ctl_mutex);

        lo->lo_refcnt++;
        type = lo->lo_encrypt_type;
        if (type && xfer_funcs[type] && xfer_funcs[type]->lock)
                xfer_funcs[type]->lock(lo);

        up(&lo->lo_ctl_mutex);
        return 0;
}
static int lo_release(struct inode *inode, struct file *file)
{
        struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
        int type;

        down(&lo->lo_ctl_mutex);

        --lo->lo_refcnt;
        type = lo->lo_encrypt_type;
        if (xfer_funcs[type] && xfer_funcs[type]->unlock)
                xfer_funcs[type]->unlock(lo);

        up(&lo->lo_ctl_mutex);
        return 0;
}
static struct block_device_operations lo_fops = {
        .owner =        THIS_MODULE,
        .open =         lo_open,
        .release =      lo_release,
        .ioctl =        lo_ioctl,
};
/*
 * And now the modules code and kernel interface.
 */
MODULE_PARM(max_loop, "i");
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
MODULE_LICENSE("GPL");
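/*
 * Editor's usage note (illustration, not from the original source): when the
 * driver is built as a module the limit above is given at load time, for
 * example
 *
 *	modprobe loop max_loop=64
 *
 * while a built-in driver takes the same limit from the boot command line
 * via the max_loop= parameter handled by max_loop_setup() at the bottom of
 * this file.
 */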
int loop_register_transfer(struct loop_func_table *funcs)
{
        /* slot must be within xfer_funcs[] and not already taken */
        if ((unsigned)funcs->number >= MAX_LO_CRYPT || xfer_funcs[funcs->number])
                return -EINVAL;
        xfer_funcs[funcs->number] = funcs;
        return 0;
}
int loop_unregister_transfer(int number)
{
        struct loop_device *lo;

        if ((unsigned)number >= MAX_LO_CRYPT)
                return -EINVAL;

        for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
                int type = lo->lo_encrypt_type;

                if (type == number) {
                        xfer_funcs[type]->release(lo);
                        lo->transfer = NULL;
                        lo->lo_encrypt_type = 0;
                }
        }
        xfer_funcs[number] = NULL;
        return 0;
}
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
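/*
 * Editor's illustration (hypothetical module, not part of this file): an
 * out-of-tree transfer module is expected to fill in a loop_func_table and
 * hand it to loop_register_transfer() at load time.  The crypt number and
 * callback names below are made up for the sketch; only the fields used
 * elsewhere in this file (.number, .transfer, .init, .release) are assumed.
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = 18,			// some unused LO_CRYPT_* slot
 *		.transfer = example_transfer,
 *		.init     = example_init,
 *		.release  = example_release,
 *	};
 *
 *	static int __init example_init_module(void)
 *	{
 *		return loop_register_transfer(&example_funcs);
 *	}
 *
 *	static void __exit example_exit_module(void)
 *	{
 *		loop_unregister_transfer(example_funcs.number);
 *	}
 */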
int __init loop_init(void)

        if ((max_loop < 1) || (max_loop > 256)) {
                printk(KERN_WARNING "loop: invalid max_loop (must be between"
                       " 1 and 256), using default (8)\n");

        if (register_blkdev(LOOP_MAJOR, "loop"))

        devfs_mk_dir("loop");

        loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);

        disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL);

        for (i = 0; i < max_loop; i++) {
                disks[i] = alloc_disk(1);

        for (i = 0; i < max_loop; i++) {
                struct loop_device *lo = &loop_dev[i];
                struct gendisk *disk = disks[i];

                memset(lo, 0, sizeof(*lo));
                init_MUTEX(&lo->lo_ctl_mutex);
                init_MUTEX_LOCKED(&lo->lo_sem);
                init_MUTEX_LOCKED(&lo->lo_bh_mutex);
                spin_lock_init(&lo->lo_lock);
                disk->major = LOOP_MAJOR;
                disk->first_minor = i;
                disk->fops = &lo_fops;
                sprintf(disk->disk_name, "loop%d", i);
                sprintf(disk->devfs_name, "loop/%d", i);
                disk->private_data = lo;
                disk->queue = &lo->lo_queue;
        }

        printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);

        printk(KERN_ERR "loop: ran out of memory\n");
void loop_exit(void)

        for (i = 0; i < max_loop; i++) {
                del_gendisk(disks[i]);
        }

        devfs_remove("loop");
        if (unregister_blkdev(LOOP_MAJOR, "loop"))
                printk(KERN_WARNING "loop: cannot unregister blkdev\n");
module_init(loop_init);
module_exit(loop_exit);
static int __init max_loop_setup(char *str)
{
        max_loop = simple_strtol(str, NULL, 0);
        return 1;
}

__setup("max_loop=", max_loop_setup);