block: fix problem with sending down discard that isn't of correct granularity
block/blk-lib.c
/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

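/*
 * Completion handler for discard bios: note -EOPNOTSUPP and I/O errors in
 * the bio flags, complete the waiter attached via bi_private (if any), and
 * drop the reference taken by the submitter.
 */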
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

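		/*
		 * Round max_discard_sectors down to a multiple of the
		 * discard granularity.  The mask below assumes disc_sects
		 * is a power of two: e.g. a 64KiB granularity gives
		 * disc_sects = 128, so a limit of 65535 sectors is trimmed
		 * to 65408.
		 */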
		max_discard_sectors &= ~(disc_sects - 1);
	}

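	/*
	 * Split the request into bios of at most max_discard_sectors each,
	 * so every chunk respects the device limit and every full-sized
	 * chunk is a multiple of the discard granularity.
	 */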
	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

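/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): discard a freed range and wait for it to finish.  The function
 * name and parameters are hypothetical; only blkdev_issue_discard() and
 * BLKDEV_IFL_WAIT come from the code above.
 */
static inline int example_discard_range(struct block_device *bdev,
					sector_t start, sector_t nr_sects)
{
	/* Synchronous discard: wait for every submitted chunk to complete. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}
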
struct bio_batch
{
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};

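/*
 * Per-bio completion for a batch: record any error in the shared batch
 * flags, invoke the optional per-batch end_io callback, count this bio as
 * done and wake the waiter.
 */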
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* bi_private is only set when the submitter waits, so bb may be NULL */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 *  Send a barrier at the beginning and at the end if requested; this
 *  guarantees correct request ordering.  An empty barrier allows us to
 *  avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

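		/*
		 * Fill the bio with references to ZERO_PAGE(0) until it is
		 * full or the remaining range is exhausted; bio_add_page()
		 * returns the number of bytes actually added.
		 */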
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
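
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): zero a range synchronously with barriers around the writes.
 * The function name and parameters are hypothetical; only
 * blkdev_issue_zeroout() and the BLKDEV_IFL_* flags come from the code
 * above.
 */
static inline int example_zero_range(struct block_device *bdev,
				     sector_t start, sector_t nr_sects)
{
	/* Issue zero-filled writes, bracketed by barriers, and wait. */
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}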