block: rename barrier/ordered to flush
block/blk-flush.c
/*
 * Functions related to flush IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

static struct request *queue_next_fseq(struct request_queue *q);

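/*
 * A flush is carried out as a sequence of steps tracked as QUEUE_FSEQ_*
 * bits in q->flush_seq: PREFLUSH, DATA and POSTFLUSH, in that order.
 * Return the next incomplete step, or 0 if no sequence is in progress.
 * ffz() yields the lowest clear bit, so steps are walked in order.
 */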
unsigned blk_flush_cur_seq(struct request_queue *q)
{
        if (!q->flush_seq)
                return 0;
        return 1 << ffz(q->flush_seq);
}

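/*
 * Record completion of @seq (one or more QUEUE_FSEQ_* bits).  If the
 * sequence isn't finished yet, queue the next step and return its
 * request; otherwise end the original flush request and, if more
 * flushes are pending, move the next one onto the dispatch queue.
 */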
static struct request *blk_flush_complete_seq(struct request_queue *q,
                                              unsigned seq, int error)
{
        struct request *next_rq = NULL;

        if (error && !q->flush_err)
                q->flush_err = error;

        BUG_ON(q->flush_seq & seq);
        q->flush_seq |= seq;

        if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
                /* not complete yet, queue the next flush sequence */
                next_rq = queue_next_fseq(q);
        } else {
                /* complete this flush request */
                __blk_end_request_all(q->orig_flush_rq, q->flush_err);
                q->orig_flush_rq = NULL;
                q->flush_seq = 0;

                /* dispatch the next flush if there's one */
                if (!list_empty(&q->pending_flushes)) {
                        next_rq = list_entry_rq(q->pending_flushes.next);
                        list_move(&next_rq->queuelist, &q->queue_head);
                }
        }
        return next_rq;
}

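/*
 * Completion callbacks for the individual sequence steps.  Each notifies
 * the elevator and advances the state machine with the QUEUE_FSEQ_* bit
 * for the step that just finished.
 */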
static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}

static void flush_data_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}

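/*
 * Set up @rq as a cache flush on behalf of the original request and
 * insert it at the front of the dispatch queue so it is issued ahead
 * of everything else.
 */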
static void queue_flush(struct request_queue *q, struct request *rq,
                        rq_end_io_fn *end_io)
{
        blk_rq_init(q, rq);
        rq->cmd_type = REQ_TYPE_FS;
        rq->cmd_flags = REQ_FLUSH;
        rq->rq_disk = q->orig_flush_rq->rq_disk;
        rq->end_io = end_io;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

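/*
 * Issue the request for the current sequence step: a pre-flush, the
 * data write itself (proxied through q->flush_rq, with REQ_FUA added
 * when the queue is ordered with QUEUE_ORDERED_DO_FUA), or a
 * post-flush.
 */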
static struct request *queue_next_fseq(struct request_queue *q)
{
        struct request *rq = &q->flush_rq;

        switch (blk_flush_cur_seq(q)) {
        case QUEUE_FSEQ_PREFLUSH:
                queue_flush(q, rq, pre_flush_end_io);
                break;

        case QUEUE_FSEQ_DATA:
                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                init_request_from_bio(rq, q->orig_flush_rq->bio);
                rq->cmd_flags &= ~REQ_HARDBARRIER;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                rq->end_io = flush_data_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
                break;

        case QUEUE_FSEQ_POSTFLUSH:
                queue_flush(q, rq, post_flush_end_io);
                break;

        default:
                BUG();
        }
        return rq;
}

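/*
 * Entry point from the dispatch path.  Passes non-barrier requests
 * through unchanged.  For a REQ_HARDBARRIER request, either queue it
 * behind an already-running flush sequence, fail it with -EOPNOTSUPP
 * if the queue doesn't support ordering, or start a new sequence and
 * return the request for its first step.
 */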
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
        unsigned skip = 0;

        if (!(rq->cmd_flags & REQ_HARDBARRIER))
                return rq;

        if (q->flush_seq) {
                /*
                 * A flush sequence is already in progress; sequences
                 * can't be processed in parallel.  Queue this request
                 * for later processing.
                 */
                list_move_tail(&rq->queuelist, &q->pending_flushes);
                return NULL;
        }

        if (unlikely(q->next_ordered == QUEUE_ORDERED_NONE)) {
                /*
                 * Queue ordering not supported.  Terminate
                 * with prejudice.
                 */
                blk_dequeue_request(rq);
                __blk_end_request_all(rq, -EOPNOTSUPP);
                return NULL;
        }

        /*
         * Start a new flush sequence
         */
        q->flush_err = 0;
        q->ordered = q->next_ordered;
        q->flush_seq |= QUEUE_FSEQ_STARTED;

        /*
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
        if (!blk_rq_sectors(rq))
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);

        /* stash away the original request */
        blk_dequeue_request(rq);
        q->orig_flush_rq = rq;

        if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
                skip |= QUEUE_FSEQ_PREFLUSH;

        if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
                skip |= QUEUE_FSEQ_DATA;

        if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
                skip |= QUEUE_FSEQ_POSTFLUSH;

        /* complete the skipped steps and return the first step to issue */
        return blk_flush_complete_seq(q, skip, 0);
}

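/*
 * Completion handler for the empty barrier bio issued by
 * blkdev_issue_flush().  Records any error on the bio and wakes the
 * waiter, if one is sleeping on it.
 */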
static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	if non-NULL, the sector at which a flush error
 *			occurred is stored here
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  Callers wanting
 *    the error location can pass @error_sector to receive it.  If
 *    BLKDEV_IFL_WAIT is not set in @flags, the flush is merely
 *    submitted; it may still be in flight when this function returns,
 *    in which case neither the return value nor @error_sector reflects
 *    its outcome.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic. Ensure there is a request function before issuing
         * the barrier.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_bdev = bdev;
        if (test_bit(BLKDEV_WAIT, &flags))
                bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_BARRIER, bio);
        if (test_bit(BLKDEV_WAIT, &flags)) {
                wait_for_completion(&wait);
                /*
                 * The driver must store the error location in ->bi_sector, if
                 * it supports it. For non-stacked drivers, this should be
                 * copied from blk_rq_pos(rq).
                 */
                if (error_sector)
                        *error_sector = bio->bi_sector;
        }

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
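
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * that needs its data on stable media, e.g. an fsync implementation,
 * might invoke blkdev_issue_flush() as below.  The "bdev" variable and
 * the error handling policy are hypothetical; -EOPNOTSUPP conventionally
 * means the device has no volatile cache and is often treated as success.
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
 *				 BLKDEV_IFL_WAIT);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;
 *	else if (err)
 *		printk(KERN_ERR "flush error near sector %llu\n",
 *		       (unsigned long long)error_sector);
 */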