/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

static struct request *queue_next_fseq(struct request_queue *q);
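
/*
 * Return the currently pending flush sequence step.  q->flush_seq
 * accumulates the QUEUE_FSEQ_* bits that have already completed, so
 * the lowest zero bit (ffz) is the step that is in flight or due next.
 */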
unsigned blk_flush_cur_seq(struct request_queue *q)
{
	if (!q->flush_seq)
		return 0;
	return 1 << ffz(q->flush_seq);
}
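
/*
 * Record completion of @seq (with @error, if any).  If the sequence is
 * not finished, queue the request for the next step; otherwise complete
 * the original barrier request and start the next pending flush, if
 * any.  Returns the request to dispatch next, or NULL.
 */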
static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
{
	struct request *next_rq = NULL;

	/* remember only the first error seen in the sequence */
	if (error && !q->flush_err)
		q->flush_err = error;

	BUG_ON(q->flush_seq & seq);
	q->flush_seq |= seq;

	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_fseq(q);
	} else {
		/* complete this flush request */
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_flush_rq = NULL;
		q->flush_seq = 0;

		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
	}
	return next_rq;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}

static void flush_data_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}
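
/*
 * Set up @rq as a flush request on behalf of the original barrier
 * request and insert it at the front of the dispatch queue.
 */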
static void queue_flush(struct request_queue *q, struct request *rq,
			rq_end_io_fn *end_io)
{
	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_FLUSH;
	rq->rq_disk = q->orig_flush_rq->rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
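
/*
 * Issue the request for the sequence step reported by
 * blk_flush_cur_seq() and return it.
 */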
static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *rq = &q->flush_rq;

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		queue_flush(q, rq, pre_flush_end_io);
		break;

	case QUEUE_FSEQ_DATA:
		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		init_request_from_bio(rq, q->orig_flush_rq->bio);
		rq->cmd_flags &= ~REQ_HARDBARRIER;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		rq->end_io = flush_data_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
		break;

	case QUEUE_FSEQ_POSTFLUSH:
		queue_flush(q, rq, post_flush_end_io);
		break;

	default:
		BUG();
	}
	return rq;
}
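
/*
 * Hook called from the dispatch path for each request about to be
 * issued.  Non-barrier requests pass through unchanged; a barrier
 * request either starts a new flush sequence (the returned request is
 * the first step to dispatch in its place) or, if a sequence is already
 * in flight, is parked on q->pending_flushes.
 */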
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	unsigned skip = 0;

	if (!(rq->cmd_flags & REQ_HARDBARRIER))
		return rq;

	if (q->flush_seq) {
		/*
		 * Sequenced flush is already in progress and they
		 * can't be processed in parallel.  Queue for later
		 * processing.
		 */
		list_move_tail(&rq->queuelist, &q->pending_flushes);
		return NULL;
	}

	if (unlikely(q->next_ordered == QUEUE_ORDERED_NONE)) {
		/*
		 * Queue ordering not supported.  Terminate
		 * with prejudice.
		 */
		blk_dequeue_request(rq);
		__blk_end_request_all(rq, -EOPNOTSUPP);
		return NULL;
	}

	/*
	 * Start a new flush sequence
	 */
	q->flush_err = 0;
	q->ordered = q->next_ordered;
	q->flush_seq |= QUEUE_FSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq))
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_flush_rq = rq;

	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
		skip |= QUEUE_FSEQ_PREFLUSH;

	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
		skip |= QUEUE_FSEQ_DATA;

	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
		skip |= QUEUE_FSEQ_POSTFLUSH;

	/* complete skipped sequences and return the first sequence */
	return blk_flush_complete_seq(q, skip, 0);
}
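
/*
 * Completion callback for the empty barrier bio issued by
 * blkdev_issue_flush().
 */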
static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  If the WAIT flag is not passed, the caller may
 *    only assume that the request was queued internally for later
 *    handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	/* hold an extra reference so the bio outlives its completion */
	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it. For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
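
/*
 * Usage sketch (illustrative, not part of this file): a caller that
 * wants to flush a device's write cache and wait for the result might
 * do something like:
 *
 *	sector_t err_sect;
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, &err_sect,
 *				     BLKDEV_IFL_WAIT);
 *
 * -EOPNOTSUPP from a device without cache flush support is commonly
 * treated as success by callers.
 */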