/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>          /* struct bio */
#include <linux/buffer_head.h>  /* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
        dprintk("PRINTPAGE page %p\n", page);
        dprintk("       PagePrivate %d\n", PagePrivate(page));
        dprintk("       PageUptodate %d\n", PageUptodate(page));
        dprintk("       PageError %d\n", PageError(page));
        dprintk("       PageDirty %d\n", PageDirty(page));
        dprintk("       PageReferenced %d\n", PageReferenced(page));
        dprintk("       PageLocked %d\n", PageLocked(page));
        dprintk("       PageWriteback %d\n", PageWriteback(page));
        dprintk("       PageMappedToDisk %d\n", PageMappedToDisk(page));
        dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
        if (be->be_state == PNFS_BLOCK_NONE_DATA)
                return 1;
        else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
                return 0;
        else
                return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
        return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data, int num_se);
        void *data;
        int bse_count;
};

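/* A parallel_io starts with one reference held by the submitting thread;
 * each in-flight bio takes another in bl_submit_bio().  When the count
 * drops to zero, destroy_parallel() fires pnfs_callback with bse_count,
 * the number of short extents reserved for LAYOUTCOMMIT.
 */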
static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
                rv->bse_count = 0;
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data, p->bse_count);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

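/* Take a reference on the parallel_io tracker for the in-flight bio and
 * submit it.  Always returns NULL so the caller can reset its bio pointer.
 */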
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write",
                        bio->bi_size, (unsigned long long)bio->bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}

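/* Allocate a bio for up to npg pages, mapped onto the extent's backing
 * device.  If allocation fails during memory reclaim (PF_MEMALLOC), retry
 * with progressively smaller bios rather than failing outright.
 */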
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }

        if (bio) {
                bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par)
{
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                if (uptodate)
                        SetPageUptodate(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
                if (!rdata->pnfs_error)
                        rdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(rdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

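/* Scheduled from bl_end_par_io_read: the final put_parallel() can run from
 * bio completion (interrupt) context, so finishing the read is deferred to
 * a workqueue.
 */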
static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_read_data *rdata;
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        rdata = container_of(task, struct nfs_read_data, task);
        pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
        struct nfs_read_data *rdata = data;

        rdata->task.tk_status = rdata->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
}

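/* Read the listed pages, building one bio per contiguous extent.  Holes
 * are zero-filled without touching the device unless a COW extent supplies
 * the data.  Falling back to the MDS is only possible before the first bio
 * has been set up.
 */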
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = rdata->args.offset;
        struct page **pages = rdata->args.pages;
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
               rdata->npages, f_offset, (unsigned int)rdata->args.count);

        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < rdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
                                             isect, &cow_read);
                        if (!be) {
                                rdata->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                        if (cow_read) {
                                sector_t cow_length = cow_read->be_length -
                                        (isect - cow_read->be_f_offset);
                                extent_length = min(extent_length, cow_length);
                        }
                }
                hole = is_hole(be, isect);
                if (hole && !cow_read) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                        print_page(pages[i]);
                        SetPageUptodate(pages[i]);
                } else {
                        struct pnfs_block_extent *be_read;

                        be_read = (hole && cow_read) ? cow_read : be;
                        bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par);
                        if (IS_ERR(bio)) {
                                rdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }
        if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
                rdata->res.eof = 1;
                rdata->res.count = rdata->inode->i_size - f_offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
        }
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(READ, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;

 use_mds:
        dprintk("Giving up and using normal NFS\n");
        return PNFS_NOT_ATTEMPTED;
}

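/* Round the byte range out to page boundaries and mark the covered sectors
 * as written, queueing one short extent per INVALID extent for the next
 * LAYOUTCOMMIT.
 */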
static void mark_extents_written(struct pnfs_block_layout *bl,
                                 __u64 offset, __u32 count)
{
        sector_t isect, end;
        struct pnfs_block_extent *be;
        struct pnfs_block_short_extent *se;

        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
                return;
        isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
        end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
        end >>= SECTOR_SHIFT;
        while (isect < end) {
                sector_t len;
                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        se = bl_pop_one_short_extent(be->be_inval);
                        BUG_ON(!se);
                        bl_mark_for_commit(be, isect, len, se);
                }
                isect += len;
                bl_put_extent(be);
        }
}

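/* bi_end_io for bios built from zeroing pages: end writeback and drop the
 * page references taken in bl_find_get_zeroing_page().
 */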
static void bl_end_io_write_zero(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                /* This is the zeroing page we added */
                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);

        if (unlikely(!uptodate)) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

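/* bi_end_io for ordinary data bios: on failure, record pnfs_error and fail
 * the layout so the write is redone through the MDS.
 */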
static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        if (!uptodate) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write;
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_write_data *wdata;
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
        if (likely(!wdata->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
        struct nfs_write_data *wdata = data;

        if (unlikely(wdata->pnfs_error)) {
                bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
                                        num_se);
        }

        wdata->task.tk_status = wdata->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so it is not
 * used again.
 */
static void mark_bad_read(void)
{
        return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
        dprintk("%s enter be=%p\n", __func__, be);

        set_buffer_mapped(bh);
        bh->b_bdev = be->be_mdev;
        bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
            (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

        dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
                __func__, (unsigned long long)isect, (long)bh->b_blocknr,
                bh->b_size);
        return;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
        struct buffer_head *bh = NULL;
        int ret = 0;
        sector_t isect;

        dprintk("%s enter, %p\n", __func__, page);
        BUG_ON(PageUptodate(page));
        if (!cow_read) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                goto cleanup;
        }

        bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
        if (!bh) {
                ret = -ENOMEM;
                goto cleanup;
        }

        isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
        map_block(bh, isect, cow_read);
        if (!bh_uptodate_or_lock(bh))
                ret = bh_submit_read(bh);
        if (ret)
                goto cleanup;
        SetPageUptodate(page);

cleanup:
        bl_put_extent(cow_read);
        if (bh)
                free_buffer_head(bh);
        if (ret) {
                /* Need to mark layout with bad read...should now
                 * just use nfs4 for reads and writes.
                 */
                mark_bad_read();
        }
        return ret;
}

/* Find or create a zeroing page marked as being under writeback.
 * Returns ERR_PTR on error, NULL to indicate that this page should be
 * skipped, or the page itself to indicate that it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
                        struct pnfs_block_extent *cow_read)
{
        struct page *page;
        int locked = 0;
        page = find_get_page(inode->i_mapping, index);
        if (page)
                goto check_page;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (unlikely(!page)) {
                dprintk("%s oom\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
        locked = 1;

check_page:
        /* PageDirty: Someone else will write this out
         * PageWriteback: Someone else is writing this out
         * PageUptodate: It was read before
         */
        if (PageDirty(page) || PageWriteback(page)) {
                print_page(page);
                if (locked)
                        unlock_page(page);
                page_cache_release(page);
                return NULL;
        }

        if (!locked) {
                lock_page(page);
                locked = 1;
                goto check_page;
        }
        if (!PageUptodate(page)) {
                /* New page: read it in or zero it */
                init_page_for_write(page, cow_read);
        }
        set_page_writeback(page);
        unlock_page(page);

        return page;
}

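/* Write out the pages in wdata.  Pages inside an INVALID extent need the
 * rest of their block zeroed first, so the first and last partial blocks
 * are handled separately from the middle pages, which map directly onto
 * the pages we were handed.
 */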
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, last_isect = 0, extent_length = 0;
        struct parallel_io *par;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
        u64 temp;
        int npg_per_block =
            NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error set pnfs_error
         * so that it is redone using NFS.
         */
        par = alloc_parallel(wdata);
        if (!par)
                goto out_mds;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */

        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                goto out_mds;
        }

        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (likely(!bl_push_one_short_extent(be->be_inval)))
                        par->bse_count++;
                else
                        goto out_mds;
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
                                     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
                extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
                dprintk("%s need to zero %d pages\n", __func__, npg_zero);
                for (; npg_zero > 0; npg_zero--) {
                        if (bl_is_sector_init(be->be_inval, isect)) {
                                dprintk("isect %llu already init\n",
                                        (unsigned long long)isect);
                                goto next_page;
                        }
                        /* page ref released in bl_end_io_write_zero */
                        index = isect >> PAGE_CACHE_SECTOR_SHIFT;
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
                        page = bl_find_get_zeroing_page(wdata->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
                                wdata->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                       PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
                                wdata->pnfs_error = ret;
                                goto out;
                        }
                        if (likely(!bl_push_one_short_extent(be->be_inval)))
                                par->bse_count++;
                        else {
                                end_page_writeback(page);
                                page_cache_release(page);
                                wdata->pnfs_error = -ENOMEM;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
                        mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);

                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
                                wdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
                }
                if (last)
                        goto write_done;
        }
        bio = bl_submit_bio(WRITE, bio);

        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
        for (i = pg_index; i < wdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
                                             isect, NULL);
                        if (!be || !is_writable(be, isect)) {
                                wdata->pnfs_error = -EINVAL;
                                goto out;
                        }
                        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                                if (likely(!bl_push_one_short_extent(
                                                                be->be_inval)))
                                        par->bse_count++;
                                else {
                                        wdata->pnfs_error = -ENOMEM;
                                        goto out;
                                }
                        }
                        extent_length = be->be_length -
                            (isect - be->be_f_offset);
                }
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                       PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                wdata->pnfs_error = ret;
                                goto out;
                        }
                }
                bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par);
                if (IS_ERR(bio)) {
                        wdata->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        /* Last page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                bio = bl_submit_bio(WRITE, bio);
                temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
                npg_zero = npg_per_block - do_div(temp, npg_per_block);
                if (npg_zero < npg_per_block) {
                        last = 1;
                        goto fill_invalid_ext;
                }
        }

write_done:
        wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
        if (count < wdata->res.count) {
                wdata->res.count = count;
        }
out:
        bl_put_extent(be);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
out_mds:
        bl_put_extent(be);
        kfree(par);
        return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
        int i;
        struct pnfs_block_extent *be;

        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                while (!list_empty(&bl->bl_extents[i])) {
                        be = list_first_entry(&bl->bl_extents[i],
                                              struct pnfs_block_extent,
                                              be_node);
                        list_del(&be->be_node);
                        bl_put_extent(be);
                }
        }
        spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
        struct pnfs_inval_tracking *pos, *temp;
        struct pnfs_block_short_extent *se, *stemp;

        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }

        list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
                list_del(&se->bse_node);
                kfree(se);
        }
        return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

        dprintk("%s enter\n", __func__);
        release_extents(bl, NULL);
        release_inval_marks(&bl->bl_inval);
        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;
        spin_lock_init(&bl->bl_ext_lock);
        INIT_LIST_HEAD(&bl->bl_extents[0]);
        INIT_LIST_HEAD(&bl->bl_extents[1]);
        INIT_LIST_HEAD(&bl->bl_commit);
        INIT_LIST_HEAD(&bl->bl_committing);
        bl->bl_count = 0;
        bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
        BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
                       const struct nfs4_layoutcommit_args *arg)
{
        dprintk("%s enter\n", __func__);
        encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

        dprintk("%s enter\n", __func__);
        clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
        if (mid) {
                struct pnfs_block_dev *dev, *tmp;

                /* No need to take bm_lock as we are last user freeing bm_devlist */
                list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
                kfree(mid);
        }
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
                        struct nfs4_deviceid *d_id)
{
        struct pnfs_device *dev;
        struct pnfs_block_dev *rv;
        u32 max_resp_sz;
        int max_pages;
        struct page **pages = NULL;
        int i, rc;

        /*
         * Use the session max response size as the basis for setting
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = max_resp_sz >> PAGE_SHIFT;
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);

        dev = kmalloc(sizeof(*dev), GFP_NOFS);
        if (!dev) {
                dprintk("%s kmalloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
        if (pages == NULL) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i]) {
                        rv = ERR_PTR(-ENOMEM);
                        goto out_free;
                }
        }

        memcpy(&dev->dev_id, d_id, sizeof(*d_id));
        dev->layout_type = LAYOUT_BLOCK_VOLUME;
        dev->pages = pages;
        dev->pgbase = 0;
        dev->pglen = PAGE_SIZE * max_pages;
        dev->mincount = 0;

        dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
        rc = nfs4_proc_getdeviceinfo(server, dev);
        dprintk("%s getdevice info returns %d\n", __func__, rc);
        if (rc) {
                rv = ERR_PTR(rc);
                goto out_free;
        }

        rv = nfs4_blk_decode_device(server, dev);
 out_free:
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
        kfree(pages);
        kfree(dev);
        return rv;
}

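/* Mount-time setup: walk the server's GETDEVICELIST results, fetching and
 * decoding each block device up front and storing it on the per-server
 * block_mount_id device list.
 */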
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        struct block_mount_id *b_mt_id = NULL;
        struct pnfs_devicelist *dlist = NULL;
        struct pnfs_block_dev *bdev;
        LIST_HEAD(block_disklist);
        int status, i;

        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
        if (!b_mt_id) {
                status = -ENOMEM;
                goto out_error;
        }
        /* Initialize nfs4 block layout mount id */
        spin_lock_init(&b_mt_id->bm_lock);
        INIT_LIST_HEAD(&b_mt_id->bm_devlist);

        dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
        if (!dlist) {
                status = -ENOMEM;
                goto out_error;
        }
        dlist->eof = 0;
        while (!dlist->eof) {
                status = nfs4_proc_getdevicelist(server, fh, dlist);
                if (status)
                        goto out_error;
                dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
                        __func__, dlist->num_devs, dlist->eof);
                for (i = 0; i < dlist->num_devs; i++) {
                        bdev = nfs4_blk_get_deviceinfo(server, fh,
                                                       &dlist->dev_id[i]);
                        if (IS_ERR(bdev)) {
                                status = PTR_ERR(bdev);
                                goto out_error;
                        }
                        spin_lock(&b_mt_id->bm_lock);
                        list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
                        spin_unlock(&b_mt_id->bm_lock);
                }
        }
        dprintk("%s SUCCESS\n", __func__);
        server->pnfs_ld_data = b_mt_id;

 out_return:
        kfree(dlist);
        return status;

 out_error:
        free_blk_mountid(b_mt_id);
        goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
        struct block_mount_id *b_mt_id = server->pnfs_ld_data;

        dprintk("%s enter\n", __func__);
        free_blk_mountid(b_mt_id);
        dprintk("%s RETURNS\n", __func__);
        return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = pnfs_generic_pg_init_read,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = pnfs_generic_pg_init_write,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id                             = LAYOUT_BLOCK_VOLUME,
        .name                           = "LAYOUT_BLOCK_VOLUME",
        .read_pagelist                  = bl_read_pagelist,
        .write_pagelist                 = bl_write_pagelist,
        .alloc_layout_hdr               = bl_alloc_layout_hdr,
        .free_layout_hdr                = bl_free_layout_hdr,
        .alloc_lseg                     = bl_alloc_lseg,
        .free_lseg                      = bl_free_lseg,
        .encode_layoutcommit            = bl_encode_layoutcommit,
        .cleanup_layoutcommit           = bl_cleanup_layoutcommit,
        .set_layoutdriver               = bl_set_layoutdriver,
        .clear_layoutdriver             = bl_clear_layoutdriver,
        .pg_read_ops                    = &bl_pg_read_ops,
        .pg_write_ops                   = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall         = rpc_pipe_generic_upcall,
        .downcall       = bl_pipe_downcall,
        .destroy_msg    = bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
                                            struct rpc_pipe *pipe)
{
        struct dentry *dir, *dentry;

        dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
        if (dir == NULL)
                return ERR_PTR(-ENOENT);
        dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
        dput(dir);
        return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
                                          struct rpc_pipe *pipe)
{
        if (pipe->dentry)
                rpc_unlink(pipe->dentry);
}

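/* rpc_pipefs mount/umount notifier: register or unregister this net's
 * "blocklayout" upcall pipe as pipefs superblocks come and go.
 */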
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
                           void *ptr)
{
        struct super_block *sb = ptr;
        struct net *net = sb->s_fs_info;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return 0;

        if (nn->bl_device_pipe == NULL) {
                module_put(THIS_MODULE);
                return 0;
        }

        switch (event) {
        case RPC_PIPEFS_MOUNT:
                dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
                if (IS_ERR(dentry)) {
                        ret = PTR_ERR(dentry);
                        break;
                }
                nn->bl_device_pipe->dentry = dentry;
                break;
        case RPC_PIPEFS_UMOUNT:
                if (nn->bl_device_pipe->dentry)
                        nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
                break;
        default:
                ret = -ENOTSUPP;
                break;
        }
        module_put(THIS_MODULE);
        return ret;
}

static struct notifier_block nfs4blocklayout_block = {
        .notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
                                                   struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;
        struct dentry *dentry;

        pipefs_sb = rpc_get_sb_net(net);
        if (!pipefs_sb)
                return NULL;
        dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
        rpc_put_sb_net(net);
        return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
                                           struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;

        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
                rpc_put_sb_net(net);
        }
}

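/* Per-network-namespace setup: create the device upcall pipe and, if
 * rpc_pipefs is already mounted in this net, register the pipe under it.
 * nfs4blocklayout_register_net() returns NULL when pipefs is not mounted
 * yet; the notifier above handles that case later.
 */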
static int nfs4blocklayout_net_init(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;

        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
                return PTR_ERR(nn->bl_device_pipe);
        dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(nn->bl_device_pipe);
                return PTR_ERR(dentry);
        }
        nn->bl_device_pipe->dentry = dentry;
        return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);

        nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
        rpc_destroy_pipe_data(nn->bl_device_pipe);
        nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
        .init = nfs4blocklayout_net_init,
        .exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
        if (ret)
                goto out_remove;
        ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
        if (ret)
                goto out_notifier;
out:
        return ret;

out_notifier:
        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
               __func__);

        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
        unregister_pernet_subsys(&nfs4blocklayout_net_ops);
        pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);