1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/migrate.h>
17
18 #include <linux/sunrpc/clnt.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_mount.h>
21 #include <linux/nfs_page.h>
22 #include <linux/backing-dev.h>
23 #include <linux/export.h>
24
25 #include <asm/uaccess.h>
26
27 #include "delegation.h"
28 #include "internal.h"
29 #include "iostat.h"
30 #include "nfs4_fs.h"
31 #include "fscache.h"
32 #include "pnfs.h"
33
34 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
35
36 #define MIN_POOL_WRITE          (32)
37 #define MIN_POOL_COMMIT         (4)
38
39 /*
40  * Local function declarations
41  */
42 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
43                                   struct inode *inode, int ioflags);
44 static void nfs_redirty_request(struct nfs_page *req);
45 static const struct rpc_call_ops nfs_write_partial_ops;
46 static const struct rpc_call_ops nfs_write_full_ops;
47 static const struct rpc_call_ops nfs_commit_ops;
48
49 static struct kmem_cache *nfs_wdata_cachep;
50 static mempool_t *nfs_wdata_mempool;
51 static mempool_t *nfs_commit_mempool;
52
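/*
 * Allocate a zeroed nfs_write_data for a COMMIT from the commit mempool.
 * GFP_NOFS avoids recursing back into the filesystem under memory
 * pressure.  The counterpart is nfs_commit_free().
 */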
53 struct nfs_write_data *nfs_commitdata_alloc(void)
54 {
55         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
56
57         if (p) {
58                 memset(p, 0, sizeof(*p));
59                 INIT_LIST_HEAD(&p->pages);
60         }
61         return p;
62 }
63 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
64
65 void nfs_commit_free(struct nfs_write_data *p)
66 {
67         if (p && (p->pagevec != &p->page_array[0]))
68                 kfree(p->pagevec);
69         mempool_free(p, nfs_commit_mempool);
70 }
71 EXPORT_SYMBOL_GPL(nfs_commit_free);
72
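/*
 * Allocate a zeroed nfs_write_data for a WRITE covering @pagecount pages.
 * Small requests use the embedded page_array[]; larger ones get a
 * separately allocated pagevec.  Returns NULL if either allocation fails.
 */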
73 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
74 {
75         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
76
77         if (p) {
78                 memset(p, 0, sizeof(*p));
79                 INIT_LIST_HEAD(&p->pages);
80                 p->npages = pagecount;
81                 if (pagecount <= ARRAY_SIZE(p->page_array))
82                         p->pagevec = p->page_array;
83                 else {
84                         p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
85                         if (!p->pagevec) {
86                                 mempool_free(p, nfs_wdata_mempool);
87                                 p = NULL;
88                         }
89                 }
90         }
91         return p;
92 }
93
94 void nfs_writedata_free(struct nfs_write_data *p)
95 {
96         if (p && (p->pagevec != &p->page_array[0]))
97                 kfree(p->pagevec);
98         mempool_free(p, nfs_wdata_mempool);
99 }
100
101 void nfs_writedata_release(struct nfs_write_data *wdata)
102 {
103         put_nfs_open_context(wdata->args.context);
104         nfs_writedata_free(wdata);
105 }
106
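/*
 * Record a write error on the open context.  ctx->error is stored before
 * NFS_CONTEXT_ERROR_WRITE is set (hence the smp_wmb()), so a reader that
 * sees the flag also sees a valid error value.
 */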
107 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
108 {
109         ctx->error = error;
110         smp_wmb();
111         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
112 }
113
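/*
 * Return the nfs_page attached to @page through page_private, or NULL.
 * The caller must hold inode->i_lock; a reference is taken on the request
 * before it is returned.
 */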
114 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
115 {
116         struct nfs_page *req = NULL;
117
118         if (PagePrivate(page)) {
119                 req = (struct nfs_page *)page_private(page);
120                 if (req != NULL)
121                         kref_get(&req->wb_kref);
122         }
123         return req;
124 }
125
126 static struct nfs_page *nfs_page_find_request(struct page *page)
127 {
128         struct inode *inode = page->mapping->host;
129         struct nfs_page *req = NULL;
130
131         spin_lock(&inode->i_lock);
132         req = nfs_page_find_request_locked(page);
133         spin_unlock(&inode->i_lock);
134         return req;
135 }
136
137 /* Adjust the file length if we're writing beyond the end */
138 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
139 {
140         struct inode *inode = page->mapping->host;
141         loff_t end, i_size;
142         pgoff_t end_index;
143
144         spin_lock(&inode->i_lock);
145         i_size = i_size_read(inode);
146         end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
147         if (i_size > 0 && page->index < end_index)
148                 goto out;
149         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
150         if (i_size >= end)
151                 goto out;
152         i_size_write(inode, end);
153         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
154 out:
155         spin_unlock(&inode->i_lock);
156 }
157
158 /* A writeback failed: mark the page as bad, and invalidate the page cache */
159 static void nfs_set_pageerror(struct page *page)
160 {
161         SetPageError(page);
162         nfs_zap_mapping(page->mapping->host, page->mapping);
163 }
164
165 /* We can set the PG_uptodate flag if we see that a write request
166  * covers the full page.
167  */
168 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
169 {
170         if (PageUptodate(page))
171                 return;
172         if (base != 0)
173                 return;
174         if (count != nfs_page_length(page))
175                 return;
176         SetPageUptodate(page);
177 }
178
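/*
 * Map the writeback_control onto FLUSH_* flags: reclaim-driven writeback
 * is high priority and stable, kupdate/background writeback is low
 * priority, and everything else is left conditionally stable.
 */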
179 static int wb_priority(struct writeback_control *wbc)
180 {
181         if (wbc->for_reclaim)
182                 return FLUSH_HIGHPRI | FLUSH_STABLE;
183         if (wbc->for_kupdate || wbc->for_background)
184                 return FLUSH_LOWPRI | FLUSH_COND_STABLE;
185         return FLUSH_COND_STABLE;
186 }
187
188 /*
189  * NFS congestion control
190  */
191
192 int nfs_congestion_kb;
193
194 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
195 #define NFS_CONGESTION_OFF_THRESH       \
196         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
197
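/*
 * Writeback congestion accounting: every page going into writeback bumps
 * the per-server counter, and once it passes NFS_CONGESTION_ON_THRESH the
 * backing device is marked congested; nfs_end_page_writeback() clears the
 * congestion again once we drop below the OFF threshold.  As an
 * illustration, assuming 4K pages (PAGE_SHIFT == 12) and
 * nfs_congestion_kb == 1024, the ON threshold is 1024 >> 2 = 256 pages
 * and the OFF threshold is 256 - 64 = 192 pages.
 */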
198 static int nfs_set_page_writeback(struct page *page)
199 {
200         int ret = test_set_page_writeback(page);
201
202         if (!ret) {
203                 struct inode *inode = page->mapping->host;
204                 struct nfs_server *nfss = NFS_SERVER(inode);
205
206                 page_cache_get(page);
207                 if (atomic_long_inc_return(&nfss->writeback) >
208                                 NFS_CONGESTION_ON_THRESH) {
209                         set_bdi_congested(&nfss->backing_dev_info,
210                                                 BLK_RW_ASYNC);
211                 }
212         }
213         return ret;
214 }
215
216 static void nfs_end_page_writeback(struct page *page)
217 {
218         struct inode *inode = page->mapping->host;
219         struct nfs_server *nfss = NFS_SERVER(inode);
220
221         end_page_writeback(page);
222         page_cache_release(page);
223         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
224                 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
225 }
226
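/*
 * Find the write request attached to @page and lock it.  If the request
 * is already locked by someone else, we wait for it and retry, or fail
 * with -EAGAIN when @nonblock is set.  Returns NULL when the page has no
 * request, an ERR_PTR on failure, or the locked request with a reference
 * held.
 */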
227 static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
228 {
229         struct inode *inode = page->mapping->host;
230         struct nfs_page *req;
231         int ret;
232
233         spin_lock(&inode->i_lock);
234         for (;;) {
235                 req = nfs_page_find_request_locked(page);
236                 if (req == NULL)
237                         break;
238                 if (nfs_lock_request_dontget(req))
239                         break;
240                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
241                  *       then the call to nfs_lock_request_dontget() will always
242                  *       succeed provided that someone hasn't already marked the
243                  *       request as dirty (in which case we don't care).
244                  */
245                 spin_unlock(&inode->i_lock);
246                 if (!nonblock)
247                         ret = nfs_wait_on_request(req);
248                 else
249                         ret = -EAGAIN;
250                 nfs_release_request(req);
251                 if (ret != 0)
252                         return ERR_PTR(ret);
253                 spin_lock(&inode->i_lock);
254         }
255         spin_unlock(&inode->i_lock);
256         return req;
257 }
258
259 /*
260  * Find an associated nfs write request, and prepare to flush it out
261  * May return an error if the user signalled nfs_wait_on_request().
262  */
263 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
264                                 struct page *page, bool nonblock)
265 {
266         struct nfs_page *req;
267         int ret = 0;
268
269         req = nfs_find_and_lock_request(page, nonblock);
270         if (!req)
271                 goto out;
272         ret = PTR_ERR(req);
273         if (IS_ERR(req))
274                 goto out;
275
276         ret = nfs_set_page_writeback(page);
277         BUG_ON(ret != 0);
278         BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
279
280         if (!nfs_pageio_add_request(pgio, req)) {
281                 nfs_redirty_request(req);
282                 ret = pgio->pg_error;
283         }
284 out:
285         return ret;
286 }
287
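/*
 * Flush one locked, dirty page through the pageio descriptor.  -EAGAIN
 * from the non-blocking flush path is not treated as an error here: the
 * page is simply redirtied and picked up by a later writeback pass.
 */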
288 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
289 {
290         struct inode *inode = page->mapping->host;
291         int ret;
292
293         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
294         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
295
296         nfs_pageio_cond_complete(pgio, page->index);
297         ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
298         if (ret == -EAGAIN) {
299                 redirty_page_for_writepage(wbc, page);
300                 ret = 0;
301         }
302         return ret;
303 }
304
305 /*
306  * Write an mmapped page to the server.
307  */
308 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
309 {
310         struct nfs_pageio_descriptor pgio;
311         int err;
312
313         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
314         err = nfs_do_writepage(page, wbc, &pgio);
315         nfs_pageio_complete(&pgio);
316         if (err < 0)
317                 return err;
318         if (pgio.pg_error < 0)
319                 return pgio.pg_error;
320         return 0;
321 }
322
323 int nfs_writepage(struct page *page, struct writeback_control *wbc)
324 {
325         int ret;
326
327         ret = nfs_writepage_locked(page, wbc);
328         unlock_page(page);
329         return ret;
330 }
331
332 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
333 {
334         int ret;
335
336         ret = nfs_do_writepage(page, wbc, data);
337         unlock_page(page);
338         return ret;
339 }
340
341 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
342 {
343         struct inode *inode = mapping->host;
344         unsigned long *bitlock = &NFS_I(inode)->flags;
345         struct nfs_pageio_descriptor pgio;
346         int err;
347
348         /* Stop dirtying of new pages while we sync */
349         err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
350                         nfs_wait_bit_killable, TASK_KILLABLE);
351         if (err)
352                 goto out_err;
353
354         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
355
356         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
357         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358         nfs_pageio_complete(&pgio);
359
360         clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
361         smp_mb__after_clear_bit();
362         wake_up_bit(bitlock, NFS_INO_FLUSHING);
363
364         if (err < 0)
365                 goto out_err;
366         err = pgio.pg_error;
367         if (err < 0)
368                 goto out_err;
369         return 0;
370 out_err:
371         return err;
372 }
373
374 /*
375  * Insert a write request into an inode
376  */
377 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
378 {
379         struct nfs_inode *nfsi = NFS_I(inode);
380
381         /* Lock the request! */
382         nfs_lock_request_dontget(req);
383
384         spin_lock(&inode->i_lock);
385         if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
386                 inode->i_version++;
387         set_bit(PG_MAPPED, &req->wb_flags);
388         SetPagePrivate(req->wb_page);
389         set_page_private(req->wb_page, (unsigned long)req);
390         nfsi->npages++;
391         kref_get(&req->wb_kref);
392         spin_unlock(&inode->i_lock);
393 }
394
395 /*
396  * Remove a write request from an inode
397  */
398 static void nfs_inode_remove_request(struct nfs_page *req)
399 {
400         struct inode *inode = req->wb_context->dentry->d_inode;
401         struct nfs_inode *nfsi = NFS_I(inode);
402
403         BUG_ON (!NFS_WBACK_BUSY(req));
404
405         spin_lock(&inode->i_lock);
406         set_page_private(req->wb_page, 0);
407         ClearPagePrivate(req->wb_page);
408         clear_bit(PG_MAPPED, &req->wb_flags);
409         nfsi->npages--;
410         spin_unlock(&inode->i_lock);
411         nfs_release_request(req);
412 }
413
414 static void
415 nfs_mark_request_dirty(struct nfs_page *req)
416 {
417         __set_page_dirty_nobuffers(req->wb_page);
418 }
419
420 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
421 /**
422  * nfs_request_add_commit_list - add request to a commit list
423  * @req: pointer to a struct nfs_page
424  * @head: commit list head
425  *
426  * This sets the PG_CLEAN bit, updates the inode global count of
427  * number of outstanding requests requiring a commit as well as
428  * the MM page stats.
429  *
430  * The caller must _not_ hold the inode->i_lock, but must be
431  * holding the nfs_page lock.
432  */
433 void
434 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
435 {
436         struct inode *inode = req->wb_context->dentry->d_inode;
437
438         set_bit(PG_CLEAN, &(req)->wb_flags);
439         spin_lock(&inode->i_lock);
440         nfs_list_add_request(req, head);
441         NFS_I(inode)->ncommit++;
442         spin_unlock(&inode->i_lock);
443         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
444         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
445         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
446 }
447 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
448
449 /**
450  * nfs_request_remove_commit_list - Remove request from a commit list
451  * @req: pointer to a nfs_page
452  *
453  * This clears the PG_CLEAN bit, and updates the inode global count of
454  * number of outstanding requests requiring a commit
455  * It does not update the MM page stats.
456  *
457  * The caller _must_ hold the inode->i_lock and the nfs_page lock.
458  */
459 void
460 nfs_request_remove_commit_list(struct nfs_page *req)
461 {
462         struct inode *inode = req->wb_context->dentry->d_inode;
463
464         if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
465                 return;
466         nfs_list_remove_request(req);
467         NFS_I(inode)->ncommit--;
468 }
469 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
470
471
472 /*
473  * Add a request to the inode's commit list.
474  */
475 static void
476 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
477 {
478         struct inode *inode = req->wb_context->dentry->d_inode;
479
480         if (pnfs_mark_request_commit(req, lseg))
481                 return;
482         nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
483 }
484
485 static void
486 nfs_clear_page_commit(struct page *page)
487 {
488         dec_zone_page_state(page, NR_UNSTABLE_NFS);
489         dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
490 }
491
492 static void
493 nfs_clear_request_commit(struct nfs_page *req)
494 {
495         if (test_bit(PG_CLEAN, &req->wb_flags)) {
496                 struct inode *inode = req->wb_context->dentry->d_inode;
497
498                 if (!pnfs_clear_request_commit(req)) {
499                         spin_lock(&inode->i_lock);
500                         nfs_request_remove_commit_list(req);
501                         spin_unlock(&inode->i_lock);
502                 }
503                 nfs_clear_page_commit(req->wb_page);
504         }
505 }
506
507 static inline
508 int nfs_write_need_commit(struct nfs_write_data *data)
509 {
510         if (data->verf.committed == NFS_DATA_SYNC)
511                 return data->lseg == NULL;
512         else
513                 return data->verf.committed != NFS_FILE_SYNC;
514 }
515
516 static inline
517 int nfs_reschedule_unstable_write(struct nfs_page *req,
518                                   struct nfs_write_data *data)
519 {
520         if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
521                 nfs_mark_request_commit(req, data->lseg);
522                 return 1;
523         }
524         if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
525                 nfs_mark_request_dirty(req);
526                 return 1;
527         }
528         return 0;
529 }
530 #else
531 static void
532 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
533 {
534 }
535
536 static void
537 nfs_clear_request_commit(struct nfs_page *req)
538 {
539 }
540
541 static inline
542 int nfs_write_need_commit(struct nfs_write_data *data)
543 {
544         return 0;
545 }
546
547 static inline
548 int nfs_reschedule_unstable_write(struct nfs_page *req,
549                                   struct nfs_write_data *data)
550 {
551         return 0;
552 }
553 #endif
554
555 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
556 static int
557 nfs_need_commit(struct nfs_inode *nfsi)
558 {
559         return nfsi->ncommit > 0;
560 }
561
562 /* i_lock held by caller */
563 static int
564 nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
565                 spinlock_t *lock)
566 {
567         struct nfs_page *req, *tmp;
568         int ret = 0;
569
570         list_for_each_entry_safe(req, tmp, src, wb_list) {
571                 if (!nfs_lock_request(req))
572                         continue;
573                 if (cond_resched_lock(lock))
574                         list_safe_reset_next(req, tmp, wb_list);
575                 nfs_request_remove_commit_list(req);
576                 nfs_list_add_request(req, dst);
577                 ret++;
578                 if (ret == max)
579                         break;
580         }
581         return ret;
582 }
583
584 /*
585  * nfs_scan_commit - Scan an inode for commit requests
586  * @inode: NFS inode to scan
587  * @dst: destination list
588  *
589  * Moves requests from the inode's 'commit' request list.
590  * The requests are *not* checked to ensure that they form a contiguous set.
591  */
592 static int
593 nfs_scan_commit(struct inode *inode, struct list_head *dst)
594 {
595         struct nfs_inode *nfsi = NFS_I(inode);
596         int ret = 0;
597
598         spin_lock(&inode->i_lock);
599         if (nfsi->ncommit > 0) {
600                 const int max = INT_MAX;
601
602                 ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
603                                 &inode->i_lock);
604                 ret += pnfs_scan_commit_lists(inode, max - ret,
605                                 &inode->i_lock);
606         }
607         spin_unlock(&inode->i_lock);
608         return ret;
609 }
610
611 #else
612 static inline int nfs_need_commit(struct nfs_inode *nfsi)
613 {
614         return 0;
615 }
616
617 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
618 {
619         return 0;
620 }
621 #endif
622
623 /*
624  * Search for an existing write request, and attempt to update
625  * it to reflect a new dirty region on a given page.
626  *
627  * If the attempt fails, then the existing request is flushed out
628  * to disk.
629  */
630 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
631                 struct page *page,
632                 unsigned int offset,
633                 unsigned int bytes)
634 {
635         struct nfs_page *req;
636         unsigned int rqend;
637         unsigned int end;
638         int error;
639
640         if (!PagePrivate(page))
641                 return NULL;
642
643         end = offset + bytes;
644         spin_lock(&inode->i_lock);
645
646         for (;;) {
647                 req = nfs_page_find_request_locked(page);
648                 if (req == NULL)
649                         goto out_unlock;
650
651                 rqend = req->wb_offset + req->wb_bytes;
652                 /*
653                  * Tell the caller to flush out the request if
654                  * the offsets are non-contiguous.
655                  * Note: nfs_flush_incompatible() will already
656                  * have flushed out requests having wrong owners.
657                  */
658                 if (offset > rqend
659                     || end < req->wb_offset)
660                         goto out_flushme;
661
662                 if (nfs_lock_request_dontget(req))
663                         break;
664
665                 /* The request is locked, so wait and then retry */
666                 spin_unlock(&inode->i_lock);
667                 error = nfs_wait_on_request(req);
668                 nfs_release_request(req);
669                 if (error != 0)
670                         goto out_err;
671                 spin_lock(&inode->i_lock);
672         }
673
674         /* Okay, the request matches. Update the region */
675         if (offset < req->wb_offset) {
676                 req->wb_offset = offset;
677                 req->wb_pgbase = offset;
678         }
679         if (end > rqend)
680                 req->wb_bytes = end - req->wb_offset;
681         else
682                 req->wb_bytes = rqend - req->wb_offset;
683 out_unlock:
684         spin_unlock(&inode->i_lock);
685         nfs_clear_request_commit(req);
686         return req;
687 out_flushme:
688         spin_unlock(&inode->i_lock);
689         nfs_release_request(req);
690         error = nfs_wb_page(inode, page);
691 out_err:
692         return ERR_PTR(error);
693 }
694
695 /*
696  * Try to update an existing write request, or create one if there is none.
697  *
698  * Note: Should always be called with the Page Lock held to prevent races
699  * if we have to add a new request. Also assumes that the caller has
700  * already called nfs_flush_incompatible() if necessary.
701  */
702 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
703                 struct page *page, unsigned int offset, unsigned int bytes)
704 {
705         struct inode *inode = page->mapping->host;
706         struct nfs_page *req;
707
708         req = nfs_try_to_update_request(inode, page, offset, bytes);
709         if (req != NULL)
710                 goto out;
711         req = nfs_create_request(ctx, inode, page, offset, bytes);
712         if (IS_ERR(req))
713                 goto out;
714         nfs_inode_add_request(inode, req);
715 out:
716         return req;
717 }
718
719 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
720                 unsigned int offset, unsigned int count)
721 {
722         struct nfs_page *req;
723
724         req = nfs_setup_write_request(ctx, page, offset, count);
725         if (IS_ERR(req))
726                 return PTR_ERR(req);
727         /* Update file length */
728         nfs_grow_file(page, offset, count);
729         nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
730         nfs_mark_request_dirty(req);
731         nfs_unlock_request(req);
732         return 0;
733 }
734
735 int nfs_flush_incompatible(struct file *file, struct page *page)
736 {
737         struct nfs_open_context *ctx = nfs_file_open_context(file);
738         struct nfs_page *req;
739         int do_flush, status;
740         /*
741          * Look for a request corresponding to this page. If there
742          * is one, and it belongs to another file, we flush it out
743          * before we try to copy anything into the page. Do this
744          * due to the lack of an ACCESS-type call in NFSv2.
745          * Also do the same if we find a request from an existing
746          * dropped page.
747          */
748         do {
749                 req = nfs_page_find_request(page);
750                 if (req == NULL)
751                         return 0;
752                 do_flush = req->wb_page != page || req->wb_context != ctx ||
753                         req->wb_lock_context->lockowner != current->files ||
754                         req->wb_lock_context->pid != current->tgid;
755                 nfs_release_request(req);
756                 if (!do_flush)
757                         return 0;
758                 status = nfs_wb_page(page->mapping->host, page);
759         } while (status == 0);
760         return status;
761 }
762
763 /*
764  * If the page cache is marked as unsafe or invalid, then we can't rely on
765  * the PageUptodate() flag. In this case, we will need to turn off
766  * write optimisations that depend on the page contents being correct.
767  */
768 static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
769 {
770         return PageUptodate(page) &&
771                 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
772 }
773
774 /*
775  * Update and possibly write a cached page of an NFS file.
776  *
777  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
778  * things with a page scheduled for an RPC call (e.g. invalidate it).
779  */
780 int nfs_updatepage(struct file *file, struct page *page,
781                 unsigned int offset, unsigned int count)
782 {
783         struct nfs_open_context *ctx = nfs_file_open_context(file);
784         struct inode    *inode = page->mapping->host;
785         int             status = 0;
786
787         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
788
789         dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
790                 file->f_path.dentry->d_parent->d_name.name,
791                 file->f_path.dentry->d_name.name, count,
792                 (long long)(page_offset(page) + offset));
793
794         /* If we're not using byte range locks, and we know the page
795          * is up to date, it may be more efficient to extend the write
796          * to cover the entire page in order to avoid fragmentation
797          * inefficiencies.
798          */
799         if (nfs_write_pageuptodate(page, inode) &&
800                         inode->i_flock == NULL &&
801                         !(file->f_flags & O_DSYNC)) {
802                 count = max(count + offset, nfs_page_length(page));
803                 offset = 0;
804         }
805
806         status = nfs_writepage_setup(ctx, page, offset, count);
807         if (status < 0)
808                 nfs_set_pageerror(page);
809         else
810                 __set_page_dirty_nobuffers(page);
811
812         dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
813                         status, (long long)i_size_read(inode));
814         return status;
815 }
816
817 static void nfs_writepage_release(struct nfs_page *req,
818                                   struct nfs_write_data *data)
819 {
820         struct page *page = req->wb_page;
821
822         if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
823                 nfs_inode_remove_request(req);
824         nfs_unlock_request(req);
825         nfs_end_page_writeback(page);
826 }
827
828 static int flush_task_priority(int how)
829 {
830         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
831                 case FLUSH_HIGHPRI:
832                         return RPC_PRIORITY_HIGH;
833                 case FLUSH_LOWPRI:
834                         return RPC_PRIORITY_LOW;
835         }
836         return RPC_PRIORITY_NORMAL;
837 }
838
839 int nfs_initiate_write(struct nfs_write_data *data,
840                        struct rpc_clnt *clnt,
841                        const struct rpc_call_ops *call_ops,
842                        int how)
843 {
844         struct inode *inode = data->inode;
845         int priority = flush_task_priority(how);
846         struct rpc_task *task;
847         struct rpc_message msg = {
848                 .rpc_argp = &data->args,
849                 .rpc_resp = &data->res,
850                 .rpc_cred = data->cred,
851         };
852         struct rpc_task_setup task_setup_data = {
853                 .rpc_client = clnt,
854                 .task = &data->task,
855                 .rpc_message = &msg,
856                 .callback_ops = call_ops,
857                 .callback_data = data,
858                 .workqueue = nfsiod_workqueue,
859                 .flags = RPC_TASK_ASYNC,
860                 .priority = priority,
861         };
862         int ret = 0;
863
864         /* Set up the initial task struct.  */
865         NFS_PROTO(inode)->write_setup(data, &msg);
866
867         dprintk("NFS: %5u initiated write call "
868                 "(req %s/%lld, %u bytes @ offset %llu)\n",
869                 data->task.tk_pid,
870                 inode->i_sb->s_id,
871                 (long long)NFS_FILEID(inode),
872                 data->args.count,
873                 (unsigned long long)data->args.offset);
874
875         task = rpc_run_task(&task_setup_data);
876         if (IS_ERR(task)) {
877                 ret = PTR_ERR(task);
878                 goto out;
879         }
880         if (how & FLUSH_SYNC) {
881                 ret = rpc_wait_for_completion_task(task);
882                 if (ret == 0)
883                         ret = task->tk_status;
884         }
885         rpc_put_task(task);
886 out:
887         return ret;
888 }
889 EXPORT_SYMBOL_GPL(nfs_initiate_write);
890
891 /*
892  * Set up the argument/result storage required for the RPC call.
893  */
894 static void nfs_write_rpcsetup(struct nfs_page *req,
895                 struct nfs_write_data *data,
896                 unsigned int count, unsigned int offset,
897                 int how)
898 {
899         struct inode *inode = req->wb_context->dentry->d_inode;
900
901         /* Set up the RPC argument and reply structs
902          * NB: take care not to mess about with data->commit et al. */
903
904         data->req = req;
905         data->inode = inode = req->wb_context->dentry->d_inode;
906         data->cred = req->wb_context->cred;
907
908         data->args.fh     = NFS_FH(inode);
909         data->args.offset = req_offset(req) + offset;
910         /* pnfs_set_layoutcommit needs this */
911         data->mds_offset = data->args.offset;
912         data->args.pgbase = req->wb_pgbase + offset;
913         data->args.pages  = data->pagevec;
914         data->args.count  = count;
915         data->args.context = get_nfs_open_context(req->wb_context);
916         data->args.lock_context = req->wb_lock_context;
917         data->args.stable  = NFS_UNSTABLE;
918         switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
919         case 0:
920                 break;
921         case FLUSH_COND_STABLE:
922                 if (nfs_need_commit(NFS_I(inode)))
923                         break;
924         default:
925                 data->args.stable = NFS_FILE_SYNC;
926         }
927
928         data->res.fattr   = &data->fattr;
929         data->res.count   = count;
930         data->res.verf    = &data->verf;
931         nfs_fattr_init(&data->fattr);
932 }
933
934 static int nfs_do_write(struct nfs_write_data *data,
935                 const struct rpc_call_ops *call_ops,
936                 int how)
937 {
938         struct inode *inode = data->args.context->dentry->d_inode;
939
940         return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
941 }
942
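/*
 * Send every nfs_write_data queued on @head.  The first error encountered
 * is remembered and returned, but the remaining requests are still
 * issued.
 */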
943 static int nfs_do_multiple_writes(struct list_head *head,
944                 const struct rpc_call_ops *call_ops,
945                 int how)
946 {
947         struct nfs_write_data *data;
948         int ret = 0;
949
950         while (!list_empty(head)) {
951                 int ret2;
952
953                 data = list_entry(head->next, struct nfs_write_data, list);
954                 list_del_init(&data->list);
955
956                 ret2 = nfs_do_write(data, call_ops, how);
957                 if (ret == 0)
958                         ret = ret2;
959         }
960         return ret;
961 }
962
963 /* If a nfs_flush_* function fails, it should remove reqs from @head and
964  * call this on each, which will prepare them to be retried on next
965  * writeback using standard nfs.
966  */
967 static void nfs_redirty_request(struct nfs_page *req)
968 {
969         struct page *page = req->wb_page;
970
971         nfs_mark_request_dirty(req);
972         nfs_unlock_request(req);
973         nfs_end_page_writeback(page);
974 }
975
976 /*
977  * Generate multiple small requests to write out a single
978  * contiguous dirty area on one page.
979  */
980 static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
981 {
982         struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
983         struct page *page = req->wb_page;
984         struct nfs_write_data *data;
985         size_t wsize = desc->pg_bsize, nbytes;
986         unsigned int offset;
987         int requests = 0;
988         int ret = 0;
989
990         nfs_list_remove_request(req);
991
992         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
993             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
994              desc->pg_count > wsize))
995                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
996
997
998         offset = 0;
999         nbytes = desc->pg_count;
1000         do {
1001                 size_t len = min(nbytes, wsize);
1002
1003                 data = nfs_writedata_alloc(1);
1004                 if (!data)
1005                         goto out_bad;
1006                 data->pagevec[0] = page;
1007                 nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
1008                 list_add(&data->list, res);
1009                 requests++;
1010                 nbytes -= len;
1011                 offset += len;
1012         } while (nbytes != 0);
1013         atomic_set(&req->wb_complete, requests);
1014         desc->pg_rpc_callops = &nfs_write_partial_ops;
1015         return ret;
1016
1017 out_bad:
1018         while (!list_empty(res)) {
1019                 data = list_entry(res->next, struct nfs_write_data, list);
1020                 list_del(&data->list);
1021                 nfs_writedata_free(data);
1022         }
1023         nfs_redirty_request(req);
1024         return -ENOMEM;
1025 }
1026
1027 /*
1028  * Create an RPC task for the given write request and kick it.
1029  * The page must have been locked by the caller.
1030  *
1031  * It may happen that the page we're passed is not marked dirty.
1032  * This is the case if nfs_updatepage detects a conflicting request
1033  * that has been written but not committed.
1034  */
1035 static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
1036 {
1037         struct nfs_page         *req;
1038         struct page             **pages;
1039         struct nfs_write_data   *data;
1040         struct list_head *head = &desc->pg_list;
1041         int ret = 0;
1042
1043         data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
1044                                                       desc->pg_count));
1045         if (!data) {
1046                 while (!list_empty(head)) {
1047                         req = nfs_list_entry(head->next);
1048                         nfs_list_remove_request(req);
1049                         nfs_redirty_request(req);
1050                 }
1051                 ret = -ENOMEM;
1052                 goto out;
1053         }
1054         pages = data->pagevec;
1055         while (!list_empty(head)) {
1056                 req = nfs_list_entry(head->next);
1057                 nfs_list_remove_request(req);
1058                 nfs_list_add_request(req, &data->pages);
1059                 *pages++ = req->wb_page;
1060         }
1061         req = nfs_list_entry(data->pages.next);
1062
1063         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1064             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
1065                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1066
1067         /* Set up the argument struct */
1068         nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
1069         list_add(&data->list, res);
1070         desc->pg_rpc_callops = &nfs_write_full_ops;
1071 out:
1072         return ret;
1073 }
1074
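/*
 * Choose a flush strategy: when the server's write size is smaller than a
 * page, a single page must be split across several WRITEs
 * (nfs_flush_multi); otherwise the whole coalesced list is sent as one
 * WRITE (nfs_flush_one).
 */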
1075 int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
1076 {
1077         if (desc->pg_bsize < PAGE_CACHE_SIZE)
1078                 return nfs_flush_multi(desc, head);
1079         return nfs_flush_one(desc, head);
1080 }
1081
1082 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1083 {
1084         LIST_HEAD(head);
1085         int ret;
1086
1087         ret = nfs_generic_flush(desc, &head);
1088         if (ret == 0)
1089                 ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
1090                                 desc->pg_ioflags);
1091         return ret;
1092 }
1093
1094 static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1095         .pg_test = nfs_generic_pg_test,
1096         .pg_doio = nfs_generic_pg_writepages,
1097 };
1098
1099 void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1100                                   struct inode *inode, int ioflags)
1101 {
1102         nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
1103                                 NFS_SERVER(inode)->wsize, ioflags);
1104 }
1105
1106 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1107 {
1108         pgio->pg_ops = &nfs_pageio_write_ops;
1109         pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1110 }
1111 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1112
1113 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1114                                   struct inode *inode, int ioflags)
1115 {
1116         if (!pnfs_pageio_init_write(pgio, inode, ioflags))
1117                 nfs_pageio_init_write_mds(pgio, inode, ioflags);
1118 }
1119
1120 /*
1121  * Handle a write reply that flushed part of a page.
1122  */
1123 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1124 {
1125         struct nfs_write_data   *data = calldata;
1126
1127         dprintk("NFS: %5u write(%s/%lld %d@%lld)",
1128                 task->tk_pid,
1129                 data->req->wb_context->dentry->d_inode->i_sb->s_id,
1130                 (long long)
1131                   NFS_FILEID(data->req->wb_context->dentry->d_inode),
1132                 data->req->wb_bytes, (long long)req_offset(data->req));
1133
1134         nfs_writeback_done(task, data);
1135 }
1136
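/*
 * Per-request completion for a partial-page WRITE.  Errors mark the page
 * bad; unstable replies stash the write verifier for a later COMMIT, or
 * flag the request for resend if the verifier no longer matches.
 * nfs_writepage_release() only runs once wb_complete drops to zero, i.e.
 * when the last partial WRITE for the page has finished.
 */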
1137 static void nfs_writeback_release_partial(void *calldata)
1138 {
1139         struct nfs_write_data   *data = calldata;
1140         struct nfs_page         *req = data->req;
1141         struct page             *page = req->wb_page;
1142         int status = data->task.tk_status;
1143
1144         if (status < 0) {
1145                 nfs_set_pageerror(page);
1146                 nfs_context_set_write_error(req->wb_context, status);
1147                 dprintk(", error = %d\n", status);
1148                 goto out;
1149         }
1150
1151         if (nfs_write_need_commit(data)) {
1152                 struct inode *inode = page->mapping->host;
1153
1154                 spin_lock(&inode->i_lock);
1155                 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
1156                         /* Do nothing we need to resend the writes */
1157                 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1158                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1159                         dprintk(" defer commit\n");
1160                 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1161                         set_bit(PG_NEED_RESCHED, &req->wb_flags);
1162                         clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1163                         dprintk(" server reboot detected\n");
1164                 }
1165                 spin_unlock(&inode->i_lock);
1166         } else
1167                 dprintk(" OK\n");
1168
1169 out:
1170         if (atomic_dec_and_test(&req->wb_complete))
1171                 nfs_writepage_release(req, data);
1172         nfs_writedata_release(calldata);
1173 }
1174
1175 void nfs_write_prepare(struct rpc_task *task, void *calldata)
1176 {
1177         struct nfs_write_data *data = calldata;
1178         NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
1179 }
1180
1181 static const struct rpc_call_ops nfs_write_partial_ops = {
1182         .rpc_call_prepare = nfs_write_prepare,
1183         .rpc_call_done = nfs_writeback_done_partial,
1184         .rpc_release = nfs_writeback_release_partial,
1185 };
1186
1187 /*
1188  * Handle a write reply that flushes a whole page.
1189  *
1190  * FIXME: There is an inherent race with invalidate_inode_pages and
1191  *        writebacks since the page->count is kept > 1 for as long
1192  *        as the page has a write request pending.
1193  */
1194 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1195 {
1196         struct nfs_write_data   *data = calldata;
1197
1198         nfs_writeback_done(task, data);
1199 }
1200
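/*
 * Per-request completion for whole-page WRITEs.  Each request on the list
 * is either failed (page flagged in error), moved to the commit list if
 * the reply was only unstably written, or otherwise removed from the
 * inode.
 */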
1201 static void nfs_writeback_release_full(void *calldata)
1202 {
1203         struct nfs_write_data   *data = calldata;
1204         int status = data->task.tk_status;
1205
1206         /* Update attributes as result of writeback. */
1207         while (!list_empty(&data->pages)) {
1208                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1209                 struct page *page = req->wb_page;
1210
1211                 nfs_list_remove_request(req);
1212
1213                 dprintk("NFS: %5u write (%s/%lld %d@%lld)",
1214                         data->task.tk_pid,
1215                         req->wb_context->dentry->d_inode->i_sb->s_id,
1216                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1217                         req->wb_bytes,
1218                         (long long)req_offset(req));
1219
1220                 if (status < 0) {
1221                         nfs_set_pageerror(page);
1222                         nfs_context_set_write_error(req->wb_context, status);
1223                         dprintk(", error = %d\n", status);
1224                         goto remove_request;
1225                 }
1226
1227                 if (nfs_write_need_commit(data)) {
1228                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1229                         nfs_mark_request_commit(req, data->lseg);
1230                         dprintk(" marked for commit\n");
1231                         goto next;
1232                 }
1233                 dprintk(" OK\n");
1234 remove_request:
1235                 nfs_inode_remove_request(req);
1236         next:
1237                 nfs_unlock_request(req);
1238                 nfs_end_page_writeback(page);
1239         }
1240         nfs_writedata_release(calldata);
1241 }
1242
1243 static const struct rpc_call_ops nfs_write_full_ops = {
1244         .rpc_call_prepare = nfs_write_prepare,
1245         .rpc_call_done = nfs_writeback_done_full,
1246         .rpc_release = nfs_writeback_release_full,
1247 };
1248
1249
1250 /*
1251  * This function is called when the WRITE call is complete.
1252  */
1253 void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1254 {
1255         struct nfs_writeargs    *argp = &data->args;
1256         struct nfs_writeres     *resp = &data->res;
1257         int status;
1258
1259         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1260                 task->tk_pid, task->tk_status);
1261
1262         /*
1263          * ->write_done will attempt to use post-op attributes to detect
1264          * conflicting writes by other clients.  A strict interpretation
1265          * of close-to-open would allow us to continue caching even if
1266          * another writer had changed the file, but some applications
1267          * depend on tighter cache coherency when writing.
1268          */
1269         status = NFS_PROTO(data->inode)->write_done(task, data);
1270         if (status != 0)
1271                 return;
1272         nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1273
1274 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1275         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1276                 /* We tried a write call, but the server did not
1277                  * commit data to stable storage even though we
1278                  * requested it.
1279                  * Note: There is a known bug in Tru64 < 5.0 in which
1280                  *       the server reports NFS_DATA_SYNC, but performs
1281                  *       NFS_FILE_SYNC. We therefore implement this checking
1282                  *       as a dprintk() in order to avoid filling syslog.
1283                  */
1284                 static unsigned long    complain;
1285
1286                 /* Note this will print the MDS for a DS write */
1287                 if (time_before(complain, jiffies)) {
1288                         dprintk("NFS:       faulty NFS server %s:"
1289                                 " (committed = %d) != (stable = %d)\n",
1290                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1291                                 resp->verf->committed, argp->stable);
1292                         complain = jiffies + 300 * HZ;
1293                 }
1294         }
1295 #endif
1296         /* Is this a short write? */
1297         if (task->tk_status >= 0 && resp->count < argp->count) {
1298                 static unsigned long    complain;
1299
1300                 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1301
1302                 /* Has the server at least made some progress? */
1303                 if (resp->count != 0) {
1304                         /* Was this an NFSv2 write or an NFSv3 stable write? */
1305                         if (resp->verf->committed != NFS_UNSTABLE) {
1306                                 /* Resend from where the server left off */
1307                                 data->mds_offset += resp->count;
1308                                 argp->offset += resp->count;
1309                                 argp->pgbase += resp->count;
1310                                 argp->count -= resp->count;
1311                         } else {
1312                                 /* Resend as a stable write in order to avoid
1313                                  * headaches in the case of a server crash.
1314                                  */
1315                                 argp->stable = NFS_FILE_SYNC;
1316                         }
1317                         rpc_restart_call_prepare(task);
1318                         return;
1319                 }
1320                 if (time_before(complain, jiffies)) {
1321                         printk(KERN_WARNING
1322                                "NFS: Server wrote zero bytes, expected %u.\n",
1323                                         argp->count);
1324                         complain = jiffies + 300 * HZ;
1325                 }
1326                 /* Can't do anything about it except throw an error. */
1327                 task->tk_status = -EIO;
1328         }
1329         return;
1330 }
1331
1332
1333 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
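/*
 * Take the per-inode NFS_INO_COMMIT bit lock.  Returns 1 once the lock is
 * held, 0 if it is contended and @may_wait is not set, or a negative
 * errno if the killable wait is interrupted.
 */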
1334 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1335 {
1336         int ret;
1337
1338         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1339                 return 1;
1340         if (!may_wait)
1341                 return 0;
1342         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1343                                 NFS_INO_COMMIT,
1344                                 nfs_wait_bit_killable,
1345                                 TASK_KILLABLE);
1346         return (ret < 0) ? ret : 1;
1347 }
1348
1349 void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1350 {
1351         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1352         smp_mb__after_clear_bit();
1353         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1354 }
1355 EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
1356
1357 void nfs_commitdata_release(void *data)
1358 {
1359         struct nfs_write_data *wdata = data;
1360
1361         put_nfs_open_context(wdata->args.context);
1362         nfs_commit_free(wdata);
1363 }
1364 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1365
1366 int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
1367                         const struct rpc_call_ops *call_ops,
1368                         int how)
1369 {
1370         struct rpc_task *task;
1371         int priority = flush_task_priority(how);
1372         struct rpc_message msg = {
1373                 .rpc_argp = &data->args,
1374                 .rpc_resp = &data->res,
1375                 .rpc_cred = data->cred,
1376         };
1377         struct rpc_task_setup task_setup_data = {
1378                 .task = &data->task,
1379                 .rpc_client = clnt,
1380                 .rpc_message = &msg,
1381                 .callback_ops = call_ops,
1382                 .callback_data = data,
1383                 .workqueue = nfsiod_workqueue,
1384                 .flags = RPC_TASK_ASYNC,
1385                 .priority = priority,
1386         };
1387         /* Set up the initial task struct.  */
1388         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1389
1390         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1391
1392         task = rpc_run_task(&task_setup_data);
1393         if (IS_ERR(task))
1394                 return PTR_ERR(task);
1395         if (how & FLUSH_SYNC)
1396                 rpc_wait_for_completion_task(task);
1397         rpc_put_task(task);
1398         return 0;
1399 }
1400 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1401
1402 /*
1403  * Set up the argument/result storage required for the RPC call.
1404  */
1405 void nfs_init_commit(struct nfs_write_data *data,
1406                             struct list_head *head,
1407                             struct pnfs_layout_segment *lseg)
1408 {
1409         struct nfs_page *first = nfs_list_entry(head->next);
1410         struct inode *inode = first->wb_context->dentry->d_inode;
1411
1412         /* Set up the RPC argument and reply structs
1413          * NB: take care not to mess about with data->commit et al. */
1414
1415         list_splice_init(head, &data->pages);
1416
1417         data->inode       = inode;
1418         data->cred        = first->wb_context->cred;
1419         data->lseg        = lseg; /* reference transferred */
1420         data->mds_ops     = &nfs_commit_ops;
1421
1422         data->args.fh     = NFS_FH(data->inode);
1423         /* Note: we always request a commit of the entire inode */
1424         data->args.offset = 0;
1425         data->args.count  = 0;
1426         data->args.context = get_nfs_open_context(first->wb_context);
1427         data->res.count   = 0;
1428         data->res.fattr   = &data->fattr;
1429         data->res.verf    = &data->verf;
1430         nfs_fattr_init(&data->fattr);
1431 }
1432 EXPORT_SYMBOL_GPL(nfs_init_commit);
1433
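/*
 * Requeue requests for a COMMIT that could not be set up.  Each request
 * goes back on the commit list via nfs_mark_request_commit(), which
 * re-increments the unstable page counters, so the counters are
 * decremented right after to keep the accounting balanced.
 */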
1434 void nfs_retry_commit(struct list_head *page_list,
1435                       struct pnfs_layout_segment *lseg)
1436 {
1437         struct nfs_page *req;
1438
1439         while (!list_empty(page_list)) {
1440                 req = nfs_list_entry(page_list->next);
1441                 nfs_list_remove_request(req);
1442                 nfs_mark_request_commit(req, lseg);
1443                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1444                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1445                              BDI_RECLAIMABLE);
1446                 nfs_unlock_request(req);
1447         }
1448 }
1449 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1450
1451 /*
1452  * Commit dirty pages
1453  */
1454 static int
1455 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1456 {
1457         struct nfs_write_data   *data;
1458
1459         data = nfs_commitdata_alloc();
1460
1461         if (!data)
1462                 goto out_bad;
1463
1464         /* Set up the argument struct */
1465         nfs_init_commit(data, head, NULL);
1466         return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
1467  out_bad:
1468         nfs_retry_commit(head, NULL);
1469         nfs_commit_clear_lock(NFS_I(inode));
1470         return -ENOMEM;
1471 }
1472
1473 /*
1474  * COMMIT call returned
1475  */
1476 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1477 {
1478         struct nfs_write_data   *data = calldata;
1479
1480         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1481                                 task->tk_pid, task->tk_status);
1482
1483         /* Call the NFS version-specific code */
1484         NFS_PROTO(data->inode)->commit_done(task, data);
1485 }
1486
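/*
 * Handle the requests attached to a completed COMMIT.  A request whose
 * stored verifier matches the one the server returned is finished and can
 * be removed; a mismatch means the server rebooted (or otherwise lost the
 * unstable data) and the page is redirtied so it gets written again.
 */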
1487 void nfs_commit_release_pages(struct nfs_write_data *data)
1488 {
1489         struct nfs_page *req;
1490         int status = data->task.tk_status;
1491
1492         while (!list_empty(&data->pages)) {
1493                 req = nfs_list_entry(data->pages.next);
1494                 nfs_list_remove_request(req);
1495                 nfs_clear_page_commit(req->wb_page);
1496
1497                 dprintk("NFS:       commit (%s/%lld %d@%lld)",
1498                         req->wb_context->dentry->d_sb->s_id,
1499                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1500                         req->wb_bytes,
1501                         (long long)req_offset(req));
1502                 if (status < 0) {
1503                         nfs_context_set_write_error(req->wb_context, status);
1504                         nfs_inode_remove_request(req);
1505                         dprintk(", error = %d\n", status);
1506                         goto next;
1507                 }
1508
1509                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1510                  * returned by the server against all stored verfs. */
1511                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1512                         /* We have a match */
1513                         nfs_inode_remove_request(req);
1514                         dprintk(" OK\n");
1515                         goto next;
1516                 }
1517                 /* We have a mismatch. Write the page again */
1518                 dprintk(" mismatch\n");
1519                 nfs_mark_request_dirty(req);
1520         next:
1521                 nfs_unlock_request(req);
1522         }
1523 }
1524 EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
1525
1526 static void nfs_commit_release(void *calldata)
1527 {
1528         struct nfs_write_data *data = calldata;
1529
1530         nfs_commit_release_pages(data);
1531         nfs_commit_clear_lock(NFS_I(data->inode));
1532         nfs_commitdata_release(calldata);
1533 }
1534
1535 static const struct rpc_call_ops nfs_commit_ops = {
1536         .rpc_call_prepare = nfs_write_prepare,
1537         .rpc_call_done = nfs_commit_done,
1538         .rpc_release = nfs_commit_release,
1539 };
1540
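/*
 * Commit the outstanding unstable writes on @inode.  With FLUSH_SYNC this
 * waits for the COMMIT to complete; when we return without ensuring
 * completion, the inode is marked dirty again (see out_mark_dirty below)
 * so a later sync still flushes the data.
 */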
1541 int nfs_commit_inode(struct inode *inode, int how)
1542 {
1543         LIST_HEAD(head);
1544         int may_wait = how & FLUSH_SYNC;
1545         int res;
1546
1547         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1548         if (res <= 0)
1549                 goto out_mark_dirty;
1550         res = nfs_scan_commit(inode, &head);
1551         if (res) {
1552                 int error;
1553
1554                 error = pnfs_commit_list(inode, &head, how);
1555                 if (error == PNFS_NOT_ATTEMPTED)
1556                         error = nfs_commit_list(inode, &head, how);
1557                 if (error < 0)
1558                         return error;
1559                 if (!may_wait)
1560                         goto out_mark_dirty;
1561                 error = wait_on_bit(&NFS_I(inode)->flags,
1562                                 NFS_INO_COMMIT,
1563                                 nfs_wait_bit_killable,
1564                                 TASK_KILLABLE);
1565                 if (error < 0)
1566                         return error;
1567         } else
1568                 nfs_commit_clear_lock(NFS_I(inode));
1569         return res;
1570         /* Note: If we exit without ensuring that the commit is complete,
1571          * we must mark the inode as dirty. Otherwise, future calls to
1572          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1573          * that the data is on the disk.
1574          */
1575 out_mark_dirty:
1576         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1577         return res;
1578 }
1579
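/*
 * Called from nfs_write_inode() to push out pages that have been written
 * to the server but not yet committed.  For a non-blocking flush the
 * commit is skipped while fewer than half of the inode's pages are
 * awaiting commit, any COMMIT that is sent is not waited for, and the
 * number of committed requests is credited against wbc->nr_to_write.
 */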
1580 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1581 {
1582         struct nfs_inode *nfsi = NFS_I(inode);
1583         int flags = FLUSH_SYNC;
1584         int ret = 0;
1585
1586         /* no commits means nothing needs to be done */
1587         if (!nfsi->ncommit)
1588                 return ret;
1589
1590         if (wbc->sync_mode == WB_SYNC_NONE) {
1591                 /* Don't commit yet if this is a non-blocking flush and there
1592                  * are a lot of outstanding writes for this mapping.
1593                  */
1594                 if (nfsi->ncommit <= (nfsi->npages >> 1))
1595                         goto out_mark_dirty;
1596
1597                 /* don't wait for the COMMIT response */
1598                 flags = 0;
1599         }
1600
1601         ret = nfs_commit_inode(inode, flags);
1602         if (ret >= 0) {
1603                 if (wbc->sync_mode == WB_SYNC_NONE) {
1604                         if (ret < wbc->nr_to_write)
1605                                 wbc->nr_to_write -= ret;
1606                         else
1607                                 wbc->nr_to_write = 0;
1608                 }
1609                 return 0;
1610         }
1611 out_mark_dirty:
1612         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1613         return ret;
1614 }
1615 #else
1616 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1617 {
1618         return 0;
1619 }
1620 #endif
1621
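/*
 * Writeback entry point called from the VFS: commit any unstable pages
 * and, if a pNFS layoutcommit is pending on this inode, send that too
 * (waiting for it only on data-integrity writeback).
 */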
1622 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1623 {
1624         int ret;
1625
1626         ret = nfs_commit_unstable_pages(inode, wbc);
1627         if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
1628                 int status;
1629                 bool sync = true;
1630
1631                 if (wbc->sync_mode == WB_SYNC_NONE)
1632                         sync = false;
1633
1634                 status = pnfs_layoutcommit_inode(inode, sync);
1635                 if (status < 0)
1636                         return status;
1637         }
1638         return ret;
1639 }
1640
1641 /*
1642  * Flush the inode to disk.
1643  */
1644 int nfs_wb_all(struct inode *inode)
1645 {
1646         struct writeback_control wbc = {
1647                 .sync_mode = WB_SYNC_ALL,
1648                 .nr_to_write = LONG_MAX,
1649                 .range_start = 0,
1650                 .range_end = LLONG_MAX,
1651         };
1652
1653         return sync_inode(inode, &wbc);
1654 }
1655
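/*
 * Called when a page is being removed from the page cache (e.g. on
 * truncate): wait for any outstanding writeback, then discard the write
 * request attached to the page without flushing its data to the server.
 */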
1656 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1657 {
1658         struct nfs_page *req;
1659         int ret = 0;
1660
1661         BUG_ON(!PageLocked(page));
1662         for (;;) {
1663                 wait_on_page_writeback(page);
1664                 req = nfs_page_find_request(page);
1665                 if (req == NULL)
1666                         break;
1667                 if (nfs_lock_request_dontget(req)) {
1668                         nfs_clear_request_commit(req);
1669                         nfs_inode_remove_request(req);
1670                         /*
1671                          * In case nfs_inode_remove_request has marked the
1672                          * page as being dirty
1673                          */
1674                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1675                         nfs_unlock_request(req);
1676                         break;
1677                 }
1678                 ret = nfs_wait_on_request(req);
1679                 nfs_release_request(req);
1680                 if (ret < 0)
1681                         break;
1682         }
1683         return ret;
1684 }
1685
1686 /*
1687  * Write back all requests on one page - we do this before reading it.
1688  */
1689 int nfs_wb_page(struct inode *inode, struct page *page)
1690 {
1691         loff_t range_start = page_offset(page);
1692         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1693         struct writeback_control wbc = {
1694                 .sync_mode = WB_SYNC_ALL,
1695                 .nr_to_write = 0,
1696                 .range_start = range_start,
1697                 .range_end = range_end,
1698         };
1699         int ret;
1700
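        /*
         * Keep flushing while the page is dirty and committing while a
         * request is still attached to it (PagePrivate), so that every
         * write request for this page has completed before we return.
         */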
1701         for (;;) {
1702                 wait_on_page_writeback(page);
1703                 if (clear_page_dirty_for_io(page)) {
1704                         ret = nfs_writepage_locked(page, &wbc);
1705                         if (ret < 0)
1706                                 goto out_error;
1707                         continue;
1708                 }
1709                 if (!PagePrivate(page))
1710                         break;
1711                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1712                 if (ret < 0)
1713                         goto out_error;
1714         }
1715         return 0;
1716 out_error:
1717         return ret;
1718 }
1719
1720 #ifdef CONFIG_MIGRATION
1721 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1722                 struct page *page, enum migrate_mode mode)
1723 {
1724         /*
1725          * If PagePrivate is set, then the page is currently associated with
1726          * an in-progress read or write request. Don't try to migrate it.
1727          *
1728          * FIXME: we could do this in principle, but we'll need a way to ensure
1729          *        that we can safely release the inode reference while holding
1730          *        the page lock.
1731          */
1732         if (PagePrivate(page))
1733                 return -EBUSY;
1734
1735         nfs_fscache_release_page(page, GFP_KERNEL);
1736
1737         return migrate_page(mapping, newpage, page, mode);
1738 }
1739 #endif
1740
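/*
 * Create the slab cache and mempools used for write and commit data, and
 * derive the default nfs_congestion_kb threshold from the amount of RAM.
 */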
1741 int __init nfs_init_writepagecache(void)
1742 {
1743         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1744                                              sizeof(struct nfs_write_data),
1745                                              0, SLAB_HWCACHE_ALIGN,
1746                                              NULL);
1747         if (nfs_wdata_cachep == NULL)
1748                 return -ENOMEM;
1749
1750         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1751                                                      nfs_wdata_cachep);
1752         if (nfs_wdata_mempool == NULL)
1753                 return -ENOMEM;
1754
1755         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1756                                                       nfs_wdata_cachep);
1757         if (nfs_commit_mempool == NULL)
1758                 return -ENOMEM;
1759
1760         /*
1761          * NFS congestion size, scale with available memory.
1762          *
1763          *  64MB:    8192k
1764          * 128MB:   11585k
1765          * 256MB:   16384k
1766          * 512MB:   23170k
1767          *   1GB:   32768k
1768          *   2GB:   46340k
1769          *   4GB:   65536k
1770          *   8GB:   92681k
1771          *  16GB:  131072k
1772          *
1773          * This allows larger machines to have larger/more transfers.
1774  * Limit the default to 256MB.
1775          */
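        /*
         * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): with
         * 1GB of RAM, totalram_pages is 262144 and int_sqrt(262144) is 512,
         * so 16 * 512 == 8192 pages, and 8192 << (12 - 10) == 32768k,
         * matching the table above.
         */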
1776         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1777         if (nfs_congestion_kb > 256*1024)
1778                 nfs_congestion_kb = 256*1024;
1779
1780         return 0;
1781 }
1782
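/*
 * Tear down the mempools and slab cache created by
 * nfs_init_writepagecache().
 */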
1783 void nfs_destroy_writepagecache(void)
1784 {
1785         mempool_destroy(nfs_commit_mempool);
1786         mempool_destroy(nfs_wdata_mempool);
1787         kmem_cache_destroy(nfs_wdata_cachep);
1788 }
1789