1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * File open, close, extend, truncate
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/capability.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37 #include <linux/falloc.h>
38 #include <linux/quotaops.h>
40 #define MLOG_MASK_PREFIX ML_INODE
41 #include <cluster/masklog.h>
49 #include "extent_map.h"
62 #include "refcounttree.h"
64 #include "buffer_head_io.h"
66 static int ocfs2_sync_inode(struct inode *inode)
68 filemap_fdatawrite(inode->i_mapping);
69 return sync_mapping_buffers(inode->i_mapping);
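/*
 * Roughly: each open file gets a small ocfs2_file_private so that the
 * cluster-aware flock code (see ocfs2_flock in the file_operations
 * tables at the bottom of this file) has a per-descriptor lock
 * resource and mutex to work with. ocfs2_file_open()/ocfs2_dir_open()
 * allocate it; the matching release paths drop the lock resource and
 * free it again.
 */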
72 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
74 struct ocfs2_file_private *fp;
76 fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
81 mutex_init(&fp->fp_mutex);
82 ocfs2_file_lock_res_init(&fp->fp_flock, fp);
83 file->private_data = fp;
88 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
90 struct ocfs2_file_private *fp = file->private_data;
91 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
94 ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
95 ocfs2_lock_res_free(&fp->fp_flock);
97 file->private_data = NULL;
101 static int ocfs2_file_open(struct inode *inode, struct file *file)
104 int mode = file->f_flags;
105 struct ocfs2_inode_info *oi = OCFS2_I(inode);
107 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
108 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
110 spin_lock(&oi->ip_lock);
112 /* Check that the inode hasn't been wiped from disk by another
113 * node. If it hasn't then we're safe as long as we hold the
114 * spin lock until our increment of open count. */
115 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
116 spin_unlock(&oi->ip_lock);
123 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
126 spin_unlock(&oi->ip_lock);
128 status = ocfs2_init_file_private(inode, file);
131 * We want to set open count back if we're failing the
134 spin_lock(&oi->ip_lock);
136 spin_unlock(&oi->ip_lock);
144 static int ocfs2_file_release(struct inode *inode, struct file *file)
146 struct ocfs2_inode_info *oi = OCFS2_I(inode);
147 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
149 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
150 file->f_path.dentry->d_name.len,
151 file->f_path.dentry->d_name.name);
153 spin_lock(&oi->ip_lock);
154 if (!--oi->ip_open_count)
155 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
156 spin_unlock(&oi->ip_lock);
160 * Disable this for now. Keeping the reservation around a bit
161 * longer gives an improvement for workloads which rapidly do
162 * open()/write()/close() against a file.
164 if ((file->f_mode & FMODE_WRITE) &&
165 (atomic_read(&inode->i_writecount) == 1)) {
166 down_write(&oi->ip_alloc_sem);
167 ocfs2_resv_discard(&osb->osb_la_resmap,
168 &oi->ip_la_data_resv);
169 up_write(&oi->ip_alloc_sem);
173 ocfs2_free_file_private(inode, file);
180 static int ocfs2_dir_open(struct inode *inode, struct file *file)
182 return ocfs2_init_file_private(inode, file);
185 static int ocfs2_dir_release(struct inode *inode, struct file *file)
187 ocfs2_free_file_private(inode, file);
191 static int ocfs2_sync_file(struct file *file,
192 struct dentry *dentry,
197 struct inode *inode = dentry->d_inode;
198 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
200 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
201 dentry->d_name.len, dentry->d_name.name);
203 err = ocfs2_sync_inode(dentry->d_inode);
207 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
210 journal = osb->journal->j_journal;
211 err = jbd2_journal_force_commit(journal);
216 return (err < 0) ? -EIO : 0;
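/*
 * Atime policy, as this code appears to implement it: skip the update
 * on read-only mounts, on noatime/nodiratime inodes and mounts, and -
 * under relatime - whenever atime is already newer than mtime/ctime.
 * Otherwise the update is throttled by the atime_quantum mount option
 * (osb->s_atime_quantum) so we don't dirty the inode on every read.
 */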
219 int ocfs2_should_update_atime(struct inode *inode,
220 struct vfsmount *vfsmnt)
223 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
225 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
228 if ((inode->i_flags & S_NOATIME) ||
229 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
233 * We can be called with no vfsmnt structure - NFSD will
236 * Note that our action here is different from touch_atime() -
237 * if we can't tell whether this is a noatime mount, then we
238 * don't know whether to trust the value of s_atime_quantum.
243 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
244 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
247 if (vfsmnt->mnt_flags & MNT_RELATIME) {
248 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
249 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
256 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
262 int ocfs2_update_inode_atime(struct inode *inode,
263 struct buffer_head *bh)
266 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
268 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
272 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
273 if (IS_ERR(handle)) {
274 ret = PTR_ERR(handle);
279 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
280 OCFS2_JOURNAL_ACCESS_WRITE);
287 * Don't use ocfs2_mark_inode_dirty() here as we don't always
288 * have i_mutex to guard against concurrent changes to other
291 inode->i_atime = CURRENT_TIME;
292 di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
293 di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
295 ret = ocfs2_journal_dirty(handle, bh);
300 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
306 static int ocfs2_set_inode_size(handle_t *handle,
308 struct buffer_head *fe_bh,
314 i_size_write(inode, new_i_size);
315 inode->i_blocks = ocfs2_inode_sector_count(inode);
316 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
318 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
329 int ocfs2_simple_size_update(struct inode *inode,
330 struct buffer_head *di_bh,
334 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
335 handle_t *handle = NULL;
337 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
338 if (IS_ERR(handle)) {
339 ret = PTR_ERR(handle);
344 ret = ocfs2_set_inode_size(handle, inode, di_bh,
349 ocfs2_commit_trans(osb, handle);
354 static int ocfs2_cow_file_pos(struct inode *inode,
355 struct buffer_head *fe_bh,
359 u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
360 unsigned int num_clusters = 0;
361 unsigned int ext_flags = 0;
364 * If the new offset is aligned to the range of the cluster, there is
365 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
368 if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
371 status = ocfs2_get_clusters(inode, cpos, &phys,
372 &num_clusters, &ext_flags);
378 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
381 return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
387 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
389 struct buffer_head *fe_bh,
394 struct ocfs2_dinode *di;
400 * We need to CoW the cluster that contains the offset if it is reflinked
401 * since we will call ocfs2_zero_range_for_truncate later which will
402 * write "0" from offset to the end of the cluster.
404 status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
410 /* TODO: This needs to actually orphan the inode in this
413 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
414 if (IS_ERR(handle)) {
415 status = PTR_ERR(handle);
420 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
421 OCFS2_JOURNAL_ACCESS_WRITE);
428 * Do this before setting i_size.
430 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
431 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
438 i_size_write(inode, new_i_size);
439 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
441 di = (struct ocfs2_dinode *) fe_bh->b_data;
442 di->i_size = cpu_to_le64(new_i_size);
443 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
444 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
446 status = ocfs2_journal_dirty(handle, fe_bh);
451 ocfs2_commit_trans(osb, handle);
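/*
 * High-level truncate flow, as a sketch of what the code below does:
 * discard the local-alloc reservation, unmap and drop page cache past
 * the new size, take the inline-data shortcut when possible, otherwise
 * go through ocfs2_orphan_for_truncate() - which today mostly zeroes
 * the tail cluster and pushes the new i_size to disk; the actual
 * orphaning is still a TODO above - and then let
 * ocfs2_prepare_truncate()/ocfs2_commit_truncate() free the allocation.
 */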
458 static int ocfs2_truncate_file(struct inode *inode,
459 struct buffer_head *di_bh,
463 struct ocfs2_dinode *fe = NULL;
464 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
465 struct ocfs2_truncate_context *tc = NULL;
467 mlog_entry("(inode = %llu, new_i_size = %llu)\n",
468 (unsigned long long)OCFS2_I(inode)->ip_blkno,
469 (unsigned long long)new_i_size);
471 /* We trust di_bh because it comes from ocfs2_inode_lock(), which
472 * already validated it */
473 fe = (struct ocfs2_dinode *) di_bh->b_data;
475 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
476 "Inode %llu, inode i_size = %lld != di "
477 "i_size = %llu, i_flags = 0x%x\n",
478 (unsigned long long)OCFS2_I(inode)->ip_blkno,
480 (unsigned long long)le64_to_cpu(fe->i_size),
481 le32_to_cpu(fe->i_flags));
483 if (new_i_size > le64_to_cpu(fe->i_size)) {
484 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
485 (unsigned long long)le64_to_cpu(fe->i_size),
486 (unsigned long long)new_i_size);
492 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
493 (unsigned long long)le64_to_cpu(fe->i_blkno),
494 (unsigned long long)le64_to_cpu(fe->i_size),
495 (unsigned long long)new_i_size);
497 /* let's handle the simple truncate cases before doing any more
498 * cluster locking. */
499 if (new_i_size == le64_to_cpu(fe->i_size))
502 down_write(&OCFS2_I(inode)->ip_alloc_sem);
504 ocfs2_resv_discard(&osb->osb_la_resmap,
505 &OCFS2_I(inode)->ip_la_data_resv);
508 * The inode lock forced other nodes to sync and drop their
509 * pages, which (correctly) happens even if we have a truncate
510 * without allocation change - ocfs2 cluster sizes can be much
511 * greater than page size, so we have to truncate them
514 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
515 truncate_inode_pages(inode->i_mapping, new_i_size);
517 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
518 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
519 i_size_read(inode), 1);
523 goto bail_unlock_sem;
526 /* alright, we're going to need to do a full blown alloc size
527 * change. Orphan the inode so that recovery can complete the
528 * truncate if necessary. This does the task of marking
530 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
533 goto bail_unlock_sem;
536 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
539 goto bail_unlock_sem;
542 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
545 goto bail_unlock_sem;
548 /* TODO: orphan dir cleanup here. */
550 up_write(&OCFS2_I(inode)->ip_alloc_sem);
553 if (!status && OCFS2_I(inode)->ip_clusters == 0)
554 status = ocfs2_try_remove_refcount_tree(inode, di_bh);
561 * extend file allocation only here.
562 * we'll update all the disk stuff, and oip->alloc_size
564 * expect stuff to be locked, a transaction started and enough data /
565 * metadata reservations in the contexts.
567 * Will return -EAGAIN, and a reason if a restart is needed.
568 * If passed in, *reason will always be set, even in error.
570 int ocfs2_add_inode_data(struct ocfs2_super *osb,
575 struct buffer_head *fe_bh,
577 struct ocfs2_alloc_context *data_ac,
578 struct ocfs2_alloc_context *meta_ac,
579 enum ocfs2_alloc_restarted *reason_ret)
582 struct ocfs2_extent_tree et;
584 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
585 ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
586 clusters_to_add, mark_unwritten,
587 data_ac, meta_ac, reason_ret);
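/*
 * The extend path below can restart in two ways, keyed off the reason
 * returned through *reason_ret: RESTART_META sets restart_func so the
 * whole function is re-entered with fresh metadata reservations, while
 * RESTART_TRANS only extends the handle's credits and jumps back to
 * restarted_transaction. This is a summary of the code, not a contract.
 */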
592 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
593 u32 clusters_to_add, int mark_unwritten)
596 int restart_func = 0;
599 struct buffer_head *bh = NULL;
600 struct ocfs2_dinode *fe = NULL;
601 handle_t *handle = NULL;
602 struct ocfs2_alloc_context *data_ac = NULL;
603 struct ocfs2_alloc_context *meta_ac = NULL;
604 enum ocfs2_alloc_restarted why;
605 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
606 struct ocfs2_extent_tree et;
609 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
612 * This function only exists for file systems which don't
615 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
617 status = ocfs2_read_inode_block(inode, &bh);
622 fe = (struct ocfs2_dinode *) bh->b_data;
625 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
627 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
628 "clusters_to_add = %u\n",
629 (unsigned long long)OCFS2_I(inode)->ip_blkno,
630 (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
632 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
633 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
640 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
642 handle = ocfs2_start_trans(osb, credits);
643 if (IS_ERR(handle)) {
644 status = PTR_ERR(handle);
650 restarted_transaction:
651 if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb,
658 /* reserve a write to the file entry early on - that way if we
659 * run out of credits in the allocation path, we can still
661 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
662 OCFS2_JOURNAL_ACCESS_WRITE);
668 prev_clusters = OCFS2_I(inode)->ip_clusters;
670 status = ocfs2_add_inode_data(osb,
680 if ((status < 0) && (status != -EAGAIN)) {
681 if (status != -ENOSPC)
686 status = ocfs2_journal_dirty(handle, bh);
692 spin_lock(&OCFS2_I(inode)->ip_lock);
693 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
694 spin_unlock(&OCFS2_I(inode)->ip_lock);
695 /* Release unused quota reservation */
696 vfs_dq_free_space(inode,
697 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
700 if (why != RESTART_NONE && clusters_to_add) {
701 if (why == RESTART_META) {
702 mlog(0, "restarting function.\n");
705 BUG_ON(why != RESTART_TRANS);
707 mlog(0, "restarting transaction.\n");
708 /* TODO: This can be more intelligent. */
709 credits = ocfs2_calc_extend_credits(osb->sb,
712 status = ocfs2_extend_trans(handle, credits);
714 /* handle still has to be committed at
720 goto restarted_transaction;
724 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
725 le32_to_cpu(fe->i_clusters),
726 (unsigned long long)le64_to_cpu(fe->i_size));
727 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
728 OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
731 if (status < 0 && did_quota)
732 vfs_dq_free_space(inode,
733 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
735 ocfs2_commit_trans(osb, handle);
739 ocfs2_free_alloc_context(data_ac);
743 ocfs2_free_alloc_context(meta_ac);
746 if ((!status) && restart_func) {
757 /* Some parts of this taken from generic_cont_expand, which turned out
758 * to be too fragile to do exactly what we need without us having to
759 * worry about recursive locking in ->write_begin() and ->write_end(). */
760 static int ocfs2_write_zero_page(struct inode *inode,
763 struct address_space *mapping = inode->i_mapping;
767 handle_t *handle = NULL;
770 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
771 /* ugh. in prepare/commit_write, if from==to==start of block, we
772 * skip the prepare. make sure we never send an offset for the start
775 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
778 index = size >> PAGE_CACHE_SHIFT;
780 page = grab_cache_page(mapping, index);
787 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
793 if (ocfs2_should_order_data(inode)) {
794 handle = ocfs2_start_walk_page_trans(inode, page, offset,
796 if (IS_ERR(handle)) {
797 ret = PTR_ERR(handle);
803 /* must not update i_size! */
804 ret = block_commit_write(page, offset, offset);
811 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
814 page_cache_release(page);
819 static int ocfs2_zero_extend(struct inode *inode,
824 struct super_block *sb = inode->i_sb;
826 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
827 while (start_off < zero_to_size) {
828 ret = ocfs2_write_zero_page(inode, start_off);
834 start_off += sb->s_blocksize;
837 * Very large extends have the potential to lock up
838 * the cpu for extended periods of time.
847 int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
851 struct ocfs2_inode_info *oi = OCFS2_I(inode);
853 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
854 if (clusters_to_add < oi->ip_clusters)
857 clusters_to_add -= oi->ip_clusters;
859 if (clusters_to_add) {
860 ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
869 * Call this even if we don't add any clusters to the tree. We
870 * still need to zero the area between the old i_size and the
873 ret = ocfs2_zero_extend(inode, zero_to);
881 static int ocfs2_extend_file(struct inode *inode,
882 struct buffer_head *di_bh,
886 struct ocfs2_inode_info *oi = OCFS2_I(inode);
890 /* setattr sometimes calls us like this. */
894 if (i_size_read(inode) == new_i_size)
896 BUG_ON(new_i_size < i_size_read(inode));
899 * Fall through for converting inline data, even if the fs
900 * supports sparse files.
902 * The check for inline data here is legal - nobody can add
903 * the feature since we have i_mutex. We must check it again
904 * after acquiring ip_alloc_sem though, as paths like mmap
905 * might have raced us to converting the inode to extents.
907 if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
908 && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
909 goto out_update_size;
912 * The alloc sem blocks people in read/write from reading our
913 * allocation until we're done changing it. We depend on
914 * i_mutex to block other extend/truncate calls while we're
917 down_write(&oi->ip_alloc_sem);
919 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
921 * We can optimize small extends by keeping the inodes
924 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
925 up_write(&oi->ip_alloc_sem);
926 goto out_update_size;
929 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
931 up_write(&oi->ip_alloc_sem);
938 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
939 ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
941 up_write(&oi->ip_alloc_sem);
949 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
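/*
 * Note on ordering in ocfs2_setattr() below: it takes the cluster rw
 * lock, then the inode (meta) cluster lock, and only then starts a
 * journal transaction; size changes are done before the transaction
 * via ocfs2_truncate_file()/ocfs2_extend_file(). The uid/gid path
 * reserves extra quota credits because vfs_dq_transfer() runs inside
 * the handle. (Informal summary of the code that follows.)
 */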
957 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
959 int status = 0, size_change;
960 struct inode *inode = dentry->d_inode;
961 struct super_block *sb = inode->i_sb;
962 struct ocfs2_super *osb = OCFS2_SB(sb);
963 struct buffer_head *bh = NULL;
964 handle_t *handle = NULL;
966 struct dquot *transfer_from[MAXQUOTAS] = { };
967 struct dquot *transfer_to[MAXQUOTAS] = { };
969 mlog_entry("(0x%p, '%.*s')\n", dentry,
970 dentry->d_name.len, dentry->d_name.name);
972 /* ensuring we don't even attempt to truncate a symlink */
973 if (S_ISLNK(inode->i_mode))
974 attr->ia_valid &= ~ATTR_SIZE;
976 if (attr->ia_valid & ATTR_MODE)
977 mlog(0, "mode change: %d\n", attr->ia_mode);
978 if (attr->ia_valid & ATTR_UID)
979 mlog(0, "uid change: %d\n", attr->ia_uid);
980 if (attr->ia_valid & ATTR_GID)
981 mlog(0, "gid change: %d\n", attr->ia_gid);
982 if (attr->ia_valid & ATTR_SIZE)
983 mlog(0, "size change...\n");
984 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
985 mlog(0, "time change...\n");
987 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
988 | ATTR_GID | ATTR_UID | ATTR_MODE)
989 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
990 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
994 status = inode_change_ok(inode, attr);
998 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1000 status = ocfs2_rw_lock(inode, 1);
1007 status = ocfs2_inode_lock(inode, &bh, 1);
1009 if (status != -ENOENT)
1011 goto bail_unlock_rw;
1014 if (size_change && attr->ia_size != i_size_read(inode)) {
1015 if (attr->ia_size > sb->s_maxbytes) {
1020 if (i_size_read(inode) > attr->ia_size) {
1021 if (ocfs2_should_order_data(inode)) {
1022 status = ocfs2_begin_ordered_truncate(inode,
1027 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1029 status = ocfs2_extend_file(inode, bh, attr->ia_size);
1031 if (status != -ENOSPC)
1038 if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
1039 (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
1041 * Gather pointers to quota structures so that allocation /
1042 * freeing of quota structures happens here and not inside
1043 * vfs_dq_transfer() where we have problems with lock ordering
1045 if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
1046 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1047 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1048 transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
1050 transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
1052 if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
1057 if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
1058 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1059 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1060 transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
1062 transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
1064 if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
1069 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1070 2 * ocfs2_quota_trans_credits(sb));
1071 if (IS_ERR(handle)) {
1072 status = PTR_ERR(handle);
1076 status = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
1080 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1081 if (IS_ERR(handle)) {
1082 status = PTR_ERR(handle);
1089 * This will intentionally not wind up calling vmtruncate(),
1090 * since all the work for a size change has been done above.
1091 * Otherwise, we could get into problems with truncate as
1092 * ip_alloc_sem is used there to protect against i_size
1095 status = inode_setattr(inode, attr);
1101 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1106 ocfs2_commit_trans(osb, handle);
1108 ocfs2_inode_unlock(inode, 1);
1111 ocfs2_rw_unlock(inode, 1);
1115 /* Release quota pointers in case we acquired them */
1116 for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
1117 dqput(transfer_to[qtype]);
1118 dqput(transfer_from[qtype]);
1121 if (!status && attr->ia_valid & ATTR_MODE) {
1122 status = ocfs2_acl_chmod(inode);
1131 int ocfs2_getattr(struct vfsmount *mnt,
1132 struct dentry *dentry,
1135 struct inode *inode = dentry->d_inode;
1136 struct super_block *sb = dentry->d_inode->i_sb;
1137 struct ocfs2_super *osb = sb->s_fs_info;
1142 err = ocfs2_inode_revalidate(dentry);
1149 generic_fillattr(inode, stat);
1151 /* We set the blksize from the cluster size for performance */
1152 stat->blksize = osb->s_clustersize;
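/*
 * Permission checks take a read-level inode cluster lock first so that
 * generic_permission() (and the ACL callback) sees mode/ACL state that
 * is current across the cluster - at least, that seems to be the point
 * of the lock/unlock pair below.
 */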
1160 int ocfs2_permission(struct inode *inode, int mask)
1166 ret = ocfs2_inode_lock(inode, NULL, 0);
1173 ret = generic_permission(inode, mask, ocfs2_check_acl);
1175 ocfs2_inode_unlock(inode, 0);
1181 static int __ocfs2_write_remove_suid(struct inode *inode,
1182 struct buffer_head *bh)
1186 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1187 struct ocfs2_dinode *di;
1189 mlog_entry("(Inode %llu, mode 0%o)\n",
1190 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
1192 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1193 if (IS_ERR(handle)) {
1194 ret = PTR_ERR(handle);
1199 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1200 OCFS2_JOURNAL_ACCESS_WRITE);
1206 inode->i_mode &= ~S_ISUID;
1207 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1208 inode->i_mode &= ~S_ISGID;
1210 di = (struct ocfs2_dinode *) bh->b_data;
1211 di->i_mode = cpu_to_le16(inode->i_mode);
1213 ret = ocfs2_journal_dirty(handle, bh);
1218 ocfs2_commit_trans(osb, handle);
1225 * Will look for holes and unwritten extents in the range starting at
1226 * pos for count bytes (inclusive).
1228 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1232 unsigned int extent_flags;
1233 u32 cpos, clusters, extent_len, phys_cpos;
1234 struct super_block *sb = inode->i_sb;
1236 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1237 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1240 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1247 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1252 if (extent_len > clusters)
1253 extent_len = clusters;
1255 clusters -= extent_len;
1262 static int ocfs2_write_remove_suid(struct inode *inode)
1265 struct buffer_head *bh = NULL;
1267 ret = ocfs2_read_inode_block(inode, &bh);
1273 ret = __ocfs2_write_remove_suid(inode, bh);
1280 * Allocate enough extents to cover the region starting at byte offset
1281 * start for len bytes. Existing extents are skipped, any extents
1282 * added are marked as "unwritten".
1284 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1288 u32 cpos, phys_cpos, clusters, alloc_size;
1289 u64 end = start + len;
1290 struct buffer_head *di_bh = NULL;
1292 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1293 ret = ocfs2_read_inode_block(inode, &di_bh);
1300 * Nothing to do if the requested reservation range
1301 * fits within the inode.
1303 if (ocfs2_size_fits_inline_data(di_bh, end))
1306 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1314 * We consider both start and len to be inclusive.
1316 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1317 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1321 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1329 * Hole or existing extent len can be arbitrary, so
1330 * cap it to our own allocation request.
1332 if (alloc_size > clusters)
1333 alloc_size = clusters;
1337 * We already have an allocation at this
1338 * region so we can safely skip it.
1343 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1352 clusters -= alloc_size;
1363 * Truncate a byte range, avoiding pages within partial clusters. This
1364 * preserves those pages for the zeroing code to write to.
1366 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1369 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1371 struct address_space *mapping = inode->i_mapping;
1373 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1374 end = byte_start + byte_len;
1375 end = end & ~(osb->s_clustersize - 1);
1378 unmap_mapping_range(mapping, start, end - start, 0);
1379 truncate_inode_pages_range(mapping, start, end - 1);
1383 static int ocfs2_zero_partial_clusters(struct inode *inode,
1387 u64 tmpend, end = start + len;
1388 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1389 unsigned int csize = osb->s_clustersize;
1393 * The "start" and "end" values are NOT necessarily part of
1394 * the range whose allocation is being deleted. Rather, this
1395 * is what the user passed in with the request. We must zero
1396 * partial clusters here. There's no need to worry about
1397 * physical allocation - the zeroing code knows to skip holes.
1399 mlog(0, "byte start: %llu, end: %llu\n",
1400 (unsigned long long)start, (unsigned long long)end);
1403 * If both edges are on a cluster boundary then there's no
1404 * zeroing required as the region is part of the allocation to
1407 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1410 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1411 if (IS_ERR(handle)) {
1412 ret = PTR_ERR(handle);
1418 * We want to get the byte offset of the end of the 1st cluster.
1420 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1424 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1425 (unsigned long long)start, (unsigned long long)tmpend);
1427 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1433 * This may make start and end equal, but the zeroing
1434 * code will skip any work in that case so there's no
1435 * need to catch it up here.
1437 start = end & ~(osb->s_clustersize - 1);
1439 mlog(0, "2nd range: start: %llu, end: %llu\n",
1440 (unsigned long long)start, (unsigned long long)end);
1442 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1447 ocfs2_commit_trans(osb, handle);
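/*
 * Punch-hole flow in ocfs2_remove_inode_range() below, roughly: zero
 * the partial clusters at either end of the byte range (using the
 * helper above, which runs under its own transaction), walk the extent
 * map removing each whole-cluster extent via ocfs2_remove_btree_range(),
 * then drop the page cache for the affected full clusters and flush the
 * truncate log and dealloc contexts.
 */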
1452 static int ocfs2_remove_inode_range(struct inode *inode,
1453 struct buffer_head *di_bh, u64 byte_start,
1457 u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
1458 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1459 struct ocfs2_cached_dealloc_ctxt dealloc;
1460 struct address_space *mapping = inode->i_mapping;
1461 struct ocfs2_extent_tree et;
1463 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1464 ocfs2_init_dealloc_ctxt(&dealloc);
1469 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1470 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1471 byte_start + byte_len, 0);
1477 * There's no need to get fancy with the page cache
1478 * truncate of an inline-data inode. We're talking
1479 * about less than a page here, which will be cached
1480 * in the dinode buffer anyway.
1482 unmap_mapping_range(mapping, 0, 0, 0);
1483 truncate_inode_pages(mapping, 0);
1487 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1488 trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
1489 if (trunc_len >= trunc_start)
1490 trunc_len -= trunc_start;
1494 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
1495 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1496 (unsigned long long)byte_start,
1497 (unsigned long long)byte_len, trunc_start, trunc_len);
1499 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1507 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1514 if (alloc_size > trunc_len)
1515 alloc_size = trunc_len;
1517 /* Only do work for non-holes */
1518 if (phys_cpos != 0) {
1519 ret = ocfs2_remove_btree_range(inode, &et, cpos,
1520 phys_cpos, alloc_size,
1529 trunc_len -= alloc_size;
1532 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1535 ocfs2_schedule_truncate_log_flush(osb, 1);
1536 ocfs2_run_deallocs(osb, &dealloc);
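/*
 * For reference, a userspace caller reaches the code below through the
 * space-reservation ioctls; something along these lines (untested
 * sketch, field names taken from struct ocfs2_space_resv as used here):
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,		(0 = SEEK_SET, per the switch below)
 *		.l_start  = offset,
 *		.l_len    = length,
 *	};
 *	ioctl(fd, OCFS2_IOC_RESVSP64, &sr);	preallocates as unwritten
 *	ioctl(fd, OCFS2_IOC_UNRESVSP64, &sr);	gives the range back
 *
 * ocfs2_change_file_space() is the entry point for those ioctls, and
 * ocfs2_fallocate() builds the same ocfs2_space_resv internally.
 */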
1542 * Parts of this function taken from xfs_change_file_space()
1544 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1545 loff_t f_pos, unsigned int cmd,
1546 struct ocfs2_space_resv *sr,
1552 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1553 struct buffer_head *di_bh = NULL;
1555 unsigned long long max_off = inode->i_sb->s_maxbytes;
1557 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1560 mutex_lock(&inode->i_mutex);
1563 * This prevents concurrent writes on other nodes
1565 ret = ocfs2_rw_lock(inode, 1);
1571 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1577 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1579 goto out_inode_unlock;
1582 switch (sr->l_whence) {
1583 case 0: /*SEEK_SET*/
1585 case 1: /*SEEK_CUR*/
1586 sr->l_start += f_pos;
1588 case 2: /*SEEK_END*/
1589 sr->l_start += i_size_read(inode);
1593 goto out_inode_unlock;
1597 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1600 || sr->l_start > max_off
1601 || (sr->l_start + llen) < 0
1602 || (sr->l_start + llen) > max_off) {
1604 goto out_inode_unlock;
1606 size = sr->l_start + sr->l_len;
1608 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1609 if (sr->l_len <= 0) {
1611 goto out_inode_unlock;
1615 if (file && should_remove_suid(file->f_path.dentry)) {
1616 ret = __ocfs2_write_remove_suid(inode, di_bh);
1619 goto out_inode_unlock;
1623 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1625 case OCFS2_IOC_RESVSP:
1626 case OCFS2_IOC_RESVSP64:
1628 * This takes unsigned offsets, but the signed ones we
1629 * pass have been checked against overflow above.
1631 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1634 case OCFS2_IOC_UNRESVSP:
1635 case OCFS2_IOC_UNRESVSP64:
1636 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1642 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1645 goto out_inode_unlock;
1649 * We update c/mtime for these changes
1651 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1652 if (IS_ERR(handle)) {
1653 ret = PTR_ERR(handle);
1655 goto out_inode_unlock;
1658 if (change_size && i_size_read(inode) < size)
1659 i_size_write(inode, size);
1661 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1662 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1666 ocfs2_commit_trans(osb, handle);
1670 ocfs2_inode_unlock(inode, 1);
1672 ocfs2_rw_unlock(inode, 1);
1675 mutex_unlock(&inode->i_mutex);
1679 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1680 struct ocfs2_space_resv *sr)
1682 struct inode *inode = file->f_path.dentry->d_inode;
1683 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1685 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1686 !ocfs2_writes_unwritten_extents(osb))
1688 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1689 !ocfs2_sparse_alloc(osb))
1692 if (!S_ISREG(inode->i_mode))
1695 if (!(file->f_mode & FMODE_WRITE))
1698 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1701 static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1704 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1705 struct ocfs2_space_resv sr;
1706 int change_size = 1;
1708 if (!ocfs2_writes_unwritten_extents(osb))
1711 if (S_ISDIR(inode->i_mode))
1714 if (mode & FALLOC_FL_KEEP_SIZE)
1718 sr.l_start = (s64)offset;
1719 sr.l_len = (s64)len;
1721 return __ocfs2_change_file_space(NULL, inode, offset,
1722 OCFS2_IOC_RESVSP64, &sr, change_size);
1725 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1729 unsigned int extent_flags;
1730 u32 cpos, clusters, extent_len, phys_cpos;
1731 struct super_block *sb = inode->i_sb;
1733 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
1734 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
1735 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1738 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1739 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1742 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1749 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
1754 if (extent_len > clusters)
1755 extent_len = clusters;
1757 clusters -= extent_len;
1764 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
1765 loff_t pos, size_t count,
1769 struct buffer_head *di_bh = NULL;
1770 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1772 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
1774 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1782 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
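/*
 * ocfs2_prepare_inode_for_write() decides, before any data is moved,
 * whether suid/sgid bits need clearing, whether a reflinked range must
 * be CoWed first, and whether an O_DIRECT write can actually proceed
 * (inline-data inodes, writes past i_size and ranges with holes appear
 * to be pushed back to buffered I/O). The flags it fills in
 * (*direct_io, *has_refcount) drive the write path below - informal
 * summary only.
 */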
1790 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1797 int ret = 0, meta_level = 0;
1798 struct inode *inode = dentry->d_inode;
1799 loff_t saved_pos, end;
1802 * We start with a read level meta lock and only jump to an ex
1803 * if we need to make modifications here.
1806 ret = ocfs2_inode_lock(inode, NULL, meta_level);
1813 /* Clear suid / sgid if necessary. We do this here
1814 * instead of later in the write path because
1815 * remove_suid() calls ->setattr without any hint that
1816 * we may have already done our cluster locking. Since
1817 * ocfs2_setattr() *must* take cluster locks to
1818 * proceed, this will lead us to recursively lock the
1819 * inode. There's also the dinode i_size state which
1820 * can be lost via setattr during extending writes (we
1821 * set inode->i_size at the end of a write). */
1822 if (should_remove_suid(dentry)) {
1823 if (meta_level == 0) {
1824 ocfs2_inode_unlock(inode, meta_level);
1829 ret = ocfs2_write_remove_suid(inode);
1836 /* work on a copy of ppos until we're sure that we won't have
1837 * to recalculate it due to relocking. */
1839 saved_pos = i_size_read(inode);
1840 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1845 end = saved_pos + count;
1847 ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
1849 ocfs2_inode_unlock(inode, meta_level);
1852 ret = ocfs2_prepare_inode_for_refcount(inode,
1866 * Skip the O_DIRECT checks if we don't need
1869 if (!direct_io || !(*direct_io))
1873 * There's no sane way to do direct writes to an inode
1876 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1881 if (has_refcount && *has_refcount == 1) {
1886 * Allowing concurrent direct writes means
1887 * i_size changes wouldn't be synchronized, so
1888 * one node could wind up truncating another
1891 if (end > i_size_read(inode)) {
1897 * We don't fill holes during direct io, so
1898 * check for them here. If any are found, the
1899 * caller will have to retake some cluster
1900 * locks and initiate the io as buffered.
1902 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
1915 if (meta_level >= 0)
1916 ocfs2_inode_unlock(inode, meta_level);
1922 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1923 const struct iovec *iov,
1924 unsigned long nr_segs,
1927 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1928 int can_do_direct, has_refcount = 0;
1929 ssize_t written = 0;
1930 size_t ocount; /* original count */
1931 size_t count; /* after file limit checks */
1932 loff_t old_size, *ppos = &iocb->ki_pos;
1934 struct file *file = iocb->ki_filp;
1935 struct inode *inode = file->f_path.dentry->d_inode;
1936 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1938 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1939 (unsigned int)nr_segs,
1940 file->f_path.dentry->d_name.len,
1941 file->f_path.dentry->d_name.name);
1943 if (iocb->ki_left == 0)
1946 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1948 appending = file->f_flags & O_APPEND ? 1 : 0;
1949 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1951 mutex_lock(&inode->i_mutex);
1954 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1956 down_read(&inode->i_alloc_sem);
1960 /* concurrent O_DIRECT writes are allowed */
1961 rw_level = !direct_io;
1962 ret = ocfs2_rw_lock(inode, rw_level);
1968 can_do_direct = direct_io;
1969 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1970 iocb->ki_left, appending,
1971 &can_do_direct, &has_refcount);
1978 * We can't complete the direct I/O as requested, fall back to
1981 if (direct_io && !can_do_direct) {
1982 ocfs2_rw_unlock(inode, rw_level);
1983 up_read(&inode->i_alloc_sem);
1993 * To later detect whether a journal commit for sync writes is
1994 * necessary, we sample i_size, and cluster count here.
1996 old_size = i_size_read(inode);
1997 old_clusters = OCFS2_I(inode)->ip_clusters;
1999 /* communicate with ocfs2_dio_end_io */
2000 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2003 ret = generic_segment_checks(iov, &nr_segs, &ocount,
2009 ret = generic_write_checks(file, ppos, &count,
2010 S_ISBLK(inode->i_mode));
2014 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2015 ppos, count, ocount);
2018 * direct write may have instantiated a few
2019 * blocks outside i_size. Trim these off again.
2020 * Don't need i_size_read because we hold i_mutex.
2022 if (*ppos + count > inode->i_size)
2023 vmtruncate(inode, inode->i_size);
2028 written = __generic_file_aio_write(iocb, iov, nr_segs, ppos);
2032 /* buffered aio wouldn't have proper lock coverage today */
2033 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2035 if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
2036 (file->f_flags & O_DIRECT && has_refcount)) {
2037 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2042 if (!ret && (old_size != i_size_read(inode) ||
2043 old_clusters != OCFS2_I(inode)->ip_clusters ||
2045 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2051 ret = filemap_fdatawait_range(file->f_mapping, pos,
2056 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2057 * function pointer which is called when o_direct io completes so that
2058 * it can unlock our rw lock. (it's the clustered equivalent of
2059 * i_alloc_sem; protects truncate from racing with pending ios).
2060 * Unfortunately there are error cases which call end_io and others
2061 * that don't, so we don't have to unlock the rw_lock if either an
2062 * async dio is going to do it in the future or an end_io after an
2063 * error has already done it.
2065 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2072 ocfs2_rw_unlock(inode, rw_level);
2076 up_read(&inode->i_alloc_sem);
2078 mutex_unlock(&inode->i_mutex);
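/*
 * The splice write path below reuses ocfs2_prepare_inode_for_write(),
 * but with the appending and direct-io handling disabled (it passes
 * 0 / NULL for those arguments); i_mutex and the cluster rw lock are
 * taken by the caller, ocfs2_file_splice_write().
 */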
2086 static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2088 struct splice_desc *sd)
2092 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
2093 sd->total_len, 0, NULL, NULL);
2099 return splice_from_pipe_feed(pipe, sd, pipe_to_file);
2102 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2109 struct address_space *mapping = out->f_mapping;
2110 struct inode *inode = mapping->host;
2111 struct splice_desc sd = {
2118 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
2120 out->f_path.dentry->d_name.len,
2121 out->f_path.dentry->d_name.name);
2124 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
2126 splice_from_pipe_begin(&sd);
2128 ret = splice_from_pipe_next(pipe, &sd);
2132 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2133 ret = ocfs2_rw_lock(inode, 1);
2137 ret = ocfs2_splice_to_file(pipe, out, &sd);
2138 ocfs2_rw_unlock(inode, 1);
2140 mutex_unlock(&inode->i_mutex);
2142 splice_from_pipe_end(pipe, &sd);
2145 mutex_unlock(&pipe->inode->i_mutex);
2148 ret = sd.num_spliced;
2151 unsigned long nr_pages;
2154 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2156 err = generic_write_sync(out, *ppos, ret);
2162 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2169 static ssize_t ocfs2_file_splice_read(struct file *in,
2171 struct pipe_inode_info *pipe,
2175 int ret = 0, lock_level = 0;
2176 struct inode *inode = in->f_path.dentry->d_inode;
2178 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
2180 in->f_path.dentry->d_name.len,
2181 in->f_path.dentry->d_name.name);
2184 * See the comment in ocfs2_file_aio_read()
2186 ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
2191 ocfs2_inode_unlock(inode, lock_level);
2193 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2200 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2201 const struct iovec *iov,
2202 unsigned long nr_segs,
2205 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2206 struct file *filp = iocb->ki_filp;
2207 struct inode *inode = filp->f_path.dentry->d_inode;
2209 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
2210 (unsigned int)nr_segs,
2211 filp->f_path.dentry->d_name.len,
2212 filp->f_path.dentry->d_name.name);
2221 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2222 * need locks to protect pending reads from racing with truncate.
2224 if (filp->f_flags & O_DIRECT) {
2225 down_read(&inode->i_alloc_sem);
2228 ret = ocfs2_rw_lock(inode, 0);
2234 /* communicate with ocfs2_dio_end_io */
2235 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2239 * We're fine letting folks race truncates and extending
2240 * writes with read across the cluster, just like they can
2241 * locally. Hence no rw_lock during read.
2243 * Take and drop the meta data lock to update inode fields
2244 * like i_size. This allows the checks down below
2245 * generic_file_aio_read() a chance of actually working.
2247 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2252 ocfs2_inode_unlock(inode, lock_level);
2254 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2256 mlog(0, "generic_file_aio_read returned -EINVAL\n");
2258 /* buffered aio wouldn't have proper lock coverage today */
2259 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2261 /* see ocfs2_file_aio_write */
2262 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2269 up_read(&inode->i_alloc_sem);
2271 ocfs2_rw_unlock(inode, rw_level);
2277 const struct inode_operations ocfs2_file_iops = {
2278 .setattr = ocfs2_setattr,
2279 .getattr = ocfs2_getattr,
2280 .permission = ocfs2_permission,
2281 .setxattr = generic_setxattr,
2282 .getxattr = generic_getxattr,
2283 .listxattr = ocfs2_listxattr,
2284 .removexattr = generic_removexattr,
2285 .fallocate = ocfs2_fallocate,
2286 .fiemap = ocfs2_fiemap,
2289 const struct inode_operations ocfs2_special_file_iops = {
2290 .setattr = ocfs2_setattr,
2291 .getattr = ocfs2_getattr,
2292 .permission = ocfs2_permission,
2296 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2297 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2299 const struct file_operations ocfs2_fops = {
2300 .llseek = generic_file_llseek,
2301 .read = do_sync_read,
2302 .write = do_sync_write,
2304 .fsync = ocfs2_sync_file,
2305 .release = ocfs2_file_release,
2306 .open = ocfs2_file_open,
2307 .aio_read = ocfs2_file_aio_read,
2308 .aio_write = ocfs2_file_aio_write,
2309 .unlocked_ioctl = ocfs2_ioctl,
2310 #ifdef CONFIG_COMPAT
2311 .compat_ioctl = ocfs2_compat_ioctl,
2314 .flock = ocfs2_flock,
2315 .splice_read = ocfs2_file_splice_read,
2316 .splice_write = ocfs2_file_splice_write,
2319 const struct file_operations ocfs2_dops = {
2320 .llseek = generic_file_llseek,
2321 .read = generic_read_dir,
2322 .readdir = ocfs2_readdir,
2323 .fsync = ocfs2_sync_file,
2324 .release = ocfs2_dir_release,
2325 .open = ocfs2_dir_open,
2326 .unlocked_ioctl = ocfs2_ioctl,
2327 #ifdef CONFIG_COMPAT
2328 .compat_ioctl = ocfs2_compat_ioctl,
2331 .flock = ocfs2_flock,
2335 * POSIX-lockless variants of our file_operations.
2337 * These will be used if the underlying cluster stack does not support
2338 * posix file locking, if the user passes the "localflocks" mount
2339 * option, or if we have a local-only fs.
2341 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2342 * so we still want it in the case of no stack support for
2343 * plocks. Internally, it will do the right thing when asked to ignore
2346 const struct file_operations ocfs2_fops_no_plocks = {
2347 .llseek = generic_file_llseek,
2348 .read = do_sync_read,
2349 .write = do_sync_write,
2351 .fsync = ocfs2_sync_file,
2352 .release = ocfs2_file_release,
2353 .open = ocfs2_file_open,
2354 .aio_read = ocfs2_file_aio_read,
2355 .aio_write = ocfs2_file_aio_write,
2356 .unlocked_ioctl = ocfs2_ioctl,
2357 #ifdef CONFIG_COMPAT
2358 .compat_ioctl = ocfs2_compat_ioctl,
2360 .flock = ocfs2_flock,
2361 .splice_read = ocfs2_file_splice_read,
2362 .splice_write = ocfs2_file_splice_write,
2365 const struct file_operations ocfs2_dops_no_plocks = {
2366 .llseek = generic_file_llseek,
2367 .read = generic_read_dir,
2368 .readdir = ocfs2_readdir,
2369 .fsync = ocfs2_sync_file,
2370 .release = ocfs2_dir_release,
2371 .open = ocfs2_dir_open,
2372 .unlocked_ioctl = ocfs2_ioctl,
2373 #ifdef CONFIG_COMPAT
2374 .compat_ioctl = ocfs2_compat_ioctl,
2376 .flock = ocfs2_flock,