Update to 3.4-final.
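
Buffer.c changes picked up with the 3.4 sync, as the hunks below show:
the sync_buffer()/blk_run_address_space() unplug path becomes
sleep_on_buffer() plus on-stack plugging (blk_start_plug()/
blk_finish_plug(), WRITE_SYNC instead of WRITE_SYNC_PLUG); BH_Eopnotsupp
handling is dropped; invalidate_bdev() and block_sync_page() are no
longer defined in this file; block_prepare_write() is folded into
__block_write_begin(); block_page_mkwrite() is split so filesystems can
call __block_page_mkwrite() and handle freezing themselves; the per-CPU
buffer-head LRU and accounting move to the __this_cpu_*() accessors,
with invalidate_bh_lrus() using on_each_cpu_cond(); init_page_buffers()
no longer maps buffers past blkdev_max_block(); and <linux/module.h>
gives way to <linux/export.h>.
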
[linux-flexiantxendom0-3.2.10.git] fs/buffer.c
index 3e7dca2..ad5938c 100644
@@ -29,7 +29,7 @@
 #include <linux/file.h>
 #include <linux/quotaops.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/writeback.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-       struct block_device *bd;
-       struct buffer_head *bh
-               = container_of(word, struct buffer_head, b_state);
-
-       smp_mb();
-       bd = bh->b_bdev;
-       if (bd)
-               blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-       wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+       wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-       wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
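The old sync_buffer() kicked the request queue before sleeping; with
on-stack plugging the block layer flushes the task's pending requests
when it schedules, so the wait callback only has to sleep. For
reference, a wait_on_bit() action callback is just such a "go to sleep"
hook; a minimal sketch with hypothetical names (my_bit_waiter,
wait_for_flag):

    #include <linux/wait.h>
    #include <linux/sched.h>

    /*
     * wait_on_bit() action: called while the bit is still set; sleep and
     * return 0 to keep waiting (non-zero aborts the wait, e.g. on a signal).
     */
    static int my_bit_waiter(void *word)
    {
            schedule();
            return 0;
    }

    static void wait_for_flag(unsigned long *flags, int bit)
    {
            wait_on_bit(flags, bit, my_bit_waiter, TASK_UNINTERRUPTIBLE);
    }
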
@@ -156,7 +148,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
+               if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -220,13 +212,16 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
+               char b[BDEVNAME_SIZE];
+
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
-               printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
+               printk("device %s blocksize: %d\n", bdevname(bdev, b),
+                       1 << bd_inode->i_blkbits);
        }
 out_unlock:
        spin_unlock(&bd_mapping->private_lock);
@@ -235,51 +230,6 @@ out:
        return ret;
 }
 
-/* If invalidate_buffers() will trash dirty buffers, it means some kind
-   of fs corruption is going on. Trashing dirty data always imply losing
-   information that was supposed to be just stored on the physical layer
-   by the user.
-
-   Thus invalidate_buffers in general usage is not allwowed to trash
-   dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
-   be preserved.  These buffers are simply skipped.
-  
-   We also skip buffers which are still in use.  For example this can
-   happen if a userspace program is reading the block device.
-
-   NOTE: In the case where the user removed a removable-media-disk even if
-   there's still dirty data not synced on disk (due a bug in the device driver
-   or due an error of the user), by not destroying the dirty buffers we could
-   generate corruption also on the next media inserted, thus a parameter is
-   necessary to handle this case in the most safe way possible (trying
-   to not corrupt also the new disk inserted with the data belonging to
-   the old now corrupted disk). Also for the ramdisk the natural thing
-   to do in order to release the ramdisk memory is to destroy dirty buffers.
-
-   These are two special cases. Normal usage imply the device driver
-   to issue a sync on the device (without waiting I/O completion) and
-   then an invalidate_buffers call that doesn't trash dirty buffers.
-
-   For handling cache coherency with the blkdev pagecache the 'update' case
-   is been introduced. It is needed to re-read from disk any pinned
-   buffer. NOTE: re-reading from disk is destructive so we can do it only
-   when we assume nobody is changing the buffercache under our I/O and when
-   we think the disk contains more recent information than the buffercache.
-   The update == 1 pass marks the buffers we need to update, the update == 2
-   pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev)
-{
-       struct address_space *mapping = bdev->bd_inode->i_mapping;
-
-       if (mapping->nrpages == 0)
-               return;
-
-       invalidate_bh_lrus();
-       lru_add_drain_all();    /* make sure all lru add caches are flushed */
-       invalidate_mapping_pages(mapping, 0, -1);
-}
-EXPORT_SYMBOL(invalidate_bdev);
-
 /*
  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
@@ -288,7 +238,7 @@ static void free_more_memory(void)
        struct zone *zone;
        int nid;
 
-       wakeup_flusher_threads(1024);
+       wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();
 
        for_each_online_node(nid) {
@@ -749,10 +699,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping, *prev_mapping = NULL;
+       struct address_space *mapping;
        int err = 0, err2;
+       struct blk_plug plug;
 
        INIT_LIST_HEAD(&tmp);
+       blk_start_plug(&plug);
 
        spin_lock(lock);
        while (!list_empty(list)) {
@@ -775,7 +727,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * still in flight on potentially older
                                 * contents.
                                 */
-                               write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+                               write_dirty_buffer(bh, WRITE_SYNC);
 
                                /*
                                 * Kick off IO for the previous mapping. Note
@@ -783,16 +735,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
-                               if (prev_mapping && prev_mapping != mapping)
-                                       blk_run_address_space(prev_mapping);
-                               prev_mapping = mapping;
-
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }
 
+       spin_unlock(lock);
+       blk_finish_plug(&plug);
+       spin_lock(lock);
+
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
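
The explicit blk_run_address_space() kick on prev_mapping is gone
because the whole walk now runs under an on-stack plug: requests queued
by write_dirty_buffer() are flushed in one go at blk_finish_plug() (or
whenever the task sleeps). A minimal sketch of that pattern, using a
hypothetical helper submit_dirty_list():

    #include <linux/blkdev.h>
    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    /* Hypothetical helper: batch several buffer writes under one plug. */
    static void submit_dirty_list(struct buffer_head **bhs, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* queue I/O on the task's plug list */
            for (i = 0; i < nr; i++)
                    write_dirty_buffer(bhs[i], WRITE_SYNC);
            blk_finish_plug(&plug);         /* hand the whole batch to the driver */
    }
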
@@ -905,7 +857,6 @@ try_again:
 
                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
-               bh->b_private = NULL;
                bh->b_size = size;
 
                /* Link the buffer to its page */
@@ -970,6 +921,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
+       sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
 
        do {
                if (!buffer_mapped(bh)) {
@@ -978,7 +930,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
-                       set_buffer_mapped(bh);
+                       if (block < end_block)
+                               set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
@@ -1034,7 +987,6 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        return page;
 
 failed:
-       BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
@@ -1145,7 +1097,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
@@ -1271,12 +1223,10 @@ static inline void check_irqs_on(void)
 static void bh_lru_install(struct buffer_head *bh)
 {
        struct buffer_head *evictee = NULL;
-       struct bh_lru *lru;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
-       if (lru->bhs[0] != bh) {
+       if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;
@@ -1284,7 +1234,8 @@ static void bh_lru_install(struct buffer_head *bh)
                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
-                       struct buffer_head *bh2 = lru->bhs[in];
+                       struct buffer_head *bh2 =
+                               __this_cpu_read(bh_lrus.bhs[in]);
 
                        if (bh2 == bh) {
                                __brelse(bh2);
@@ -1299,7 +1250,7 @@ static void bh_lru_install(struct buffer_head *bh)
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(lru->bhs, bhs, sizeof(bhs));
+               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
 
@@ -1314,23 +1265,22 @@ static struct buffer_head *
 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *ret = NULL;
-       struct bh_lru *lru;
        unsigned int i;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
-               struct buffer_head *bh = lru->bhs[i];
+               struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
 
                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
-                                       lru->bhs[i] = lru->bhs[i - 1];
+                                       __this_cpu_write(bh_lrus.bhs[i],
+                                               __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
-                               lru->bhs[0] = bh;
+                               __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
@@ -1435,10 +1385,23 @@ static void invalidate_bh_lru(void *arg)
        }
        put_cpu_var(bh_lrus);
 }
+
+static bool has_bh_in_lru(int cpu, void *dummy)
+{
+       struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+       int i;
+
+       for (i = 0; i < BH_LRU_SIZE; i++) {
+               if (b->bhs[i])
+                       return 1;
+       }
+
+       return 0;
+}
+
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu(invalidate_bh_lru, NULL, 1);
+       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
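The LRU now goes through the __this_cpu_*() accessors, and
invalidate_bh_lrus() only IPIs CPUs whose LRU actually holds something,
via on_each_cpu_cond(). The same shape on a hypothetical per-CPU cache
(obj_cache, drop_all_caches() and friends are made-up names), as a
sketch:

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/gfp.h>

    #define OBJ_CACHE_SLOTS 8

    struct obj_cache {
            void *slots[OBJ_CACHE_SLOTS];
    };
    static DEFINE_PER_CPU(struct obj_cache, obj_caches);

    /* Condition callback: only CPUs caching something get the IPI. */
    static bool cache_is_populated(int cpu, void *unused)
    {
            struct obj_cache *c = per_cpu_ptr(&obj_caches, cpu);
            int i;

            for (i = 0; i < OBJ_CACHE_SLOTS; i++)
                    if (c->slots[i])
                            return true;
            return false;
    }

    /* Runs on each selected CPU, in IPI context. */
    static void drop_local_cache(void *unused)
    {
            int i;

            for (i = 0; i < OBJ_CACHE_SLOTS; i++)
                    __this_cpu_write(obj_caches.slots[i], NULL);
    }

    static void drop_all_caches(void)
    {
            /* wait=1: return only after every selected CPU ran the callback. */
            on_each_cpu_cond(cache_is_populated, drop_local_cache, NULL, 1,
                             GFP_KERNEL);
    }
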
@@ -1474,13 +1437,13 @@ static void discard_buffer(struct buffer_head * bh)
 }
 
 /**
- * block_invalidatepage - invalidate part of all of a buffer-backed page
+ * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
  * @offset: the index of the truncation point
  *
  * block_invalidatepage() is called when all or part of the page has become
- * invalidatedby a truncate operation.
+ * invalidated by a truncate operation.
  *
  * block_invalidatepage() does not have to release all buffers, but it must
  * ensure that no dirty buffer is left outside @offset and that no I/O
@@ -1617,14 +1580,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
@@ -1637,7 +1594,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-                       WRITE_SYNC_PLUG : WRITE);
+                       WRITE_SYNC : WRITE);
 
        BUG_ON(!PageLocked(page));
 
@@ -1706,7 +1663,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                 * and kswapd activity, but those code paths have their own
                 * higher-level throttling.
                 */
-               if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+               if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
@@ -1834,9 +1791,11 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 }
 EXPORT_SYMBOL(page_zero_new_buffers);
 
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block)
 {
+       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned to = from + len;
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end;
        sector_t block;
@@ -1910,13 +1869,11 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
                if (!buffer_uptodate(*wait_bh))
                        err = -EIO;
        }
-       if (unlikely(err)) {
+       if (unlikely(err))
                page_zero_new_buffers(page, from, to);
-               ClearPageUptodate(page);
-       }
        return err;
 }
-EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(__block_write_begin);
 
 static int __block_commit_write(struct inode *inode, struct page *page,
                unsigned from, unsigned to)
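
block_prepare_write() took page-relative from/to offsets;
__block_write_begin() takes the file position and length and derives
the in-page offset itself, which is why the old __block_write_begin()
wrapper is deleted in the next hunk. A conversion sketch, assuming a
filesystem-provided myfs_get_block():

    #include <linux/buffer_head.h>
    #include <linux/pagemap.h>

    /* Hypothetical get_block_t supplied by the filesystem. */
    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_prepare(struct page *page, loff_t pos, unsigned len)
    {
            /*
             * Was:  block_prepare_write(page, from, to, myfs_get_block)
             * with  from = pos & (PAGE_CACHE_SIZE - 1) and to = from + len.
             */
            return __block_write_begin(page, pos, len, myfs_get_block);
    }
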
@@ -1953,15 +1910,6 @@ static int __block_commit_write(struct inode *inode, struct page *page,
        return 0;
 }
 
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
-               get_block_t *get_block)
-{
-       unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-
-       return block_prepare_write(page, start, start + len, get_block);
-}
-EXPORT_SYMBOL(__block_write_begin);
-
 /*
  * block_write_begin takes care of the basic task of block allocation and
  * bringing partial write blocks uptodate first.
@@ -2353,24 +2301,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                  get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                        get_block_t get_block)
 {
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
-       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+       int ret;
 
        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
-               /* page got truncated out from underneath us */
-               unlock_page(page);
-               goto out;
+               /* We overload EFAULT to mean page got truncated */
+               ret = -EFAULT;
+               goto out_unlock;
        }
 
        /* page is wholly or partially inside EOF */
@@ -2379,22 +2329,46 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        else
                end = PAGE_CACHE_SIZE;
 
-       ret = block_prepare_write(page, 0, end, get_block);
+       ret = __block_write_begin(page, 0, end, get_block);
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
-       if (unlikely(ret)) {
-               unlock_page(page);
-               if (ret == -ENOMEM)
-                       ret = VM_FAULT_OOM;
-               else /* -ENOSPC, -EIO, etc */
-                       ret = VM_FAULT_SIGBUS;
-       } else
-               ret = VM_FAULT_LOCKED;
-
-out:
+       if (unlikely(ret < 0))
+               goto out_unlock;
+       /*
+        * Freezing in progress? We check after the page is marked dirty and
+        * with page lock held so if the test here fails, we are sure freezing
+        * code will wait during syncing until the page fault is done - at that
+        * point page will be dirty and unlocked so freezing code will write it
+        * and writeprotect it again.
+        */
+       set_page_dirty(page);
+       if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+       wait_on_page_writeback(page);
+       return 0;
+out_unlock:
+       unlock_page(page);
        return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                  get_block_t get_block)
+{
+       int ret;
+       struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+       /*
+        * This check is racy but catches the common case. The check in
+        * __block_page_mkwrite() is reliable.
+        */
+       vfs_check_frozen(sb, SB_FREEZE_WRITE);
+       ret = __block_page_mkwrite(vma, vmf, get_block);
+       return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
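
With the split, a filesystem that needs its own freeze or journalling
handling calls __block_page_mkwrite() directly and converts the error
itself; the generic block_page_mkwrite() above shows the default
pattern. A sketch of a hypothetical .page_mkwrite handler (the myfs_*
names are assumptions):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/buffer_head.h>

    /* Hypothetical get_block_t supplied by the filesystem. */
    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
            int ret;

            /* Racy pre-check; __block_page_mkwrite() re-checks under the page lock. */
            vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
            ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
            /* -EFAULT, -EAGAIN, -ENOMEM, ... become VM_FAULT_* codes. */
            return block_page_mkwrite_return(ret);
    }
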
@@ -2466,11 +2440,10 @@ int nobh_write_begin(struct address_space *mapping,
        *fsdata = NULL;
 
        if (page_has_buffers(page)) {
-               unlock_page(page);
-               page_cache_release(page);
-               *pagep = NULL;
-               return block_write_begin(mapping, pos, len, flags, pagep,
-                                        get_block);
+               ret = __block_write_begin(page, pos, len, get_block);
+               if (unlikely(ret))
+                       goto out_release;
+               return ret;
        }
 
        if (PageMappedToDisk(page))
@@ -2891,7 +2864,6 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 
        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-               set_bit(BH_Eopnotsupp, &bh->b_state);
        }
 
        if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
@@ -3031,10 +3003,6 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
                bh->b_end_io = end_buffer_write_sync;
                ret = submit_bh(rw, bh);
                wait_on_buffer(bh);
-               if (buffer_eopnotsupp(bh)) {
-                       clear_buffer_eopnotsupp(bh);
-                       ret = -EOPNOTSUPP;
-               }
                if (!ret && !buffer_uptodate(bh))
                        ret = -EIO;
        } else {
@@ -3154,17 +3122,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-       struct address_space *mapping;
-
-       smp_mb();
-       mapping = page_mapping(page);
-       if (mapping)
-               blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
@@ -3217,22 +3174,23 @@ static void recalc_bh_state(void)
        int i;
        int tot = 0;
 
-       if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
+       if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
                return;
-       __get_cpu_var(bh_accounting).ratelimit = 0;
+       __this_cpu_write(bh_accounting.ratelimit, 0);
        for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
-       
+
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
-               get_cpu_var(bh_accounting).nr++;
+               preempt_disable();
+               __this_cpu_inc(bh_accounting.nr);
                recalc_bh_state();
-               put_cpu_var(bh_accounting);
+               preempt_enable();
        }
        return ret;
 }
@@ -3242,9 +3200,10 @@ void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
        kmem_cache_free(bh_cachep, bh);
-       get_cpu_var(bh_accounting).nr--;
+       preempt_disable();
+       __this_cpu_dec(bh_accounting.nr);
        recalc_bh_state();
-       put_cpu_var(bh_accounting);
+       preempt_enable();
 }
 EXPORT_SYMBOL(free_buffer_head);
 
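The accounting switch trades get_cpu_var()/put_cpu_var() for an
explicit preempt_disable()/preempt_enable() window around the cheaper
__this_cpu_*() operations, keeping the update and the follow-up check
on one CPU. The same pattern on a hypothetical counter (alloc_count is
made up), sketched:

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, alloc_count);

    static void count_alloc(void)
    {
            preempt_disable();              /* keep both per-CPU ops on one CPU */
            __this_cpu_inc(alloc_count);
            if ((__this_cpu_read(alloc_count) & 4095) == 0)
                    pr_debug("another 4096 allocations on this CPU\n");
            preempt_enable();
    }
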
@@ -3257,9 +3216,8 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
-       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
        per_cpu(bh_accounting, cpu).nr = 0;
-       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,