struct pagevec lru_pvec;
int ret = 0;
- current->flags |= PF_READAHEAD;
-
if (mapping->a_ops->readpages) {
ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
goto out;
}
pagevec_lru_add(&lru_pvec);
out:
- current->flags &= ~PF_READAHEAD;
return ret;
}
* Chunk the readahead into 2 megabyte units, so that we don't pin too much
* memory at once.
*/
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		unsigned long offset, unsigned long nr_to_read)
+/*
+ * Renamed from do_page_cache_readahead: this variant ignores queue
+ * congestion and will block on request queues, unlike the new
+ * congestion-aware do_page_cache_readahead() introduced below.
+ * NOTE(review): body is elided by this hunk — only the declaration of
+ * ret is visible here; the chunking loop is outside this view.
+ */
+int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+		unsigned long offset, unsigned long nr_to_read)
{
	int ret = 0;
}
/*
+ * This version skips the IO if the queue is read-congested, and will tell the
+ * block layer to abandon the readahead if request allocation would block.
+ *
+ * force_page_cache_readahead() will ignore queue congestion and will block on
+ * request queues.
+ */
+int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+		unsigned long offset, unsigned long nr_to_read)
+{
+	/*
+	 * Best-effort readahead: if the backing device's read queue is
+	 * congested, skip the IO entirely and return 0 rather than pile
+	 * more requests onto an already-loaded queue.  Callers that must
+	 * read ahead regardless use force_page_cache_readahead().
+	 */
+	if (!bdi_read_congested(mapping->backing_dev_info))
+		return __do_page_cache_readahead(mapping, filp,
+					offset, nr_to_read);
+	return 0;
+}
+
+/*
* Check how effective readahead is being. If the amount of started IO is
* less than expected then the file is partly or fully in pagecache and
* readahead isn't helping. Shrink the window.