#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_list)
		nr_more_io++;
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}
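/*
 * Example (sketch, not part of the original file): once a bdi is registered,
 * the stats above are readable under debugfs, typically mounted at
 * /sys/kernel/debug:
 *
 *	cat /sys/kernel/debug/bdi/<name>/stats
 *
 * which prints one "Key: value" line per field in the format string above
 * (BdiWriteback, BdiReclaimable, BdiDirtyThresh, DirtyThresh,
 * BackgroundThresh, b_dirty, b_io, b_more_io, bdi_list, state). The values
 * are per-device and change constantly; only the field names come from the
 * seq_printf() format.
 */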
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}
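/*
 * Pages-to-kilobytes conversion used by the sysfs show functions below.
 * A page is 2^PAGE_SHIFT bytes, so shifting a page count left by
 * (PAGE_SHIFT - 10) multiplies by PAGE_SIZE/1024. For example, with
 * 4096-byte pages, PAGE_SHIFT - 10 == 2, so K(32) == 32 << 2 == 128 kB.
 * The store function above applies the inverse shift to go from kB to pages.
 */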
#define K(pages) ((pages) << (PAGE_SHIFT - 10))
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
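/*
 * For reference, a sketch of what the macro above expands to for
 * read_ahead_kb (modulo whitespace):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 */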
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};
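/*
 * Example (sketch, not from the original source): these attributes appear
 * per device under the "bdi" class created below, e.g. for a disk bdi
 * registered under its "major:minor" name such as "8:0":
 *
 *	cat /sys/class/bdi/8:0/read_ahead_kb
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * min_ratio and max_ratio bound this device's share of the global dirty
 * threshold via bdi_set_min_ratio()/bdi_set_max_ratio().
 */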
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	/* setup_timer() both initializes the timer and sets its callback */
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wb(&bdi->wb, &wbc);
}
/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}
void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	/* dirty_writeback_interval is in centiseconds, hence the * 10 to msecs */
	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}
static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);

		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock_bh(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			if (wait)
				schedule_timeout(wait);
			else
				schedule();
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock_bh(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_writeback_thread, wb, "flush-%s",
					dev_name(bdi->dev));
		/*
		 * If task creation fails, then readd the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;

			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock_bh(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock_bh(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}
static void bdi_add_to_pending(struct rcu_head *head)
{
	struct backing_dev_info *bdi;

	bdi = container_of(head, struct backing_dev_info, rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
	spin_unlock(&bdi_lock);

	/*
	 * We are now on the pending list, wake up bdi_forker_task()
	 * to finish the job and add us back to the active bdi_list
	 */
	wake_up_process(default_backing_dev_info.wb.task);
}
/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
		printk(KERN_ERR "bdi %p/%s is not registered!\n",
		       bdi, bdi->name);
		return;
	}

	/*
	 * Check with the helper whether to proceed adding a task. Will only
	 * abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occurred; further additions will
	 * block waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_del_rcu(&bdi->bdi_list);

		/*
		 * We must wait for the current RCU period to end before
		 * moving to the pending list. So schedule that operation
		 * from an RCU callback.
		 */
		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
	}
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
						dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
	trace_writeback_bdi_register(bdi);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
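/*
 * Example (sketch; the call site is an assumption, not part of this file):
 * a block driver's queue registration would typically do something like
 *
 *	err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 *
 * so the bdi shows up in sysfs and debugfs under its "major:minor" name.
 */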
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	if (bdi->wb.task) {
		thaw_process(bdi->wb.task);
		kthread_stop(bdi->wb.task);
	}
}
/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_RCU_HEAD(&bdi->rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);
void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
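/*
 * Example (sketch; "fc" and "myfs" are hypothetical, not from this file):
 * a filesystem that wants a private bdi can do, at mount time:
 *
 *	err = bdi_setup_and_register(&fc->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &fc->bdi;
 *
 * and later tear it down with bdi_destroy(&fc->bdi). The "-%d" suffix
 * added above keeps multiple instances ("myfs-1", "myfs-2", ...) unique.
 */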
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);
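/*
 * Example (sketch, not from the original source): a driver that tracks its
 * own request limit might mark its bdi congested when the queue fills and
 * clear it as requests drain:
 *
 *	if (nr_queued >= max_queued)
 *		set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	...
 *	if (nr_queued < max_queued / 2)
 *		clear_bdi_congested(bdi, BLK_RW_ASYNC);
 *
 * BLK_RW_SYNC/BLK_RW_ASYNC index congestion_wqh[] above via the 'sync'
 * argument; nr_queued/max_queued are hypothetical driver state.
 */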
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);
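/*
 * Example (sketch, not from the original source): reclaim-style callers
 * back off briefly when writeback cannot keep up, e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * which sleeps for at most a tenth of a second (HZ jiffies per second), or
 * until a congested bdi wakes the queue via clear_bdi_congested().
 */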