tmpfs: copy truncate_inode_pages_range
mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on ramfs. It
 * extends ramfs with the ability to use swap and to honor resource
 * limits, which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in a directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
        size_t size;
        char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, int *fault_type)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                        mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_NORESERVE) ?
                security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

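/*
 * Return freed pages to the mount's block quota: when a block limit
 * is imposed, adjust used_blocks and the inode's i_blocks to match.
 */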
static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                percpu_counter_add(&sbinfo->used_blocks, -pages);
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
        }
}

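/*
 * Reserve one inode against the mount's inode limit, if any.
 * Returns -ENOSPC once the limit has been reached.
 */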
static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

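/*
 * Give back an inode reservation taken by shmem_reserve_inode().
 */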
static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

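/*
 * Record the swap entry for index in the inode's direct table.  In this
 * interim implementation only the first SHMEM_NR_DIRECT pages can be
 * swapped: see the "toy implementation" comment in shmem_writepage().
 */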
static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index,
                           swp_entry_t swap)
{
        if (index < SHMEM_NR_DIRECT)
                info->i_direct[index] = swap;
}

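/*
 * Look up the swap entry for index: slots beyond the direct table
 * always read back as empty.
 */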
static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index)
{
        return (index < SHMEM_NR_DIRECT) ?
                info->i_direct[index] : (swp_entry_t){0};
}

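/*
 * Remove the range lstart..lend from the inode: a copy of
 * truncate_inode_pages_range() adapted to shmem, which must also
 * free the swap entries backing any swapped-out pages in the range.
 */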
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
        struct pagevec pvec;
        pgoff_t index;
        swp_entry_t swap;
        int i;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

        pagevec_init(&pvec, 0);
        index = start;
        while (index <= end && pagevec_lookup(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = page->index;
                        if (index > end)
                                break;

                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page->index != index);
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
        }

        if (partial) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                if (page) {
                        zero_user_segment(page, partial, PAGE_CACHE_SIZE);
                        set_page_dirty(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        index = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                        if (index == start)
                                break;
                        index = start;
                        continue;
                }
                if (index == start && pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = page->index;
                        if (index > end)
                                break;

                        lock_page(page);
                        WARN_ON(page->index != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }

        if (end > SHMEM_NR_DIRECT)
                end = SHMEM_NR_DIRECT;

        spin_lock(&info->lock);
        for (index = start; index < end; index++) {
                swap = shmem_get_swap(info, index);
                if (swap.val) {
                        free_swap_and_cache(swap);
                        shmem_put_swap(info, index, (swp_entry_t){0});
                        info->swapped--;
                }
        }

        if (mapping->nrpages) {
                spin_unlock(&info->lock);
                /*
                 * A page may have meanwhile sneaked in from swap.
                 */
                truncate_inode_pages_range(mapping, lstart, lend);
                spin_lock(&info->lock);
        }

        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

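/*
 * Handle attribute changes.  A size change does the truncation here,
 * unmapping the affected range before and after shmem_truncate_range()
 * to catch racily COWed private pages.
 */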
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize != oldsize) {
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
                if (newsize < oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                        shmem_truncate_range(inode, newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                }
        }

        setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (attr->ia_valid & ATTR_MODE)
                error = generic_acl_chmod(inode);
#endif
        return error;
}

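/*
 * Final cleanup when an inode is evicted: drop its data and swap,
 * take it off the swaplist, and free its extended attributes.
 */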
static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_xattr *xattr, *nxattr;

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
                kfree(xattr->name);
                kfree(xattr);
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        end_writeback(inode);
}

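/*
 * If this inode holds the entry for swap, move page from swap cache
 * back into its page cache.  Returns 1 when the entry was found and
 * the page reinstated, 0 when the entry is not in this inode, or
 * -ENOMEM when the page could not be added to the page cache.
 */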
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page *page)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        pgoff_t index;
        int error;

        for (index = 0; index < SHMEM_NR_DIRECT; index++)
                if (shmem_get_swap(info, index).val == swap.val)
                        goto found;
        return 0;
found:
        spin_lock(&info->lock);
        if (shmem_get_swap(info, index).val != swap.val) {
                spin_unlock(&info->lock);
                return 0;
        }

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT);
        /* which does mem_cgroup_uncharge_cache_page on error */

        if (error != -ENOMEM) {
                delete_from_swap_cache(page);
                set_page_dirty(page);
                shmem_put_swap(info, index, (swp_entry_t){0});
                info->swapped--;
                swap_free(swap);
                error = 1;      /* not an error, but entry was found */
        }
        spin_unlock(&info->lock);
        return error;
}

/*
 * shmem_unuse() searches the swapped-out shmem inodes for the page
 * backing @swap, and moves it back from swap into the page cache.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        int found = 0;
        int error;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         * add_to_page_cache() will be called with GFP_NOWAIT.
         */
        error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
        if (error)
                goto out;
        /*
         * Try to preload while we can wait, to not make a habit of
         * draining atomic reserves; but don't latch on to this cpu,
         * it's okay if sometimes we get rescheduled after this.
         */
        error = radix_tree_preload(GFP_KERNEL);
        if (error)
                goto uncharge;
        radix_tree_preload_end();

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (!info->swapped) {
                        spin_lock(&info->lock);
                        if (!info->swapped)
                                list_del_init(&info->swaplist);
                        spin_unlock(&info->lock);
                }
                if (info->swapped)
                        found = shmem_unuse_inode(info, swap, page);
                cond_resched();
                if (found)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);

uncharge:
        if (!found)
                mem_cgroup_uncharge_cache_page(page);
        if (found < 0)
                error = found;
out:
        unlock_page(page);
        page_cache_release(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t swap, oswap;
        struct address_space *mapping;
        pgoff_t index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
         * might use ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
         * and not for the writeback threads or sync.
         */
        if (!wbc->for_reclaim) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }

        /*
         * Just for this patch, we have a toy implementation,
         * which can swap out only the first SHMEM_NR_DIRECT pages:
         * for simple demonstration of where we need to think about swap.
         */
        if (index >= SHMEM_NR_DIRECT)
                goto redirty;

        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now because we cannot take
         * mutex while holding spinlock, and must do so before the page
         * is moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've taken the spinlock, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under both locks.
         */
        mutex_lock(&shmem_swaplist_mutex);
        if (list_empty(&info->swaplist))
                list_add_tail(&info->swaplist, &shmem_swaplist);

        spin_lock(&info->lock);
        mutex_unlock(&shmem_swaplist_mutex);

        oswap = shmem_get_swap(info, index);
        if (oswap.val) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                free_swap_and_cache(oswap);
                shmem_put_swap(info, index, (swp_entry_t){0});
                info->swapped--;
        }
        shmem_recalc_inode(inode);

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                delete_from_page_cache(page);
                shmem_put_swap(info, index, swap);
                info->swapped++;
                swap_shmem_alloc(swap);
                spin_unlock(&info->lock);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        spin_unlock(&info->lock);
        swapcache_free(swap, NULL);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol, 1);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;

        spol = mpol_cond_copy(&mpol,
                        mpol_shared_policy_lookup(&info->policy, index));

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
        pvma.vm_policy = spol;
        return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

        /*
         * alloc_page_vma() will drop the shared policy reference
         */
        return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *page;
        struct page *prealloc_page = NULL;
        swp_entry_t swap;
        int error;

        if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
                return -EFBIG;
repeat:
        page = find_lock_page(mapping, index);
        if (page) {
                /*
                 * Once we can get the page lock, it must be uptodate:
                 * if there were an error in reading back from swap,
                 * the page would not be inserted into the filecache.
                 */
                BUG_ON(!PageUptodate(page));
                goto done;
        }

        /*
         * Try to preload while we can wait, to not make a habit of
         * draining atomic reserves; but don't latch on to this cpu.
         */
        error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
        if (error)
                goto out;
        radix_tree_preload_end();

        if (sgp != SGP_READ && !prealloc_page) {
                prealloc_page = shmem_alloc_page(gfp, info, index);
                if (prealloc_page) {
                        SetPageSwapBacked(prealloc_page);
                        if (mem_cgroup_cache_charge(prealloc_page,
                                        current->mm, GFP_KERNEL)) {
                                page_cache_release(prealloc_page);
                                prealloc_page = NULL;
                        }
                }
        }

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        swap = shmem_get_swap(info, index);
        if (swap.val) {
                /* Look it up and read it in.. */
                page = lookup_swap_cache(swap);
                if (!page) {
                        spin_unlock(&info->lock);
                        /* here we actually do the io */
                        if (fault_type)
                                *fault_type |= VM_FAULT_MAJOR;
                        page = shmem_swapin(swap, gfp, info, index);
                        if (!page) {
                                swp_entry_t nswap = shmem_get_swap(info, index);
                                if (nswap.val == swap.val) {
                                        error = -ENOMEM;
                                        goto out;
                                }
                                goto repeat;
                        }
                        wait_on_page_locked(page);
                        page_cache_release(page);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (!trylock_page(page)) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(page);
                        page_cache_release(page);
                        goto repeat;
                }
                if (PageWriteback(page)) {
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(page);
                        unlock_page(page);
                        page_cache_release(page);
                        goto repeat;
                }
                if (!PageUptodate(page)) {
                        spin_unlock(&info->lock);
                        unlock_page(page);
                        page_cache_release(page);
                        error = -EIO;
                        goto out;
                }

                error = add_to_page_cache_locked(page, mapping,
                                                 index, GFP_NOWAIT);
                if (error) {
                        spin_unlock(&info->lock);
                        if (error == -ENOMEM) {
                                /*
                                 * reclaim from proper memory cgroup and
                                 * call memcg's OOM if needed.
                                 */
                                error = mem_cgroup_shmem_charge_fallback(
                                                page, current->mm, gfp);
                                if (error) {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        goto out;
                                }
                        }
                        unlock_page(page);
                        page_cache_release(page);
                        goto repeat;
                }

                delete_from_swap_cache(page);
                shmem_put_swap(info, index, (swp_entry_t){0});
                info->swapped--;
                spin_unlock(&info->lock);
                set_page_dirty(page);
                swap_free(swap);

        } else if (sgp == SGP_READ) {
                page = find_get_page(mapping, index);
                if (page && !trylock_page(page)) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(page);
                        page_cache_release(page);
                        goto repeat;
                }
                spin_unlock(&info->lock);

        } else if (prealloc_page) {
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
                                                sbinfo->max_blocks) >= 0 ||
                            shmem_acct_block(info->flags))
                                goto nospace;
                        percpu_counter_inc(&sbinfo->used_blocks);
                        inode->i_blocks += BLOCKS_PER_PAGE;
                } else if (shmem_acct_block(info->flags))
                        goto nospace;

                page = prealloc_page;
                prealloc_page = NULL;

                swap = shmem_get_swap(info, index);
                if (swap.val)
                        mem_cgroup_uncharge_cache_page(page);
                else
                        error = add_to_page_cache_lru(page, mapping,
                                                index, GFP_NOWAIT);
                /*
                 * At add_to_page_cache_lru() failure,
                 * uncharge will be done automatically.
                 */
                if (swap.val || error) {
                        shmem_unacct_blocks(info->flags, 1);
                        shmem_free_blocks(inode, 1);
                        spin_unlock(&info->lock);
                        page_cache_release(page);
                        goto repeat;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                clear_highpage(page);
                flush_dcache_page(page);
                SetPageUptodate(page);
                if (sgp == SGP_DIRTY)
                        set_page_dirty(page);

        } else {
                spin_unlock(&info->lock);
                error = -ENOMEM;
                goto out;
        }
done:
        *pagep = page;
        error = 0;
out:
        if (prealloc_page) {
                mem_cgroup_uncharge_cache_page(prealloc_page);
                page_cache_release(prealloc_page);
        }
        return error;

nospace:
        /*
         * Perhaps the page was brought in from swap between find_lock_page
         * and taking info->lock?  We allow for that at add_to_page_cache_lru,
         * but must also avoid reporting a spurious ENOSPC while working on a
         * full tmpfs.
         */
        page = find_get_page(mapping, index);
        spin_unlock(&info->lock);
        if (page) {
                page_cache_release(page);
                goto repeat;
        }
        error = -ENOSPC;
        goto out;
}

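/*
 * Fault in a page via shmem_getpage(), counting major faults and
 * translating errors into VM_FAULT codes.
 */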
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int error;
        int ret = VM_FAULT_LOCKED;

        if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return VM_FAULT_SIGBUS;

        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

        if (ret & VM_FAULT_MAJOR) {
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
        }
        return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                          unsigned long addr)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        pgoff_t index;

        index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

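/*
 * Lock or unlock the inode's pages in memory for SysV SHM_LOCK:
 * the locked size is charged to user, and the mapping is marked
 * unevictable so vmscan leaves its pages alone.
 */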
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;

        spin_lock(&info->lock);
        if (lock && !(info->flags & VM_LOCKED)) {
                if (!user_shm_lock(inode->i_size, user))
                        goto out_nomem;
                info->flags |= VM_LOCKED;
                mapping_set_unevictable(file->f_mapping);
        }
        if (!lock && (info->flags & VM_LOCKED) && user) {
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
                scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;

out_nomem:
        spin_unlock(&info->lock);
        return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
}

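/*
 * Allocate a new shmem inode, charging it against the mount's inode
 * limit and wiring up the operations appropriate to its file type.
 */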
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
                                     int mode, dev_t dev, unsigned long flags)
{
        struct inode *inode;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (shmem_reserve_inode(sb))
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_blocks = 0;
                inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_generation = get_seconds();
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->swaplist);
                INIT_LIST_HEAD(&info->xattr_list);
                cache_no_acl(inode);

                switch (mode & S_IFMT) {
                default:
                        inode->i_op = &shmem_special_inode_operations;
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_mapping->a_ops = &shmem_aops;
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
                        mpol_shared_policy_init(&info->policy,
                                                 shmem_get_sbmpol(sbinfo));
                        break;
                case S_IFDIR:
                        inc_nlink(inode);
                        /* Some things misbehave if size == 0 on a directory */
                        inode->i_size = 2 * BOGO_DIRENT_SIZE;
                        inode->i_op = &shmem_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;
                        break;
                case S_IFLNK:
                        /*
                         * Must not load anything in the rbtree,
                         * mpol_free_shared_policy will not be called.
                         */
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
        } else
                shmem_free_inode(sb);
        return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

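/*
 * Minimal write_begin/write_end pair: shmem_getpage() supplies the
 * locked page, and write_end just extends i_size and dirties it.
 */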
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;

        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);

        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);

        return copied;
}

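/*
 * Copy data out to the reader page by page, supplying ZERO_PAGE for
 * holes rather than allocating pages for them.
 */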
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;

        /*
         * Might this read be for a stacking filesystem?  Then when reading
         * holes of a sparse file, we actually need to allocate those pages,
         * and even mark them dirty, so it cannot exceed the max_blocks limit.
         */
        if (segment_eq(get_fs(), KERNEL_DS))
                sgp = SGP_DIRTY;

        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        for (;;) {
                struct page *page = NULL;
                pgoff_t end_index;
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);

                end_index = i_size >> PAGE_CACHE_SHIFT;
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset)
                                break;
                }

                desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
                if (desc->error) {
                        if (desc->error == -EINVAL)
                                desc->error = 0;
                        break;
                }
                if (page)
                        unlock_page(page);

                /*
                 * We must evaluate after, since reads (unlike writes)
                 * are called without i_mutex protection against truncate
                 */
                nr = PAGE_CACHE_SIZE;
                i_size = i_size_read(inode);
                end_index = i_size >> PAGE_CACHE_SHIFT;
                if (index == end_index) {
                        nr = i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset) {
                                if (page)
                                        page_cache_release(page);
                                break;
                        }
                }
                nr -= offset;

                if (page) {
                        /*
                         * If users can be writing to this page using arbitrary
                         * virtual addresses, take care about potential aliasing
                         * before reading the page on the kernel side.
                         */
                        if (mapping_writably_mapped(mapping))
                                flush_dcache_page(page);
                        /*
                         * Mark the page accessed if we read the beginning.
                         */
                        if (!offset)
                                mark_page_accessed(page);
                } else {
                        page = ZERO_PAGE(0);
                        page_cache_get(page);
                }

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret != nr || !desc->count)
                        break;

                cond_resched();
        }

        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;

        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;

        for (seg = 0; seg < nr_segs; seg++) {
                read_descriptor_t desc;

                desc.written = 0;
                desc.arg.buf = iov[seg].iov_base;
                desc.count = iov[seg].iov_len;
                if (desc.count == 0)
                        continue;
                desc.error = 0;
                do_shmem_file_read(filp, ppos, &desc, file_read_actor);
                retval += desc.written;
                if (desc.error) {
                        retval = retval ?: desc.error;
                        break;
                }
                if (desc.count > 0)
                        break;
        }
        return retval;
}

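/*
 * splice read tailored to tmpfs: like generic_file_splice_read(),
 * but calling shmem_getpage() directly for any page not yet in cache.
 */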
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                                struct pipe_inode_info *pipe, size_t len,
                                unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        struct inode *inode = mapping->host;
        unsigned int loff, nr_pages, req_pages;
        struct page *pages[PIPE_DEF_BUFFERS];
        struct partial_page partial[PIPE_DEF_BUFFERS];
        struct page *page;
        pgoff_t index, end_index;
        loff_t isize, left;
        int error, page_nr;
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
        };

        isize = i_size_read(inode);
        if (unlikely(*ppos >= isize))
                return 0;

        left = isize - *ppos;
        if (unlikely(left < len))
                len = left;

        if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;

        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nr_pages = min(req_pages, pipe->buffers);

        spd.nr_pages = find_get_pages_contig(mapping, index,
                                                nr_pages, spd.pages);
        index += spd.nr_pages;
        error = 0;

        while (spd.nr_pages < nr_pages) {
                error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
                if (error)
                        break;
                unlock_page(page);
                spd.pages[spd.nr_pages++] = page;
                index++;
        }

        index = *ppos >> PAGE_CACHE_SHIFT;
        nr_pages = spd.nr_pages;
        spd.nr_pages = 0;

        for (page_nr = 0; page_nr < nr_pages; page_nr++) {
                unsigned int this_len;

                if (!len)
                        break;

                this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
                page = spd.pages[page_nr];

                if (!PageUptodate(page) || page->mapping != mapping) {
                        error = shmem_getpage(inode, index, &page,
                                                        SGP_CACHE, NULL);
                        if (error)
                                break;
                        unlock_page(page);
                        page_cache_release(spd.pages[page_nr]);
                        spd.pages[page_nr] = page;
                }

                isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                if (unlikely(!isize || index > end_index))
                        break;

                if (end_index == index) {
                        unsigned int plen;

                        plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (plen <= loff)
                                break;

                        this_len = min(this_len, plen - loff);
                        len = this_len;
                }

                spd.partial[page_nr].offset = loff;
                spd.partial[page_nr].len = this_len;
                len -= this_len;
                loff = 0;
                spd.nr_pages++;
                index++;
        }

        while (page_nr < nr_pages)
                page_cache_release(spd.pages[page_nr++]);

        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);

        splice_shrink_spd(pipe, &spd);

        if (error > 0) {
                *ppos += error;
                file_accessed(in);
        }
        return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

        buf->f_type = TMPFS_MAGIC;
        buf->f_bsize = PAGE_CACHE_SIZE;
        buf->f_namelen = NAME_MAX;
        if (sbinfo->max_blocks) {
                buf->f_blocks = sbinfo->max_blocks;
                buf->f_bavail =
                buf->f_bfree  = sbinfo->max_blocks -
                                percpu_counter_sum(&sbinfo->used_blocks);
        }
        if (sbinfo->max_inodes) {
                buf->f_files = sbinfo->max_inodes;
                buf->f_ffree = sbinfo->free_inodes;
        }
        /* else leave those fields 0 like simple_statfs */
        return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
        if (inode) {
                error = security_inode_init_security(inode, dir,
                                                     &dentry->d_name, NULL,
                                                     NULL, NULL);
                if (error) {
                        if (error != -EOPNOTSUPP) {
                                iput(inode);
                                return error;
                        }
                }
#ifdef CONFIG_TMPFS_POSIX_ACL
                error = generic_acl_init(inode, dir);
                if (error) {
                        iput(inode);
                        return error;
                }
#else
                error = 0;
#endif
                dir->i_size += BOGO_DIRENT_SIZE;
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry); /* Extra count - pin the dentry in core */
        }
        return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        int error;

        if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
                return error;
        inc_nlink(dir);
        return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
                struct nameidata *nd)
{
        return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int ret;

        /*
         * No ordinary (disk based) filesystem counts links as inodes;
         * but each new link needs a new dentry, pinning lowmem, and
         * tmpfs dentries cannot be pruned until they are unlinked.
         */
        ret = shmem_reserve_inode(inode->i_sb);
        if (ret)
                goto out;

        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
        ihold(inode);   /* New dentry reference */
        dget(dentry);           /* Extra pinning count for the created dentry */
        d_instantiate(dentry, inode);
out:
        return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
                shmem_free_inode(inode->i_sb);

        dir->i_size -= BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        drop_nlink(inode);
        dput(dentry);   /* Undo the count from "create" - this does all the work */
        return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
        if (!simple_empty(dentry))
                return -ENOTEMPTY;

        drop_nlink(dentry->d_inode);
        drop_nlink(dir);
        return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename;
 * we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int they_are_dirs = S_ISDIR(inode->i_mode);

        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;

        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
                if (they_are_dirs)
                        drop_nlink(old_dir);
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
        }

        old_dir->i_size -= BOGO_DIRENT_SIZE;
        new_dir->i_size += BOGO_DIRENT_SIZE;
        old_dir->i_ctime = old_dir->i_mtime =
        new_dir->i_ctime = new_dir->i_mtime =
        inode->i_ctime = CURRENT_TIME;
        return 0;
}

1479 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1480 {
1481         int error;
1482         int len;
1483         struct inode *inode;
1484         struct page *page;
1485         char *kaddr;
1486         struct shmem_inode_info *info;
1487
1488         len = strlen(symname) + 1;
1489         if (len > PAGE_CACHE_SIZE)
1490                 return -ENAMETOOLONG;
1491
1492         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1493         if (!inode)
1494                 return -ENOSPC;
1495
1496         error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
1497                                              NULL, NULL);
1498         if (error) {
1499                 if (error != -EOPNOTSUPP) {
1500                         iput(inode);
1501                         return error;
1502                 }
1503                 error = 0;
1504         }
1505
1506         info = SHMEM_I(inode);
1507         inode->i_size = len-1;
1508         if (len <= SHMEM_SYMLINK_INLINE_LEN) {
1509                 /* do it inline */
1510                 memcpy(info->inline_symlink, symname, len);
1511                 inode->i_op = &shmem_symlink_inline_operations;
1512         } else {
1513                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1514                 if (error) {
1515                         iput(inode);
1516                         return error;
1517                 }
1518                 inode->i_mapping->a_ops = &shmem_aops;
1519                 inode->i_op = &shmem_symlink_inode_operations;
1520                 kaddr = kmap_atomic(page, KM_USER0);
1521                 memcpy(kaddr, symname, len);
1522                 kunmap_atomic(kaddr, KM_USER0);
1523                 set_page_dirty(page);
1524                 unlock_page(page);
1525                 page_cache_release(page);
1526         }
1527         dir->i_size += BOGO_DIRENT_SIZE;
1528         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1529         d_instantiate(dentry, inode);
1530         dget(dentry);
1531         return 0;
1532 }
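
/*
 * Illustrative note (editor's sketch): a short target fits in the inode
 * itself and is served by shmem_follow_link_inline() with no page ever
 * allocated, while a longer target is written into page 0 of the mapping:
 *
 *	symlink("ab", link)                     -> info->inline_symlink
 *	symlink(<longer than inline len>, link) -> shmem_getpage() above
 */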
1533
1534 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1535 {
1536         nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
1537         return NULL;
1538 }
1539
1540 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1541 {
1542         struct page *page = NULL;
1543         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1544         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1545         if (page)
1546                 unlock_page(page);
1547         return page;
1548 }
1549
1550 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1551 {
1552         if (!IS_ERR(nd_get_link(nd))) {
1553                 struct page *page = cookie;
1554                 kunmap(page);
1555                 mark_page_accessed(page);
1556                 page_cache_release(page);
1557         }
1558 }
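
/*
 * Editor's note (illustrative): the page kmap()ed in shmem_follow_link()
 * is returned as the cookie, so the VFS hands it straight back to
 * shmem_put_link() for the matching kunmap() and page_cache_release()
 * once the link body has been consumed.
 */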
1559
1560 #ifdef CONFIG_TMPFS_XATTR
1561 /*
1562  * Superblocks without xattr inode operations may get some security.* xattr
1563  * support from the LSM "for free". As soon as we have any other xattrs
1564  * like ACLs, we also need to implement the security.* handlers at
1565  * filesystem level, though.
1566  */
1567
1568 static int shmem_xattr_get(struct dentry *dentry, const char *name,
1569                            void *buffer, size_t size)
1570 {
1571         struct shmem_inode_info *info;
1572         struct shmem_xattr *xattr;
1573         int ret = -ENODATA;
1574
1575         info = SHMEM_I(dentry->d_inode);
1576
1577         spin_lock(&info->lock);
1578         list_for_each_entry(xattr, &info->xattr_list, list) {
1579                 if (strcmp(name, xattr->name))
1580                         continue;
1581
1582                 ret = xattr->size;
1583                 if (buffer) {
1584                         if (size < xattr->size)
1585                                 ret = -ERANGE;
1586                         else
1587                                 memcpy(buffer, xattr->value, xattr->size);
1588                 }
1589                 break;
1590         }
1591         spin_unlock(&info->lock);
1592         return ret;
1593 }
1594
1595 static int shmem_xattr_set(struct dentry *dentry, const char *name,
1596                            const void *value, size_t size, int flags)
1597 {
1598         struct inode *inode = dentry->d_inode;
1599         struct shmem_inode_info *info = SHMEM_I(inode);
1600         struct shmem_xattr *xattr;
1601         struct shmem_xattr *new_xattr = NULL;
1602         size_t len;
1603         int err = 0;
1604
1605         /* value == NULL means remove */
1606         if (value) {
1607                 /* reject a size_t wrap around in the length sum */
1608                 len = sizeof(*new_xattr) + size;
1609                 if (len <= sizeof(*new_xattr))
1610                         return -ENOMEM;
1611
1612                 new_xattr = kmalloc(len, GFP_KERNEL);
1613                 if (!new_xattr)
1614                         return -ENOMEM;
1615
1616                 new_xattr->name = kstrdup(name, GFP_KERNEL);
1617                 if (!new_xattr->name) {
1618                         kfree(new_xattr);
1619                         return -ENOMEM;
1620                 }
1621
1622                 new_xattr->size = size;
1623                 memcpy(new_xattr->value, value, size);
1624         }
1625
1626         spin_lock(&info->lock);
1627         list_for_each_entry(xattr, &info->xattr_list, list) {
1628                 if (!strcmp(name, xattr->name)) {
1629                         if (flags & XATTR_CREATE) {
1630                                 xattr = new_xattr;
1631                                 err = -EEXIST;
1632                         } else if (new_xattr) {
1633                                 list_replace(&xattr->list, &new_xattr->list);
1634                         } else {
1635                                 list_del(&xattr->list);
1636                         }
1637                         goto out;
1638                 }
1639         }
1640         if (flags & XATTR_REPLACE) {
1641                 xattr = new_xattr;
1642                 err = -ENODATA;
1643         } else {
1644                 list_add(&new_xattr->list, &info->xattr_list);
1645                 xattr = NULL;
1646         }
1647 out:
1648         spin_unlock(&info->lock);
1649         if (xattr)
1650                 kfree(xattr->name);
1651         kfree(xattr);
1652         return err;
1653 }
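
/*
 * Examples (illustrative) of the flag handling above:
 *
 *	XATTR_CREATE and the name already exists -> -EEXIST
 *	XATTR_REPLACE and the name is absent     -> -ENODATA
 *	value == NULL (the removexattr path)     -> list_del() of the entry
 */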
1654
1655 static const struct xattr_handler *shmem_xattr_handlers[] = {
1656 #ifdef CONFIG_TMPFS_POSIX_ACL
1657         &generic_acl_access_handler,
1658         &generic_acl_default_handler,
1659 #endif
1660         NULL
1661 };
1662
1663 static int shmem_xattr_validate(const char *name)
1664 {
1665         struct { const char *prefix; size_t len; } arr[] = {
1666                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1667                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1668         };
1669         int i;
1670
1671         for (i = 0; i < ARRAY_SIZE(arr); i++) {
1672                 size_t preflen = arr[i].len;
1673                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1674                         if (!name[preflen])
1675                                 return -EINVAL;
1676                         return 0;
1677                 }
1678         }
1679         return -EOPNOTSUPP;
1680 }
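
/*
 * Examples (illustrative): "security.selinux" and "trusted.md5sum" pass
 * the check above, a bare prefix such as "security." is rejected with
 * -EINVAL, and any other namespace, e.g. "user.comment", returns
 * -EOPNOTSUPP.
 */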
1681
1682 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1683                               void *buffer, size_t size)
1684 {
1685         int err;
1686
1687         /*
1688          * If this is a request for a synthetic attribute in the system.*
1689          * namespace use the generic infrastructure to resolve a handler
1690          * for it via sb->s_xattr.
1691          */
1692         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1693                 return generic_getxattr(dentry, name, buffer, size);
1694
1695         err = shmem_xattr_validate(name);
1696         if (err)
1697                 return err;
1698
1699         return shmem_xattr_get(dentry, name, buffer, size);
1700 }
1701
1702 static int shmem_setxattr(struct dentry *dentry, const char *name,
1703                           const void *value, size_t size, int flags)
1704 {
1705         int err;
1706
1707         /*
1708          * If this is a request for a synthetic attribute in the system.*
1709          * namespace use the generic infrastructure to resolve a handler
1710          * for it via sb->s_xattr.
1711          */
1712         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1713                 return generic_setxattr(dentry, name, value, size, flags);
1714
1715         err = shmem_xattr_validate(name);
1716         if (err)
1717                 return err;
1718
1719         if (size == 0)
1720                 value = "";  /* empty EA, do not remove */
1721
1722         return shmem_xattr_set(dentry, name, value, size, flags);
1723
1724 }
1725
1726 static int shmem_removexattr(struct dentry *dentry, const char *name)
1727 {
1728         int err;
1729
1730         /*
1731          * If this is a request for a synthetic attribute in the system.*
1732          * namespace use the generic infrastructure to resolve a handler
1733          * for it via sb->s_xattr.
1734          */
1735         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1736                 return generic_removexattr(dentry, name);
1737
1738         err = shmem_xattr_validate(name);
1739         if (err)
1740                 return err;
1741
1742         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1743 }
1744
1745 static bool xattr_is_trusted(const char *name)
1746 {
1747         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1748 }
1749
1750 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1751 {
1752         bool trusted = capable(CAP_SYS_ADMIN);
1753         struct shmem_xattr *xattr;
1754         struct shmem_inode_info *info;
1755         size_t used = 0;
1756
1757         info = SHMEM_I(dentry->d_inode);
1758
1759         spin_lock(&info->lock);
1760         list_for_each_entry(xattr, &info->xattr_list, list) {
1761                 size_t len;
1762
1763                 /* skip "trusted." attributes for unprivileged callers */
1764                 if (!trusted && xattr_is_trusted(xattr->name))
1765                         continue;
1766
1767                 len = strlen(xattr->name) + 1;
1768                 used += len;
1769                 if (buffer) {
1770                         if (size < used) {
1771                                 used = -ERANGE;
1772                                 break;
1773                         }
1774                         memcpy(buffer, xattr->name, len);
1775                         buffer += len;
1776                 }
1777         }
1778         spin_unlock(&info->lock);
1779
1780         return used;
1781 }
1782 #endif /* CONFIG_TMPFS_XATTR */
1783
1784 static const struct inode_operations shmem_symlink_inline_operations = {
1785         .readlink       = generic_readlink,
1786         .follow_link    = shmem_follow_link_inline,
1787 #ifdef CONFIG_TMPFS_XATTR
1788         .setxattr       = shmem_setxattr,
1789         .getxattr       = shmem_getxattr,
1790         .listxattr      = shmem_listxattr,
1791         .removexattr    = shmem_removexattr,
1792 #endif
1793 };
1794
1795 static const struct inode_operations shmem_symlink_inode_operations = {
1796         .readlink       = generic_readlink,
1797         .follow_link    = shmem_follow_link,
1798         .put_link       = shmem_put_link,
1799 #ifdef CONFIG_TMPFS_XATTR
1800         .setxattr       = shmem_setxattr,
1801         .getxattr       = shmem_getxattr,
1802         .listxattr      = shmem_listxattr,
1803         .removexattr    = shmem_removexattr,
1804 #endif
1805 };
1806
1807 static struct dentry *shmem_get_parent(struct dentry *child)
1808 {
1809         return ERR_PTR(-ESTALE);
1810 }
1811
1812 static int shmem_match(struct inode *ino, void *vfh)
1813 {
1814         __u32 *fh = vfh;
1815         __u64 inum = fh[2];
1816         inum = (inum << 32) | fh[1];
1817         return ino->i_ino == inum && fh[0] == ino->i_generation;
1818 }
1819
1820 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1821                 struct fid *fid, int fh_len, int fh_type)
1822 {
1823         struct inode *inode;
1824         struct dentry *dentry = NULL;
1825         u64 inum;
1826
1827         if (fh_len < 3)
1828                 return NULL;
1829         inum = ((u64)fid->raw[2] << 32) | fid->raw[1];
1830
1831         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1832                         shmem_match, fid->raw);
1833         if (inode) {
1834                 dentry = d_find_alias(inode);
1835                 iput(inode);
1836         }
1837
1838         return dentry;
1839 }
1840
1841 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1842                                 int connectable)
1843 {
1844         struct inode *inode = dentry->d_inode;
1845
1846         if (*len < 3) {
1847                 *len = 3;
1848                 return 255;
1849         }
1850
1851         if (inode_unhashed(inode)) {
1852                 /* Unfortunately insert_inode_hash is not idempotent,
1853                  * so as we hash inodes here rather than at creation
1854                  * time, we need a lock to ensure we only try
1855                  * to do it once.
1856                  */
1857                 static DEFINE_SPINLOCK(lock);
1858                 spin_lock(&lock);
1859                 if (inode_unhashed(inode))
1860                         __insert_inode_hash(inode,
1861                                             inode->i_ino + inode->i_generation);
1862                 spin_unlock(&lock);
1863         }
1864
1865         fh[0] = inode->i_generation;
1866         fh[1] = inode->i_ino;
1867         fh[2] = ((__u64)inode->i_ino) >> 32;
1868
1869         *len = 3;
1870         return 1;
1871 }
1872
1873 static const struct export_operations shmem_export_ops = {
1874         .get_parent     = shmem_get_parent,
1875         .encode_fh      = shmem_encode_fh,
1876         .fh_to_dentry   = shmem_fh_to_dentry,
1877 };
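
/*
 * Editor's sketch of the handle produced by shmem_encode_fh(): three
 * 32-bit words, { i_generation, low 32 bits of i_ino, high 32 bits of
 * i_ino }, which shmem_match() reassembles as
 *
 *	inum = ((__u64)fh[2] << 32) | fh[1];
 *
 * before comparing fh[0] against i_generation.
 */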
1878
1879 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
1880                                bool remount)
1881 {
1882         char *this_char, *value, *rest;
1883
1884         while (options != NULL) {
1885                 this_char = options;
1886                 for (;;) {
1887                         /*
1888                          * NUL-terminate this option: unfortunately,
1889                          * mount options form a comma-separated list,
1890                          * but mpol's nodelist may also contain commas.
1891                          */
1892                         options = strchr(options, ',');
1893                         if (options == NULL)
1894                                 break;
1895                         options++;
1896                         if (!isdigit(*options)) {
1897                                 options[-1] = '\0';
1898                                 break;
1899                         }
1900                 }
1901                 if (!*this_char)
1902                         continue;
1903                 if ((value = strchr(this_char,'=')) != NULL) {
1904                         *value++ = 0;
1905                 } else {
1906                         printk(KERN_ERR
1907                             "tmpfs: No value for mount option '%s'\n",
1908                             this_char);
1909                         return 1;
1910                 }
1911
1912                 if (!strcmp(this_char,"size")) {
1913                         unsigned long long size;
1914                         size = memparse(value,&rest);
1915                         if (*rest == '%') {
1916                                 size <<= PAGE_SHIFT;
1917                                 size *= totalram_pages;
1918                                 do_div(size, 100);
1919                                 rest++;
1920                         }
1921                         if (*rest)
1922                                 goto bad_val;
1923                         sbinfo->max_blocks =
1924                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
1925                 } else if (!strcmp(this_char,"nr_blocks")) {
1926                         sbinfo->max_blocks = memparse(value, &rest);
1927                         if (*rest)
1928                                 goto bad_val;
1929                 } else if (!strcmp(this_char,"nr_inodes")) {
1930                         sbinfo->max_inodes = memparse(value, &rest);
1931                         if (*rest)
1932                                 goto bad_val;
1933                 } else if (!strcmp(this_char,"mode")) {
1934                         if (remount)
1935                                 continue;
1936                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
1937                         if (*rest)
1938                                 goto bad_val;
1939                 } else if (!strcmp(this_char,"uid")) {
1940                         if (remount)
1941                                 continue;
1942                         sbinfo->uid = simple_strtoul(value, &rest, 0);
1943                         if (*rest)
1944                                 goto bad_val;
1945                 } else if (!strcmp(this_char,"gid")) {
1946                         if (remount)
1947                                 continue;
1948                         sbinfo->gid = simple_strtoul(value, &rest, 0);
1949                         if (*rest)
1950                                 goto bad_val;
1951                 } else if (!strcmp(this_char,"mpol")) {
1952                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
1953                                 goto bad_val;
1954                 } else {
1955                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1956                                this_char);
1957                         return 1;
1958                 }
1959         }
1960         return 0;
1961
1962 bad_val:
1963         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
1964                value, this_char);
1965         return 1;
1966
1967 }
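
/*
 * Example (illustrative): a typical invocation handled by the parser
 * above is
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=8192,mode=1777 tmpfs /tmp
 *
 * "size" accepts memparse()'s k/m/g suffixes or a trailing '%' of
 * totalram_pages; on remount, the mode/uid/gid branches are skipped.
 */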
1968
1969 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
1970 {
1971         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1972         struct shmem_sb_info config = *sbinfo;
1973         unsigned long inodes;
1974         int error = -EINVAL;
1975
1976         if (shmem_parse_options(data, &config, true))
1977                 return error;
1978
1979         spin_lock(&sbinfo->stat_lock);
1980         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
1981         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
1982                 goto out;
1983         if (config.max_inodes < inodes)
1984                 goto out;
1985         /*
1986          * Those tests also disallow limited->unlimited while any are in
1987          * use, so i_blocks will always be zero when max_blocks is zero;
1988          * but we must separately disallow unlimited->limited, because
1989          * in that case we have no record of how much is already in use.
1990          */
1991         if (config.max_blocks && !sbinfo->max_blocks)
1992                 goto out;
1993         if (config.max_inodes && !sbinfo->max_inodes)
1994                 goto out;
1995
1996         error = 0;
1997         sbinfo->max_blocks  = config.max_blocks;
1998         sbinfo->max_inodes  = config.max_inodes;
1999         sbinfo->free_inodes = config.max_inodes - inodes;
2000
2001         mpol_put(sbinfo->mpol);
2002         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2003 out:
2004         spin_unlock(&sbinfo->stat_lock);
2005         return error;
2006 }
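
/*
 * Illustrative consequence of the checks above: remounting a limited
 * instance as unlimited ("mount -o remount,size=0") succeeds only while
 * no blocks are in use, and an unlimited instance can never be remounted
 * to a finite size, because nothing was accounted in the meantime.
 */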
2007
2008 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2009 {
2010         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2011
2012         if (sbinfo->max_blocks != shmem_default_max_blocks())
2013                 seq_printf(seq, ",size=%luk",
2014                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2015         if (sbinfo->max_inodes != shmem_default_max_inodes())
2016                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2017         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2018                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2019         if (sbinfo->uid != 0)
2020                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2021         if (sbinfo->gid != 0)
2022                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2023         shmem_show_mpol(seq, sbinfo->mpol);
2024         return 0;
2025 }
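
/*
 * Example output (illustrative): with non-default settings the code
 * above appends something like
 *
 *	,size=204800k,nr_inodes=25000,mode=700,uid=1000,gid=1000
 *
 * to the tmpfs line in /proc/mounts.
 */
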
2026 #endif /* CONFIG_TMPFS */
2027
2028 static void shmem_put_super(struct super_block *sb)
2029 {
2030         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2031
2032         percpu_counter_destroy(&sbinfo->used_blocks);
2033         kfree(sbinfo);
2034         sb->s_fs_info = NULL;
2035 }
2036
2037 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2038 {
2039         struct inode *inode;
2040         struct dentry *root;
2041         struct shmem_sb_info *sbinfo;
2042         int err = -ENOMEM;
2043
2044         /* Round up to L1_CACHE_BYTES to resist false sharing */
2045         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2046                                 L1_CACHE_BYTES), GFP_KERNEL);
2047         if (!sbinfo)
2048                 return -ENOMEM;
2049
2050         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2051         sbinfo->uid = current_fsuid();
2052         sbinfo->gid = current_fsgid();
2053         sb->s_fs_info = sbinfo;
2054
2055 #ifdef CONFIG_TMPFS
2056         /*
2057  * By default we only allow half of the physical RAM per
2058          * tmpfs instance, limiting inodes to one per page of lowmem;
2059          * but the internal instance is left unlimited.
2060          */
2061         if (!(sb->s_flags & MS_NOUSER)) {
2062                 sbinfo->max_blocks = shmem_default_max_blocks();
2063                 sbinfo->max_inodes = shmem_default_max_inodes();
2064                 if (shmem_parse_options(data, sbinfo, false)) {
2065                         err = -EINVAL;
2066                         goto failed;
2067                 }
2068         }
2069         sb->s_export_op = &shmem_export_ops;
2070 #else
2071         sb->s_flags |= MS_NOUSER;
2072 #endif
2073
2074         spin_lock_init(&sbinfo->stat_lock);
2075         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2076                 goto failed;
2077         sbinfo->free_inodes = sbinfo->max_inodes;
2078
2079         sb->s_maxbytes = MAX_LFS_FILESIZE;
2080         sb->s_blocksize = PAGE_CACHE_SIZE;
2081         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2082         sb->s_magic = TMPFS_MAGIC;
2083         sb->s_op = &shmem_ops;
2084         sb->s_time_gran = 1;
2085 #ifdef CONFIG_TMPFS_XATTR
2086         sb->s_xattr = shmem_xattr_handlers;
2087 #endif
2088 #ifdef CONFIG_TMPFS_POSIX_ACL
2089         sb->s_flags |= MS_POSIXACL;
2090 #endif
2091
2092         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2093         if (!inode)
2094                 goto failed;
2095         inode->i_uid = sbinfo->uid;
2096         inode->i_gid = sbinfo->gid;
2097         root = d_alloc_root(inode);
2098         if (!root)
2099                 goto failed_iput;
2100         sb->s_root = root;
2101         return 0;
2102
2103 failed_iput:
2104         iput(inode);
2105 failed:
2106         shmem_put_super(sb);
2107         return err;
2108 }
2109
2110 static struct kmem_cache *shmem_inode_cachep;
2111
2112 static struct inode *shmem_alloc_inode(struct super_block *sb)
2113 {
2114         struct shmem_inode_info *info;
2115         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2116         if (!info)
2117                 return NULL;
2118         return &info->vfs_inode;
2119 }
2120
2121 static void shmem_destroy_callback(struct rcu_head *head)
2122 {
2123         struct inode *inode = container_of(head, struct inode, i_rcu);
2124         INIT_LIST_HEAD(&inode->i_dentry);
2125         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2126 }
2127
2128 static void shmem_destroy_inode(struct inode *inode)
2129 {
2130         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2131                 /* only struct inode is valid if it's an inline symlink */
2132                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2133         }
2134         call_rcu(&inode->i_rcu, shmem_destroy_callback);
2135 }
2136
2137 static void shmem_init_inode(void *foo)
2138 {
2139         struct shmem_inode_info *info = foo;
2140         inode_init_once(&info->vfs_inode);
2141 }
2142
2143 static int shmem_init_inodecache(void)
2144 {
2145         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2146                                 sizeof(struct shmem_inode_info),
2147                                 0, SLAB_PANIC, shmem_init_inode);
2148         return 0;
2149 }
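
/*
 * Illustrative note: SLAB_PANIC makes kmem_cache_create() panic instead
 * of returning NULL, which is why this helper can always return 0 and
 * shmem_init() never sees an error from it.
 */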
2150
2151 static void shmem_destroy_inodecache(void)
2152 {
2153         kmem_cache_destroy(shmem_inode_cachep);
2154 }
2155
2156 static const struct address_space_operations shmem_aops = {
2157         .writepage      = shmem_writepage,
2158         .set_page_dirty = __set_page_dirty_no_writeback,
2159 #ifdef CONFIG_TMPFS
2160         .write_begin    = shmem_write_begin,
2161         .write_end      = shmem_write_end,
2162 #endif
2163         .migratepage    = migrate_page,
2164         .error_remove_page = generic_error_remove_page,
2165 };
2166
2167 static const struct file_operations shmem_file_operations = {
2168         .mmap           = shmem_mmap,
2169 #ifdef CONFIG_TMPFS
2170         .llseek         = generic_file_llseek,
2171         .read           = do_sync_read,
2172         .write          = do_sync_write,
2173         .aio_read       = shmem_file_aio_read,
2174         .aio_write      = generic_file_aio_write,
2175         .fsync          = noop_fsync,
2176         .splice_read    = shmem_file_splice_read,
2177         .splice_write   = generic_file_splice_write,
2178 #endif
2179 };
2180
2181 static const struct inode_operations shmem_inode_operations = {
2182         .setattr        = shmem_setattr,
2183         .truncate_range = shmem_truncate_range,
2184 #ifdef CONFIG_TMPFS_XATTR
2185         .setxattr       = shmem_setxattr,
2186         .getxattr       = shmem_getxattr,
2187         .listxattr      = shmem_listxattr,
2188         .removexattr    = shmem_removexattr,
2189 #endif
2190 };
2191
2192 static const struct inode_operations shmem_dir_inode_operations = {
2193 #ifdef CONFIG_TMPFS
2194         .create         = shmem_create,
2195         .lookup         = simple_lookup,
2196         .link           = shmem_link,
2197         .unlink         = shmem_unlink,
2198         .symlink        = shmem_symlink,
2199         .mkdir          = shmem_mkdir,
2200         .rmdir          = shmem_rmdir,
2201         .mknod          = shmem_mknod,
2202         .rename         = shmem_rename,
2203 #endif
2204 #ifdef CONFIG_TMPFS_XATTR
2205         .setxattr       = shmem_setxattr,
2206         .getxattr       = shmem_getxattr,
2207         .listxattr      = shmem_listxattr,
2208         .removexattr    = shmem_removexattr,
2209 #endif
2210 #ifdef CONFIG_TMPFS_POSIX_ACL
2211         .setattr        = shmem_setattr,
2212 #endif
2213 };
2214
2215 static const struct inode_operations shmem_special_inode_operations = {
2216 #ifdef CONFIG_TMPFS_XATTR
2217         .setxattr       = shmem_setxattr,
2218         .getxattr       = shmem_getxattr,
2219         .listxattr      = shmem_listxattr,
2220         .removexattr    = shmem_removexattr,
2221 #endif
2222 #ifdef CONFIG_TMPFS_POSIX_ACL
2223         .setattr        = shmem_setattr,
2224 #endif
2225 };
2226
2227 static const struct super_operations shmem_ops = {
2228         .alloc_inode    = shmem_alloc_inode,
2229         .destroy_inode  = shmem_destroy_inode,
2230 #ifdef CONFIG_TMPFS
2231         .statfs         = shmem_statfs,
2232         .remount_fs     = shmem_remount_fs,
2233         .show_options   = shmem_show_options,
2234 #endif
2235         .evict_inode    = shmem_evict_inode,
2236         .drop_inode     = generic_delete_inode,
2237         .put_super      = shmem_put_super,
2238 };
2239
2240 static const struct vm_operations_struct shmem_vm_ops = {
2241         .fault          = shmem_fault,
2242 #ifdef CONFIG_NUMA
2243         .set_policy     = shmem_set_policy,
2244         .get_policy     = shmem_get_policy,
2245 #endif
2246 };
2247
2248 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2249         int flags, const char *dev_name, void *data)
2250 {
2251         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2252 }
2253
2254 static struct file_system_type shmem_fs_type = {
2255         .owner          = THIS_MODULE,
2256         .name           = "tmpfs",
2257         .mount          = shmem_mount,
2258         .kill_sb        = kill_litter_super,
2259 };
2260
2261 int __init shmem_init(void)
2262 {
2263         int error;
2264
2265         error = bdi_init(&shmem_backing_dev_info);
2266         if (error)
2267                 goto out4;
2268
2269         error = shmem_init_inodecache();
2270         if (error)
2271                 goto out3;
2272
2273         error = register_filesystem(&shmem_fs_type);
2274         if (error) {
2275                 printk(KERN_ERR "Could not register tmpfs\n");
2276                 goto out2;
2277         }
2278
2279         shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2280                                  shmem_fs_type.name, NULL);
2281         if (IS_ERR(shm_mnt)) {
2282                 error = PTR_ERR(shm_mnt);
2283                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2284                 goto out1;
2285         }
2286         return 0;
2287
2288 out1:
2289         unregister_filesystem(&shmem_fs_type);
2290 out2:
2291         shmem_destroy_inodecache();
2292 out3:
2293         bdi_destroy(&shmem_backing_dev_info);
2294 out4:
2295         shm_mnt = ERR_PTR(error);
2296         return error;
2297 }
2298
2299 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2300 /**
2301  * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
2302  * @inode: the inode to be searched
2303  * @index: the page offset to be searched
2304  * @pagep: the pointer for the found page to be stored
2305  * @swapp: the pointer for the found swap entry to be stored
2306  *
2307  * If a page is found, its refcount is incremented. Callers must handle
2308  * that extra reference themselves.
2309  */
2310 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
2311                                  struct page **pagep, swp_entry_t *swapp)
2312 {
2313         struct shmem_inode_info *info = SHMEM_I(inode);
2314         struct page *page = NULL;
2315         swp_entry_t swap = {0};
2316
2317         if (((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2318                 goto out;
2319
2320         spin_lock(&info->lock);
2321 #ifdef CONFIG_SWAP
2322         swap = shmem_get_swap(info, index);
2323         if (swap.val)
2324                 page = find_get_page(&swapper_space, swap.val);
2325         else
2326 #endif
2327                 page = find_get_page(inode->i_mapping, index);
2328         spin_unlock(&info->lock);
2329 out:
2330         *pagep = page;
2331         *swapp = swap;
2332 }
2333 #endif
2334
2335 #else /* !CONFIG_SHMEM */
2336
2337 /*
2338  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2339  *
2340  * This is intended for small systems where the benefits of the full
2341  * shmem code (swap-backed and resource-limited) are outweighed by
2342  * its complexity. On systems without swap this code should be
2343  * effectively equivalent, but much lighter weight.
2344  */
2345
2346 #include <linux/ramfs.h>
2347
2348 static struct file_system_type shmem_fs_type = {
2349         .name           = "tmpfs",
2350         .mount          = ramfs_mount,
2351         .kill_sb        = kill_litter_super,
2352 };
2353
2354 int __init shmem_init(void)
2355 {
2356         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2357
2358         shm_mnt = kern_mount(&shmem_fs_type);
2359         BUG_ON(IS_ERR(shm_mnt));
2360
2361         return 0;
2362 }
2363
2364 int shmem_unuse(swp_entry_t swap, struct page *page)
2365 {
2366         return 0;
2367 }
2368
2369 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2370 {
2371         return 0;
2372 }
2373
2374 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2375 {
2376         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2377 }
2378 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2379
2380 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2381 /**
2382  * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
2383  * @inode: the inode to be searched
2384  * @index: the page offset to be searched
2385  * @pagep: the pointer for the found page to be stored
2386  * @swapp: the pointer for the found swap entry to be stored
2387  *
2388  * If a page is found, its refcount is incremented. Callers must handle
2389  * that extra reference themselves.
2390  */
2391 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
2392                                  struct page **pagep, swp_entry_t *swapp)
2393 {
2394         struct page *page = NULL;
2395
2396         if (((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2397                 goto out;
2398         page = find_get_page(inode->i_mapping, index);
2399 out:
2400         *pagep = page;
2401         *swapp = (swp_entry_t){0};
2402 }
2403 #endif
2404
2405 #define shmem_vm_ops                            generic_file_vm_ops
2406 #define shmem_file_operations                   ramfs_file_operations
2407 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
2408 #define shmem_acct_size(flags, size)            0
2409 #define shmem_unacct_size(flags, size)          do {} while (0)
2410
2411 #endif /* CONFIG_SHMEM */
2412
2413 /* common code */
2414
2415 /**
2416  * shmem_file_setup - get an unlinked file living in tmpfs
2417  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2418  * @size: size to be set for the file
2419  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2420  */
2421 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2422 {
2423         int error;
2424         struct file *file;
2425         struct inode *inode;
2426         struct path path;
2427         struct dentry *root;
2428         struct qstr this;
2429
2430         if (IS_ERR(shm_mnt))
2431                 return ERR_CAST(shm_mnt);
2432
2433         if (size < 0 || size > MAX_LFS_FILESIZE)
2434                 return ERR_PTR(-EINVAL);
2435
2436         if (shmem_acct_size(flags, size))
2437                 return ERR_PTR(-ENOMEM);
2438
2439         error = -ENOMEM;
2440         this.name = name;
2441         this.len = strlen(name);
2442         this.hash = 0; /* never hashed, so any value will do */
2443         root = shm_mnt->mnt_root;
2444         path.dentry = d_alloc(root, &this);
2445         if (!path.dentry)
2446                 goto put_memory;
2447         path.mnt = mntget(shm_mnt);
2448
2449         error = -ENOSPC;
2450         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2451         if (!inode)
2452                 goto put_dentry;
2453
2454         d_instantiate(path.dentry, inode);
2455         inode->i_size = size;
2456         inode->i_nlink = 0;     /* It is unlinked */
2457 #ifndef CONFIG_MMU
2458         error = ramfs_nommu_expand_for_mapping(inode, size);
2459         if (error)
2460                 goto put_dentry;
2461 #endif
2462
2463         error = -ENFILE;
2464         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2465                   &shmem_file_operations);
2466         if (!file)
2467                 goto put_dentry;
2468
2469         return file;
2470
2471 put_dentry:
2472         path_put(&path);
2473 put_memory:
2474         shmem_unacct_size(flags, size);
2475         return ERR_PTR(error);
2476 }
2477 EXPORT_SYMBOL_GPL(shmem_file_setup);
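
/*
 * Example usage (editor's sketch, not part of this file): a kernel user
 * wanting an unlinked tmpfs file as backing store, much as drm_gem does:
 *
 *	struct file *filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	... use filp->f_mapping as the object's page cache ...
 *	fput(filp);	(drop our reference; the pages go with the inode)
 */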
2478
2479 /**
2480  * shmem_zero_setup - setup a shared anonymous mapping
2481  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2482  */
2483 int shmem_zero_setup(struct vm_area_struct *vma)
2484 {
2485         struct file *file;
2486         loff_t size = vma->vm_end - vma->vm_start;
2487
2488         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2489         if (IS_ERR(file))
2490                 return PTR_ERR(file);
2491
2492         if (vma->vm_file)
2493                 fput(vma->vm_file);
2494         vma->vm_file = file;
2495         vma->vm_ops = &shmem_vm_ops;
2496         vma->vm_flags |= VM_CAN_NONLINEAR;
2497         return 0;
2498 }
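
/*
 * Illustrative note: this is the setup path for shared anonymous memory;
 * a userspace
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0)
 *
 * arrives here via do_mmap_pgoff() with no file of its own.
 */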
2499
2500 /**
2501  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2502  * @mapping:    the page's address_space
2503  * @index:      the page index
2504  * @gfp:        the page allocator flags to use if allocating
2505  *
2506  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2507  * with any new page allocations done using the specified allocation flags.
2508  * But read_cache_page_gfp() uses the ->readpage() method: which does not
2509  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2510  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2511  *
2512  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2513  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2514  */
2515 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2516                                          pgoff_t index, gfp_t gfp)
2517 {
2518 #ifdef CONFIG_SHMEM
2519         struct inode *inode = mapping->host;
2520         struct page *page;
2521         int error;
2522
2523         BUG_ON(mapping->a_ops != &shmem_aops);
2524         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2525         if (error)
2526                 page = ERR_PTR(error);
2527         else
2528                 unlock_page(page);
2529         return page;
2530 #else
2531         /*
2532          * The tiny !SHMEM case uses ramfs without swap
2533          */
2534         return read_cache_page_gfp(mapping, index, gfp);
2535 #endif
2536 }
2537 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
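
/*
 * Example usage (editor's sketch): following the i915 pattern mentioned
 * above, a caller that must not OOM the machine might do
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	page_cache_release(page);	(drop the reference when done)
 */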