UBUNTU: Ubuntu-2.6.38-12.51
diff --git a/fs/namespace.c b/fs/namespace.c
index c859622..433e4b0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -11,7 +11,8 @@
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/acct.h>
 #include <linux/seq_file.h>
 #include <linux/mnt_namespace.h>
 #include <linux/namei.h>
+#include <linux/nsproxy.h>
 #include <linux/security.h>
 #include <linux/mount.h>
 #include <linux/ramfs.h>
 #include <linux/log2.h>
 #include <linux/idr.h>
 #include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
 #define HASH_SIZE (1UL << HASH_SHIFT)
 
-/* spinlock for vfsmount related operations, inplace of dcache_lock */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
-
 static int event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
+static DEFINE_SPINLOCK(mnt_id_lock);
+static int mnt_id_start = 0;
+static int mnt_group_start = 1;
 
 static struct list_head *mount_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
@@ -51,6 +54,16 @@ static struct rw_semaphore namespace_sem;
 struct kobject *fs_kobj;
 EXPORT_SYMBOL_GPL(fs_kobj);
 
+/*
+ * vfsmount lock may be taken for read to prevent changes to the
+ * vfsmount hash, i.e. during mountpoint lookups or walking back
+ * up the tree.
+ *
+ * It should be taken for write in all cases where the vfsmount
+ * tree or hash is modified or when a vfsmount structure is modified.
+ */
+DEFINE_BRLOCK(vfsmount_lock);
+
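
A minimal sketch of the locking rule stated above (illustrative, not part
of the patch): lookups nest under the cheap per-cpu read side, while any
mutation of the mount tree, hash, or a vfsmount itself takes the all-CPU
write side.

        br_read_lock(vfsmount_lock);    /* hash and mnt_parent are stable */
        /* ... __lookup_mnt(), walk mnt->mnt_parent, sample mnt_flags ... */
        br_read_unlock(vfsmount_lock);

        br_write_lock(vfsmount_lock);   /* excludes readers on every cpu */
        /* ... attach_mnt()/detach_mnt(), umount_tree(), flag updates ... */
        br_write_unlock(vfsmount_lock);
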
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -61,16 +74,21 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 
 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
 
-/* allocation is serialized by namespace_sem */
+/*
+ * allocation is serialized by namespace_sem, but we need the spinlock to
+ * serialize with freeing.
+ */
 static int mnt_alloc_id(struct vfsmount *mnt)
 {
        int res;
 
 retry:
        ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-       spin_lock(&vfsmount_lock);
-       res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
-       spin_unlock(&vfsmount_lock);
+       spin_lock(&mnt_id_lock);
+       res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
+       if (!res)
+               mnt_id_start = mnt->mnt_id + 1;
+       spin_unlock(&mnt_id_lock);
        if (res == -EAGAIN)
                goto retry;
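
A worked trace of the id-reuse policy implemented above (values
illustrative):

        alloc -> 0      /* mnt_id_start becomes 1 */
        alloc -> 1      /* mnt_id_start becomes 2 */
        alloc -> 2      /* mnt_id_start becomes 3 */
        free(1)         /* mnt_id_start drops back to 1 */
        alloc -> 1      /* lowest free id is handed out again */
        alloc -> 3      /* next free id at or above mnt_id_start */

Rolling mnt_id_start back on free means freed ids are reused promptly,
keeping mount ids small and dense instead of growing without bound.
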
 
@@ -79,9 +97,12 @@ retry:
 
 static void mnt_free_id(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
-       ida_remove(&mnt_id_ida, mnt->mnt_id);
-       spin_unlock(&vfsmount_lock);
+       int id = mnt->mnt_id;
+       spin_lock(&mnt_id_lock);
+       ida_remove(&mnt_id_ida, id);
+       if (mnt_id_start > id)
+               mnt_id_start = id;
+       spin_unlock(&mnt_id_lock);
 }
 
 /*
@@ -91,10 +112,18 @@ static void mnt_free_id(struct vfsmount *mnt)
  */
 static int mnt_alloc_group_id(struct vfsmount *mnt)
 {
+       int res;
+
        if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
                return -ENOMEM;
 
-       return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
+       res = ida_get_new_above(&mnt_group_ida,
+                               mnt_group_start,
+                               &mnt->mnt_group_id);
+       if (!res)
+               mnt_group_start = mnt->mnt_group_id + 1;
+
+       return res;
 }
 
 /*
@@ -102,10 +131,71 @@ static int mnt_alloc_group_id(struct vfsmount *mnt)
  */
 void mnt_release_group_id(struct vfsmount *mnt)
 {
-       ida_remove(&mnt_group_ida, mnt->mnt_group_id);
+       int id = mnt->mnt_group_id;
+       ida_remove(&mnt_group_ida, id);
+       if (mnt_group_start > id)
+               mnt_group_start = id;
        mnt->mnt_group_id = 0;
 }
 
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_add_count(struct vfsmount *mnt, int n)
+{
+#ifdef CONFIG_SMP
+       this_cpu_add(mnt->mnt_pcp->mnt_count, n);
+#else
+       preempt_disable();
+       mnt->mnt_count += n;
+       preempt_enable();
+#endif
+}
+
+static inline void mnt_set_count(struct vfsmount *mnt, int n)
+{
+#ifdef CONFIG_SMP
+       this_cpu_write(mnt->mnt_pcp->mnt_count, n);
+#else
+       mnt->mnt_count = n;
+#endif
+}
+
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_inc_count(struct vfsmount *mnt)
+{
+       mnt_add_count(mnt, 1);
+}
+
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_dec_count(struct vfsmount *mnt)
+{
+       mnt_add_count(mnt, -1);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
+unsigned int mnt_get_count(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+       unsigned int count = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
+       }
+
+       return count;
+#else
+       return mnt->mnt_count;
+#endif
+}
+
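
A stand-alone sketch of the distributed counter these helpers implement
(user-space C; the names and fixed slot count are illustrative). Writers
touch only their own slot; the rare reader pays to sum every slot, just
as mnt_get_count() walks the possible CPUs. Any single slot may go
negative -- only the sum is meaningful.

        #include <stdio.h>

        #define NSLOTS 4
        static long slot[NSLOTS];               /* one per "cpu" */

        static void add(int cpu, long n)        /* mnt_add_count() */
        {
                slot[cpu] += n;
        }

        static long get_count(void)             /* mnt_get_count() */
        {
                long sum = 0;
                for (int i = 0; i < NSLOTS; i++)
                        sum += slot[i];
                return sum;
        }

        int main(void)
        {
                add(0, +1);     /* mntget() runs on cpu 0 ...         */
                add(1, -1);     /* ... the matching mntput() on cpu 1 */
                printf("count = %ld\n", get_count());   /* prints 0 */
                return 0;
        }
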
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
        struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -122,7 +212,17 @@ struct vfsmount *alloc_vfsmnt(const char *name)
                                goto out_free_id;
                }
 
-               atomic_set(&mnt->mnt_count, 1);
+#ifdef CONFIG_SMP
+               mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
+               if (!mnt->mnt_pcp)
+                       goto out_free_devname;
+
+               this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
+#else
+               mnt->mnt_count = 1;
+               mnt->mnt_writers = 0;
+#endif
+
                INIT_LIST_HEAD(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
@@ -131,10 +231,16 @@ struct vfsmount *alloc_vfsmnt(const char *name)
                INIT_LIST_HEAD(&mnt->mnt_share);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
-               atomic_set(&mnt->__mnt_writers, 0);
+#ifdef CONFIG_FSNOTIFY
+               INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+#endif
        }
        return mnt;
 
+#ifdef CONFIG_SMP
+out_free_devname:
+       kfree(mnt->mnt_devname);
+#endif
 out_free_id:
        mnt_free_id(mnt);
 out_free_cache:
@@ -171,65 +277,38 @@ int __mnt_is_readonly(struct vfsmount *mnt)
 }
 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
 
-struct mnt_writer {
-       /*
-        * If holding multiple instances of this lock, they
-        * must be ordered by cpu number.
-        */
-       spinlock_t lock;
-       struct lock_class_key lock_class; /* compiles out with !lockdep */
-       unsigned long count;
-       struct vfsmount *mnt;
-} ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+static inline void mnt_inc_writers(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+       this_cpu_inc(mnt->mnt_pcp->mnt_writers);
+#else
+       mnt->mnt_writers++;
+#endif
+}
 
-static int __init init_mnt_writers(void)
+static inline void mnt_dec_writers(struct vfsmount *mnt)
 {
-       int cpu;
-       for_each_possible_cpu(cpu) {
-               struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
-               spin_lock_init(&writer->lock);
-               lockdep_set_class(&writer->lock, &writer->lock_class);
-               writer->count = 0;
-       }
-       return 0;
+#ifdef CONFIG_SMP
+       this_cpu_dec(mnt->mnt_pcp->mnt_writers);
+#else
+       mnt->mnt_writers--;
+#endif
 }
-fs_initcall(init_mnt_writers);
 
-static void unlock_mnt_writers(void)
+static unsigned int mnt_get_writers(struct vfsmount *mnt)
 {
+#ifdef CONFIG_SMP
+       unsigned int count = 0;
        int cpu;
-       struct mnt_writer *cpu_writer;
 
        for_each_possible_cpu(cpu) {
-               cpu_writer = &per_cpu(mnt_writers, cpu);
-               spin_unlock(&cpu_writer->lock);
+               count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
        }
-}
 
-static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
-{
-       if (!cpu_writer->mnt)
-               return;
-       /*
-        * This is in case anyone ever leaves an invalid,
-        * old ->mnt and a count of 0.
-        */
-       if (!cpu_writer->count)
-               return;
-       atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
-       cpu_writer->count = 0;
-}
- /*
- * must hold cpu_writer->lock
- */
-static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
-                                         struct vfsmount *mnt)
-{
-       if (cpu_writer->mnt == mnt)
-               return;
-       __clear_mnt_count(cpu_writer);
-       cpu_writer->mnt = mnt;
+       return count;
+#else
+       return mnt->mnt_writers;
+#endif
 }
 
 /*
@@ -253,74 +332,74 @@ static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
 int mnt_want_write(struct vfsmount *mnt)
 {
        int ret = 0;
-       struct mnt_writer *cpu_writer;
 
-       cpu_writer = &get_cpu_var(mnt_writers);
-       spin_lock(&cpu_writer->lock);
+       preempt_disable();
+       mnt_inc_writers(mnt);
+       /*
+        * The store from mnt_inc_writers() must be visible before we
+        * enter the MNT_WRITE_HOLD loop below, so that the slowpath can
+        * see our incremented count once it has set MNT_WRITE_HOLD.
+        */
+       smp_mb();
+       while (mnt->mnt_flags & MNT_WRITE_HOLD)
+               cpu_relax();
+       /*
+        * After the slowpath clears MNT_WRITE_HOLD, MNT_READONLY will have
+        * been set to match the remount's outcome, so we must not load it
+        * until MNT_WRITE_HOLD is cleared.
+        */
+       smp_rmb();
        if (__mnt_is_readonly(mnt)) {
+               mnt_dec_writers(mnt);
                ret = -EROFS;
                goto out;
        }
-       use_cpu_writer_for_mount(cpu_writer, mnt);
-       cpu_writer->count++;
 out:
-       spin_unlock(&cpu_writer->lock);
-       put_cpu_var(mnt_writers);
+       preempt_enable();
        return ret;
 }
 EXPORT_SYMBOL_GPL(mnt_want_write);
 
-static void lock_mnt_writers(void)
-{
-       int cpu;
-       struct mnt_writer *cpu_writer;
-
-       for_each_possible_cpu(cpu) {
-               cpu_writer = &per_cpu(mnt_writers, cpu);
-               spin_lock(&cpu_writer->lock);
-               __clear_mnt_count(cpu_writer);
-               cpu_writer->mnt = NULL;
-       }
+/**
+ * mnt_clone_write - get write access to a mount
+ * @mnt: the mount on which to take a write
+ *
+ * This is effectively like mnt_want_write, except
+ * it must only be used to take an extra write reference
+ * on a mountpoint that we already know has a write reference
+ * on it. This allows some optimisation.
+ *
+ * After finished, mnt_drop_write must be called as usual to
+ * drop the reference.
+ */
+int mnt_clone_write(struct vfsmount *mnt)
+{
+       /* superblock may be r/o */
+       if (__mnt_is_readonly(mnt))
+               return -EROFS;
+       preempt_disable();
+       mnt_inc_writers(mnt);
+       preempt_enable();
+       return 0;
 }
+EXPORT_SYMBOL_GPL(mnt_clone_write);
 
-/*
- * These per-cpu write counts are not guaranteed to have
- * matched increments and decrements on any given cpu.
- * A file open()ed for write on one cpu and close()d on
- * another cpu will imbalance this count.  Make sure it
- * does not get too far out of whack.
+/**
+ * mnt_want_write_file - get write access to a file's mount
+ * @file: the file whose mount we should take a write on
+ *
+ * This is like mnt_want_write, but it takes a file and can
+ * do some optimisations if the file is open for write already
  */
-static void handle_write_count_underflow(struct vfsmount *mnt)
+int mnt_want_write_file(struct file *file)
 {
-       if (atomic_read(&mnt->__mnt_writers) >=
-           MNT_WRITER_UNDERFLOW_LIMIT)
-               return;
-       /*
-        * It isn't necessary to hold all of the locks
-        * at the same time, but doing it this way makes
-        * us share a lot more code.
-        */
-       lock_mnt_writers();
-       /*
-        * vfsmount_lock is for mnt_flags.
-        */
-       spin_lock(&vfsmount_lock);
-       /*
-        * If coalescing the per-cpu writer counts did not
-        * get us back to a positive writer count, we have
-        * a bug.
-        */
-       if ((atomic_read(&mnt->__mnt_writers) < 0) &&
-           !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
-               WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
-                               "count: %d\n",
-                       mnt, atomic_read(&mnt->__mnt_writers));
-               /* use the flag to keep the dmesg spam down */
-               mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
-       }
-       spin_unlock(&vfsmount_lock);
-       unlock_mnt_writers();
+       struct inode *inode = file->f_dentry->d_inode;
+       if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
+               return mnt_want_write(file->f_path.mnt);
+       else
+               return mnt_clone_write(file->f_path.mnt);
 }
+EXPORT_SYMBOL_GPL(mnt_want_write_file);
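
The usual caller pattern for this API (a hedged sketch; do_modify() is a
hypothetical filesystem-modifying helper): take write access across the
operation and drop it on every exit path.

        static int example_modify(struct path *path)
        {
                int err = mnt_want_write(path->mnt);
                if (err)
                        return err;     /* -EROFS if the mount is read-only */
                err = do_modify(path);  /* hypothetical helper */
                mnt_drop_write(path->mnt);
                return err;
        }
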
 
 /**
  * mnt_drop_write - give up write access to a mount
@@ -332,37 +411,9 @@ static void handle_write_count_underflow(struct vfsmount *mnt)
  */
 void mnt_drop_write(struct vfsmount *mnt)
 {
-       int must_check_underflow = 0;
-       struct mnt_writer *cpu_writer;
-
-       cpu_writer = &get_cpu_var(mnt_writers);
-       spin_lock(&cpu_writer->lock);
-
-       use_cpu_writer_for_mount(cpu_writer, mnt);
-       if (cpu_writer->count > 0) {
-               cpu_writer->count--;
-       } else {
-               must_check_underflow = 1;
-               atomic_dec(&mnt->__mnt_writers);
-       }
-
-       spin_unlock(&cpu_writer->lock);
-       /*
-        * Logically, we could call this each time,
-        * but the __mnt_writers cacheline tends to
-        * be cold, and makes this expensive.
-        */
-       if (must_check_underflow)
-               handle_write_count_underflow(mnt);
-       /*
-        * This could be done right after the spinlock
-        * is taken because the spinlock keeps us on
-        * the cpu, and disables preemption.  However,
-        * putting it here bounds the amount that
-        * __mnt_writers can underflow.  Without it,
-        * we could theoretically wrap __mnt_writers.
-        */
-       put_cpu_var(mnt_writers);
+       preempt_disable();
+       mnt_dec_writers(mnt);
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(mnt_drop_write);
 
@@ -370,32 +421,49 @@ static int mnt_make_readonly(struct vfsmount *mnt)
 {
        int ret = 0;
 
-       lock_mnt_writers();
+       br_write_lock(vfsmount_lock);
+       mnt->mnt_flags |= MNT_WRITE_HOLD;
        /*
-        * With all the locks held, this value is stable
+        * After storing MNT_WRITE_HOLD, we'll read the per-cpu writer
+        * counters; that store must be visible before we read them.
         */
-       if (atomic_read(&mnt->__mnt_writers) > 0) {
-               ret = -EBUSY;
-               goto out;
-       }
+       smp_mb();
+
        /*
-        * nobody can do a successful mnt_want_write() with all
-        * of the counts in MNT_DENIED_WRITE and the locks held.
+        * With writers on hold, if this value is zero, then there are
+        * definitely no active writers (although held writers may subsequently
+        * increment the count, they'll have to wait, and decrement it after
+        * seeing MNT_READONLY).
+        *
+        * It is OK for the counter to be incremented on one CPU and
+        * decremented on another: the sum still adds up correctly. The
+        * danger lies in the summing itself: if we read one CPU's counter
+        * before an increment lands there, but read another CPU's counter
+        * after the matching decrement has landed, we would see more
+        * decrements than increments.
+        * MNT_WRITE_HOLD protects against this scenario, because
+        * mnt_want_write first increments count, then smp_mb, then spins on
+        * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
+        * we're counting up here.
         */
-       spin_lock(&vfsmount_lock);
-       if (!ret)
+       if (mnt_get_writers(mnt) > 0)
+               ret = -EBUSY;
+       else
                mnt->mnt_flags |= MNT_READONLY;
-       spin_unlock(&vfsmount_lock);
-out:
-       unlock_mnt_writers();
+       /*
+        * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
+        * that become unheld will see MNT_READONLY.
+        */
+       smp_wmb();
+       mnt->mnt_flags &= ~MNT_WRITE_HOLD;
+       br_write_unlock(vfsmount_lock);
        return ret;
 }
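
The two sides of the MNT_WRITE_HOLD handshake, reduced to a user-space
C11 sketch (names and return values are illustrative, not the kernel's;
the single atomic writer count stands in for the per-cpu sum):

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_uint writers;
        static atomic_bool write_hold, readonly;

        static int want_write(void)             /* mnt_want_write() */
        {
                atomic_fetch_add(&writers, 1);  /* mnt_inc_writers() */
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                while (atomic_load(&write_hold))        /* MNT_WRITE_HOLD */
                        ;                       /* cpu_relax() */
                atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
                if (atomic_load(&readonly)) {
                        atomic_fetch_sub(&writers, 1);
                        return -1;              /* -EROFS */
                }
                return 0;
        }

        static int make_readonly(void)          /* mnt_make_readonly() */
        {
                int ret = 0;
                atomic_store(&write_hold, true);        /* MNT_WRITE_HOLD */
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                if (atomic_load(&writers) > 0)  /* mnt_get_writers() */
                        ret = -1;               /* -EBUSY */
                else
                        atomic_store(&readonly, true);  /* MNT_READONLY */
                atomic_thread_fence(memory_order_release);      /* smp_wmb() */
                atomic_store(&write_hold, false);
                return ret;
        }

Either want_write() sees write_hold set and waits until the readonly
decision is published, or make_readonly() sees the incremented writer
count and backs off with -EBUSY; the fences close the window in which
both could miss each other.
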
 
 static void __mnt_unmake_readonly(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_flags &= ~MNT_READONLY;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
@@ -410,12 +478,16 @@ void free_vfsmnt(struct vfsmount *mnt)
 {
        kfree(mnt->mnt_devname);
        mnt_free_id(mnt);
+#ifdef CONFIG_SMP
+       free_percpu(mnt->mnt_pcp);
+#endif
        kmem_cache_free(mnt_cache, mnt);
 }
 
 /*
  * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set, return the first mount; else return the last mount.
+ * vfsmount_lock must be held for read or write.
  */
 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
                              int dir)
@@ -442,13 +514,14 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
  * lookup_mnt increments the ref count before returning
  * the vfsmount struct.
  */
-struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+struct vfsmount *lookup_mnt(struct path *path)
 {
        struct vfsmount *child_mnt;
-       spin_lock(&vfsmount_lock);
-       if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
+
+       br_read_lock(vfsmount_lock);
+       if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
                mntget(child_mnt);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return child_mnt;
 }
 
@@ -457,6 +530,9 @@ static inline int check_mnt(struct vfsmount *mnt)
        return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns) {
@@ -465,6 +541,9 @@ static void touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void __touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns && ns->event != event) {
@@ -473,6 +552,30 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * Clear dentry's mounted state if it has no remaining mounts.
+ * vfsmount_lock must be held for write.
+ */
+static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
+{
+       unsigned u;
+
+       for (u = 0; u < HASH_SIZE; u++) {
+               struct vfsmount *p;
+
+               list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
+                       if (p->mnt_mountpoint == dentry)
+                               return;
+               }
+       }
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags &= ~DCACHE_MOUNTED;
+       spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
        old_path->dentry = mnt->mnt_mountpoint;
@@ -481,17 +584,25 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
        mnt->mnt_mountpoint = mnt->mnt_root;
        list_del_init(&mnt->mnt_child);
        list_del_init(&mnt->mnt_hash);
-       old_path->dentry->d_mounted--;
+       dentry_reset_mounted(old_path->mnt, old_path->dentry);
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
                        struct vfsmount *child_mnt)
 {
        child_mnt->mnt_parent = mntget(mnt);
        child_mnt->mnt_mountpoint = dget(dentry);
-       dentry->d_mounted++;
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags |= DCACHE_MOUNTED;
+       spin_unlock(&dentry->d_lock);
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
        mnt_set_mountpoint(path->mnt, path->dentry, mnt);
@@ -500,8 +611,23 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path)
        list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
 }
 
+static inline void __mnt_make_longterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+       atomic_inc(&mnt->mnt_longterm);
+#endif
+}
+
+/* needs vfsmount lock for write */
+static inline void __mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+       atomic_dec(&mnt->mnt_longterm);
+#endif
+}
+
 /*
- * the caller must hold vfsmount_lock
+ * vfsmount lock must be held for write
  */
 static void commit_tree(struct vfsmount *mnt)
 {
@@ -513,8 +639,11 @@ static void commit_tree(struct vfsmount *mnt)
        BUG_ON(parent == mnt);
 
        list_add_tail(&head, &mnt->mnt_list);
-       list_for_each_entry(m, &head, mnt_list)
+       list_for_each_entry(m, &head, mnt_list) {
                m->mnt_ns = n;
+               __mnt_make_longterm(m);
+       }
+
        list_splice(&head, n->list.prev);
 
        list_add_tail(&mnt->mnt_hash, mount_hashtable +
@@ -567,7 +696,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                                goto out_free;
                }
 
-               mnt->mnt_flags = old->mnt_flags;
+               mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
                atomic_inc(&sb->s_active);
                mnt->mnt_sb = sb;
                mnt->mnt_root = dget(root);
@@ -579,7 +708,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                        mnt->mnt_master = old;
                        CLEAR_MNT_SHARED(mnt);
                } else if (!(flag & CL_PRIVATE)) {
-                       if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+                       if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
                                list_add(&mnt->mnt_share, &old->mnt_share);
                        if (IS_MNT_SLAVE(old))
                                list_add(&mnt->mnt_slave, &old->mnt_slave);
@@ -602,84 +731,98 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
        return NULL;
 }
 
-static inline void __mntput(struct vfsmount *mnt)
+static inline void mntfree(struct vfsmount *mnt)
 {
-       int cpu;
        struct super_block *sb = mnt->mnt_sb;
-       /*
-        * We don't have to hold all of the locks at the
-        * same time here because we know that we're the
-        * last reference to mnt and that no new writers
-        * can come in.
-        */
-       for_each_possible_cpu(cpu) {
-               struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
-               spin_lock(&cpu_writer->lock);
-               if (cpu_writer->mnt != mnt) {
-                       spin_unlock(&cpu_writer->lock);
-                       continue;
-               }
-               atomic_add(cpu_writer->count, &mnt->__mnt_writers);
-               cpu_writer->count = 0;
-               /*
-                * Might as well do this so that no one
-                * ever sees the pointer and expects
-                * it to be valid.
-                */
-               cpu_writer->mnt = NULL;
-               spin_unlock(&cpu_writer->lock);
-       }
+
        /*
         * This probably indicates that somebody messed
         * up a mnt_want/drop_write() pair.  If this
         * happens, the filesystem was probably unable
         * to make r/w->r/o transitions.
         */
-       WARN_ON(atomic_read(&mnt->__mnt_writers));
+       /*
+        * The locking used to deal with mnt_count decrement provides barriers,
+        * so mnt_get_writers() below is safe.
+        */
+       WARN_ON(mnt_get_writers(mnt));
+       fsnotify_vfsmount_delete(mnt);
        dput(mnt->mnt_root);
        free_vfsmnt(mnt);
        deactivate_super(sb);
 }
 
-void mntput_no_expire(struct vfsmount *mnt)
+static void mntput_no_expire(struct vfsmount *mnt)
 {
-repeat:
-       if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
-               if (likely(!mnt->mnt_pinned)) {
-                       spin_unlock(&vfsmount_lock);
-                       __mntput(mnt);
-                       return;
-               }
-               atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+put_again:
+#ifdef CONFIG_SMP
+       br_read_lock(vfsmount_lock);
+       if (likely(atomic_read(&mnt->mnt_longterm))) {
+               mnt_dec_count(mnt);
+               br_read_unlock(vfsmount_lock);
+               return;
+       }
+       br_read_unlock(vfsmount_lock);
+
+       br_write_lock(vfsmount_lock);
+       mnt_dec_count(mnt);
+       if (mnt_get_count(mnt)) {
+               br_write_unlock(vfsmount_lock);
+               return;
+       }
+#else
+       mnt_dec_count(mnt);
+       if (likely(mnt_get_count(mnt)))
+               return;
+       br_write_lock(vfsmount_lock);
+#endif
+       if (unlikely(mnt->mnt_pinned)) {
+               mnt_add_count(mnt, mnt->mnt_pinned + 1);
                mnt->mnt_pinned = 0;
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                acct_auto_close_mnt(mnt);
-               security_sb_umount_close(mnt);
-               goto repeat;
+               goto put_again;
        }
+       br_write_unlock(vfsmount_lock);
+       mntfree(mnt);
 }
 
-EXPORT_SYMBOL(mntput_no_expire);
+void mntput(struct vfsmount *mnt)
+{
+       if (mnt) {
+               /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+               if (unlikely(mnt->mnt_expiry_mark))
+                       mnt->mnt_expiry_mark = 0;
+               mntput_no_expire(mnt);
+       }
+}
+EXPORT_SYMBOL(mntput);
+
+struct vfsmount *mntget(struct vfsmount *mnt)
+{
+       if (mnt)
+               mnt_inc_count(mnt);
+       return mnt;
+}
+EXPORT_SYMBOL(mntget);
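
The two paths above, traced (illustrative):

        /* mount still attached in a namespace: mnt_longterm > 0 */
        mntput(mnt);    /* read lock + per-cpu decrement, O(1), no summing */

        /* final put of a detached mount: the longterm check fails */
        mntput(mnt);    /* retries under the write lock; mnt_get_count()
                           sums to zero and mntfree() tears the mount down */
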
 
 void mnt_pin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_pinned++;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
-
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        if (mnt->mnt_pinned) {
-               atomic_inc(&mnt->mnt_count);
+               mnt_inc_count(mnt);
                mnt->mnt_pinned--;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
-
 EXPORT_SYMBOL(mnt_unpin);
 
 static inline void mangle(struct seq_file *m, const char *s)
@@ -763,6 +906,21 @@ static void m_stop(struct seq_file *m, void *v)
        up_read(&namespace_sem);
 }
 
+int mnt_had_events(struct proc_mounts *p)
+{
+       struct mnt_namespace *ns = p->ns;
+       int res = 0;
+
+       br_read_lock(vfsmount_lock);
+       if (p->event != ns->event) {
+               p->event = ns->event;
+               res = 1;
+       }
+       br_read_unlock(vfsmount_lock);
+
+       return res;
+}
+
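
This counter is what lets user space watch the mount table (illustrative;
poll on /proc/mounts reports POLLERR|POLLPRI when the namespace changed):

        #include <fcntl.h>
        #include <poll.h>

        int fd = open("/proc/self/mounts", O_RDONLY);
        struct pollfd pfd = { .fd = fd, .events = POLLPRI };
        poll(&pfd, 1, -1);      /* wakes after touch_mnt_namespace()
                                   bumps ns->event */
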
 struct proc_fs_info {
        int flag;
        const char *str;
@@ -795,7 +953,6 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
                { MNT_NOATIME, ",noatime" },
                { MNT_NODIRATIME, ",nodiratime" },
                { MNT_RELATIME, ",relatime" },
-               { MNT_STRICTATIME, ",strictatime" },
                { 0, NULL }
        };
        const struct proc_fs_info *fs_infop;
@@ -960,12 +1117,13 @@ int may_umount_tree(struct vfsmount *mnt)
        int minimum_refs = 0;
        struct vfsmount *p;
 
-       spin_lock(&vfsmount_lock);
+       /* write lock needed for mnt_get_count */
+       br_write_lock(vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
-               actual_refs += atomic_read(&p->mnt_count);
+               actual_refs += mnt_get_count(p);
                minimum_refs += 2;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
        if (actual_refs > minimum_refs)
                return 0;
@@ -991,10 +1149,12 @@ EXPORT_SYMBOL(may_umount_tree);
 int may_umount(struct vfsmount *mnt)
 {
        int ret = 1;
-       spin_lock(&vfsmount_lock);
+       down_read(&namespace_sem);
+       br_write_lock(vfsmount_lock);
        if (propagate_mount_busy(mnt, 2))
                ret = 0;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
+       up_read(&namespace_sem);
        return ret;
 }
 
@@ -1009,13 +1169,14 @@ void release_mounts(struct list_head *head)
                if (mnt->mnt_parent != mnt) {
                        struct dentry *dentry;
                        struct vfsmount *m;
-                       spin_lock(&vfsmount_lock);
+
+                       br_write_lock(vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt_root;
                        mnt->mnt_parent = mnt;
                        m->mnt_ghosts--;
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                        dput(dentry);
                        mntput(m);
                }
@@ -1023,28 +1184,35 @@ void release_mounts(struct list_head *head)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ * namespace_sem must be held for write
+ */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
+       LIST_HEAD(tmp_list);
        struct vfsmount *p;
 
        for (p = mnt; p; p = next_mnt(p, mnt))
-               list_move(&p->mnt_hash, kill);
+               list_move(&p->mnt_hash, &tmp_list);
 
        if (propagate)
-               propagate_umount(kill);
+               propagate_umount(&tmp_list);
 
-       list_for_each_entry(p, kill, mnt_hash) {
+       list_for_each_entry(p, &tmp_list, mnt_hash) {
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
                p->mnt_ns = NULL;
+               __mnt_make_shortterm(p);
                list_del_init(&p->mnt_child);
                if (p->mnt_parent != p) {
                        p->mnt_parent->mnt_ghosts++;
-                       p->mnt_mountpoint->d_mounted--;
+                       dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
                }
                change_mnt_propagation(p, MS_PRIVATE);
        }
+       list_splice(&tmp_list, kill);
 }
 
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
@@ -1070,8 +1238,16 @@ static int do_umount(struct vfsmount *mnt, int flags)
                    flags & (MNT_FORCE | MNT_DETACH))
                        return -EINVAL;
 
-               if (atomic_read(&mnt->mnt_count) != 2)
+               /*
+                * We probably don't strictly need the lock here if we examined
+                * all the race cases, but it's a slowpath.
+                */
+               br_write_lock(vfsmount_lock);
+               if (mnt_get_count(mnt) != 2) {
+                       br_write_unlock(vfsmount_lock);
                        return -EBUSY;
+               }
+               br_write_unlock(vfsmount_lock);
 
                if (!xchg(&mnt->mnt_expiry_mark, 1))
                        return -EAGAIN;
@@ -1106,17 +1282,14 @@ static int do_umount(struct vfsmount *mnt, int flags)
                 * we just try to remount it readonly.
                 */
                down_write(&sb->s_umount);
-               if (!(sb->s_flags & MS_RDONLY)) {
-                       lock_kernel();
+               if (!(sb->s_flags & MS_RDONLY))
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
-                       unlock_kernel();
-               }
                up_write(&sb->s_umount);
                return retval;
        }
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        event++;
 
        if (!(flags & MNT_DETACH))
@@ -1128,9 +1301,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
-       spin_unlock(&vfsmount_lock);
-       if (retval)
-               security_sb_umount_busy(mnt);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
@@ -1148,8 +1319,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 {
        struct path path;
        int retval;
+       int lookup_flags = 0;
+
+       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+               return -EINVAL;
 
-       retval = user_path(name, &path);
+       if (!(flags & UMOUNT_NOFOLLOW))
+               lookup_flags |= LOOKUP_FOLLOW;
+
+       retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
        if (retval)
                goto out;
        retval = -EINVAL;
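
A user-space sketch of the UMOUNT_NOFOLLOW flag validated above (the
fallback define mirrors the kernel header value):

        #include <sys/mount.h>
        #include <stdio.h>

        #ifndef UMOUNT_NOFOLLOW
        #define UMOUNT_NOFOLLOW 0x00000008      /* don't follow symlinks */
        #endif

        /* refuse to chase a symlink planted at the mount point */
        if (umount2("/mnt/untrusted", UMOUNT_NOFOLLOW) < 0)
                perror("umount2");
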
@@ -1235,19 +1413,19 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                        q = clone_mnt(p, p->mnt_root, flag);
                        if (!q)
                                goto Enomem;
-                       spin_lock(&vfsmount_lock);
+                       br_write_lock(vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &path);
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                }
        }
        return res;
 Enomem:
        if (res) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                umount_tree(res, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
@@ -1266,13 +1444,29 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        LIST_HEAD(umount_list);
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        umount_tree(mnt, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
 }
 
+int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+                  struct vfsmount *root)
+{
+       struct vfsmount *mnt;
+       int res = f(root, arg);
+       if (res)
+               return res;
+       list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
+               res = f(mnt, arg);
+               if (res)
+                       return res;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iterate_mounts);
+
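
A hedged usage sketch for iterate_mounts() (the callback and the
collected_root variable are illustrative; the tree must be private or
otherwise stable while it is walked):

        static int count_one(struct vfsmount *mnt, void *arg)
        {
                (*(int *)arg)++;
                return 0;       /* non-zero would abort the walk */
        }

        ...
        int n = 0;
        iterate_mounts(count_one, &n, collected_root);  /* e.g. a copy_tree() result */
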
 static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
 {
        struct vfsmount *p;
@@ -1381,12 +1575,12 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
        if (err)
                goto out_cleanup_ids;
 
+       br_write_lock(vfsmount_lock);
+
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                        set_mnt_shared(p);
        }
-
-       spin_lock(&vfsmount_lock);
        if (parent_path) {
                detach_mnt(source_mnt, parent_path);
                attach_mnt(source_mnt, path);
@@ -1400,7 +1594,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
+
        return 0;
 
  out_cleanup_ids:
@@ -1422,31 +1617,41 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
 
        err = -ENOENT;
        mutex_lock(&path->dentry->d_inode->i_mutex);
-       if (IS_DEADDIR(path->dentry->d_inode))
+       if (cant_mount(path->dentry))
                goto out_unlock;
 
-       err = security_sb_check_sb(mnt, path);
-       if (err)
-               goto out_unlock;
-
-       err = -ENOENT;
-       if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
+       if (!d_unlinked(path->dentry))
                err = attach_recursive_mnt(mnt, path, NULL);
 out_unlock:
        mutex_unlock(&path->dentry->d_inode->i_mutex);
-       if (!err)
-               security_sb_post_addmount(mnt, path);
        return err;
 }
 
 /*
+ * Sanity check the flags to change_mnt_propagation.
+ */
+
+static int flags_to_propagation_type(int flags)
+{
+       int type = flags & ~MS_REC;
+
+       /* Fail if any non-propagation flags are set */
+       if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+               return 0;
+       /* Only one propagation flag should be set */
+       if (!is_power_of_2(type))
+               return 0;
+       return type;
+}
+
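
Spot checks of the validation above (illustrative):

        flags_to_propagation_type(MS_SHARED | MS_REC)   /* -> MS_SHARED */
        flags_to_propagation_type(MS_SHARED | MS_SLAVE) /* -> 0: two type bits */
        flags_to_propagation_type(MS_REC)               /* -> 0: no type bit */

A return value of 0 makes do_change_type() below fail with -EINVAL.
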
+/*
  * recursively change the type of the mountpoint.
  */
 static int do_change_type(struct path *path, int flag)
 {
        struct vfsmount *m, *mnt = path->mnt;
        int recurse = flag & MS_REC;
-       int type = flag & ~MS_REC;
+       int type;
        int err = 0;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1455,6 +1660,10 @@ static int do_change_type(struct path *path, int flag)
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       type = flags_to_propagation_type(flag);
+       if (!type)
+               return -EINVAL;
+
        down_write(&namespace_sem);
        if (type == MS_SHARED) {
                err = invent_group_ids(mnt, recurse);
@@ -1462,10 +1671,10 @@ static int do_change_type(struct path *path, int flag)
                        goto out_unlock;
        }
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
  out_unlock:
        up_write(&namespace_sem);
@@ -1509,9 +1718,10 @@ static int do_loopback(struct path *path, char *old_name,
        err = graft_tree(mnt, path);
        if (err) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+
+               br_write_lock(vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
 
@@ -1563,15 +1773,17 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                err = change_mount_flags(path->mnt, flags);
        else
                err = do_remount_sb(sb, flags, data, 0);
-       if (!err)
+       if (!err) {
+               br_write_lock(vfsmount_lock);
+               mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
                path->mnt->mnt_flags = mnt_flags;
+               br_write_unlock(vfsmount_lock);
+       }
        up_write(&sb->s_umount);
        if (!err) {
-               security_sb_post_remount(path->mnt, flags, data);
-
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                touch_mnt_namespace(path->mnt->mnt_ns);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
        }
        return err;
 }
@@ -1600,19 +1812,20 @@ static int do_move_mount(struct path *path, char *old_name)
                return err;
 
        down_write(&namespace_sem);
-       while (d_mountpoint(path->dentry) &&
-              follow_down(&path->mnt, &path->dentry))
-               ;
+       err = follow_down(path, true);
+       if (err < 0)
+               goto out;
+
        err = -EINVAL;
        if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
                goto out;
 
        err = -ENOENT;
        mutex_lock(&path->dentry->d_inode->i_mutex);
-       if (IS_DEADDIR(path->dentry->d_inode))
+       if (cant_mount(path->dentry))
                goto out1;
 
-       if (!IS_ROOT(path->dentry) && d_unhashed(path->dentry))
+       if (d_unlinked(path->dentry))
                goto out1;
 
        err = -EINVAL;
@@ -1660,6 +1873,8 @@ out:
        return err;
 }
 
+static int do_add_mount(struct vfsmount *, struct path *, int);
+
 /*
  * create a new mount for userspace and request it to be added into the
  * namespace's tree
@@ -1668,8 +1883,9 @@ static int do_new_mount(struct path *path, char *type, int flags,
                        int mnt_flags, char *name, void *data)
 {
        struct vfsmount *mnt;
+       int err;
 
-       if (!type || !memchr(type, 0, PAGE_SIZE))
+       if (!type)
                return -EINVAL;
 
        /* we need capabilities... */
@@ -1680,23 +1896,58 @@ static int do_new_mount(struct path *path, char *type, int flags,
        if (IS_ERR(mnt))
                return PTR_ERR(mnt);
 
-       return do_add_mount(mnt, path, mnt_flags, NULL);
+       err = do_add_mount(mnt, path, mnt_flags);
+       if (err)
+               mntput(mnt);
+       return err;
+}
+
+int finish_automount(struct vfsmount *m, struct path *path)
+{
+       int err;
+       /* The new mount record should have at least 2 refs to prevent it
+        * from being expired before we get a chance to add it.
+        */
+       BUG_ON(mnt_get_count(m) < 2);
+
+       if (m->mnt_sb == path->mnt->mnt_sb &&
+           m->mnt_root == path->dentry) {
+               err = -ELOOP;
+               goto fail;
+       }
+
+       err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+       if (!err)
+               return 0;
+fail:
+       /* remove m from any expiration list it may be on */
+       if (!list_empty(&m->mnt_expire)) {
+               down_write(&namespace_sem);
+               br_write_lock(vfsmount_lock);
+               list_del_init(&m->mnt_expire);
+               br_write_unlock(vfsmount_lock);
+               up_write(&namespace_sem);
+       }
+       mntput(m);
+       mntput(m);
+       return err;
 }
 
 /*
  * add a mount into a namespace's mount tree
- * - provide the option of adding the new mount to an expiration list
  */
-int do_add_mount(struct vfsmount *newmnt, struct path *path,
-                int mnt_flags, struct list_head *fslist)
+static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
 {
        int err;
 
+       mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
+
        down_write(&namespace_sem);
        /* Something was mounted here while we slept */
-       while (d_mountpoint(path->dentry) &&
-              follow_down(&path->mnt, &path->dentry))
-               ;
+       err = follow_down(path, true);
+       if (err < 0)
+               goto unlock;
+
        err = -EINVAL;
        if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
                goto unlock;
@@ -1712,22 +1963,29 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
                goto unlock;
 
        newmnt->mnt_flags = mnt_flags;
-       if ((err = graft_tree(newmnt, path)))
-               goto unlock;
-
-       if (fslist) /* add to the specified expiration list */
-               list_add_tail(&newmnt->mnt_expire, fslist);
-
-       up_write(&namespace_sem);
-       return 0;
+       err = graft_tree(newmnt, path);
 
 unlock:
        up_write(&namespace_sem);
-       mntput(newmnt);
        return err;
 }
 
-EXPORT_SYMBOL_GPL(do_add_mount);
+/**
+ * mnt_set_expiry - Put a mount on an expiration list
+ * @mnt: The mount to list.
+ * @expiry_list: The list to add the mount to.
+ */
+void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
+{
+       down_write(&namespace_sem);
+       br_write_lock(vfsmount_lock);
+
+       list_add_tail(&mnt->mnt_expire, expiry_list);
+
+       br_write_unlock(vfsmount_lock);
+       up_write(&namespace_sem);
+}
+EXPORT_SYMBOL(mnt_set_expiry);
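
How an automounting filesystem would pair this with the expiry machinery
below (a sketch; the list and the trigger are illustrative):

        static LIST_HEAD(example_expiry_list);

        /* after the new mount has been attached: */
        mnt_set_expiry(newmnt, &example_expiry_list);

        /* later, from a periodic timer or workqueue: */
        mark_mounts_for_expiry(&example_expiry_list);   /* first pass marks,
                                                           second pass reaps */
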
 
 /*
  * process a list of expirable mountpoints with the intent of discarding any
@@ -1744,7 +2002,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                return;
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
@@ -1763,7 +2021,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                touch_mnt_namespace(mnt->mnt_ns);
                umount_tree(mnt, 1, &umounts);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
 
        release_mounts(&umounts);
@@ -1820,6 +2078,8 @@ resume:
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
+ *
+ * vfsmount_lock must be held for write
  */
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
@@ -1898,6 +2158,23 @@ int copy_mount_options(const void __user * data, unsigned long *where)
        return 0;
 }
 
+int copy_mount_string(const void __user *data, char **where)
+{
+       char *tmp;
+
+       if (!data) {
+               *where = NULL;
+               return 0;
+       }
+
+       tmp = strndup_user(data, PAGE_SIZE);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+
+       *where = tmp;
+       return 0;
+}
+
 /*
  * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (i.e. read-only, no-dev, no-suid, etc.).
@@ -1927,12 +2204,20 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
 
        if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
                return -EINVAL;
-       if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
-               return -EINVAL;
 
        if (data_page)
                ((char *)data_page)[PAGE_SIZE - 1] = 0;
 
+       /* ... and get the mountpoint */
+       retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
+       if (retval)
+               return retval;
+
+       retval = security_sb_mount(dev_name, &path,
+                                  type_page, flags, data_page);
+       if (retval)
+               goto dput_out;
+
        /* Default to relatime unless overridden */
        if (!(flags & MS_NOATIME))
                mnt_flags |= MNT_RELATIME;
@@ -1953,20 +2238,10 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
        if (flags & MS_RDONLY)
                mnt_flags |= MNT_READONLY;
 
-       flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
+       flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
                   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
                   MS_STRICTATIME);
 
-       /* ... and get the mountpoint */
-       retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
-       if (retval)
-               return retval;
-
-       retval = security_sb_mount(dev_name, &path,
-                                  type_page, flags, data_page);
-       if (retval)
-               goto dput_out;
-
        if (flags & MS_REMOUNT)
                retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
                                    data_page);
@@ -1984,6 +2259,37 @@ dput_out:
        return retval;
 }
 
+static struct mnt_namespace *alloc_mnt_ns(void)
+{
+       struct mnt_namespace *new_ns;
+
+       new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
+       if (!new_ns)
+               return ERR_PTR(-ENOMEM);
+       atomic_set(&new_ns->count, 1);
+       new_ns->root = NULL;
+       INIT_LIST_HEAD(&new_ns->list);
+       init_waitqueue_head(&new_ns->poll);
+       new_ns->event = 0;
+       return new_ns;
+}
+
+void mnt_make_longterm(struct vfsmount *mnt)
+{
+       __mnt_make_longterm(mnt);
+}
+
+void mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+       if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
+               return;
+       br_write_lock(vfsmount_lock);
+       atomic_dec(&mnt->mnt_longterm);
+       br_write_unlock(vfsmount_lock);
+#endif
+}
+
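
A worked trace of the atomic_add_unless() fast path above, assuming
mnt_longterm starts at 2 (values illustrative):

        mnt_make_shortterm(mnt);        /* 2 -> 1, no lock taken */
        mnt_make_shortterm(mnt);        /* sees 1: add_unless refuses, so the
                                           1 -> 0 drop happens under the write
                                           lock instead */

Serializing the final transition against vfsmount_lock keeps
mntput_no_expire()'s read-locked fast path from trusting a stale
non-zero mnt_longterm while the mount goes short-term.
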
 /*
  * Allocate a new namespace structure and populate it with contents
  * copied from the namespace of the passed in task structure.
@@ -1995,14 +2301,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
        struct vfsmount *p, *q;
 
-       new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
-       if (!new_ns)
-               return ERR_PTR(-ENOMEM);
-
-       atomic_set(&new_ns->count, 1);
-       INIT_LIST_HEAD(&new_ns->list);
-       init_waitqueue_head(&new_ns->poll);
-       new_ns->event = 0;
+       new_ns = alloc_mnt_ns();
+       if (IS_ERR(new_ns))
+               return new_ns;
 
        down_write(&namespace_sem);
        /* First pass: copy the tree topology */
@@ -2013,9 +2314,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                kfree(new_ns);
                return ERR_PTR(-ENOMEM);
        }
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2026,14 +2327,19 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        q = new_ns->root;
        while (p) {
                q->mnt_ns = new_ns;
+               __mnt_make_longterm(q);
                if (fs) {
                        if (p == fs->root.mnt) {
-                               rootmnt = p;
                                fs->root.mnt = mntget(q);
+                               __mnt_make_longterm(q);
+                               mnt_make_shortterm(p);
+                               rootmnt = p;
                        }
                        if (p == fs->pwd.mnt) {
-                               pwdmnt = p;
                                fs->pwd.mnt = mntget(q);
+                               __mnt_make_longterm(q);
+                               mnt_make_shortterm(p);
+                               pwdmnt = p;
                        }
                }
                p = next_mnt(p, mnt_ns->root);
@@ -2066,45 +2372,64 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
        return new_ns;
 }
 
+/**
+ * create_mnt_ns - creates a private namespace and adds a root filesystem
+ * @mnt: pointer to the new root filesystem mountpoint
+ */
+struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+{
+       struct mnt_namespace *new_ns;
+
+       new_ns = alloc_mnt_ns();
+       if (!IS_ERR(new_ns)) {
+               mnt->mnt_ns = new_ns;
+               __mnt_make_longterm(mnt);
+               new_ns->root = mnt;
+               list_add(&new_ns->list, &new_ns->root->mnt_list);
+       }
+       return new_ns;
+}
+EXPORT_SYMBOL(create_mnt_ns);
+
 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
                char __user *, type, unsigned long, flags, void __user *, data)
 {
-       int retval;
+       int ret;
+       char *kernel_type;
+       char *kernel_dir;
+       char *kernel_dev;
        unsigned long data_page;
-       unsigned long type_page;
-       unsigned long dev_page;
-       char *dir_page;
 
-       retval = copy_mount_options(type, &type_page);
-       if (retval < 0)
-               return retval;
+       ret = copy_mount_string(type, &kernel_type);
+       if (ret < 0)
+               goto out_type;
 
-       dir_page = getname(dir_name);
-       retval = PTR_ERR(dir_page);
-       if (IS_ERR(dir_page))
-               goto out1;
+       kernel_dir = getname(dir_name);
+       if (IS_ERR(kernel_dir)) {
+               ret = PTR_ERR(kernel_dir);
+               goto out_dir;
+       }
 
-       retval = copy_mount_options(dev_name, &dev_page);
-       if (retval < 0)
-               goto out2;
+       ret = copy_mount_string(dev_name, &kernel_dev);
+       if (ret < 0)
+               goto out_dev;
 
-       retval = copy_mount_options(data, &data_page);
-       if (retval < 0)
-               goto out3;
+       ret = copy_mount_options(data, &data_page);
+       if (ret < 0)
+               goto out_data;
 
-       lock_kernel();
-       retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
-                         flags, (void *)data_page);
-       unlock_kernel();
-       free_page(data_page);
+       ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
+               (void *) data_page);
 
-out3:
-       free_page(dev_page);
-out2:
-       putname(dir_page);
-out1:
-       free_page(type_page);
-       return retval;
+       free_page(data_page);
+out_data:
+       kfree(kernel_dev);
+out_dev:
+       putname(kernel_dir);
+out_dir:
+       kfree(kernel_type);
+out_type:
+       return ret;
 }
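
The matching user-space call (illustrative). The type and device strings
are now duplicated with strndup_user(), bounded at PAGE_SIZE, instead of
being copied as whole pages:

        #include <sys/mount.h>

        mount("tmpfs", "/tmp/scratch", "tmpfs",
              MS_NOSUID | MS_NODEV, "size=16m");
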
 
 /*
@@ -2145,9 +2470,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        error = user_path_dir(new_root, &new);
        if (error)
                goto out0;
-       error = -EINVAL;
-       if (!check_mnt(new.mnt))
-               goto out1;
 
        error = user_path_dir(put_old, &old);
        if (error)
@@ -2159,10 +2481,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
                goto out1;
        }
 
-       read_lock(&current->fs->lock);
-       root = current->fs->root;
-       path_get(&current->fs->root);
-       read_unlock(&current->fs->lock);
+       get_fs_root(current->fs, &root);
        down_write(&namespace_sem);
        mutex_lock(&old.dentry->d_inode->i_mutex);
        error = -EINVAL;
@@ -2170,14 +2489,14 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
                IS_MNT_SHARED(new.mnt->mnt_parent) ||
                IS_MNT_SHARED(root.mnt->mnt_parent))
                goto out2;
-       if (!check_mnt(root.mnt))
+       if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
                goto out2;
        error = -ENOENT;
-       if (IS_DEADDIR(new.dentry->d_inode))
+       if (cant_mount(old.dentry))
                goto out2;
-       if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry))
+       if (d_unlinked(new.dentry))
                goto out2;
-       if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry))
+       if (d_unlinked(old.dentry))
                goto out2;
        error = -EBUSY;
        if (new.mnt == root.mnt ||
@@ -2194,19 +2513,19 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
                goto out2; /* not attached */
        /* make sure we can reach put_old from new_root */
        tmp = old.mnt;
-       spin_lock(&vfsmount_lock);
        if (tmp != new.mnt) {
                for (;;) {
                        if (tmp->mnt_parent == tmp)
-                               goto out3; /* already mounted on put_old */
+                               goto out2; /* already mounted on put_old */
                        if (tmp->mnt_parent == new.mnt)
                                break;
                        tmp = tmp->mnt_parent;
                }
                if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
-                       goto out3;
+                       goto out2;
        } else if (!is_subdir(old.dentry, new.dentry))
-               goto out3;
+               goto out2;
+       br_write_lock(vfsmount_lock);
        detach_mnt(new.mnt, &parent_path);
        detach_mnt(root.mnt, &root_parent);
        /* mount old root on put_old */
@@ -2214,9 +2533,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* mount new_root on / */
        attach_mnt(new.mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        chroot_fs_refs(&root, &new);
-       security_sb_post_pivotroot(&root, &new);
+
        error = 0;
        path_put(&root_parent);
        path_put(&parent_path);
@@ -2229,9 +2548,6 @@ out1:
        path_put(&new);
 out0:
        return error;
-out3:
-       spin_unlock(&vfsmount_lock);
-       goto out2;
 }
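
The classic user-space sequence this syscall serves (illustrative; error
handling elided):

        #include <sys/syscall.h>
        #include <sys/mount.h>
        #include <unistd.h>

        /* new_root must be a mount point; put_old must lie beneath it */
        syscall(SYS_pivot_root, "/newroot", "/newroot/old");
        chdir("/");
        umount2("/old", MNT_DETACH);    /* detach the old root tree */
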
 
 static void __init init_mount_tree(void)
@@ -2243,16 +2559,10 @@ static void __init init_mount_tree(void)
        mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
        if (IS_ERR(mnt))
                panic("Can't create rootfs");
-       ns = kmalloc(sizeof(*ns), GFP_KERNEL);
-       if (!ns)
+
+       ns = create_mnt_ns(mnt);
+       if (IS_ERR(ns))
                panic("Can't allocate initial namespace");
-       atomic_set(&ns->count, 1);
-       INIT_LIST_HEAD(&ns->list);
-       init_waitqueue_head(&ns->poll);
-       ns->event = 0;
-       list_add(&mnt->mnt_list, &ns->list);
-       ns->root = mnt;
-       mnt->mnt_ns = ns;
 
        init_task.nsproxy->mnt_ns = ns;
        get_mnt_ns(ns);
@@ -2284,6 +2594,8 @@ void __init mnt_init(void)
        for (u = 0; u < HASH_SIZE; u++)
                INIT_LIST_HEAD(&mount_hashtable[u]);
 
+       br_lock_init(vfsmount_lock);
+
        err = sysfs_init();
        if (err)
                printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2295,17 +2607,18 @@ void __init mnt_init(void)
        init_mount_tree();
 }
 
-void __put_mnt_ns(struct mnt_namespace *ns)
+void put_mnt_ns(struct mnt_namespace *ns)
 {
-       struct vfsmount *root = ns->root;
        LIST_HEAD(umount_list);
-       ns->root = NULL;
-       spin_unlock(&vfsmount_lock);
+
+       if (!atomic_dec_and_test(&ns->count))
+               return;
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
-       umount_tree(root, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
+       umount_tree(ns->root, 0, &umount_list);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(ns);
 }
+EXPORT_SYMBOL(put_mnt_ns);