NOMMU: Lock i_mmap_mutex for access to the VMA prio list
author    David Howells <dhowells@redhat.com>
          Thu, 23 Feb 2012 13:50:35 +0000 (13:50 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 1 Mar 2012 00:31:11 +0000 (16:31 -0800)
commit 918e556ec214ed2f584e4cac56d7b29e4bb6bf27 upstream.

Lock i_mmap_mutex for access to the VMA prio list to prevent concurrent
access.  Currently, certain parts of the mmap handling are protected by
the region mutex, but not all.
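
For readers skimming the change, a minimal sketch of the locking pattern the
patch applies is shown below.  The helper name add_vma_to_mapping is
hypothetical; the fields and functions (i_mmap_mutex, i_mmap,
vma_prio_tree_insert, flush_dcache_mmap_lock/unlock) are those of the
3.2-era prio-tree code that the hunks below modify:

	/* Sketch only: prio-tree updates for a file-backed VMA are now
	 * serialised on the address_space's i_mmap_mutex, which wraps the
	 * NOMMU dcache-flush bracket around the tree manipulation. */
	static void add_vma_to_mapping(struct address_space *mapping,
				       struct vm_area_struct *vma)
	{
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

delete_vma_from_mm() takes the same lock around vma_prio_tree_remove(), and
nommu_shrink_inode_mappings() holds it (nested inside nommu_region_sem)
while walking the prio tree, as the diff below shows.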

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

mm/nommu.c

index b982290..ee7e57e 100644
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* remove from the MM's tree and list */
@@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        down_write(&nommu_region_sem);
+       mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
        /* search for VMAs that fall within the dead zone */
        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED) {
+                       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
@@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                }
        }
 
+       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
        up_write(&nommu_region_sem);
        return 0;
 }