 * The caller must hold down_write(&current->mm->mmap_sem).
*/
-unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff)
{
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ if (unlikely(offset + PAGE_ALIGN(len) < offset))
+ return -EINVAL;
+ if (unlikely(offset & ~PAGE_MASK))
+ return -EINVAL;
+ return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ unsigned long ret;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flag, offset);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+EXPORT_SYMBOL(vm_mmap);
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
tlb_finish_mmu(&tlb, start, end);
}
+static inline void unmap_vma(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_XEN
+ if (unlikely(vma->vm_ops && vma->vm_ops->unmap))
+ vma->vm_ops->unmap(vma);
+#endif
+}
+
/*
* Create a list of vma's touched by the unmap, removing them from the mm's
* vma list as we go..
vma->vm_prev = NULL;
do {
rb_erase(&vma->vm_rb, &mm->mm_rb);
+ unmap_vma(vma);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
}
EXPORT_SYMBOL(do_munmap);
-int vm_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+int vm_munmap(unsigned long start, size_t len)
{
int ret;
+ struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
profile_munmap(addr);
- return vm_munmap(current->mm, addr, len);
+ return vm_munmap(addr, len);
}
static inline void verify_mm_writelocked(struct mm_struct *mm)
arch_exit_mmap(mm);
+#ifdef CONFIG_XEN
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ unmap_vma(vma);
+#endif
+
vma = mm->mmap;
if (!vma) /* Can happen if dup_mmap() received an OOM */
return;