#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+#ifndef CONFIG_XEN
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+#else
+#define VM_SAO 0 /* powerpc-only flag; its bit is reused by VM_FOREIGN */
+#define VM_FOREIGN 0x20000000 /* Has pages belonging to another VM */
+#endif
#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
/*
 * special vmas that are non-mergable, non-mlock()able
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#ifdef CONFIG_XEN
+/* Table of foreign (other-domain) pages backing a VM_FOREIGN area,
+ * indexed by the page's offset within the VMA; reached via
+ * vma->vm_private_data. */
+struct vm_foreign_map {
+ struct page **map;
+};
+#endif
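
Taken together, VM_FOREIGN and vm_foreign_map let a Xen driver publish another domain's pages through an ordinary VMA. Below is a minimal sketch of a driver mmap() handler wiring the two up; the gntdev_priv type, the priv->pages array, and foreign_vm_ops are hypothetical stand-ins for the driver's own grant-handling code, not part of this patch:

	/* Hypothetical sketch: publish grant-mapped foreign pages via a VMA.
	 * Assumes priv->pages[] was populated by an earlier grant-map call. */
	static int foreign_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct gntdev_priv *priv = file->private_data;	/* hypothetical */
		struct vm_foreign_map *foreign_map;

		foreign_map = kmalloc(sizeof(*foreign_map), GFP_KERNEL);
		if (!foreign_map)
			return -ENOMEM;
		foreign_map->map = priv->pages;

		vma->vm_ops = &foreign_vm_ops;		/* hypothetical */
		vma->vm_flags |= VM_FOREIGN | VM_DONTCOPY;
		/* get_user_pages() and the zap path find the table here. */
		vma->vm_private_data = foreign_map;
		return 0;
	}
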
+
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+
+#ifdef CONFIG_XEN
+	/* Area-specific function for clearing the PTE at @ptep. Returns
+	 * the original PTE value, i.e. *@ptep before it was cleared.
+	 * @is_fullmm is non-zero when the whole address space is being
+	 * torn down, not just this mapping. */
+	pte_t (*zap_pte)(struct vm_area_struct *vma,
+			 unsigned long addr, pte_t *ptep, int is_fullmm);
+
+	/* Called before close() to indicate that no more pages should be
+	 * mapped into the area. */
+	void (*unmap)(struct vm_area_struct *area);
+#endif
+
#ifdef CONFIG_NUMA
/*
* set_policy() op must add a reference to any non-NULL @new mempolicy
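
The two Xen-only hooks added above let a foreign-mapping driver intercept PTE teardown instead of letting the core VM clear entries it does not own. A sketch of how a driver might implement them, with foreign_fault, foreign_revoke_mapping, and foreign_revoke_all as invented placeholders for the driver's grant-handling code:

	/* Hypothetical implementation of the Xen-only VMA hooks above. */
	static pte_t foreign_zap_pte(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep,
				     int is_fullmm)
	{
		pte_t old = *ptep;

		/* Revoke the grant mapping behind this PTE before the core
		 * VM reuses the slot; driver-specific (hypothetical). */
		foreign_revoke_mapping(vma, addr, is_fullmm);
		pte_clear(vma->vm_mm, addr, ptep);
		return old;	/* the PTE value that was installed */
	}

	static void foreign_unmap(struct vm_area_struct *vma)
	{
		/* No further pages may be mapped into the VMA after this;
		 * tear down any grants still outstanding (hypothetical). */
		foreign_revoke_all(vma);
	}

	static const struct vm_operations_struct foreign_vm_ops = {
		.fault   = foreign_fault,	/* hypothetical */
	#ifdef CONFIG_XEN
		.zap_pte = foreign_zap_pte,
		.unmap   = foreign_unmap,
	#endif
	};
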
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
+#ifndef CONFIG_KERNEL_DESKTOP
+#define VM_MAX_READAHEAD 512 /* kbytes */
+#else
#define VM_MAX_READAHEAD 128 /* kbytes */
+#endif
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
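
VM_MAX_READAHEAD only sets the default window; it reaches the readahead code via the default backing_dev_info, which converts it from kbytes to pages. With 4 KiB pages, the non-desktop value above therefore raises the default from 32 to 128 pages. A sketch modeled on mm/backing-dev.c from kernels of this era (treat the exact field list as an assumption):

	/* Default readahead window, stored in pages rather than kbytes:
	 * 512 kbytes / 4 KiB pages = 128 pages when CONFIG_KERNEL_DESKTOP
	 * is unset, vs. 32 pages for the 128 kbytes desktop default. */
	struct backing_dev_info default_backing_dev_info = {
		.name		= "default",
		.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
		.state		= 0,
		.capabilities	= BDI_CAP_MAP_COPY,
		.unplug_io_fn	= default_unplug_io_fn,
	};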