Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 43474d8..7745854 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
-#include <asm/system.h>
+#include <asm/highmem.h>
+#include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
 
 #include "mm.h"
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 
-#define ALIAS_FLUSH_START      0xffff4000
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-       unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+       unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;
 
-       set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
-       flush_tlb_kernel_page(to);
+       set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 
        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
-       __flush_icache_all();
+}
+
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+       unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+       unsigned long offset = vaddr & (PAGE_SIZE - 1);
+       unsigned long to;
+
+       set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+       to = va + offset;
+       flush_icache_range(to, to + len);
 }
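
For illustration only (not part of this patch): the temporary mapping set up above only works because it is placed at a virtual address with the same cache colour as the user address, so it indexes the same lines of an aliasing VIPT D-cache. Below is a minimal user-space sketch of that colour arithmetic, assuming ARM's usual SHMLBA of four pages and the conventional CACHE_COLOUR() definition; the FLUSH_ALIAS_START value is just the removed ALIAS_FLUSH_START constant reused as a stand-in.

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define SHMLBA			(4 * PAGE_SIZE)		/* assumed: four page colours */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
#define FLUSH_ALIAS_START	0xffff4000UL		/* illustrative value only */

int main(void)
{
	unsigned long user_addrs[] = { 0x00012000UL, 0x40003000UL, 0xb6f01000UL };

	for (unsigned i = 0; i < sizeof(user_addrs) / sizeof(user_addrs[0]); i++) {
		unsigned long vaddr = user_addrs[i];
		unsigned long to = FLUSH_ALIAS_START +
				   (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

		/* The alias shares bits [13:12] with the user address, so it
		 * lands in the same set of the aliasing D-cache. */
		printf("user %#010lx  colour %lu  alias %#010lx\n",
		       vaddr, CACHE_COLOUR(vaddr), to);
	}
	return 0;
}

Compiled and run, this prints, e.g., "user 0x00012000  colour 2  alias 0xffff6000".
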
 
 void flush_cache_mm(struct mm_struct *mm)
@@ -51,7 +60,6 @@ void flush_cache_mm(struct mm_struct *mm)
                    :
                    : "r" (0)
                    : "cc");
-               __flush_icache_all();
        }
 }
 
@@ -68,8 +76,10 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
                    :
                    : "r" (0)
                    : "cc");
-               __flush_icache_all();
        }
+
+       if (vma->vm_flags & VM_EXEC)
+               __flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
@@ -79,53 +89,98 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
                return;
        }
 
-       if (cache_is_vipt_aliasing())
+       if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
+               __flush_icache_all();
+       }
+
+       if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+               __flush_icache_all();
 }
 
+#else
+#define flush_pfn_alias(pfn,vaddr)             do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len)      do { } while (0)
+#endif
+
+static void flush_ptrace_access_other(void *args)
+{
+       __flush_icache_all();
+}
+
+static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                        unsigned long uaddr, void *kaddr,
-                        unsigned long len, int write)
+                        unsigned long uaddr, void *kaddr, unsigned long len)
 {
        if (cache_is_vivt()) {
-               vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+                       unsigned long addr = (unsigned long)kaddr;
+                       __cpuc_coherent_kern_range(addr, addr + len);
+               }
                return;
        }
 
        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
+               __flush_icache_all();
                return;
        }
 
-       /* VIPT non-aliasing cache */
-       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
-           vma->vm_flags & VM_EXEC) {
+       /* VIPT non-aliasing D-cache */
+       if (vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
-               /* only flushing the kernel mapping on non-aliasing VIPT */
-               __cpuc_coherent_kern_range(addr, addr + len);
+               if (icache_is_vipt_aliasing())
+                       flush_icache_alias(page_to_pfn(page), uaddr, len);
+               else
+                       __cpuc_coherent_kern_range(addr, addr + len);
+               if (cache_ops_need_broadcast())
+                       smp_call_function(flush_ptrace_access_other,
+                                         NULL, 1);
        }
 }
-#else
-#define flush_pfn_alias(pfn,vaddr)     do { } while (0)
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * process's address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long uaddr, void *dst, const void *src,
+                      unsigned long len)
+{
+#ifdef CONFIG_SMP
+       preempt_disable();
+#endif
+       memcpy(dst, src, len);
+       flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+       preempt_enable();
 #endif
+}
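
For illustration only (not part of this patch): the classic consumer of copy_to_user_page() is ptrace, e.g. a debugger rewriting a word of a stopped tracee's text, after which the tracee's I-cache must see the new instructions. A rough, self-contained user-space sketch of that operation follows (error handling omitted); in the kernel the POKETEXT request reaches this file via access_process_vm().

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* A function in the text segment that the tracer will "patch". */
static void victim(void) { }

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* wait for the tracer */
		victim();
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* child is now stopped */

	/* Read a word of the child's text and write it straight back.
	 * The POKETEXT forces a copy-on-write of the text page and goes
	 * through access_process_vm() -> copy_to_user_page() in the
	 * kernel, i.e. the I/D coherency path shown above. */
	long word = ptrace(PTRACE_PEEKTEXT, child, (void *)victim, NULL);
	ptrace(PTRACE_POKETEXT, child, (void *)victim, (void *)word);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}
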
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-       void *addr = page_address(page);
-
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (addr)
-#endif
-               __cpuc_flush_dcache_page(addr);
+       if (!PageHighMem(page)) {
+               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+       } else {
+               void *addr = kmap_high_get(page);
+               if (addr) {
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high(page);
+               } else if (cache_is_vipt()) {
+                       /* unmapped pages might still be cached */
+                       addr = kmap_atomic(page);
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_atomic(addr);
+               }
+       }
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -169,6 +224,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
        flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+       unsigned long pfn;
+       struct page *page;
+       struct address_space *mapping;
+
+       if (!pte_present_user(pteval))
+               return;
+       if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+               /* only flush non-aliasing VIPT caches for exec mappings */
+               return;
+       pfn = pte_pfn(pteval);
+       if (!pfn_valid(pfn))
+               return;
+
+       page = pfn_to_page(pfn);
+       if (cache_is_vipt_aliasing())
+               mapping = page_mapping(page);
+       else
+               mapping = NULL;
+
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               __flush_dcache_page(mapping, page);
+
+       if (pte_exec(pteval))
+               __flush_icache_all();
+}
+#endif
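
For context (a from-memory sketch, not quoted from this patch): __sync_icache_dcache() has no caller in this file; it is invoked from set_pte_at() in arch/arm/include/asm/pgtable.h whenever a user PTE is installed, which is what allows flush_dcache_page() below to defer the flush for pages that are not yet mapped to user space. Roughly:

/* arch/arm/include/asm/pgtable.h (approximate sketch) */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);	/* catch up any deferred flush */
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}
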
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
@@ -185,23 +270,32 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  *  kernel cache lines for later.  Otherwise, we assume we have
  *  aliasing mappings.
  *
- * Note that we disable the lazy flush for SMP.
+ * Note that we disable the lazy flush for SMP configurations where
+ * the cache maintenance operations are not automatically broadcasted.
  */
 void flush_dcache_page(struct page *page)
 {
-       struct address_space *mapping = page_mapping(page);
+       struct address_space *mapping;
 
-#ifndef CONFIG_SMP
-       if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
-               set_bit(PG_dcache_dirty, &page->flags);
-       else
-#endif
-       {
+       /*
+        * The zero page is never written to, so never has any dirty
+        * cache lines, and therefore never needs to be flushed.
+        */
+       if (page == ZERO_PAGE(0))
+               return;
+
+       mapping = page_mapping(page);
+
+       if (!cache_ops_need_broadcast() &&
+           mapping && !mapping_mapped(mapping))
+               clear_bit(PG_dcache_clean, &page->flags);
+       else {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
+               set_bit(PG_dcache_clean, &page->flags);
        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
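
For illustration only (not part of this patch): the typical flush_dcache_page() caller is a filesystem or driver that has just filled a page cache page through the kernel mapping, for example at the end of a PIO read. A hedged sketch of that pattern (fill_page and its arguments are invented for the example):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Fill a page cache page with freshly read file data, then tell the
 * arch code that the kernel mapping dirtied it so user mappings (or a
 * later mmap) see coherent data. */
static void fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	if (len < PAGE_SIZE)
		memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_atomic(dst);

	flush_dcache_page(page);	/* may just clear PG_dcache_clean */
	SetPageUptodate(page);
}

On an aliasing VIPT cache this does the writeback immediately; on a non-aliasing configuration with the page not yet mapped, it merely clears PG_dcache_clean and set_pte_at() pays the cost later.
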
@@ -235,6 +329,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
+               __flush_icache_all();
        }
 
        /*
@@ -242,5 +337,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
-       __cpuc_flush_dcache_page(page_address(page));
+       __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }