ARM: 6941/1: cache: ensure MVA is cacheline aligned in flush_kern_dcache_area
author Will Deacon <will.deacon@arm.com>
Thu, 26 May 2011 10:20:19 +0000 (11:20 +0100)
committer Steve Conklin <sconklin@canonical.com>
Fri, 15 Jul 2011 17:21:10 +0000 (12:21 -0500)
BugLink: http://bugs.launchpad.net/bugs/793702

commit a248b13b21ae00b97638b4f435c8df3075808b5d upstream.

The v6 and v7 implementations of flush_kern_dcache_area do not align
the passed MVA to the size of a cacheline in the data cache. If a
misaligned address is used, only a subset of the requested area will
be flushed. This has been observed to cause failures in SMP boot where
the secondary_data initialised by the primary CPU is not cacheline
aligned, causing the secondary CPUs to read incorrect values for their
pgd and stack pointers.

This patch ensures that the base address is cacheline aligned before
flushing the d-cache.
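
For illustration only (not part of the patch), the following minimal user-space
C sketch shows the rounding the fix performs before the clean/invalidate loop.
The 64-byte line size, the function name and the example addresses are assumptions
made purely for demonstration:

    /*
     * Sketch of flush_kern_dcache_area's loop with the base address
     * rounded down to a cache line boundary, so the first iteration
     * covers the partial leading line instead of skipping it.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_LINE_SIZE 64  /* assumed D-cache line size for illustration */

    static void flush_range(uintptr_t start, size_t size)
    {
            uintptr_t end = start + size;

            /* Equivalent of "bic r0, r0, #D_CACHE_LINE_SIZE - 1" */
            start &= ~((uintptr_t)CACHE_LINE_SIZE - 1);

            for (uintptr_t mva = start; mva < end; mva += CACHE_LINE_SIZE)
                    printf("clean+invalidate line at %#lx\n", (unsigned long)mva);
    }

    int main(void)
    {
            /*
             * A misaligned base (hypothetical), in the spirit of a
             * secondary_data structure starting mid-line: without the
             * masking above, the line covering 0x1000..0x103f would
             * never be cleaned, leaving stale data for the reader.
             */
            flush_range(0x1008, 32);
            return 0;
    }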

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>

arch/arm/mm/cache-v6.S
arch/arm/mm/cache-v7.S

diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b..73b4a8b 100644
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
  */
 ENTRY(v6_flush_kern_dcache_area)
        add     r1, r0, r1
+       bic     r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 6136e68..d9b5cab 100644
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
 ENTRY(v7_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1
+       sub     r3, r2, #1
+       bic     r0, r0, r3
 1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2