[RFC] powerpc: Merge 32/64 cacheflush code
David Gibson
david at gibson.dropbear.id.au
Tue Dec 20 12:06:17 EST 2005
On Mon, Dec 19, 2005 at 08:52:47AM -0600, Milton Miller wrote:
> On Mon Dec 19 16:44:10 EST 2005, David Gibson wrote:
>
> >+extern void wback_dcache_range(unsigned long start, unsigned long stop);
> >+extern void wback_inval_dcache_range(unsigned long start, unsigned long stop);
>
> I think that while we are here we should change the arguments to be
> pointers (void *). The assembly doesn't care, and almost all of the
> users are casting from pointer to unsigned long at the call site, with
> dart being the exception. The instruction cache flush should also
> change.
True. And while we're at it, the dcache flushing functions are
almost invariably called as *_dcache_range(start, start+length), so
how about changing them to take start and length instead of start and
end? However, flush_icache_range() is called from generic code, so I
don't want to change its interface.
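For illustration, the conversion at a typical call site (a
hypothetical driver helper, not taken from the patch below) would
look roughly like this:

    #include <asm/cacheflush.h>

    /* hypothetical helper, for illustration only */
    static void push_buffer_to_ram(void *buf, unsigned long len)
    {
            /* old interface: start/stop addresses, cast from the pointer:
             *   flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
             * new interface: pointer plus length, no casts at the call site */
            wback_inval_dcache_range(buf, len);
    }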
Revised patch below:
powerpc: Merge 32/64 cacheflush code
This patch merges the cache flushing code for 32-bit and 64-bit
powerpc machines. This means the ppc64_caches mechanism for
determining the correct cache sizes at runtime is ported to 32-bit,
and is therefore renamed 'powerpc_caches'. The merged cache flushing
functions go in a new file, arch/powerpc/kernel/cache.S.
Previously, the ppc32 version of flush_dcache_range() did a writeback
and invalidate of the given cache lines (dcbf) whereas the ppc64
version did just a writeback (dcbst). In general, there's no
consistent meaning of "flush" as one or the other, so this patch also
renames the dcache flushing functions less ambiguously. The new names
are:
    wback_dcache_range()       - previously flush_dcache_range() on ppc64
                                 and clean_dcache_range() on ppc32
    wback_inval_dcache_range() - previously flush_inval_dcache_range() on
                                 ppc64 and flush_dcache_range() on ppc32
    invalidate_dcache_range()  - didn't previously exist on ppc64,
                                 unchanged on ppc32
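With the pointer-and-length convention discussed above, the resulting
prototypes in include/asm-powerpc/cacheflush.h (see the patch below)
are:

    extern void wback_dcache_range(void *start, unsigned long len);       /* dcbst */
    extern void wback_inval_dcache_range(void *start, unsigned long len); /* dcbf  */
    extern void invalidate_dcache_range(void *start, unsigned long len);  /* dcbi  */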
Finally, we also clean up the initialization of the powerpc_caches
structure compared to the old ppc64-specific version: we remove a
pointless loop and a dependence on _machine.
arch/powerpc/kernel/Makefile | 2
arch/powerpc/kernel/align.c | 2
arch/powerpc/kernel/asm-offsets.c | 12 -
arch/powerpc/kernel/cache.S | 229 +++++++++++++++++++++++++++++++++++++
arch/powerpc/kernel/misc_32.S | 123 -------------------
arch/powerpc/kernel/misc_64.S | 182 -----------------------------
arch/powerpc/kernel/ppc_ksyms.c | 2
arch/powerpc/kernel/setup-common.c | 89 ++++++++++++++
arch/powerpc/kernel/setup_32.c | 33 +----
arch/powerpc/kernel/setup_64.c | 123 +------------------
arch/powerpc/kernel/vdso.c | 8 -
arch/powerpc/sysdev/dart_iommu.c | 3
arch/ppc/8xx_io/cs4218_tdm.c | 8 -
arch/ppc/8xx_io/enet.c | 3
arch/ppc/8xx_io/fec.c | 3
arch/ppc/kernel/dma-mapping.c | 16 --
arch/ppc/kernel/misc.S | 19 +--
arch/ppc/kernel/ppc_ksyms.c | 2
drivers/char/agp/uninorth-agp.c | 23 +--
drivers/macintosh/smu.c | 9 -
drivers/net/fec.c | 3
drivers/serial/mpsc.c | 34 +----
include/asm-powerpc/asm-compat.h | 2
include/asm-powerpc/cache.h | 9 -
include/asm-powerpc/cacheflush.h | 15 --
include/asm-powerpc/page_64.h | 4
include/asm-ppc/io.h | 6
27 files changed, 420 insertions(+), 544 deletions(-)
Index: working-2.6/include/asm-powerpc/cache.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/cache.h 2005-11-23 15:56:35.000000000 +1100
+++ working-2.6/include/asm-powerpc/cache.h 2005-12-20 12:03:10.000000000 +1100
@@ -21,8 +21,8 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
-struct ppc64_caches {
+#ifndef __ASSEMBLY__
+struct powerpc_caches {
u32 dsize; /* L1 d-cache size */
u32 dline_size; /* L1 d-cache line size */
u32 log_dline_size;
@@ -33,8 +33,9 @@ struct ppc64_caches {
u32 ilines_per_page;
};
-extern struct ppc64_caches ppc64_caches;
-#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+extern struct powerpc_caches powerpc_caches;
+extern void initialize_cache_info(void);
+#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
Index: working-2.6/arch/powerpc/kernel/Makefile
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/Makefile 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/Makefile 2005-12-20 12:03:10.000000000 +1100
@@ -13,7 +13,7 @@ endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
- prom_parse.o
+ prom_parse.o cache.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o systbl.o \
Index: working-2.6/arch/powerpc/kernel/align.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/align.c 2005-11-23 15:56:22.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/align.c 2005-12-20 12:03:10.000000000 +1100
@@ -231,7 +231,7 @@ static int emulate_dcbz(struct pt_regs *
int i, size;
#ifdef __powerpc64__
- size = ppc64_caches.dline_size;
+ size = powerpc_caches.dline_size;
#else
size = L1_CACHE_BYTES;
#endif
Index: working-2.6/arch/powerpc/kernel/asm-offsets.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/asm-offsets.c 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/asm-offsets.c 2005-12-20 12:03:10.000000000 +1100
@@ -99,13 +99,13 @@ int main(void)
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#endif /* CONFIG_PPC32 */
+ DEFINE(DCACHEL1LINESIZE, offsetof(struct powerpc_caches, dline_size));
+ DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct powerpc_caches, log_dline_size));
+ DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct powerpc_caches, dlines_per_page));
+ DEFINE(ICACHEL1LINESIZE, offsetof(struct powerpc_caches, iline_size));
+ DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct powerpc_caches, log_iline_size));
+ DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct powerpc_caches, ilines_per_page));
#ifdef CONFIG_PPC64
- DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
- DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
- DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
- DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
- DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
- DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
Index: working-2.6/arch/powerpc/kernel/cache.S
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ working-2.6/arch/powerpc/kernel/cache.S 2005-12-20 12:03:30.000000000 +1100
@@ -0,0 +1,229 @@
+/*
+ * arch/powerpc/kernel/cache.S
+ *
+ * Cache-flushing functions.
+ * Copyright (C) 2005 David Gibson <dwg at au1.ibm.com>
+ * Based on earlier code:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt at linuxppc.org)
+ * Largely rewritten by Cort Dougan (cort at cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc at us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret at us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/reg.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start through stop-1 inclusive
+ */
+_KPROBE(__flush_icache_range)
+BEGIN_FTR_SECTION
+ blr /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ * and in some cases i-cache and d-cache line sizes differ from
+ * each other.
+ */
+ LOAD_REG_ADDR(r10, powerpc_caches)
+ lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get dcache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+1: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 1b
+ sync
+
+/* Now invalidate the instruction cache */
+ lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5
+ lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get icache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+2: icbi 0,r6
+ add r6,r6,r7
+ bdnz 2b
+ sync /* additional sync needed on g4 */
+ isync
+ blr
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * void __flush_dcache_icache(void *page)
+ */
+_GLOBAL(__flush_dcache_icache)
+BEGIN_FTR_SECTION
+ blr /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ /* Flush the dcache */
+ LOAD_REG_ADDR(r7, powerpc_caches)
+ PPC_CLRRLI r3,r3,PAGE_SHIFT /* Page align */
+ lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get dcache lines per page */
+ lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
+ mr r6,r3
+ mtctr r4
+0: dcbst 0,r6
+ add r6,r6,r5
+ bdnz 0b
+ sync
+
+ /* Now invalidate the icache */
+ lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get icache lines per page */
+ lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
+ mtctr r4
+1: icbi 0,r3
+ add r3,r3,r5
+ bdnz 1b
+ sync /* additional sync needed on g4 */
+ isync
+ blr
+
+/*
+ * Like above, but only do the D-cache.
+ *
+ * wback_dcache_range(void *start, unsigned long len)
+ *
+ * writeback all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(wback_dcache_range)
+ LOAD_REG_ADDR(r10, powerpc_caches)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ and r8,r3,r5 /* get cacheline offset of start */
+ add r8,r8,r4 /* add length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get dcache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+0: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ blr
+
+/*
+ * wback_inval_dcache_range(void *start, unsigned long len)
+ *
+ * writeback and invalidate all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(wback_inval_dcache_range)
+ LOAD_REG_ADDR(r10, powerpc_caches)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ and r8,r3,r5 /* get cacheline offset of start */
+ add r8,r8,r4 /* add length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get dcache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ sync /* FIXME: is this necessary? */
+ isync /* FIXME: is this necessary? */
+ mtctr r8
+0: dcbf 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ isync /* FIXME: is this necessary? */
+ blr
+
+/*
+ * Like above, but invalidate the D-cache. This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ *
+ * invalidate_dcache_range(void *start, unsigned long stop)
+ */
+_GLOBAL(invalidate_dcache_range)
+ LOAD_REG_ADDR(r10, powerpc_caches)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ /* FIXME: should BUG() on non-aligned parameters instead */
+ and r8,r3,r5 /* get cacheline offset of start */
+ add r8,r8,r4 /* add length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get dcache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+0: dcbi 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ blr
+
+#ifdef CONFIG_U3_DART
+/*
+ * Like above, but works on non-mapped physical addresses.
+ * Use only for non-LPAR setups ! It also assumes real mode
+ * is cacheable. Used for flushing out the DART before using
+ * it as uncacheable memory
+ *
+ * wback_dcache_phys_range(unsigned long start, unsigned long len)
+ *
+ * writeback all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(wback_dcache_phys_range)
+ LOAD_REG_ADDR(r10, powerpc_caches)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ and r8,r3,r5 /* get cacheline offset of start */
+ add r8,r8,r4 /* add length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get dcache line shift */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mfmsr r5 /* Disable MMU Data Relocation */
+ ori r0,r5,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ mtctr r8
+0: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ isync
+ mtmsr r5 /* Re-enable MMU Data Relocation */
+ sync
+ isync
+ blr
+#endif /* CONFIG_U3_DART */
\ No newline at end of file
Index: working-2.6/arch/powerpc/kernel/misc_64.S
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/misc_64.S 2005-12-20 12:03:09.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/misc_64.S 2005-12-20 12:03:10.000000000 +1100
@@ -142,188 +142,6 @@ _GLOBAL(call_with_mmu_off)
mtspr SPRN_SRR1,r0
rfid
-
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
-
-/*
- * Write any modified data cache blocks out to memory
- * and invalidate the corresponding instruction cache blocks.
- *
- * flush_icache_range(unsigned long start, unsigned long stop)
- *
- * flush all bytes from start through stop-1 inclusive
- */
-
-_KPROBE(__flush_icache_range)
-
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- * and in some cases i-cache and d-cache line sizes differ from
- * each other.
- */
- ld r10,PPC64_CACHES at toc(r2)
- lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-1: dcbst 0,r6
- add r6,r6,r7
- bdnz 1b
- sync
-
-/* Now invalidate the instruction cache */
-
- lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5
- lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-2: icbi 0,r6
- add r6,r6,r7
- bdnz 2b
- isync
- blr
- .previous .text
-/*
- * Like above, but only do the D-cache.
- *
- * flush_dcache_range(unsigned long start, unsigned long stop)
- *
- * flush all bytes from start to stop-1 inclusive
- */
-_GLOBAL(flush_dcache_range)
-
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- */
- ld r10,PPC64_CACHES at toc(r2)
- lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-0: dcbst 0,r6
- add r6,r6,r7
- bdnz 0b
- sync
- blr
-
-/*
- * Like above, but works on non-mapped physical addresses.
- * Use only for non-LPAR setups ! It also assumes real mode
- * is cacheable. Used for flushing out the DART before using
- * it as uncacheable memory
- *
- * flush_dcache_phys_range(unsigned long start, unsigned long stop)
- *
- * flush all bytes from start to stop-1 inclusive
- */
-_GLOBAL(flush_dcache_phys_range)
- ld r10,PPC64_CACHES at toc(r2)
- lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mfmsr r5 /* Disable MMU Data Relocation */
- ori r0,r5,MSR_DR
- xori r0,r0,MSR_DR
- sync
- mtmsr r0
- sync
- isync
- mtctr r8
-0: dcbst 0,r6
- add r6,r6,r7
- bdnz 0b
- sync
- isync
- mtmsr r5 /* Re-enable MMU Data Relocation */
- sync
- isync
- blr
-
-_GLOBAL(flush_inval_dcache_range)
- ld r10,PPC64_CACHES at toc(r2)
- lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- sync
- isync
- mtctr r8
-0: dcbf 0,r6
- add r6,r6,r7
- bdnz 0b
- sync
- isync
- blr
-
-
-/*
- * Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- *
- * void __flush_dcache_icache(void *page)
- */
-_GLOBAL(__flush_dcache_icache)
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- */
-
-/* Flush the dcache */
- ld r7,PPC64_CACHES at toc(r2)
- clrrdi r3,r3,PAGE_SHIFT /* Page align */
- lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
- lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
- mr r6,r3
- mtctr r4
-0: dcbst 0,r6
- add r6,r6,r5
- bdnz 0b
- sync
-
-/* Now invalidate the icache */
-
- lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
- lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
- mtctr r4
-1: icbi 0,r3
- add r3,r3,r5
- bdnz 1b
- isync
- blr
-
/*
* I/O string operations
*
Index: working-2.6/arch/powerpc/kernel/setup_64.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/setup_64.c 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/setup_64.c 2005-12-20 12:03:10.000000000 +1100
@@ -103,25 +103,6 @@ int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;
-/* Pick defaults since we might want to patch instructions
- * before we've read this from the device tree.
- */
-struct ppc64_caches ppc64_caches = {
- .dline_size = 0x80,
- .log_dline_size = 7,
- .iline_size = 0x80,
- .log_iline_size = 7
-};
-EXPORT_SYMBOL_GPL(ppc64_caches);
-
-/*
- * These are used in binfmt_elf.c to put aux entries on the stack
- * for each elf executable being started.
- */
-int dcache_bsize;
-int icache_bsize;
-int ucache_bsize;
-
/* The main machine-dep calls structure
*/
struct machdep_calls ppc_md;
@@ -345,81 +326,6 @@ void smp_release_cpus(void)
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
- * Initialize some remaining members of the ppc64_caches and systemcfg
- * structures
- * (at least until we get rid of them completely). This is mostly some
- * cache informations about the CPU that will be used by cache flush
- * routines and/or provided to userland
- */
-static void __init initialize_cache_info(void)
-{
- struct device_node *np;
- unsigned long num_cpus = 0;
-
- DBG(" -> initialize_cache_info()\n");
-
- for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
- num_cpus += 1;
-
- /* We're assuming *all* of the CPUs have the same
- * d-cache and i-cache sizes... -Peter
- */
-
- if ( num_cpus == 1 ) {
- u32 *sizep, *lsizep;
- u32 size, lsize;
- const char *dc, *ic;
-
- /* Then read cache informations */
- if (_machine == PLATFORM_POWERMAC) {
- dc = "d-cache-block-size";
- ic = "i-cache-block-size";
- } else {
- dc = "d-cache-line-size";
- ic = "i-cache-line-size";
- }
-
- size = 0;
- lsize = cur_cpu_spec->dcache_bsize;
- sizep = (u32 *)get_property(np, "d-cache-size", NULL);
- if (sizep != NULL)
- size = *sizep;
- lsizep = (u32 *) get_property(np, dc, NULL);
- if (lsizep != NULL)
- lsize = *lsizep;
- if (sizep == 0 || lsizep == 0)
- DBG("Argh, can't find dcache properties ! "
- "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
- ppc64_caches.dsize = size;
- ppc64_caches.dline_size = lsize;
- ppc64_caches.log_dline_size = __ilog2(lsize);
- ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
-
- size = 0;
- lsize = cur_cpu_spec->icache_bsize;
- sizep = (u32 *)get_property(np, "i-cache-size", NULL);
- if (sizep != NULL)
- size = *sizep;
- lsizep = (u32 *)get_property(np, ic, NULL);
- if (lsizep != NULL)
- lsize = *lsizep;
- if (sizep == 0 || lsizep == 0)
- DBG("Argh, can't find icache properties ! "
- "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
- ppc64_caches.isize = size;
- ppc64_caches.iline_size = lsize;
- ppc64_caches.log_iline_size = __ilog2(lsize);
- ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
- }
- }
-
- DBG(" <- initialize_cache_info()\n");
-}
-
-
-/*
* Do some initial setup of the system. The parameters are those which
* were passed in from the bootloader.
*/
@@ -437,14 +343,13 @@ void __init setup_system(void)
#endif
/*
- * Fill the ppc64_caches & systemcfg structures with informations
- * retreived from the device-tree. Need to be called before
+ * Fill the powerpc_caches structure with information
+ * retrieved from the device-tree. Needs to be called before
* finish_device_tree() since the later requires some of the
- * informations filled up here to properly parse the interrupt
- * tree.
- * It also sets up the cache line sizes which allows to call
- * routines like flush_icache_range (used by the hash init
- * later on).
+ * information filled up here to properly parse the interrupt
+ * tree. It also sets up the cache line sizes which allows to
+ * call routines like flush_icache_range (used by the hash
+ * init later on).
*/
initialize_cache_info();
@@ -514,10 +419,10 @@ void __init setup_system(void)
ppc64_interrupt_controller);
printk("platform = 0x%x\n", _machine);
printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
- printk("ppc64_caches.dcache_line_size = 0x%x\n",
- ppc64_caches.dline_size);
- printk("ppc64_caches.icache_line_size = 0x%x\n",
- ppc64_caches.iline_size);
+ printk("powerpc_caches.dcache_line_size = 0x%x\n",
+ powerpc_caches.dline_size);
+ printk("powerpc_caches.icache_line_size = 0x%x\n",
+ powerpc_caches.iline_size);
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#if PHYSICAL_START > 0
@@ -597,14 +502,6 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = cmd_line;
- /*
- * Set cache line size based on type of cpu as a default.
- * Systems with OF can look in the properties on the cpu node(s)
- * for a possibly more accurate value.
- */
- dcache_bsize = ppc64_caches.dline_size;
- icache_bsize = ppc64_caches.iline_size;
-
/* reboot on panic */
panic_timeout = 180;
Index: working-2.6/arch/powerpc/kernel/vdso.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/vdso.c 2005-11-29 16:23:57.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/vdso.c 2005-12-20 12:03:10.000000000 +1100
@@ -671,10 +671,10 @@ void __init vdso_init(void)
vdso_data->processor = mfspr(SPRN_PVR);
vdso_data->platform = _machine;
vdso_data->physicalMemorySize = lmb_phys_mem_size();
- vdso_data->dcache_size = ppc64_caches.dsize;
- vdso_data->dcache_line_size = ppc64_caches.dline_size;
- vdso_data->icache_size = ppc64_caches.isize;
- vdso_data->icache_line_size = ppc64_caches.iline_size;
+ vdso_data->dcache_size = powerpc_caches.dsize;
+ vdso_data->dcache_line_size = powerpc_caches.dline_size;
+ vdso_data->icache_size = powerpc_caches.isize;
+ vdso_data->icache_line_size = powerpc_caches.iline_size;
/*
* Calculate the size of the 64 bits vDSO
Index: working-2.6/include/asm-powerpc/page_64.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/page_64.h 2005-12-19 14:18:25.000000000 +1100
+++ working-2.6/include/asm-powerpc/page_64.h 2005-12-20 12:03:10.000000000 +1100
@@ -40,8 +40,8 @@ static __inline__ void clear_page(void *
{
unsigned long lines, line_size;
- line_size = ppc64_caches.dline_size;
- lines = ppc64_caches.dlines_per_page;
+ line_size = powerpc_caches.dline_size;
+ lines = powerpc_caches.dlines_per_page;
__asm__ __volatile__(
"mtctr %1 # clear_page\n\
Index: working-2.6/arch/powerpc/kernel/setup-common.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/setup-common.c 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/setup-common.c 2005-12-20 12:03:10.000000000 +1100
@@ -466,3 +466,92 @@ static int __init early_xmon(char *p)
}
early_param("xmon", early_xmon);
#endif
+
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct powerpc_caches powerpc_caches = {
+ .dline_size = L1_CACHE_BYTES,
+ .log_dline_size = L1_CACHE_SHIFT,
+ .iline_size = L1_CACHE_BYTES,
+ .log_iline_size = L1_CACHE_SHIFT,
+};
+EXPORT_SYMBOL_GPL(powerpc_caches);
+
+/*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+ */
+int dcache_bsize;
+int icache_bsize;
+int ucache_bsize;
+
+/*
+ * Initialize the powerpc_caches structure. This is some cache
+ * information about the CPU that will be used by cache flush
+ * routines and/or provided to userland
+ */
+void __init initialize_cache_info(void)
+{
+ struct device_node *np;
+ u32 *sizep, *lsizep;
+ u32 size, lsize;
+
+ DBG(" -> initialize_cache_info()\n");
+
+ /* We're assuming *all* of the CPUs have the same d-cache and
+ * i-cache sizes... -Peter
+ */
+ np = of_find_node_by_type(NULL, "cpu");
+ BUG_ON(!np);
+
+ size = 0;
+ lsize = cur_cpu_spec->dcache_bsize;
+ sizep = (u32 *)get_property(np, "d-cache-size", NULL);
+ if (sizep)
+ size = *sizep;
+ lsizep = (u32 *) get_property(np, "d-cache-line-size", NULL);
+ if (! lsizep)
+ lsizep = (u32 *) get_property(np, "d-cache-block-size", NULL);
+ if (lsizep)
+ lsize = *lsizep;
+ if (!sizep || !lsizep)
+ DBG("Argh, can't find dcache properties! "
+ "sizep: %p, lsizep: %p\n", sizep, lsizep);
+
+ powerpc_caches.dsize = size;
+ powerpc_caches.dline_size = lsize;
+ powerpc_caches.log_dline_size = __ilog2(lsize);
+ powerpc_caches.dlines_per_page = PAGE_SIZE / lsize;
+
+ size = 0;
+ lsize = cur_cpu_spec->icache_bsize;
+ sizep = (u32 *)get_property(np, "i-cache-size", NULL);
+ if (sizep)
+ size = *sizep;
+ lsizep = (u32 *)get_property(np, "i-cache-line-size", NULL);
+ if (! lsizep)
+ lsizep = (u32 *) get_property(np, "i-cache-block-size", NULL);
+ if (lsizep)
+ lsize = *lsizep;
+ if (!sizep || !lsizep)
+ DBG("Argh, can't find icache properties ! "
+ "sizep: %p, lsizep: %p\n", sizep, lsizep);
+
+ powerpc_caches.isize = size;
+ powerpc_caches.iline_size = lsize;
+ powerpc_caches.log_iline_size = __ilog2(lsize);
+ powerpc_caches.ilines_per_page = PAGE_SIZE / lsize;
+
+ /*
+ * Set cache line size based on type of cpu as a default.
+ * Systems with OF can look in the properties on the cpu node(s)
+ * for a possibly more accurate value.
+ */
+ dcache_bsize = powerpc_caches.dline_size;
+ icache_bsize = powerpc_caches.iline_size;
+ if (! cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE))
+ ucache_bsize = dcache_bsize;
+
+ DBG(" <- initialize_cache_info()\n");
+}
Index: working-2.6/arch/powerpc/kernel/setup_32.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/setup_32.c 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/setup_32.c 2005-12-20 12:03:10.000000000 +1100
@@ -89,14 +89,6 @@ struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
/*
- * These are used in binfmt_elf.c to put aux entries on the stack
- * for each elf executable being started.
- */
-int dcache_bsize;
-int icache_bsize;
-int ucache_bsize;
-
-/*
* We're called here very early in the boot. We determine the machine
* type and call the appropriate low-level setup functions.
* -- Cort <cort at fsmlabs.com>
@@ -294,6 +286,18 @@ void __init setup_arch(char **cmdline_p)
loops_per_jiffy = 500000000 / HZ;
unflatten_device_tree();
+
+ /*
+ * Fill the powerpc_caches structure with information
+ * retrieved from the device-tree. Needs to be called before
+ * finish_device_tree() since the later requires some of the
+ * information filled up here to properly parse the interrupt
+ * tree. It also sets up the cache line sizes which allows to
+ * call routines like flush_icache_range (used by the hash
+ * init later on).
+ */
+ initialize_cache_info();
+
check_for_initrd();
if (ppc_md.init_early)
@@ -324,19 +328,6 @@ void __init setup_arch(char **cmdline_p)
}
#endif
- /*
- * Set cache line size based on type of cpu as a default.
- * Systems with OF can look in the properties on the cpu node(s)
- * for a possibly more accurate value.
- */
- if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
- dcache_bsize = cur_cpu_spec->dcache_bsize;
- icache_bsize = cur_cpu_spec->icache_bsize;
- ucache_bsize = 0;
- } else
- ucache_bsize = dcache_bsize = icache_bsize
- = cur_cpu_spec->dcache_bsize;
-
/* reboot on panic */
panic_timeout = 180;
Index: working-2.6/arch/powerpc/kernel/misc_32.S
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/misc_32.S 2005-12-20 12:03:09.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/misc_32.S 2005-12-20 12:03:10.000000000 +1100
@@ -511,129 +511,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_C
blr
/*
- * Write any modified data cache blocks out to memory
- * and invalidate the corresponding instruction cache blocks.
- * This is a no-op on the 601.
- *
- * flush_icache_range(unsigned long start, unsigned long stop)
- */
-_GLOBAL(__flush_icache_range)
-BEGIN_FTR_SECTION
- blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
- li r5,L1_CACHE_BYTES-1
- andc r3,r3,r5
- subf r4,r3,r4
- add r4,r4,r5
- srwi. r4,r4,L1_CACHE_SHIFT
- beqlr
- mtctr r4
- mr r6,r3
-1: dcbst 0,r3
- addi r3,r3,L1_CACHE_BYTES
- bdnz 1b
- sync /* wait for dcbst's to get to ram */
- mtctr r4
-2: icbi 0,r6
- addi r6,r6,L1_CACHE_BYTES
- bdnz 2b
- sync /* additional sync needed on g4 */
- isync
- blr
-/*
- * Write any modified data cache blocks out to memory.
- * Does not invalidate the corresponding cache lines (especially for
- * any corresponding instruction cache).
- *
- * clean_dcache_range(unsigned long start, unsigned long stop)
- */
-_GLOBAL(clean_dcache_range)
- li r5,L1_CACHE_BYTES-1
- andc r3,r3,r5
- subf r4,r3,r4
- add r4,r4,r5
- srwi. r4,r4,L1_CACHE_SHIFT
- beqlr
- mtctr r4
-
-1: dcbst 0,r3
- addi r3,r3,L1_CACHE_BYTES
- bdnz 1b
- sync /* wait for dcbst's to get to ram */
- blr
-
-/*
- * Write any modified data cache blocks out to memory and invalidate them.
- * Does not invalidate the corresponding instruction cache blocks.
- *
- * flush_dcache_range(unsigned long start, unsigned long stop)
- */
-_GLOBAL(flush_dcache_range)
- li r5,L1_CACHE_BYTES-1
- andc r3,r3,r5
- subf r4,r3,r4
- add r4,r4,r5
- srwi. r4,r4,L1_CACHE_SHIFT
- beqlr
- mtctr r4
-
-1: dcbf 0,r3
- addi r3,r3,L1_CACHE_BYTES
- bdnz 1b
- sync /* wait for dcbst's to get to ram */
- blr
-
-/*
- * Like above, but invalidate the D-cache. This is used by the 8xx
- * to invalidate the cache so the PPC core doesn't get stale data
- * from the CPM (no cache snooping here :-).
- *
- * invalidate_dcache_range(unsigned long start, unsigned long stop)
- */
-_GLOBAL(invalidate_dcache_range)
- li r5,L1_CACHE_BYTES-1
- andc r3,r3,r5
- subf r4,r3,r4
- add r4,r4,r5
- srwi. r4,r4,L1_CACHE_SHIFT
- beqlr
- mtctr r4
-
-1: dcbi 0,r3
- addi r3,r3,L1_CACHE_BYTES
- bdnz 1b
- sync /* wait for dcbi's to get to ram */
- blr
-
-/*
- * Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- * This is a no-op on the 601 which has a unified cache.
- *
- * void __flush_dcache_icache(void *page)
- */
-_GLOBAL(__flush_dcache_icache)
-BEGIN_FTR_SECTION
- blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
- mtctr r4
- mr r6,r3
-0: dcbst 0,r3 /* Write line to ram */
- addi r3,r3,L1_CACHE_BYTES
- bdnz 0b
- sync
- mtctr r4
-1: icbi 0,r6
- addi r6,r6,L1_CACHE_BYTES
- bdnz 1b
- sync
- isync
- blr
-
-/*
* Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use
* the physical address (this may be a highmem page without a kernel
Index: working-2.6/arch/powerpc/kernel/ppc_ksyms.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/ppc_ksyms.c 2005-12-19 14:18:24.000000000 +1100
+++ working-2.6/arch/powerpc/kernel/ppc_ksyms.c 2005-12-20 12:03:10.000000000 +1100
@@ -165,7 +165,7 @@ EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(_tlbie);
#endif
EXPORT_SYMBOL(__flush_icache_range);
-EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(wback_dcache_range);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_call_function);
Index: working-2.6/arch/ppc/kernel/dma-mapping.c
===================================================================
--- working-2.6.orig/arch/ppc/kernel/dma-mapping.c 2005-11-23 15:56:22.000000000 +1100
+++ working-2.6/arch/ppc/kernel/dma-mapping.c 2005-12-20 12:03:30.000000000 +1100
@@ -207,11 +207,8 @@ __dma_alloc_coherent(size_t size, dma_ad
* Invalidate any data that might be lurking in the
* kernel direct-mapped region for device DMA.
*/
- {
- unsigned long kaddr = (unsigned long)page_address(page);
- memset(page_address(page), 0, size);
- flush_dcache_range(kaddr, kaddr + size);
- }
+ memset(page_address(page), 0, size);
+ wback_inval_dcache_range(page_address(page), size);
/*
* Allocate a virtual address in the consistent mapping region.
@@ -365,20 +362,17 @@ core_initcall(dma_alloc_init);
*/
void __dma_sync(void *vaddr, size_t size, int direction)
{
- unsigned long start = (unsigned long)vaddr;
- unsigned long end = start + size;
-
switch (direction) {
case DMA_NONE:
BUG();
case DMA_FROM_DEVICE: /* invalidate only */
- invalidate_dcache_range(start, end);
+ invalidate_dcache_range(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
- clean_dcache_range(start, end);
+ wback_dcache_range(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- flush_dcache_range(start, end);
+ wback_inval_dcache_range(vaddr, size);
break;
}
}
Index: working-2.6/arch/ppc/kernel/misc.S
===================================================================
--- working-2.6.orig/arch/ppc/kernel/misc.S 2005-12-19 14:18:25.000000000 +1100
+++ working-2.6/arch/ppc/kernel/misc.S 2005-12-20 12:03:30.000000000 +1100
@@ -527,12 +527,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_C
* Does not invalidate the corresponding cache lines (especially for
* any corresponding instruction cache).
*
- * clean_dcache_range(unsigned long start, unsigned long stop)
+ * wback_dcache_range(void *start, unsigned long len)
*/
-_GLOBAL(clean_dcache_range)
+_GLOBAL(wback_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
- subf r4,r3,r4
+ and r6,r3,r5
+ add r4,r4,r6
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
@@ -548,12 +549,13 @@ _GLOBAL(clean_dcache_range)
* Write any modified data cache blocks out to memory and invalidate them.
* Does not invalidate the corresponding instruction cache blocks.
*
- * flush_dcache_range(unsigned long start, unsigned long stop)
+ * wback_inval_dcache_range(void *start, unsigned long len)
*/
-_GLOBAL(flush_dcache_range)
+_GLOBAL(wback_inval_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
- subf r4,r3,r4
+ and r6,r3,r5
+ add r4,r4,r6
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
@@ -570,12 +572,13 @@ _GLOBAL(flush_dcache_range)
* to invalidate the cache so the PPC core doesn't get stale data
* from the CPM (no cache snooping here :-).
*
- * invalidate_dcache_range(unsigned long start, unsigned long stop)
+ * invalidate_dcache_range(void *start, unsigned long len)
*/
_GLOBAL(invalidate_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
- subf r4,r3,r4
+ and r6,r3,r5
+ add r4,r4,r6
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
Index: working-2.6/include/asm-powerpc/cacheflush.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/cacheflush.h 2005-11-23 15:56:35.000000000 +1100
+++ working-2.6/include/asm-powerpc/cacheflush.h 2005-12-20 12:03:30.000000000 +1100
@@ -44,15 +44,12 @@ extern void flush_dcache_icache_page(str
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-#ifdef CONFIG_PPC32
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
-#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
-#endif
+extern void wback_dcache_range(void *start, unsigned long len);
+extern void wback_inval_dcache_range(void *start, unsigned long len);
+extern void invalidate_dcache_range(void *start, unsigned long len);
+#ifdef CONFIG_U3_DART
+extern void wback_dcache_phys_range(unsigned long start, unsigned long len);
+#endif /* CONFIG_U3_DART */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
Index: working-2.6/include/asm-ppc/io.h
===================================================================
--- working-2.6.orig/include/asm-ppc/io.h 2005-11-23 15:56:36.000000000 +1100
+++ working-2.6/include/asm-ppc/io.h 2005-12-20 12:03:30.000000000 +1100
@@ -548,11 +548,11 @@ extern void pci_iounmap(struct pci_dev *
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_cache_inv(_start,_size) \
- invalidate_dcache_range(_start, (_start + _size))
+ invalidate_dcache_range((void *)_start, _size)
#define dma_cache_wback(_start,_size) \
- clean_dcache_range(_start, (_start + _size))
+ wback_dcache_range((void *)_start, _size)
#define dma_cache_wback_inv(_start,_size) \
- flush_dcache_range(_start, (_start + _size))
+ wback_inval_dcache_range((void *)_start, _size)
#else
Index: working-2.6/drivers/macintosh/smu.c
===================================================================
--- working-2.6.orig/drivers/macintosh/smu.c 2005-12-19 14:18:25.000000000 +1100
+++ working-2.6/drivers/macintosh/smu.c 2005-12-20 12:03:30.000000000 +1100
@@ -100,7 +100,6 @@ static DECLARE_MUTEX(smu_part_access);
static void smu_start_cmd(void)
{
- unsigned long faddr, fend;
struct smu_cmd *cmd;
if (list_empty(&smu->cmd_list))
@@ -125,9 +124,7 @@ static void smu_start_cmd(void)
memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
/* Flush command and data to RAM */
- faddr = (unsigned long)smu->cmd_buf;
- fend = faddr + smu->cmd_buf->length + 2;
- flush_inval_dcache_range(faddr, fend);
+ wback_inval_dcache_range(smu->cmd_buf, smu->cmd_buf->length + 2);
/* This isn't exactly a DMA mapping here, I suspect
* the SMU is actually communicating with us via i2c to the
@@ -166,7 +163,6 @@ static irqreturn_t smu_db_intr(int irq,
goto bail;
if (rc == 0) {
- unsigned long faddr;
int reply_len;
u8 ack;
@@ -175,8 +171,7 @@ static irqreturn_t smu_db_intr(int irq,
* flush the entire buffer for now as we haven't read the
* reply lenght (it's only 2 cache lines anyway)
*/
- faddr = (unsigned long)smu->cmd_buf;
- flush_inval_dcache_range(faddr, faddr + 256);
+ wback_inval_dcache_range(smu->cmd_buf, 256);
/* Now check ack */
ack = (~cmd->cmd) & 0xff;
Index: working-2.6/arch/ppc/kernel/ppc_ksyms.c
===================================================================
--- working-2.6.orig/arch/ppc/kernel/ppc_ksyms.c 2005-12-19 14:18:25.000000000 +1100
+++ working-2.6/arch/ppc/kernel/ppc_ksyms.c 2005-12-20 12:03:10.000000000 +1100
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(__flush_icache_range);
-EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(wback_inval_dcache_range);
EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(flush_tlb_kernel_range);
Index: working-2.6/include/asm-powerpc/asm-compat.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/asm-compat.h 2005-11-23 15:56:35.000000000 +1100
+++ working-2.6/include/asm-powerpc/asm-compat.h 2005-12-20 12:03:10.000000000 +1100
@@ -26,6 +26,7 @@
#define PPC_LLARX stringify_in_c(ldarx)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
+#define PPC_CLRRLI stringify_in_c(clrrdi)
#else /* 32-bit */
@@ -38,6 +39,7 @@
#define PPC_LLARX stringify_in_c(lwarx)
#define PPC_STLCX stringify_in_c(stwcx.)
#define PPC_CNTLZL stringify_in_c(cntlzw)
+#define PPC_CLRRLI stringify_in_c(clrrwi)
#endif
Index: working-2.6/arch/powerpc/sysdev/dart_iommu.c
===================================================================
--- working-2.6.orig/arch/powerpc/sysdev/dart_iommu.c 2005-12-19 14:18:25.000000000 +1100
+++ working-2.6/arch/powerpc/sysdev/dart_iommu.c 2005-12-20 12:03:30.000000000 +1100
@@ -187,8 +187,7 @@ static int dart_init(struct device_node
* from a previous mapping that existed before the kernel took
* over
*/
- flush_dcache_phys_range(dart_tablebase,
- dart_tablebase + dart_tablesize);
+ wback_dcache_phys_range(dart_tablebase, dart_tablesize);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
Index: working-2.6/arch/ppc/8xx_io/enet.c
===================================================================
--- working-2.6.orig/arch/ppc/8xx_io/enet.c 2005-10-25 11:59:53.000000000 +1000
+++ working-2.6/arch/ppc/8xx_io/enet.c 2005-12-20 12:03:30.000000000 +1100
@@ -239,8 +239,7 @@ scc_enet_start_xmit(struct sk_buff *skb,
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- flush_dcache_range((unsigned long)(skb->data),
- (unsigned long)(skb->data + skb->len));
+ wback_inval_dcache_range(skb->data, skb->len);
spin_lock_irq(&cep->lock);
Index: working-2.6/arch/ppc/8xx_io/fec.c
===================================================================
--- working-2.6.orig/arch/ppc/8xx_io/fec.c 2005-10-25 11:59:53.000000000 +1000
+++ working-2.6/arch/ppc/8xx_io/fec.c 2005-12-20 12:03:30.000000000 +1100
@@ -387,8 +387,7 @@ fec_enet_start_xmit(struct sk_buff *skb,
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- flush_dcache_range((unsigned long)skb->data,
- (unsigned long)skb->data + skb->len);
+ wback_inval_dcache_range(skb->data, skb->len);
/* disable interrupts while triggering transmit */
spin_lock_irq(&fep->lock);
Index: working-2.6/drivers/char/agp/uninorth-agp.c
===================================================================
--- working-2.6.orig/drivers/char/agp/uninorth-agp.c 2005-11-23 15:56:23.000000000 +1100
+++ working-2.6/drivers/char/agp/uninorth-agp.c 2005-12-20 12:03:30.000000000 +1100
@@ -157,13 +157,12 @@ static int uninorth_insert_memory(struct
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
agp_bridge->gatt_table[j] =
cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
- flush_dcache_range((unsigned long)__va(mem->memory[i]),
- (unsigned long)__va(mem->memory[i])+0x1000);
+ wback_dcache_range(__va(mem->memory[i]), 0x1000);
}
(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
mb();
- flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
- (unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);
+ wback_dcache_range(&agp_bridge->gatt_table[pg_start],
+ mem->page_count*sizeof(*(agp_bridge->gatt_table)));
uninorth_tlbflush(mem);
return 0;
@@ -195,11 +194,10 @@ static int u3_insert_memory(struct agp_m
for (i = 0; i < mem->page_count; i++) {
gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
- flush_dcache_range((unsigned long)__va(mem->memory[i]),
- (unsigned long)__va(mem->memory[i])+0x1000);
+ wback_dcache_range(__va(mem->memory[i]), 0x1000);
}
mb();
- flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
+ wback_dcache_range(gp, i*sizeof(*gp));
uninorth_tlbflush(mem);
return 0;
@@ -218,7 +216,7 @@ int u3_remove_memory(struct agp_memory *
for (i = 0; i < mem->page_count; ++i)
gp[i] = 0;
mb();
- flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
+ wback_dcache_range(gp, i*sizeof(*gp));
uninorth_tlbflush(mem);
return 0;
@@ -365,7 +363,7 @@ static int agp_uninorth_resume(struct pc
static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
{
char *table;
- char *table_end;
+ unsigned long table_size;
int size;
int page_order;
int num_entries;
@@ -400,9 +398,10 @@ static int uninorth_create_gatt_table(st
if (table == NULL)
return -ENOMEM;
- table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+ table_size = (PAGE_SIZE * (1 << page_order)) - 1;
- for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ for (page = virt_to_page(table);
+ page <= virt_to_page(table + table_size); page++)
SetPageReserved(page);
bridge->gatt_table_real = (u32 *) table;
@@ -412,7 +411,7 @@ static int uninorth_create_gatt_table(st
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;
- flush_dcache_range((unsigned long)table, (unsigned long)table_end);
+ wback_dcache_range(table, table_size);
return 0;
}
Index: working-2.6/drivers/net/fec.c
===================================================================
--- working-2.6.orig/drivers/net/fec.c 2005-11-23 15:56:25.000000000 +1100
+++ working-2.6/drivers/net/fec.c 2005-12-20 12:03:30.000000000 +1100
@@ -361,8 +361,7 @@ fec_enet_start_xmit(struct sk_buff *skb,
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- flush_dcache_range((unsigned long)skb->data,
- (unsigned long)skb->data + skb->len);
+ wback_inval_dcache_range(skb->data, skb->len);
spin_lock_irq(&fep->lock);
Index: working-2.6/arch/ppc/8xx_io/cs4218_tdm.c
===================================================================
--- working-2.6.orig/arch/ppc/8xx_io/cs4218_tdm.c 2005-11-23 15:56:22.000000000 +1100
+++ working-2.6/arch/ppc/8xx_io/cs4218_tdm.c 2005-12-20 12:03:30.000000000 +1100
@@ -1235,8 +1235,7 @@ static void CS_Play(void)
bdp = &tx_base[i];
bdp->cbd_datlen = count;
- flush_dcache_range((ulong)sound_buffers[i],
- (ulong)(sound_buffers[i] + count));
+ wback_inval_dcache_range(sound_buffers[i], count);
if (++i >= sq.max_count)
i = 0;
@@ -1334,9 +1333,8 @@ cs4218_tdm_rx_intr(void *devid)
/* Invalidate the data cache range for this buffer.
*/
- invalidate_dcache_range(
- (uint)(sound_read_buffers[read_sq.rear]),
- (uint)(sound_read_buffers[read_sq.rear] + read_sq.block_size));
+ invalidate_dcache_range(sound_read_buffers[read_sq.rear],
+ read_sq.block_size);
/* Make buffer available again and move on.
*/
Index: working-2.6/drivers/serial/mpsc.c
===================================================================
--- working-2.6.orig/drivers/serial/mpsc.c 2005-11-23 15:56:27.000000000 +1100
+++ working-2.6/drivers/serial/mpsc.c 2005-12-20 12:03:30.000000000 +1100
@@ -308,8 +308,7 @@ mpsc_sdma_start_tx(struct mpsc_port_info
dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)txre,
- (ulong)txre + MPSC_TXRE_SIZE);
+ invalidate_dcache_range(txre, MPSC_TXRE_SIZE);
#endif
if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
@@ -685,8 +684,8 @@ mpsc_init_rings(struct mpsc_port_info *p
DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- flush_dcache_range((ulong)pi->dma_region,
- (ulong)pi->dma_region + MPSC_DMA_ALLOC_SIZE);
+ wback_inval_dcache_range(pi->dma_region,
+ MPSC_DMA_ALLOC_SIZE);
#endif
return;
@@ -758,8 +757,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi,
dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)rxre,
- (ulong)rxre + MPSC_RXRE_SIZE);
+ invalidate_dcache_range(rxre, MPSC_RXRE_SIZE);
#endif
/*
@@ -782,8 +780,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi,
dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)bp,
- (ulong)bp + MPSC_RXBE_SIZE);
+ invalidate_dcache_range(bp, MPSC_RXBE_SIZE);
#endif
/*
@@ -851,8 +848,7 @@ next_frame:
dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- flush_dcache_range((ulong)rxre,
- (ulong)rxre + MPSC_RXRE_SIZE);
+ wback_inval_dcache_range(rxre, MPSC_RXRE_SIZE);
#endif
/* Advance to next descriptor */
@@ -862,8 +858,7 @@ next_frame:
dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)rxre,
- (ulong)rxre + MPSC_RXRE_SIZE);
+ invalidate_dcache_range(rxre, MPSC_RXRE_SIZE);
#endif
rc = 1;
@@ -896,8 +891,7 @@ mpsc_setup_tx_desc(struct mpsc_port_info
dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- flush_dcache_range((ulong)txre,
- (ulong)txre + MPSC_TXRE_SIZE);
+ wback_inval_dcache_range(txre, MPSC_TXRE_SIZE);
#endif
return;
@@ -945,8 +939,7 @@ mpsc_copy_tx_data(struct mpsc_port_info
dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- flush_dcache_range((ulong)bp,
- (ulong)bp + MPSC_TXBE_SIZE);
+ wback_inval_dcache_range(bp, MPSC_TXBE_SIZE);
#endif
mpsc_setup_tx_desc(pi, i, 1);
@@ -970,8 +963,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)txre,
- (ulong)txre + MPSC_TXRE_SIZE);
+ invalidate_dcache_range(txre, MPSC_TXRE_SIZE);
#endif
while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
@@ -989,8 +981,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- invalidate_dcache_range((ulong)txre,
- (ulong)txre + MPSC_TXRE_SIZE);
+ invalidate_dcache_range(txre, MPSC_TXRE_SIZE);
#endif
}
@@ -1405,8 +1396,7 @@ mpsc_console_write(struct console *co, c
dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
- flush_dcache_range((ulong)bp,
- (ulong)bp + MPSC_TXBE_SIZE);
+ wback_inval_dcache_range(bp, MPSC_TXBE_SIZE);
#endif
mpsc_setup_tx_desc(pi, i, 0);
pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson