2.6 POWER3/POWER4 compilation fix
Anton Blanchard
anton at samba.org
Sat Nov 8 11:23:30 EST 2003
Hi,
Here's a quick fix I threw together from patches from Hollis and Segher.
I think this is worthy of sending to Linus now, considering all the
build problems we've been having lately.
Thoughts?
Anton
===== arch/ppc64/Kconfig 1.31 vs edited =====
--- 1.31/arch/ppc64/Kconfig Fri Sep 26 14:04:08 2003
+++ edited/arch/ppc64/Kconfig Fri Nov 7 05:29:29 2003
@@ -72,6 +72,14 @@
bool
default y
+config POWER4_ONLY
+ bool "Optimize for POWER4"
+ default n
+ ---help---
+ Cause the compiler to optimize for POWER4 processors. The resulting
+ binary will not work on POWER3 or RS64 processors when compiled with
+ binutils 2.15 or later.
+
config SMP
bool "Symmetric multi-processing support"
---help---
===== arch/ppc64/Makefile 1.34 vs edited =====
--- 1.34/arch/ppc64/Makefile Mon Oct 20 14:32:12 2003
+++ edited/arch/ppc64/Makefile Fri Nov 7 05:29:29 2003
@@ -17,8 +17,13 @@
LDFLAGS := -m elf64ppc
LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
-CFLAGS += -msoft-float -pipe -Wno-uninitialized -mminimal-toc \
- -mcpu=power4
+CFLAGS += -msoft-float -pipe -Wno-uninitialized -mminimal-toc
+
+ifeq ($(CONFIG_POWER4_ONLY),y)
+CFLAGS += -mcpu=power4
+else
+CFLAGS += -mtune=power4
+endif
have_zero_bss := $(shell if $(CC) -fno-zero-initialized-in-bss -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo y; else echo n; fi)
===== arch/ppc64/kernel/pSeries_htab.c 1.9 vs edited =====
--- 1.9/arch/ppc64/kernel/pSeries_htab.c Sat Jun 7 11:19:27 2003
+++ edited/arch/ppc64/kernel/pSeries_htab.c Fri Nov 7 06:14:29 2003
@@ -350,12 +350,8 @@
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
asm volatile("ptesync":::"memory");
- for (i = 0; i < j; i++) {
- asm volatile("\n\
- clrldi %0,%0,16\n\
- tlbiel %0"
- : : "r" (batch->vaddr[i]) : "memory" );
- }
+ for (i = 0; i < j; i++)
+ __tlbiel(batch->vaddr[i]);
asm volatile("ptesync":::"memory");
} else {
@@ -364,12 +360,8 @@
asm volatile("ptesync":::"memory");
- for (i = 0; i < j; i++) {
- asm volatile("\n\
- clrldi %0,%0,16\n\
- tlbie %0"
- : : "r" (batch->vaddr[i]) : "memory" );
- }
+ for (i = 0; i < j; i++)
+ __tlbie(batch->vaddr[i]);
asm volatile("eieio; tlbsync; ptesync":::"memory");
===== include/asm-ppc64/mmu.h 1.8 vs edited =====
--- 1.8/include/asm-ppc64/mmu.h Sun Sep 7 11:24:09 2003
+++ edited/include/asm-ppc64/mmu.h Fri Nov 7 06:24:26 2003
@@ -202,26 +202,41 @@
return (vsid & 0x7fffffffff) ^ page;
}
-static inline void _tlbie(unsigned long va, int large)
+static inline void __tlbie(unsigned long va, int large)
{
- asm volatile("ptesync": : :"memory");
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
- if (large) {
- asm volatile("clrldi %0,%0,16\n\
- tlbie %0,1" : : "r"(va) : "memory");
- } else {
- asm volatile("clrldi %0,%0,16\n\
- tlbie %0,0" : : "r"(va) : "memory");
- }
+ if (large)
+ asm volatile("tlbie %0,1" : : "r"(va) : "memory");
+ else
+ asm volatile("tlbie %0,0" : : "r"(va) : "memory");
+}
+static inline void tlbie(unsigned long va, int large)
+{
+ asm volatile("ptesync": : :"memory");
+ __tlbie(va, large);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbiel(unsigned long va)
+static inline void __tlbiel(unsigned long va)
+{
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
+
+ /* one day Alan Modra will give us a way to do this cleanly :) */
+#ifdef WAITING_FOR_ALANM
+ asm volatile("tlbiel %0" : : "r"(va) : "memory");
+#else
+ asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
+#endif
+}
+
+static inline void tlbiel(unsigned long va)
{
asm volatile("ptesync": : :"memory");
- asm volatile("clrldi %0,%0,16\n\
- tlbiel %0" : : "r"(va) : "memory");
+ __tlbiel(va);
asm volatile("ptesync": : :"memory");
}
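For reference, since the .long in __tlbiel() looks like magic: it hand-assembles the tlbiel instruction for toolchains whose assembler doesn't know the mnemonic yet, and the va &= ~(0xffffULL << 48) above is just the old clrldi %0,%0,16 moved into C (both clear the top 16 bits of the effective address). Below is a quick userspace sketch, not part of the patch, showing how the constant decomposes, assuming the POWER4 X-form encoding (primary opcode 31, extended opcode 274, RB field shifted left by 11); %0 in the inline asm expands to the GPR number, which the assembler then ORs into the RB field:

/* Sanity check for the hand-assembled tlbiel word (not kernel code). */
#include <assert.h>

static unsigned int tlbiel_word(unsigned int rb)
{
	unsigned int insn = 0;

	insn |= 31u << 26;	/* primary opcode 31 */
	insn |= 274u << 1;	/* extended opcode 274 = tlbiel */
	insn |= rb << 11;	/* RB: GPR holding the effective address */
	return insn;
}

int main(void)
{
	/* RB = 0 gives the base word used in the patch; the inline asm
	 * ORs in the real register number via "| (%0 << 11)". */
	assert(tlbiel_word(0) == 0x7c000224);
	return 0;
}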