[POWERPC] iSeries: fix slb.c for combined build
Stephen Rothwell
sfr at canb.auug.org.au
Mon Nov 13 14:49:18 EST 2006
Signed-off-by: Stephen Rothwell <sfr at canb.auug.org.au>
---
 arch/powerpc/mm/slb.c | 39 +++++++++++++++++++--------------------
 1 files changed, 19 insertions(+), 20 deletions(-)
--
Cheers,
Stephen Rothwell sfr at canb.auug.org.au
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index d373391..06c0055 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
 #include <linux/compiler.h>
 
 #ifdef DEBUG
@@ -227,28 +228,26 @@ void slb_initialize(void)
 
 	/* On iSeries the bolted entries have already been set up by
 	 * the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
-	unsigned long lflags, vflags;
+	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+		unsigned long lflags, vflags;
 
-	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | vmalloc_llp;
+		lflags = SLB_VSID_KERNEL | linear_llp;
+		vflags = SLB_VSID_KERNEL | vmalloc_llp;
+
+		/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+		asm volatile("isync":::"memory");
+		asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+		asm volatile("isync; slbia; isync":::"memory");
+		create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
-
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
-
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment. By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
-	asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
+		create_shadowed_slbe(VMALLOC_START, vflags, 1);
+
+		/* We don't bolt the stack for the time being - we're in boot,
+		 * so the stack is in the bolted segment. By the time it goes
+		 * elsewhere, we'll call _switch() which will bolt in the new
+		 * one. */
+		asm volatile("isync":::"memory");
+	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 }
--
1.4.3.3
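
A note on the mechanism, for readers coming to this cold: in a combined build,
iSeries and non-iSeries support are compiled into a single kernel image, so the
old #ifndef CONFIG_PPC_ISERIES can no longer pick the right path at compile
time; the decision has to be made at boot, once the firmware has been
identified. Below is a minimal stand-alone sketch of that runtime feature-flag
pattern. The names and the bit value here are hypothetical stand-ins (the real
flags live behind asm/firmware.h, which the patch adds to the includes), so
treat it as an illustration rather than the kernel's actual implementation.

#include <stdio.h>

#define FW_FEATURE_ISERIES	(1UL << 0)	/* hypothetical bit value */

/* Set once during early boot, after the platform has been identified. */
static unsigned long firmware_features;

static int firmware_has_feature(unsigned long feature)
{
	return (firmware_features & feature) != 0;
}

static void slb_initialize_demo(void)
{
	/* Mirrors the patched logic: on iSeries the hypervisor has already
	 * installed the bolted SLB entries, so we skip the setup there. */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		printf("setting up bolted SLB entries\n");
	else
		printf("iSeries: bolted entries already set up by hypervisor\n");
}

int main(void)
{
	firmware_features = FW_FEATURE_ISERIES;	/* pretend we booted on iSeries */
	slb_initialize_demo();
	return 0;
}

The runtime test is a single mask-and-branch, so its cost over the old
compile-time #ifdef is negligible, and one kernel image can now boot on either
platform.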