[RFC 3/8] powerpc/slb: Define macros for the bolted slots
Anshuman Khandual
khandual at linux.vnet.ibm.com
Tue Jul 21 16:58:41 AEST 2015
From: "khandual at linux.vnet.ibm.com" <khandual at linux.vnet.ibm.com>
This patch defines macros (SLOT_KLINR, SLOT_KVIRT, SLOT_KSTACK) for the three
bolted SLB slots, replacing the hard-coded slot numbers 0, 1 and 2. It also
renames the 'create_shadowed_slbe' function to 'new_shadowed_slbe'.
Signed-off-by: Anshuman Khandual <khandual at linux.vnet.ibm.com>
---
arch/powerpc/mm/slb.c | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
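
For context, the three bolted slots are the SLB entries that must survive
every SLB flush: the kernel linear map, the kernel virtual (vmalloc) map and
the kernel stack. The standalone sketch below is not kernel code; the segment
mask and valid-bit values are assumptions that mirror a 256MB segment layout,
and mk_esid_data_sketch() is a hypothetical stand-in for mk_esid_data(). It
only illustrates how an ESID word packs the segment bits, the valid bit and
the bolted slot index, which is why a named macro reads better than a bare
0, 1 or 2:

/*
 * Standalone sketch, not kernel code: how a bolted SLB entry's ESID
 * word is assembled from an effective address, the valid bit and a
 * slot index. Constant values are assumptions for a 256MB segment.
 */
#include <stdio.h>

#define ESID_MASK_256M	0xfffffffff0000000UL	/* drop the offset within a 256MB segment */
#define SLB_ESID_V_BIT	0x0000000008000000UL	/* "entry is valid" bit in the ESID word */

#define SLOT_KLINR	0	/* kernel linear map  */
#define SLOT_KVIRT	1	/* kernel virtual map */
#define SLOT_KSTACK	2	/* kernel stack map   */

/* Hypothetical stand-in for mk_esid_data(): segment bits | valid | slot. */
static unsigned long mk_esid_data_sketch(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK_256M) | SLB_ESID_V_BIT | slot;
}

int main(void)
{
	unsigned long page_offset   = 0xc000000000000000UL;	/* linear map base */
	unsigned long vmalloc_start = 0xd000000000000000UL;	/* vmalloc map base */

	printf("SLOT_KLINR esid word: %016lx\n",
	       mk_esid_data_sketch(page_offset, SLOT_KLINR));
	printf("SLOT_KVIRT esid word: %016lx\n",
	       mk_esid_data_sketch(vmalloc_start, SLOT_KVIRT));
	return 0;
}

Whether those exact mask values match the kernel headers is beside the point;
the sketch only shows that the slot number is baked into the entry itself, so
a stray literal at one call site can silently clash with another bolted entry.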
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3842a54..cbeaaa2 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -25,6 +25,9 @@
#include <asm/udbg.h>
#include <asm/code-patching.h>
+#define SLOT_KLINR 0 /* kernel linear map (0xc000000000000000) */
+#define SLOT_KVIRT 1 /* kernel virtual map (0xd000000000000000) */
+#define SLOT_KSTACK 2 /* kernel stack map */
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
@@ -74,7 +77,7 @@ static inline void slb_shadow_clear(unsigned long slot)
get_slb_shadow()->save_area[slot].esid = 0;
}
-static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+static inline void new_shadowed_slbe(unsigned long ea, int ssize,
unsigned long flags,
unsigned long slot)
{
@@ -103,16 +106,16 @@ static void __slb_flush_and_rebolt(void)
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
- ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+ ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, SLOT_KSTACK);
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
ksp_esid_data &= ~SLB_ESID_V;
ksp_vsid_data = 0;
- slb_shadow_clear(2);
+ slb_shadow_clear(SLOT_KSTACK);
} else {
/* Update stack slot; others don't change */
- slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+ slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, SLOT_KSTACK);
ksp_vsid_data =
- be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
+ be64_to_cpu(get_slb_shadow()->save_area[SLOT_KSTACK].vsid);
}
/* We need to do this all in asm, so we're sure we don't touch
@@ -125,7 +128,7 @@ static void __slb_flush_and_rebolt(void)
"slbmte %2,%3\n"
"isync"
:: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
- "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+ "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, SLOT_KVIRT)),
"r"(ksp_vsid_data),
"r"(ksp_esid_data)
: "memory");
@@ -151,7 +154,7 @@ void slb_vmalloc_update(void)
unsigned long vflags;
vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+ slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
slb_flush_and_rebolt();
}
@@ -312,19 +315,19 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
- create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+ new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, SLOT_KLINR);
+ new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
/* For the boot cpu, we're running on the stack in init_thread_union,
* which is in the first segment of the linear mapping, and also
* get_paca()->kstack hasn't been initialized yet.
* For secondary cpus, we need to bolt the kernel stack slot now.
*/
- slb_shadow_clear(2);
+ slb_shadow_clear(SLOT_KSTACK);
if (raw_smp_processor_id() != boot_cpuid &&
(get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
- create_shadowed_slbe(get_paca()->kstack,
- mmu_kernel_ssize, lflags, 2);
+ new_shadowed_slbe(get_paca()->kstack,
+ mmu_kernel_ssize, lflags, SLOT_KSTACK);
asm volatile("isync":::"memory");
}
--
2.1.0
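
A note on the ksp_esid_data branches in the __slb_flush_and_rebolt() and
slb_initialize() hunks above: SLOT_KSTACK is only bolted when the kernel stack
falls outside the first linear-map segment, because that segment is already
covered by SLOT_KLINR and bolting it twice would duplicate an ESID. The
standalone sketch below reproduces that test with assumed addresses and a
256MB segment mask; kstack_needs_own_bolted_slot() is a hypothetical helper,
not a kernel function:

/*
 * Standalone sketch of the decision above: a stack in the very first
 * linear-map segment reuses the SLOT_KLINR entry, otherwise it gets
 * its own bolted SLOT_KSTACK entry. Values are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEGMENT_MASK_256M  0xfffffffff0000000UL
#define PAGE_OFFSET_SKETCH 0xc000000000000000UL

/* Mirrors "(get_paca()->kstack & slb_esid_mask(...)) > PAGE_OFFSET". */
static bool kstack_needs_own_bolted_slot(unsigned long kstack)
{
	return (kstack & SEGMENT_MASK_256M) > PAGE_OFFSET_SKETCH;
}

int main(void)
{
	/* Stack in the first linear segment: already covered, prints 0. */
	printf("%d\n", kstack_needs_own_bolted_slot(0xc000000001234000UL));
	/* Stack in a later segment: bolt it into SLOT_KSTACK, prints 1. */
	printf("%d\n", kstack_needs_own_bolted_slot(0xc000000070000000UL));
	return 0;
}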