[PATCH v3 8/8] powerpc/mm/hash: Rename KERNEL_REGION_ID to LINEAR_MAP_REGION_ID
Aneesh Kumar K.V
aneesh.kumar at linux.ibm.com
Tue Apr 16 20:07:22 AEST 2019
This region actually points to the kernel linear map. Rename the #define to
clarify that.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar at linux.ibm.com>
---
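Note for reviewers: a minimal standalone sketch of the region classification
this rename touches. The boundary constants below are illustrative stand-ins
chosen for the example, not the kernel's actual KERN_VIRT_START/VMALLOC_START
layout values, and the range checks are a simplification of get_region_id():

	#include <stdio.h>

	/* Region IDs, mirroring the kernel's naming after this patch. */
	#define USER_REGION_ID		0
	#define LINEAR_MAP_REGION_ID	1	/* was KERNEL_REGION_ID */
	#define VMALLOC_REGION_ID	2
	#define IO_REGION_ID		3
	#define VMEMMAP_REGION_ID	4

	/* Illustrative boundaries, assumed for this sketch only. */
	#define LINEAR_MAP_START	0xc000000000000000UL
	#define KERN_VIRT_START		0xc008000000000000UL
	#define KERN_IO_START		0xc00a000000000000UL
	#define VMEMMAP_BASE		0xc00c000000000000UL

	static int get_region_id(unsigned long ea)
	{
		if (ea < LINEAR_MAP_START)
			return USER_REGION_ID;
		if (ea < KERN_VIRT_START)
			return LINEAR_MAP_REGION_ID;	/* kernel linear map */
		if (ea < KERN_IO_START)
			return VMALLOC_REGION_ID;
		if (ea < VMEMMAP_BASE)
			return IO_REGION_ID;
		return VMEMMAP_REGION_ID;
	}

	int main(void)
	{
		unsigned long samples[] = {
			0x0000000010000000UL,	/* user address */
			0xc000000001000000UL,	/* linear map */
			0xc008000000100000UL,	/* vmalloc */
		};
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("0x%016lx -> region %d\n",
			       samples[i], get_region_id(samples[i]));
		return 0;
	}

The real kernel derives the non-linear region IDs from the top address bits
via NON_LINEAR_REGION_ID() (see the hash.h hunk below); this sketch uses
plain range checks only to show what the new name covers.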
arch/powerpc/include/asm/book3s/64/hash.h | 4 ++--
arch/powerpc/include/asm/book3s/64/mmu-hash.h | 2 +-
arch/powerpc/mm/copro_fault.c | 4 ++--
arch/powerpc/mm/slb.c | 4 ++--
arch/powerpc/platforms/cell/spu_base.c | 2 +-
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index c6850a5a931d..e86c338f3ad7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -77,7 +77,7 @@
* Region IDs
*/
#define USER_REGION_ID 0
-#define KERNEL_REGION_ID 1
+#define LINEAR_MAP_REGION_ID 1
#define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(VMALLOC_START)
#define IO_REGION_ID NON_LINEAR_REGION_ID(KERN_IO_START)
#define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(VMEMMAP_BASE)
@@ -108,7 +108,7 @@ static inline int get_region_id(unsigned long ea)
return USER_REGION_ID;
if (ea < KERN_VIRT_START)
- return KERNEL_REGION_ID;
+ return LINEAR_MAP_REGION_ID;
VM_BUG_ON(id != 0xc);
BUILD_BUG_ON(NON_LINEAR_REGION_ID(VMALLOC_START) != 2);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index b146448109fd..5d2adf3c1325 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -788,7 +788,7 @@ static inline unsigned long get_kernel_context(unsigned long ea)
* Depending on Kernel config, kernel region can have one context
* or more.
*/
- if (region_id == KERNEL_REGION_ID) {
+ if (region_id == LINEAR_MAP_REGION_ID) {
/*
* We already verified ea to be not beyond the addr limit.
*/
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 9b0321061bc8..f137286740cb 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -129,8 +129,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsidkey = SLB_VSID_KERNEL;
break;
- case KERNEL_REGION_ID:
- pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+ case LINEAR_MAP_REGION_ID:
+ pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
psize = mmu_linear_psize;
ssize = mmu_kernel_ssize;
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 508573c56411..756cf087590b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -691,7 +691,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
unsigned long flags;
int ssize;
- if (id == KERNEL_REGION_ID) {
+ if (id == LINEAR_MAP_REGION_ID) {
/* We only support upto MAX_PHYSMEM_BITS */
if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
@@ -790,7 +790,7 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
* first class kernel code. But for performance it's probably nicer
* if they go via fast_exception_return too.
*/
- if (id >= KERNEL_REGION_ID) {
+ if (id >= LINEAR_MAP_REGION_ID) {
long err;
#ifdef CONFIG_DEBUG_VM
/* Catch recursive kernel SLB faults. */
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 4770cce1bfe2..6646f152d57b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -224,7 +224,7 @@ static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
unsigned long ea = (unsigned long)addr;
u64 llp;
- if (get_region_id(ea) == KERNEL_REGION_ID)
+ if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
llp = mmu_psize_defs[mmu_linear_psize].sllp;
else
llp = mmu_psize_defs[mmu_virtual_psize].sllp;
--
2.20.1