[PATCH V4 09/14] powerpc/mm/hash: VSID 0 is no longer an invalid VSID

Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Thu Mar 16 21:37:08 AEDT 2017


VSID 0 is now used by the linearly mapped region of the kernel. User space
should still never see VSID 0, but keeping that VSID check around confuses
the reader. Remove it and base the error checking on the address value
instead.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |  6 ------
 arch/powerpc/mm/hash_utils_64.c               | 19 +++++++------------
 arch/powerpc/mm/pgtable-hash64.c              |  1 -
 arch/powerpc/mm/tlb_hash64.c                  |  1 -
 4 files changed, 7 insertions(+), 20 deletions(-)
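
For readers following the conversion, below is a minimal, self-contained
sketch of the idea: callers now reject a bad effective address by range
instead of relying on get_vsid() returning the reserved value 0. This is
illustration-only user-space C, not kernel code; the REGION_MASK and
H_PGTABLE_RANGE values are assumptions standing in for the kernel's
definitions, whose real values depend on the configured page-table
geometry.

/*
 * Illustration only -- not kernel code.  The constants below are
 * stand-ins for the kernel's REGION_MASK and H_PGTABLE_RANGE.
 */
#include <stdbool.h>
#include <stdio.h>

#define REGION_SHIFT	60
#define REGION_MASK	(0xfUL << REGION_SHIFT)	/* top nibble = region id */
#define H_PGTABLE_RANGE	(1UL << 46)		/* assumed per-region range */

/*
 * Old scheme: get_vsid() returned the reserved VSID 0 for an
 * out-of-range ea, and every caller tested "if (!vsid)".
 * New scheme: the address itself is validated up front, which
 * frees VSID 0 for use by the kernel's linear mapping.
 */
static bool bad_address(unsigned long ea)
{
	return (ea & ~REGION_MASK) >= H_PGTABLE_RANGE;
}

int main(void)
{
	unsigned long ok  = 0xc000000000001000UL; /* small offset: valid  */
	unsigned long bad = 0xc400000000000000UL; /* offset >= range: bad */

	printf("%#lx -> %s\n", ok,  bad_address(ok)  ? "bad" : "valid");
	printf("%#lx -> %s\n", bad, bad_address(bad) ? "bad" : "valid");
	return 0;
}

The design point is that validity becomes a property of the address
alone, so no VSID value has to be sacrificed as an error marker.
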

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 3897d30820b0..078d7bf93a69 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -673,12 +673,6 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 	unsigned long vsid_bits;
 	unsigned long protovsid;
 
-	/*
-	 * Bad address. We return VSID 0 for that
-	 */
-	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
-		return 0;
-
 	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
 		va_bits = 65;
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 0e84200a88f2..d96ba04d8844 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1223,6 +1223,13 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		ea, access, trap);
 	trace_hash_fault(ea, access, trap);
 
+	/* Bad address. */
+	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE) {
+		DBG_LOW("Bad address!\n");
+		rc = 1;
+		goto bail;
+	}
+
 	/* Get region & vsid */
  	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -1253,12 +1260,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
-	/* Bad address. */
-	if (!vsid) {
-		DBG_LOW("Bad address!\n");
-		rc = 1;
-		goto bail;
-	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL) {
@@ -1501,8 +1502,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
-	if (!vsid)
-		return;
 	/*
 	 * Hash doesn't like irqs. Walking linux page table with irq disabled
 	 * saves us from holding multiple locks.
@@ -1747,10 +1746,6 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 
-	/* Don't create HPTE entries for bad address */
-	if (!vsid)
-		return;
-
 	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
 				    HPTE_V_BOLTED,
 				    mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 8b85a14b08ea..ddfeb141af29 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -263,7 +263,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
-		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 4517aa43a8b1..d8fa336bf05d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -87,7 +87,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
-	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 
-- 
2.7.4


