[PATCH V2 26/31] powerpc/mm: Remove the dependency on pte bit position in asm code
Aneesh Kumar K.V
aneesh.kumar@linux.vnet.ibm.com
Wed Sep 30 12:27:19 AEST 2015
We should not depend on pte bit positions in asm code. Simplify
this by moving that part of the code to C.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/kernel/exceptions-64s.S | 16 +++-------------
arch/powerpc/mm/hash_utils_64.c | 29 +++++++++++++++++++++++++++++
2 files changed, 32 insertions(+), 13 deletions(-)
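A note on what the removed asm was actually encoding: the rlwinm/rlwimi
sequence below only works because it bakes in the current hash64 pte
bit values (_PAGE_PRESENT 0x001, _PAGE_USER 0x002, _PAGE_EXEC 0x004,
_PAGE_RW 0x200). For reference, a rough C sketch of the same
computation (illustration only, not part of the patch):

	/*
	 * What the removed asm computes, assuming the current hash64
	 * pte values. The new __hash_page() below uses the symbolic
	 * names instead, so the pte layout is free to change.
	 */
	unsigned long access = 0;

	if (dsisr & DSISR_ISSTORE)		/* DSISR store bit -> 0x200 */
		access |= _PAGE_RW;
	if ((msr & MSR_PR) || !(ea >> 63))	/* user mode or user address */
		access |= _PAGE_USER;		/* 0x002 */
	access |= _PAGE_PRESENT;		/* 0x001 */
	if (trap == 0x400)			/* instruction storage interrupt */
		access |= _PAGE_EXEC;		/* 0x004 */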
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0a0399c2af11..34920f11dbdd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1556,28 +1556,18 @@ do_hash_page:
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
- /*
- * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
- * accessing a userspace segment (even from the kernel). We assume
- * kernel addresses always have the high bit set.
- */
- rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
- rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
- orc r0,r12,r0 /* MSR_PR | ~high_bit */
- rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
- ori r4,r4,1 /* add _PAGE_PRESENT */
- rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
/*
* r3 contains the faulting address
- * r4 contains the required access permissions
+ * r4 contains the MSR
* r5 contains the trap number
* r6 contains dsisr
*
* at return r3 = 0 for success, 1 for page fault, negative for error
*/
+ mr r4,r12
ld r6,_DSISR(r1)
- bl hash_page /* build HPTE if possible */
+ bl __hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */
/* Success */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b90850bdaf8..6cd9e40aae01 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1161,6 +1161,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+ unsigned long dsisr)
+{
+ unsigned long access = _PAGE_PRESENT;
+ unsigned long flags = 0;
+ struct mm_struct *mm = current->mm;
+
+ if (REGION_ID(ea) == VMALLOC_REGION_ID)
+ mm = &init_mm;
+
+ if (dsisr & DSISR_NOHPTE)
+ flags |= HPTE_NOHPTE_UPDATE;
+
+ if (dsisr & DSISR_ISSTORE)
+ access |= _PAGE_RW;
+ /*
+ * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+ * accessing a userspace segment (even from the kernel). We assume
+ * kernel addresses always have the high bit set.
+ */
+ if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+ access |= _PAGE_USER;
+
+ if (trap == 0x400)
+ access |= _PAGE_EXEC;
+
+ return hash_page_mm(mm, ea, access, trap, flags);
+}
+
void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)
{
--
2.5.0
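One loose end worth noting: since __hash_page() is now called from asm,
it also wants a prototype visible to C to keep sparse and
-Wmissing-prototypes quiet. A sketch of the declaration, assuming it
goes in mmu-hash64.h (the header choice is my assumption; this hunk is
not part of the diff above):

	/* assumed location: arch/powerpc/include/asm/mmu-hash64.h */
	extern int __hash_page(unsigned long ea, unsigned long msr,
			       unsigned long trap, unsigned long dsisr);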