[PATCH -V1 7/9] arch/powerpc: Use 50 bits of VSID in slbmte
From: "Aneesh Kumar K.V" <aneesh.kumar at linux.vnet.ibm.com>
Date: Sat Jun 30 00:17:35 EST 2012
Increase the number of VSID bits inserted into the VSID-and-flags word
passed to the slbmte instruction. Narrowing the rldimi mask-begin operand
from 16 to 2 makes room for 50 VSID bits instead of 36 in the 256M case;
the extra bits will be used when the number of valid VSID bits is
increased.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar at linux.vnet.ibm.com>
---
arch/powerpc/mm/slb_low.S | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
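As a sanity check on the arithmetic (illustration only, not part of the
patch): rldimi rA,rS,SH,MB rotates rS left by SH and inserts the result
into rA under a mask covering IBM bits MB through 63-SH, so the mask-begin
operand bounds how many VSID bits survive the insert. A minimal user-space
sketch, assuming only the rldimi mask semantics from the ISA and the
SLB_VSID_SHIFT values from mmu-hash64.h:

#include <stdio.h>

#define SLB_VSID_SHIFT     12  /* 256M segments */
#define SLB_VSID_SHIFT_1T  24  /* 1T segments */

/*
 * Number of VSID bits kept by rldimi with shift `sh` and
 * mask-begin `mb`: the insert mask covers IBM bits mb..(63-sh).
 * (Illustrative helper, not kernel code.)
 */
static unsigned int vsid_bits(unsigned int sh, unsigned int mb)
{
	return (63 - sh) - mb + 1;
}

int main(void)
{
	printf("256M, MB=16: %u bits\n", vsid_bits(SLB_VSID_SHIFT, 16));    /* 36 */
	printf("256M, MB=2:  %u bits\n", vsid_bits(SLB_VSID_SHIFT, 2));     /* 50 */
	printf("1T,   MB=16: %u bits\n", vsid_bits(SLB_VSID_SHIFT_1T, 16)); /* 24 */
	printf("1T,   MB=2:  %u bits\n", vsid_bits(SLB_VSID_SHIFT_1T, 2));  /* 38 */
	return 0;
}

Moving the mask-begin from 16 to 2 is what takes the 256M case from 36 to
50 VSID bits, matching the patch title; the 1T site changes in lockstep,
going from 24 to 38 bits.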
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index c355af6..c1fc81c 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -226,7 +226,7 @@ _GLOBAL(slb_allocate_user)
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
-	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+	rldimi	r11,r10,SLB_VSID_SHIFT,2	/* combine VSID and flags */
 
 	/* r3 = EA, r11 = VSID data */
 	/*
@@ -290,7 +290,7 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
 	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
-	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,2	/* combine VSID and flags */
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
 
--
1.7.10