[PATCH v5 23/23] powerpc/book3s64/pkeys: Optimize FTR_KUAP and FTR_KUEP disabled case

Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thu Aug 27 14:09:31 AEST 2020


If FTR_KUAP is disabled, the kernel will continue to run with the same
AMR value with which it was entered. Hence there is a high chance that
we can return to userspace without restoring the AMR value. This also
helps when applications are not using the pkey feature: in that case
different applications run with the same AMR value, so the restore can
be skipped there as well.

Also avoid the isync() if it is not really needed.

Do the same for IAMR.
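
The exit path thus becomes a compare-before-write on the SPR. A
simplified C sketch of kuap_restore_user_amr() as changed by this
patch (mfspr()/mtspr()/mmu_has_feature() are the usual kernel
helpers; this mirrors the hunk below rather than reproducing it
verbatim):

	bool restore_amr = true;

	/*
	 * Without FTR_KUAP the kernel never switched the AMR on entry,
	 * so it likely still holds the value saved in pt_regs; skip
	 * the isync/mtspr pair when it does.
	 */
	if (!mmu_has_feature(MMU_FTR_KUAP))
		restore_amr = (mfspr(SPRN_AMR) != regs->kuap);

	if (restore_amr) {
		isync();
		mtspr(SPRN_AMR, regs->kuap);
	}

IAMR is handled the same way, keyed off MMU_FTR_KUEP and regs->kuep.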

null-syscall benchmark results:

With smap/smep disabled:
Without patch:
	957.95 ns    2778.17 cycles
With patch:
	858.38 ns    2489.30 cycles

With smap/smep enabled:
Without patch:
	1017.26 ns    2950.36 cycles
With patch:
	1021.51 ns    2962.44 cycles

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/kup.h | 61 +++++++++++++++++++++---
 arch/powerpc/kernel/entry_64.S           |  2 +-
 arch/powerpc/kernel/syscall_64.c         | 12 +++--
 3 files changed, 65 insertions(+), 10 deletions(-)
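
Reviewer note: on the kernel entry side (syscall_64.c hunk below) the
AMR/IAMR are still unconditionally saved to pt_regs, but the blocked
values are written only when the corresponding feature is enabled, and
the single isync() is issued only if at least one SPR was actually
written. A simplified sketch of that logic:

	bool flush_needed = false;

	regs->kuap = mfspr(SPRN_AMR);	/* always snapshot entry values */
	regs->kuep = mfspr(SPRN_IAMR);

	if (mmu_has_feature(MMU_FTR_KUAP)) {
		mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
		flush_needed = true;
	}
	if (mmu_has_feature(MMU_FTR_KUEP)) {
		mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
		flush_needed = true;
	}
	if (flush_needed)
		isync();	/* synchronize only when an SPR changed */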

diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 34a412d2a65b..d71e9a958eb5 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -12,28 +12,54 @@
 
 #ifdef __ASSEMBLY__
 
-.macro kuap_restore_user_amr gpr1
+.macro kuap_restore_user_amr gpr1, gpr2
 #if defined(CONFIG_PPC_PKEY)
 	BEGIN_MMU_FTR_SECTION_NESTED(67)
+	b	100f  // skip_restore_amr
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
 	/*
 	 * AMR and IAMR are going to be different when
 	 * returning to userspace.
 	 */
 	ld	\gpr1, STACK_REGS_KUAP(r1)
+
+	/*
+	 * If kuap feature is not enabled, do the mtspr
+	 * only if AMR value is different.
+	 */
+	BEGIN_MMU_FTR_SECTION_NESTED(68)
+	mfspr	\gpr2, SPRN_AMR
+	cmpd	\gpr1, \gpr2
+	beq	99f
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68)
+
 	isync
 	mtspr	SPRN_AMR, \gpr1
+99:
 	/*
 	 * Restore IAMR only when returning to userspace
 	 */
 	ld	\gpr1, STACK_REGS_KUEP(r1)
+
+	/*
+	 * If kuep feature is not enabled, do the mtspr
+	 * only if IAMR value is different.
+	 */
+	BEGIN_MMU_FTR_SECTION_NESTED(69)
+	mfspr	\gpr2, SPRN_IAMR
+	cmpd	\gpr1, \gpr2
+	beq	100f
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUEP, 69)
+
+	isync
 	mtspr	SPRN_IAMR, \gpr1
 
+100: // skip_restore_amr
 	/* No isync required, see kuap_restore_user_amr() */
-	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
 #endif
 .endm
 
-.macro kuap_restore_kernel_amr	gpr1, gpr2
+.macro kuap_restore_kernel_amr gpr1, gpr2
 #if defined(CONFIG_PPC_PKEY)
 
 	BEGIN_MMU_FTR_SECTION_NESTED(67)
@@ -190,18 +216,41 @@ static inline u64 current_thread_iamr(void)
 
 static inline void kuap_restore_user_amr(struct pt_regs *regs)
 {
+	bool restore_amr = false, restore_iamr = false;
+	unsigned long amr, iamr;
+
 	if (!mmu_has_feature(MMU_FTR_PKEY))
 		return;
 
-	isync();
-	mtspr(SPRN_AMR, regs->kuap);
-	mtspr(SPRN_IAMR, regs->kuep);
+	if (!mmu_has_feature(MMU_FTR_KUAP)) {
+		amr = mfspr(SPRN_AMR);
+		if (amr != regs->kuap)
+			restore_amr = true;
+	} else
+		restore_amr = true;
+
+	if (!mmu_has_feature(MMU_FTR_KUEP)) {
+		iamr = mfspr(SPRN_IAMR);
+		if (iamr != regs->kuep)
+			restore_iamr = true;
+	} else
+		restore_iamr = true;
+
+
+	if (restore_amr || restore_iamr) {
+		isync();
+		if (restore_amr)
+			mtspr(SPRN_AMR, regs->kuap);
+		if (restore_iamr)
+			mtspr(SPRN_IAMR, regs->kuep);
+	}
 	/*
 	 * No isync required here because we are about to rfi
 	 * back to previous context before any user accesses
 	 * would be made, which is a CSI.
 	 */
 }
+
 static inline void kuap_restore_kernel_amr(struct pt_regs *regs,
 					   unsigned long amr)
 {
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 68171689db5d..ac6c84a53ff8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -667,7 +667,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
 	bne-	.Lrestore_nvgprs
 
 .Lfast_user_interrupt_return_amr:
-	kuap_restore_user_amr r3
+	kuap_restore_user_amr r3, r4
 .Lfast_user_interrupt_return:
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index e49d604b811b..945a14e41898 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -38,6 +38,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 #ifdef CONFIG_PPC_PKEY
 	if (mmu_has_feature(MMU_FTR_PKEY)) {
 		unsigned long amr, iamr;
+		bool flush_needed = false;
 		/*
 		 * When entering from userspace we mostly have the AMR/IAMR
 		 * different from kernel default values. Hence don't compare.
@@ -46,11 +47,16 @@ notrace long system_call_exception(long r3, long r4, long r5,
 		iamr = mfspr(SPRN_IAMR);
 		regs->kuap = amr;
 		regs->kuep = iamr;
-		if (mmu_has_feature(MMU_FTR_KUAP))
+		if (mmu_has_feature(MMU_FTR_KUAP)) {
 			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
-		if (mmu_has_feature(MMU_FTR_KUEP))
+			flush_needed = true;
+		}
+		if (mmu_has_feature(MMU_FTR_KUEP)) {
 			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
-		isync();
+			flush_needed = true;
+		}
+		if (flush_needed)
+			isync();
 	} else
 #endif
 		kuap_check_amr();
-- 
2.26.2


