[PATCH v2 13/28] powerpc/book3s64/kuep: Store/restore userspace IAMR correctly on entry and exit from kernel

Aneesh Kumar K.V <aneesh.kumar at linux.ibm.com>
Sat May 2 21:22:14 AEST 2020


This prepares the kernel to operate with an IAMR value different from the
userspace one. For this, the IAMR needs to be saved on entry to the kernel
and restored on return from it.

If MMU_FTR_PKEY is enabled, we always use the pkey mechanism to implement
the KUEP feature. If MMU_FTR_PKEY is not supported but MMU_FTR_KUEP is
(radix translation on POWER9), we can skip restoring the IAMR on return
to userspace, because userspace won't be using the IAMR in that specific
config.
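
A rough C rendering of that restore-side decision (a sketch only; the
actual implementation is the assembly in kup.h and
kuap_restore_user_amr() below):

	if (mmu_has_feature(MMU_FTR_PKEY)) {
		/* pkeys in use: restore the saved userspace IAMR */
		mtspr(SPRN_IAMR, regs->kuep);
	}
	/* else: userspace never uses the IAMR, nothing to restore */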

We don't need to save/restore the IAMR on reentry into the kernel due to an
interrupt, because the kernel doesn't modify the IAMR internally.
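
On the entry side the flow is (again a sketch, condensed from the
system_call_exception() hunk below):

	if (mmu_has_feature(MMU_FTR_PKEY)) {
		regs->kuap = mfspr(SPRN_AMR);	/* save userspace AMR */
		regs->kuep = mfspr(SPRN_IAMR);	/* save userspace IAMR */
		mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
		mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
		isync();
	}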

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar at linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/kup.h | 46 ++++++++++++++++++++++--
 arch/powerpc/include/asm/ptrace.h        |  6 +++-
 arch/powerpc/kernel/asm-offsets.c        |  4 +++
 arch/powerpc/kernel/syscall_64.c         |  7 ++--
 4 files changed, 58 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 82bef3901672..67320a990f3f 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -17,15 +17,26 @@
 #if defined(CONFIG_PPC_MEM_KEYS)
 	BEGIN_MMU_FTR_SECTION_NESTED(67)
 	/*
-	 * AMR is going to be different when
+	 * AMR and IAMR are going to be different when
 	 * returning to userspace.
 	 */
 	ld	\gpr1, STACK_REGS_KUAP(r1)
 	isync
 	mtspr	SPRN_AMR, \gpr1
+	/*
+	 * Restore IAMR only when returning to userspace
+	 */
+	ld	\gpr1, STACK_REGS_KUEP(r1)
+	mtspr	SPRN_IAMR, \gpr1
 
 	/* No isync required, see kuap_restore_user_amr() */
 	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
+
+	/*
+	 * We don't check the KUEP feature here because, if FTR_PKEY
+	 * is not enabled, we don't need to restore the IAMR on
+	 * return to userspace.
+	 */
 #endif
 .endm
 
@@ -53,6 +64,9 @@
 	isync
 	mtspr	SPRN_AMR, \gpr2
 	/* No isync required, see kuap_restore_amr() */
+	/*
+	 * No need to restore IAMR when returning to kernel space.
+	 */
 100:  // skip_restore_amr
 #endif
 .endm
@@ -90,6 +104,12 @@
 	b	100f  // skip_save_amr
 	ALT_MMU_FTR_SECTION_END_NESTED_IFSET(MMU_FTR_KUAP, 68)
 
+	/*
+	 * We don't check the KUEP feature here because, if FTR_PKEY
+	 * is not enabled, we don't need to save the IAMR on
+	 * entry from userspace. That is handled by either
+	 * handle_kuap_save_amr or skip_save_amr.
+	 */
 
 99: // handle_kuap_save_amr
 	.ifnb \msr_pr_cr
@@ -120,6 +140,25 @@
 102:
 	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)
 
+	.ifnb \msr_pr_cr
+	beq	\msr_pr_cr, 103f // from kernel space
+	mfspr	\gpr1, SPRN_IAMR
+	std	\gpr1, STACK_REGS_KUEP(r1)
+
+	/*
+	 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
+	 * if the KUEP feature is enabled.
+	 */
+	BEGIN_MMU_FTR_SECTION_NESTED(70)
+	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
+	cmpd	\use_cr, \gpr1, \gpr2
+	beq	\use_cr, 103f
+	mtspr	SPRN_IAMR, \gpr2
+	isync
+103:
+	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUEP, 70)
+	.endif
+
 100: // skip_save_amr
 #endif
 .endm
@@ -140,13 +179,13 @@ static inline void kuap_restore_user_amr(struct pt_regs *regs)
 
 	isync();
 	mtspr(SPRN_AMR, regs->kuap);
+	mtspr(SPRN_IAMR, regs->kuep);
 	/*
 	 * No isync required here because we are about to rfi
 	 * back to previous context before any user accesses
 	 * would be made, which is a CSI.
 	 */
 }
-
 static inline void kuap_restore_kernel_amr(struct pt_regs *regs,
 					   unsigned long amr)
 {
@@ -162,6 +201,9 @@ static inline void kuap_restore_kernel_amr(struct pt_regs *regs,
 			 */
 		}
 	}
+	/*
+	 * No need to restore IAMR when returning to kernel space.
+	 */
 }
 
 static inline unsigned long kuap_get_and_check_amr(void)
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e0195e6b892b..2bfd2b6a72ab 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -56,8 +56,12 @@ struct pt_regs
 #ifdef CONFIG_PPC_KUAP
 			unsigned long kuap;
 #endif
+#ifdef CONFIG_PPC_KUEP
+			unsigned long kuep;
+#endif
+
 		};
-		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
+		unsigned long __pad[4];	/* Maintain 16 byte interrupt stack alignment */
 	};
 };
 #endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index fcf24a365fc0..6c7326fc73b9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -353,6 +353,10 @@ int main(void)
 #ifdef CONFIG_PPC_KUAP
 	STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
 #endif
+#ifdef CONFIG_PPC_KUEP
+	STACK_PT_REGS_OFFSET(STACK_REGS_KUEP, kuep);
+#endif
+
 
 #if defined(CONFIG_PPC32)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 4d417fdadb2a..faac67d04a70 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -37,14 +37,17 @@ notrace long system_call_exception(long r3, long r4, long r5,
 
 #ifdef CONFIG_PPC_MEM_KEYS
 	if (mmu_has_feature(MMU_FTR_PKEY)) {
-		unsigned long amr;
+		unsigned long amr, iamr;
 		/*
-		 * When entering from userspace we mostly have the AMR
+		 * When entering from userspace we mostly have the AMR/IAMR
 		 * different from kernel default values. Hence don't compare.
 		 */
 		amr = mfspr(SPRN_AMR);
+		iamr = mfspr(SPRN_IAMR);
 		regs->kuap = amr;
+		regs->kuep = iamr;
 		mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+		mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
 		isync();
 	} else
 #endif
-- 
2.26.2