[PATCH 1/2] Fix 2 "[v3, 25/32] powerpc/64: system call implement entry/exit logic in C"
Nicholas Piggin
npiggin at gmail.com
Wed Mar 25 20:30:13 AEDT 2020
This fixes 4 issues caught by the TM selftests. First was a tm-syscall bug
that hit because tabort_syscall was called after interrupts were
reconciled, so that (with a subsequent patch) interrupts ended up enabled
before tabort_syscall was called. Rather than un-reconciling interrupts
again for the return, I just go back to doing the test early in asm; the
C-ification of that wasn't a big win anyway.
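For reference, the C-side check being removed is the one visible in the
syscall_64.c hunk below; once a later patch enables interrupts before
this point, the tabort would run with interrupts on:

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    unlikely(regs->msr & MSR_TS_T))
		tabort_syscall(regs->nip, regs->msr);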
Second, the syscall return _TIF_USER_WORK_MASK check would go into an
infinite loop if _TIF_RESTORE_TM became set, because nothing in the loop
body clears that flag. The asm code uses _TIF_USER_WORK_MASK to branch to
the slow path, which includes restore_tm_state.
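Abridged sketch of the exit work loop before the fix (the full code is in
the syscall_exit_prepare() hunk below); nothing in the loop body handles
or clears _TIF_RESTORE_TM, so once it is set the condition can never
become false:

again:
	local_irq_disable();
	ti_flags = READ_ONCE(*ti_flagsp);
	while (unlikely(ti_flags & _TIF_USER_WORK_MASK)) {
		local_irq_enable();
		/* _TIF_NEED_RESCHED -> schedule(), signal work ->
		 * notify-resume, but no handler here for
		 * _TIF_RESTORE_TM */
		local_irq_disable();
		ti_flags = READ_ONCE(*ti_flagsp);
	}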
Third, the system call return path was not calling restore_tm_state at
all; I missed this completely (although it is in the return-from-interrupt
C conversion, because when the asm syscall code encountered problems it
would branch to the interrupt return code).
Fourth, MSR_VEC was missing from the restore_math check, which was caught
by the tm-unavailable selftest taking an unexpected facility unavailable
interrupt when testing the VSX unavailable exception with MSR.FP=1,
MSR.VEC=1. The fourth case also gets a fixup in a subsequent patch.
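Taken together, the third and fourth fixes make the exit-path restore
look roughly like this (abridged from the syscall_exit_prepare() hunk
below): _TIF_RESTORE_TM is now handled here rather than in the work
loop, and the math mask covers the facilities the CPU actually has:

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    unlikely(ti_flags & _TIF_RESTORE_TM)) {
		restore_tm_state(regs);
	} else {
		unsigned long mathflags = MSR_FP;

		if (cpu_has_feature(CPU_FTR_VSX))
			mathflags |= MSR_VEC | MSR_VSX;
		else if (cpu_has_feature(CPU_FTR_ALTIVEC))
			mathflags |= MSR_VEC;

		/* MSR.FP=1 MSR.VEC=1 MSR.VSX=0 now fails this
		 * test and calls restore_math() as required */
		if ((regs->msr & mathflags) != mathflags)
			restore_math(regs);
	}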
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
arch/powerpc/kernel/entry_64.S | 12 +++++++++---
arch/powerpc/kernel/syscall_64.c | 25 +++++++++++++------------
2 files changed, 22 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index dc7fd3196d20..403224acdaa8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -64,6 +64,12 @@ exception_marker:
.globl system_call_common
system_call_common:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne .Ltabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
_ASM_NOKPROBE_SYMBOL(system_call_common)
mr r10,r1
ld r1,PACAKSAVE(r13)
@@ -179,7 +185,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
b .Lsyscall_restore_regs_cont
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-_GLOBAL(tabort_syscall) /* (unsigned long nip, unsigned long msr) */
+.Ltabort_syscall:
/* Firstly we need to enable TM in the kernel */
mfmsr r10
li r9, 1
@@ -199,8 +205,8 @@ _GLOBAL(tabort_syscall) /* (unsigned long nip, unsigned long msr) */
li r9, MSR_RI
andc r10, r10, r9
mtmsrd r10, 1
- mtspr SPRN_SRR0, r3
- mtspr SPRN_SRR1, r4
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
RFI_TO_USER
b . /* prevent speculative execution */
#endif
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index ffd601d87065..56533a26f3b7 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -15,8 +15,6 @@
#include <asm/time.h>
#include <asm/unistd.h>
-extern void __noreturn tabort_syscall(unsigned long nip, unsigned long msr);
-
typedef long (*syscall_fn)(long, long, long, long, long, long);
/* Has to run notrace because it is entered "unreconciled" */
@@ -32,10 +30,6 @@ notrace long system_call_exception(long r3, long r4, long r5, long r6, long r7,
BUG_ON(!FULL_REGS(regs));
BUG_ON(regs->softe != IRQS_ENABLED);
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely(regs->msr & MSR_TS_T))
- tabort_syscall(regs->nip, regs->msr);
-
account_cpu_user_entry();
#ifdef CONFIG_PPC_SPLPAR
@@ -161,7 +155,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
again:
local_irq_disable();
ti_flags = READ_ONCE(*ti_flagsp);
- while (unlikely(ti_flags & _TIF_USER_WORK_MASK)) {
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
schedule();
@@ -180,13 +174,20 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
- unsigned long mathflags = MSR_FP;
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely((ti_flags & _TIF_RESTORE_TM))) {
+ restore_tm_state(regs);
+ } else {
+ unsigned long mathflags = MSR_FP;
- if (IS_ENABLED(CONFIG_ALTIVEC))
- mathflags |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ mathflags |= MSR_VEC | MSR_VSX;
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ mathflags |= MSR_VEC;
- if ((regs->msr & mathflags) != mathflags)
- restore_math(regs);
+ if ((regs->msr & mathflags) != mathflags)
+ restore_math(regs);
+ }
}
/* This must be done with RI=1 because tracing may touch vmaps */
--
2.23.0