[PATCH v2 07/30] KVM: PPC: Book3S PR: add C function wrapper for _kvmppc_save/restore_tm()
wei.guo.simon at gmail.com
wei.guo.simon at gmail.com
Wed Feb 28 04:37:14 AEDT 2018
From: Simon Guo <wei.guo.simon at gmail.com>
Currently the __kvmppc_save/restore_tm() APIs can only be invoked from
assembly code. This patch adds C function wrappers for them so
that they can be safely called from C code.
Signed-off-by: Simon Guo <wei.guo.simon at gmail.com>
---
arch/powerpc/include/asm/asm-prototypes.h | 6 ++
arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 +--
arch/powerpc/kvm/tm.S | 95 ++++++++++++++++++++++++++++++-
3 files changed, 103 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 002abfc..b0ccd91 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -126,7 +126,13 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
void _mcount(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+/* Transactional memory related */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index df13cea..4b59424 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -790,7 +790,7 @@ BEGIN_FTR_SECTION
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
- bl kvmppc_restore_tm
+ bl __kvmppc_restore_tm
ld r4, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
@@ -1735,7 +1735,7 @@ BEGIN_FTR_SECTION
*/
mr r3, r9
ld r4, VCPU_MSR(r3)
- bl kvmppc_save_tm
+ bl __kvmppc_save_tm
ld r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
@@ -2596,7 +2596,7 @@ BEGIN_FTR_SECTION
*/
ld r3, HSTATE_KVM_VCPU(r13)
ld r4, VCPU_MSR(r3)
- bl kvmppc_save_tm
+ bl __kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
@@ -2710,7 +2710,7 @@ BEGIN_FTR_SECTION
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
- bl kvmppc_restore_tm
+ bl __kvmppc_restore_tm
ld r4, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index 2d6fe5b..269dd11 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -35,7 +35,7 @@
* This can modify all checkpointed registers, but
* restores r1, r2 before exit.
*/
-_GLOBAL(kvmppc_save_tm)
+_GLOBAL(__kvmppc_save_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
@@ -149,6 +149,52 @@ _GLOBAL(kvmppc_save_tm)
blr
/*
+ * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it
+ * can be invoked from C code by PR KVM only.
+ */
+_GLOBAL(_kvmppc_save_tm_pr)
+ mflr r5
+ std r5, PPC_LR_STKOFF(r1)
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+ SAVE_NVGPRS(r1)
+
+ /* save MSR since TM/math bits might be impacted
+ * by __kvmppc_save_tm().
+ */
+ mfmsr r5
+ SAVE_GPR(5, r1)
+
+ /* also save DSCR/CR so that it can be recovered later */
+ mfspr r6, SPRN_DSCR
+ SAVE_GPR(6, r1)
+
+ mfcr r7
+ stw r7, _CCR(r1)
+
+ bl __kvmppc_save_tm
+
+ ld r7, _CCR(r1)
+ mtcr r7
+
+ REST_GPR(6, r1)
+ mtspr SPRN_DSCR, r6
+
+ /* need preserve current MSR's MSR_TS bits */
+ REST_GPR(5, r1)
+ mfmsr r6
+ rldicl r6, r6, 64 - MSR_TS_S_LG, 62
+ rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ mtmsrd r5
+
+ REST_NVGPRS(r1)
+ addi r1, r1, SWITCH_FRAME_SIZE
+ ld r5, PPC_LR_STKOFF(r1)
+ mtlr r5
+ blr
+
+EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
+
+/*
* Restore transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct.
@@ -158,7 +204,7 @@ _GLOBAL(kvmppc_save_tm)
* This potentially modifies all checkpointed registers.
* It restores r1, r2 from the PACA.
*/
-_GLOBAL(kvmppc_restore_tm)
+_GLOBAL(__kvmppc_restore_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
@@ -186,6 +232,7 @@ _GLOBAL(kvmppc_restore_tm)
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */
std r1, HSTATE_SCRATCH2(r13)
+ std r3, HSTATE_SCRATCH1(r13)
/* Make sure the failure summary is set, otherwise we'll program check
* when we trechkpt. It's possible that this might have been not set
@@ -262,6 +309,7 @@ _GLOBAL(kvmppc_restore_tm)
ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29
#endif
+ ld r3, HSTATE_SCRATCH1(r13)
ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATMSCRATCH(r13)
@@ -273,4 +321,47 @@ _GLOBAL(kvmppc_restore_tm)
mtlr r0
blr
+/*
+ * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so
+ * that it can be invoked from C code by PR KVM only.
+ */
+_GLOBAL(_kvmppc_restore_tm_pr)
+ mflr r5
+ std r5, PPC_LR_STKOFF(r1)
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+ SAVE_NVGPRS(r1)
+
+ /* save MSR to avoid TM/math bits change */
+ mfmsr r5
+ SAVE_GPR(5, r1)
+
+ /* also save DSCR/CR so that it can be recovered later */
+ mfspr r6, SPRN_DSCR
+ SAVE_GPR(6, r1)
+
+ mfcr r7
+ stw r7, _CCR(r1)
+
+ bl __kvmppc_restore_tm
+
+ ld r7, _CCR(r1)
+ mtcr r7
+
+ REST_GPR(6, r1)
+ mtspr SPRN_DSCR, r6
+
+ /* need preserve current MSR's MSR_TS bits */
+ REST_GPR(5, r1)
+ mfmsr r6
+ rldicl r6, r6, 64 - MSR_TS_S_LG, 62
+ rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ mtmsrd r5
+
+ REST_NVGPRS(r1)
+ addi r1, r1, SWITCH_FRAME_SIZE
+ ld r5, PPC_LR_STKOFF(r1)
+ mtlr r5
+ blr
+
+EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif
--
1.8.3.1
More information about the Linuxppc-dev
mailing list