[PATCH v7 16/32] KVM: PPC: Book3S HV P9: Read machine check registers while MSR[RI] is 0
Nicholas Piggin
npiggin at gmail.com
Fri May 28 19:07:36 AEST 2021
SRR0/1, DAR and DSISR must all be protected from a machine check, which
can clobber them. Ensure MSR[RI] is clear while they are live, so that a
machine check or system reset taken in that window is treated as
unrecoverable rather than silently returning over clobbered register state.
Reviewed-by: Alexey Kardashevskiy <aik at ozlabs.ru>
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
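Not for application: a minimal, compilable model of the ordering this patch
enforces, in case it helps review. The mtspr/mfspr/mfmsr/__mtmsrd stubs, the
SPRN_* values and the shregs layout below are simplified stand-ins for this
sketch only, not the kernel's definitions, and the exit path here always
re-enables RI rather than reproducing the HSRR/SRR ri_set handling done in
the real patch.

#include <stdint.h>
#include <stdio.h>

#define MSR_RI	0x2ULL

/* Stand-in SPR identifiers, for this model only. */
enum { SPRN_SRR0, SPRN_SRR1, SPRN_DAR, SPRN_DSISR, NR_SPRS };

static uint64_t msr = MSR_RI;	/* host MSR, RI set */
static uint64_t sprs[NR_SPRS];	/* stand-in SPR file */

static void mtspr(int spr, uint64_t val) { sprs[spr] = val; }
static uint64_t mfspr(int spr) { return sprs[spr]; }
static uint64_t mfmsr(void) { return msr; }

/* Model of mtmsrd: with L=1, only MSR[RI] is updated here. */
static void __mtmsrd(uint64_t val, int l)
{
	msr = l ? ((msr & ~MSR_RI) | (val & MSR_RI)) : val;
}

struct shregs { uint64_t srr0, srr1, dar, dsisr; };

static void p9_entry(struct shregs *sh)
{
	/*
	 * Clear RI before loading the guest's SRR0/1, DAR and DSISR:
	 * while RI is 0, a machine check or system reset cannot treat
	 * the interrupted context as recoverable, so a clobber of these
	 * registers is caught rather than silently consumed.
	 */
	__mtmsrd(0, 1);
	mtspr(SPRN_DAR, sh->dar);
	mtspr(SPRN_DSISR, sh->dsisr);
	mtspr(SPRN_SRR0, sh->srr0);
	mtspr(SPRN_SRR1, sh->srr1);
	/* ... switch into the guest here ... */
}

static void p9_exit(struct shregs *sh)
{
	/* Read the MC-sensitive registers back before RI is set again. */
	sh->srr0 = mfspr(SPRN_SRR0);
	sh->srr1 = mfspr(SPRN_SRR1);
	sh->dar = mfspr(SPRN_DAR);
	sh->dsisr = mfspr(SPRN_DSISR);

	/* Only now mark the state recoverable again. */
	if (!(mfmsr() & MSR_RI))
		__mtmsrd(MSR_RI, 1);
}

int main(void)
{
	struct shregs sh = { .srr0 = 0x700, .srr1 = 0x9000000000001000ULL };

	p9_entry(&sh);
	p9_exit(&sh);
	printf("MSR[RI] after exit: %d\n", (mfmsr() & MSR_RI) ? 1 : 0);
	return 0;
}

The point the model makes is purely about ordering: the registers a machine
check can clobber are only written (on entry) or read (on exit) while
MSR[RI] is 0, and RI is set again only once nothing live depends on them.
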
arch/powerpc/kvm/book3s_hv.c | 11 +++++++--
arch/powerpc/kvm/book3s_hv_p9_entry.c | 33 ++++++++++++++++++++++++---
2 files changed, 39 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 777ec786ef71..6d39e4784af6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3571,11 +3571,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_BESCR, vcpu->arch.bescr);
mtspr(SPRN_WORT, vcpu->arch.wort);
mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
mtspr(SPRN_AMR, vcpu->arch.amr);
mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+ /*
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+ * being clobbered if we take an MCE or SRESET), so those are done
+ * later.
+ */
+
if (!(vcpu->arch.ctrl & 1))
mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
@@ -3618,6 +3623,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
hvregs.vcpu_token = vcpu->vcpu_id;
}
hvregs.hdec_expiry = time_limit;
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
__pa(&vcpu->arch.regs));
kvmhv_restore_hv_return_state(vcpu, &hvregs);
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index d2e659940630..a6f89e30040b 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -122,6 +122,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
s64 hdec;
u64 tb, purr, spurr;
u64 *exsave;
+ bool ri_set;
unsigned long msr = mfmsr();
int trap;
unsigned long host_hfscr = mfspr(SPRN_HFSCR);
@@ -192,9 +193,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
- mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
- mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
-
start_timing(vcpu, &vcpu->arch.rm_entry);
vcpu->arch.ceded = 0;
@@ -220,6 +218,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDSISR, HDSISR_CANARY);
+ __mtmsrd(0, 1); /* clear RI */
+
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
accumulate_time(vcpu, &vcpu->arch.guest_time);
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_FAST;
@@ -237,7 +242,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+
+ /* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
+ ri_set = false;
if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ if (trap != BOOK3S_INTERRUPT_SYSCALL &&
+ (vcpu->arch.shregs.msr & MSR_RI))
+ ri_set = true;
exsave = local_paca->exgen;
} else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
exsave = local_paca->exnmi;
@@ -247,6 +258,22 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+
+ /*
+ * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
+ * and hstate scratch (which we need to move into exsave to make
+ * re-entrant vs SRESET/MCE)
+ */
+ if (ri_set) {
+ if (unlikely(!(mfmsr() & MSR_RI))) {
+ __mtmsrd(MSR_RI, 1);
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(mfmsr() & MSR_RI);
+ __mtmsrd(MSR_RI, 1);
+ }
+
vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
--
2.23.0