[PATCH v1 23/55] KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs
Nicholas Piggin
npiggin at gmail.com
Mon Jul 26 13:50:04 AEST 2021
This reduces the number of mtmsrd instructions required to enable facility
bits when saving/restoring registers, by having the KVM code set all of the
required bits up front rather than relying on the individual facility
functions to each set their particular MSR bits.
-42 cycles (7803) POWER9 virt-mode NULL hcall
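
For reference, the win comes from how msr_check_and_set() behaves: it only
executes the mtmsrd when a requested bit is not already set, so ORing all of
the facility bits together and making a single call up front leaves nothing
for the later per-facility paths to do. A simplified sketch of that helper
(see arch/powerpc/kernel/process.c for the real implementation):

    unsigned long msr_check_and_set(unsigned long bits)
    {
            unsigned long oldmsr = mfmsr();
            unsigned long newmsr = oldmsr | bits;

            /* On VSX-capable CPUs, enabling FP also enables VSX */
            if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                    newmsr |= MSR_VSX;

            /* Only pay for the mtmsrd when a bit actually changes */
            if (oldmsr != newmsr)
                    mtmsr_isync(newmsr);

            return newmsr;
    }

So one msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX | MSR_TM) call replaces
what would otherwise be several conditional mtmsrd sequences.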
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
arch/powerpc/kernel/process.c | 24 +++++++++++
arch/powerpc/kvm/book3s_hv.c | 61 ++++++++++++++++++---------
arch/powerpc/kvm/book3s_hv_p9_entry.c | 1 +
3 files changed, 67 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 185beb290580..00b55b38a460 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -593,6 +593,30 @@ static void save_all(struct task_struct *tsk)
msr_check_and_clear(msr_all_available);
}
+void save_user_regs_kvm(void)
+{
+ unsigned long usermsr;
+
+ if (!current->thread.regs)
+ return;
+
+ usermsr = current->thread.regs->msr;
+
+ if (usermsr & MSR_FP)
+ save_fpu(current);
+
+ if (usermsr & MSR_VEC)
+ save_altivec(current);
+
+ if (usermsr & MSR_TM) {
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+}
+EXPORT_SYMBOL_GPL(save_user_regs_kvm);
+
void flush_all_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2e966d62a583..dedcf3ddba3b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4103,6 +4103,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
struct p9_host_os_sprs host_os_sprs;
s64 dec;
u64 tb, next_timer;
+ unsigned long msr;
int trap;
WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4114,8 +4115,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (next_timer < time_limit)
time_limit = next_timer;
+ vcpu->arch.ceded = 0;
+
save_p9_host_os_sprs(&host_os_sprs);
+ /* MSR bits may have been cleared by context switch */
+ msr = 0;
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr |= MSR_VSX;
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ msr |= MSR_TM;
+ msr = msr_check_and_set(msr);
+
kvmppc_subcore_enter_guest();
vc->entry_exit_map = 1;
@@ -4124,12 +4140,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (cpu_has_feature(CPU_FTR_TM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+ msr = mfmsr(); /* TM restore can update msr */
+ }
switch_pmu_to_guest(vcpu, &host_os_sprs);
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
load_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
load_vr_state(&vcpu->arch.vr);
@@ -4238,7 +4255,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
store_vr_state(&vcpu->arch.vr);
@@ -4767,6 +4783,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
goto done;
}
+void save_user_regs_kvm(void);
+
static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
@@ -4776,19 +4794,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
unsigned long user_tar = 0;
unsigned int user_vrsave;
struct kvm *kvm;
+ unsigned long msr;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /* No need to go into the guest when all we'll do is come back out */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Don't allow entry with a suspended transaction, because
* the guest entry/exit code will lose it.
- * If the guest has TM enabled, save away their TM-related SPRs
- * (they will get restored by the TM unavailable interrupt).
*/
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
(current->thread.regs->msr & MSR_TM)) {
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
@@ -4796,12 +4819,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
- /* Enable TM so we can read the TM SPRs */
- mtmsr(mfmsr() | MSR_TM);
- current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
- current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
- current->thread.tm_texasr = mfspr(SPRN_TEXASR);
- current->thread.regs->msr &= ~MSR_TM;
}
#endif
@@ -4816,18 +4833,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
- /* No need to go into the guest when all we'll do is come back out */
- if (signal_pending(current)) {
- run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
kvm = vcpu->kvm;
atomic_inc(&kvm->arch.vcpus_running);
/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
smp_mb();
- flush_all_to_thread(current);
+ msr = 0;
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr |= MSR_VSX;
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ msr |= MSR_TM;
+ msr = msr_check_and_set(msr);
+
+ save_user_regs_kvm();
/* Save userspace EBB and other register values */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index a7f63082b4e3..fb9cb34445ea 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -224,6 +224,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
vc->tb_offset_applied = vc->tb_offset;
}
+ /* Could avoid mfmsr by passing around, but probably no big deal */
msr = mfmsr();
host_hfscr = mfspr(SPRN_HFSCR);
--
2.23.0