[PATCH v2 19/33] KVM: PPC: Book3S HV: Nested guest entry via hypercall

David Gibson david at gibson.dropbear.id.au
Tue Oct 2 17:00:09 AEST 2018


On Fri, Sep 28, 2018 at 07:45:49PM +1000, Paul Mackerras wrote:
> This adds a new hypercall, H_ENTER_NESTED, which is used by a nested
> hypervisor to enter one of its nested guests.  The hypercall supplies
> register values in two structs.  Those values are copied by the level 0
> (L0) hypervisor (the one which is running in hypervisor mode) into the
> vcpu struct of the L1 guest, and then the guest is run until an
> interrupt or error occurs which needs to be reported to L1 via the
> hypercall return value.
> 
> Currently this assumes that the L0 and L1 hypervisors are the same
> endianness, and the structs passed as arguments are in native
> endianness.  If they are of different endianness, the version number
> check will fail and the hcall will be rejected.
> 
> Nested hypervisors do not support indep_threads_mode=N, so this adds
> code to print a warning message if the administrator has set
> indep_threads_mode=N, and treat it as Y.
> 
> Signed-off-by: Paul Mackerras <paulus at ozlabs.org>

[snip]
> +/* Register state for entering a nested guest with H_ENTER_NESTED */
> +struct hv_guest_state {
> +	u64 version;		/* version of this structure layout */
> +	u32 lpid;
> +	u32 vcpu_token;
> +	/* These registers are hypervisor privileged (at least for writing) */
> +	u64 lpcr;
> +	u64 pcr;
> +	u64 amor;
> +	u64 dpdes;
> +	u64 hfscr;
> +	s64 tb_offset;
> +	u64 dawr0;
> +	u64 dawrx0;
> +	u64 ciabr;
> +	u64 hdec_expiry;
> +	u64 purr;
> +	u64 spurr;
> +	u64 ic;
> +	u64 vtb;
> +	u64 hdar;
> +	u64 hdsisr;
> +	u64 heir;
> +	u64 asdr;
> +	/* These are OS privileged but need to be set late in guest entry */
> +	u64 srr0;
> +	u64 srr1;
> +	u64 sprg[4];
> +	u64 pidr;
> +	u64 cfar;
> +	u64 ppr;
> +};

I'm guessing the implication here is that most supervisor privileged
registers need to be set by the L1 to the L2 values, before making the
H_ENTER_NESTED call.  Is that right?

[snip]
> +static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
> +{
> +	int r;
> +	int srcu_idx;
> +
> +	vcpu->stat.sum_exits++;
> +
> +	/*
> +	 * This can happen if an interrupt occurs in the last stages
> +	 * of guest entry or the first stages of guest exit (i.e. after
> +	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
> +	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
> +	 * That can happen due to a bug, or due to a machine check
> +	 * occurring at just the wrong time.
> +	 */
> +	if (vcpu->arch.shregs.msr & MSR_HV) {
> +		pr_emerg("KVM trap in HV mode while nested!\n");
> +		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
> +			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
> +			 vcpu->arch.shregs.msr);
> +		kvmppc_dump_regs(vcpu);
> +		return RESUME_HOST;

To make sure I'm understanding right here, RESUME_HOST will
effectively mean resume the L0, and RESUME_GUEST (without additional
processing) will mean resume the L2, right?

> +	}
> +	switch (vcpu->arch.trap) {
> +	/* We're good on these - the host merely wanted to get our attention */
> +	case BOOK3S_INTERRUPT_HV_DECREMENTER:
> +		vcpu->stat.dec_exits++;
> +		r = RESUME_GUEST;
> +		break;
> +	case BOOK3S_INTERRUPT_EXTERNAL:
> +		vcpu->stat.ext_intr_exits++;
> +		r = RESUME_HOST;
> +		break;
> +	case BOOK3S_INTERRUPT_H_DOORBELL:
> +	case BOOK3S_INTERRUPT_H_VIRT:
> +		vcpu->stat.ext_intr_exits++;
> +		r = RESUME_GUEST;
> +		break;
> +	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
> +	case BOOK3S_INTERRUPT_HMI:
> +	case BOOK3S_INTERRUPT_PERFMON:
> +	case BOOK3S_INTERRUPT_SYSTEM_RESET:
> +		r = RESUME_GUEST;
> +		break;
> +	case BOOK3S_INTERRUPT_MACHINE_CHECK:
> +		/* Pass the machine check to the L1 guest */
> +		r = RESUME_HOST;
> +		/* Print the MCE event to host console. */
> +		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
> +		break;
> +	/*
> +	 * We get these next two if the guest accesses a page which it thinks
> +	 * it has mapped but which is not actually present, either because
> +	 * it is for an emulated I/O device or because the corresponding
> +	 * host page has been paged out.
> +	 */
> +	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
> +		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> +		r = kvmhv_nested_page_fault(vcpu);
> +		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
> +		break;
> +	case BOOK3S_INTERRUPT_H_INST_STORAGE:
> +		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
> +		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
> +					 DSISR_SRR1_MATCH_64S;
> +		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
> +			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
> +		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> +		r = kvmhv_nested_page_fault(vcpu);
> +		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
> +		break;
> +
> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> +	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
> +		/*
> +		 * This occurs for various TM-related instructions that
> +		 * we need to emulate on POWER9 DD2.2.  We have already
> +		 * handled the cases where the guest was in real-suspend
> +		 * mode and was transitioning to transactional state.
> +		 */
> +		r = kvmhv_p9_tm_emulation(vcpu);
> +		break;
> +#endif
> +
> +	case BOOK3S_INTERRUPT_HV_RM_HARD:
> +		vcpu->arch.trap = 0;
> +		r = RESUME_GUEST;
> +		if (!xive_enabled())
> +			kvmppc_xics_rm_complete(vcpu, 0);
> +		break;
> +	default:
> +		r = RESUME_HOST;
> +		break;
> +	}
> +
> +	return r;
> +}
> +
>  static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
>  					    struct kvm_sregs *sregs)
>  {
> @@ -3098,7 +3203,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
>  /*
>   * Load up hypervisor-mode registers on P9.
>   */
> -static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit)
> +static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
> +				     unsigned long lpcr)

I don't understand this change: you've added a parameter, but there's
no change to the body of the function, so you're not actually using
the new parameter.

[snip]
> +/*
> + * This never fails for a radix guest, as none of the operations it does
> + * for a radix guest can fail or have a way to report failure.
> + * kvmhv_run_single_vcpu() relies on this fact.
> + */
>  static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
>  {
>  	int r = 0;
> @@ -3684,12 +3814,15 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>  	return vcpu->arch.ret;
>  }
>  
> -static int kvmppc_run_single_vcpu(struct kvm_run *kvm_run,
> -				  struct kvm_vcpu *vcpu, u64 time_limit)
> +int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
> +			  struct kvm_vcpu *vcpu, u64 time_limit,
> +			  unsigned long lpcr)

IIRC this is the streamlined entry path introduced earlier in the
series, yes?  Making the name change where it was introduced would
avoid the extra churn here.

It'd be nice to have something here to make it more obvious that this
path is only for radix guests, but I'm not really sure how to
accomplish that.

>  {
>  	int trap, r, pcpu, pcpu0;
>  	int srcu_idx;
>  	struct kvmppc_vcore *vc;
> +	struct kvm_nested_guest *nested = vcpu->arch.nested;
> +	unsigned long lpid;
>  
>  	trace_kvmppc_run_vcpu_enter(vcpu);
>  
> @@ -3710,16 +3843,8 @@ static int kvmppc_run_single_vcpu(struct kvm_run *kvm_run,
>  	vc->runner = vcpu;
>  
>  	/* See if the MMU is ready to go */
> -	if (!vcpu->kvm->arch.mmu_ready) {
> -		r = kvmhv_setup_mmu(vcpu);
> -		if (r) {
> -			kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
> -			kvm_run->fail_entry.
> -				hardware_entry_failure_reason = 0;
> -			vcpu->arch.ret = r;
> -			goto out;
> -		}
> -	}
> +	if (!vcpu->kvm->arch.mmu_ready)
> +		kvmhv_setup_mmu(vcpu);
>  
>  	if (need_resched())
>  		cond_resched();
> @@ -3741,7 +3866,22 @@ static int kvmppc_run_single_vcpu(struct kvm_run *kvm_run,
>  	if (lazy_irq_pending() || need_resched() || !vcpu->kvm->arch.mmu_ready)
>  		goto out;
>  
> -	kvmppc_core_prepare_to_enter(vcpu);
> +	if (!nested) {
> +		kvmppc_core_prepare_to_enter(vcpu);
> +		if (vcpu->arch.doorbell_request) {
> +			vc->dpdes = 1;
> +			smp_wmb();
> +			vcpu->arch.doorbell_request = 0;
> +		}
> +		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
> +			     &vcpu->arch.pending_exceptions))
> +			lpcr |= LPCR_MER;
> +	} else if (vcpu->arch.pending_exceptions ||
> +		   vcpu->arch.doorbell_request ||
> +		   xive_interrupt_pending(vcpu)) {
> +		vcpu->arch.ret = RESUME_HOST;
> +		goto out;
> +	}
>  
-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson
-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 833 bytes
Desc: not available
URL: <http://lists.ozlabs.org/pipermail/linuxppc-dev/attachments/20181002/e90c49c6/attachment.sig>


More information about the Linuxppc-dev mailing list