[PATCH v5 3/7] kvmppc: H_SVM_INIT_START and H_SVM_INIT_DONE hcalls

janani janani at linux.ibm.com
Wed Jul 10 05:42:10 AEST 2019


On 2019-07-09 05:25, Bharata B Rao wrote:
> H_SVM_INIT_START: Initiate securing a VM
> H_SVM_INIT_DONE: Conclude securing a VM
> 
> As part of H_SVM_INIT_START, register all existing memslots with
> the UV. The H_SVM_INIT_DONE call from the UV informs the HV that the
> transition of the guest to secure mode is complete.
> 
> These two states (transition to secure mode STARTED and transition
> to secure mode COMPLETED) are recorded in kvm->arch.secure_guest.
> Setting these states will cause the assembly code that enters the
> guest to call the UV_RETURN ucall instead of trying to enter the
> guest directly.
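
To make the paragraph above a bit more concrete for readers who have not
seen the entry-path change: once secure_guest is non-zero, guest entry is
routed through the Ultravisor. Roughly, in C-ish pseudocode (the real
check is done in the HV guest-entry assembly; the helper names below are
made up for illustration and are not part of this patch):

	if (vcpu->kvm->arch.secure_guest) {
		/* Secure VM: hand control to the Ultravisor via the UV_RETURN ucall */
		do_uv_return(vcpu);		/* hypothetical helper */
	} else {
		/* Normal VM: the HV enters the guest directly */
		do_direct_guest_entry(vcpu);	/* hypothetical helper */
	}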
> 
> Signed-off-by: Bharata B Rao <bharata at linux.ibm.com>
> Acked-by: Paul Mackerras <paulus at ozlabs.org>
  Reviewed-by: Janani Janakiraman <janani at linux.ibm.com>
> ---
>  arch/powerpc/include/asm/hvcall.h         |  2 ++
>  arch/powerpc/include/asm/kvm_book3s_hmm.h | 12 ++++++++
>  arch/powerpc/include/asm/kvm_host.h       |  4 +++
>  arch/powerpc/include/asm/ultravisor-api.h |  1 +
>  arch/powerpc/include/asm/ultravisor.h     |  9 ++++++
>  arch/powerpc/kvm/book3s_hv.c              |  7 +++++
>  arch/powerpc/kvm/book3s_hv_hmm.c          | 34 +++++++++++++++++++++++
>  7 files changed, 69 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index 05b8536f6653..fa7695928e30 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -343,6 +343,8 @@
>  /* Platform-specific hcalls used by the Ultravisor */
>  #define H_SVM_PAGE_IN		0xEF00
>  #define H_SVM_PAGE_OUT		0xEF04
> +#define H_SVM_INIT_START	0xEF08
> +#define H_SVM_INIT_DONE		0xEF0C
> 
>  /* Values for 2nd argument to H_SET_MODE */
>  #define H_SET_MODE_RESOURCE_SET_CIABR		1
> diff --git a/arch/powerpc/include/asm/kvm_book3s_hmm.h b/arch/powerpc/include/asm/kvm_book3s_hmm.h
> index 21f3de5f2acb..8c7aacabb2e0 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_hmm.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_hmm.h
> @@ -11,6 +11,8 @@ extern unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
>  					  unsigned long gra,
>  					  unsigned long flags,
>  					  unsigned long page_shift);
> +extern unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
> +extern unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
>  #else
>  static inline unsigned long
>  kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
> @@ -25,5 +27,15 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
>  {
>  	return H_UNSUPPORTED;
>  }
> +
> +static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
> +{
> +	return H_UNSUPPORTED;
> +}
> +
> +static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
> +{
> +	return H_UNSUPPORTED;
> +}
>  #endif /* CONFIG_PPC_UV */
>  #endif /* __POWERPC_KVM_PPC_HMM_H__ */
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index ac1a101beb07..0c49c3401c63 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -272,6 +272,10 @@ struct kvm_hpt_info {
> 
>  struct kvm_resize_hpt;
> 
> +/* Flag values for kvm_arch.secure_guest */
> +#define KVMPPC_SECURE_INIT_START	0x1 /* H_SVM_INIT_START has been called */
> +#define KVMPPC_SECURE_INIT_DONE		0x2 /* H_SVM_INIT_DONE completed */
> +
>  struct kvm_arch {
>  	unsigned int lpid;
>  	unsigned int smt_mode;		/* # vcpus per virtual core */
> diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
> index f1c5800ac705..07b7d638e7af 100644
> --- a/arch/powerpc/include/asm/ultravisor-api.h
> +++ b/arch/powerpc/include/asm/ultravisor-api.h
> @@ -20,6 +20,7 @@
>  /* opcodes */
>  #define UV_WRITE_PATE			0xF104
>  #define UV_RETURN			0xF11C
> +#define UV_REGISTER_MEM_SLOT		0xF120
>  #define UV_PAGE_IN			0xF128
>  #define UV_PAGE_OUT			0xF12C
> 
> diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
> index 16f8e0e8ec3f..b46042f1aa8f 100644
> --- a/arch/powerpc/include/asm/ultravisor.h
> +++ b/arch/powerpc/include/asm/ultravisor.h
> @@ -61,6 +61,15 @@ static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
>  	return ucall(UV_PAGE_OUT, retbuf, lpid, dst_ra, src_gpa, flags,
>  		     page_shift);
>  }
> +
> +static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
> +				       u64 flags, u64 slotid)
> +{
> +	unsigned long retbuf[UCALL_BUFSIZE];
> +
> +	return ucall(UV_REGISTER_MEM_SLOT, retbuf, lpid, start_gpa,
> +		     size, flags, slotid);
> +}
>  #endif /* !__ASSEMBLY__ */
> 
>  #endif	/* _ASM_POWERPC_ULTRAVISOR_H */
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 8ee66aa0da58..b8f801d00ad4 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -1097,6 +1097,13 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
>  					    kvmppc_get_gpr(vcpu, 5),
>  					    kvmppc_get_gpr(vcpu, 6));
>  		break;
> +	case H_SVM_INIT_START:
> +		ret = kvmppc_h_svm_init_start(vcpu->kvm);
> +		break;
> +	case H_SVM_INIT_DONE:
> +		ret = kvmppc_h_svm_init_done(vcpu->kvm);
> +		break;
> +
>  	default:
>  		return RESUME_HOST;
>  	}
> diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> index 36562b382e70..55bab9c4e60a 100644
> --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> @@ -62,6 +62,40 @@ struct kvmppc_hmm_migrate_args {
>  	unsigned long page_shift;
>  };
> 
> +unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
> +{
> +	struct kvm_memslots *slots;
> +	struct kvm_memory_slot *memslot;
> +	int ret = H_SUCCESS;
> +	int srcu_idx;
> +
> +	srcu_idx = srcu_read_lock(&kvm->srcu);
> +	slots = kvm_memslots(kvm);
> +	kvm_for_each_memslot(memslot, slots) {
> +		ret = uv_register_mem_slot(kvm->arch.lpid,
> +					   memslot->base_gfn << PAGE_SHIFT,
> +					   memslot->npages * PAGE_SIZE,
> +					   0, memslot->id);
> +		if (ret < 0) {
> +			ret = H_PARAMETER;
> +			goto out;
> +		}
> +	}
> +	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
> +out:
> +	srcu_read_unlock(&kvm->srcu, srcu_idx);
> +	return ret;
> +}
> +
> +unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
> +{
> +	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
> +		return H_UNSUPPORTED;
> +
> +	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
> +	return H_SUCCESS;
> +}
> +
>  /*
>   * Bits 60:56 in the rmap entry will be used to identify the
>   * different uses/functions of rmap. This definition with move
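
One more note for completeness: the two hcalls are expected in order, and
H_SVM_INIT_DONE without a preceding H_SVM_INIT_START fails with
H_UNSUPPORTED, as kvmppc_h_svm_init_done() above shows. Below is a minimal
standalone sketch of just that state handling, in case it helps when
thinking about tests; the flag names are taken from the patch, everything
else is a toy stand-in rather than the kernel code:

#include <stdio.h>

/* Flag values mirrored from the kvm_host.h hunk above */
#define KVMPPC_SECURE_INIT_START	0x1	/* H_SVM_INIT_START has been called */
#define KVMPPC_SECURE_INIT_DONE		0x2	/* H_SVM_INIT_DONE completed */

/* Toy stand-ins for the hcall return codes (not the kernel definitions) */
#define H_SUCCESS	0
#define H_UNSUPPORTED	-67

/* Toy model of kvm->arch.secure_guest */
static unsigned long secure_guest;

static long toy_svm_init_start(void)
{
	/* The real hcall also registers every memslot with the UV here */
	secure_guest |= KVMPPC_SECURE_INIT_START;
	return H_SUCCESS;
}

static long toy_svm_init_done(void)
{
	if (!(secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;
	secure_guest |= KVMPPC_SECURE_INIT_DONE;
	return H_SUCCESS;
}

int main(void)
{
	printf("DONE before START: %ld\n", toy_svm_init_done());	/* -67 */
	printf("START:             %ld\n", toy_svm_init_start());	/* 0 */
	printf("DONE after START:  %ld\n", toy_svm_init_done());	/* 0 */
	return 0;
}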