[PATCH v5 2/7] kvmppc: Shared pages support for secure guests

janani janani at linux.ibm.com
Wed Jul 10 05:35:02 AEST 2019


On 2019-07-09 05:25, Bharata B Rao wrote:
> A secure guest will share some of its pages with the hypervisor (e.g.
> virtio bounce buffers). Support such shared pages in the HMM driver.
> 
> Once a secure page is converted to a shared page, the HMM driver will
> stop tracking that page.
> 
> Signed-off-by: Bharata B Rao <bharata at linux.ibm.com>
  Reviewed-by: Janani Janakiraman <janani at linux.ibm.com>
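
For anyone reading along, some context on how this path gets exercised:
the secure guest asks the ultravisor to share a page range (e.g. its
swiotlb/virtio bounce buffers), and the ultravisor reflects that to the
HV as H_SVM_PAGE_IN with the new H_PAGE_IN_SHARED flag, which is what
ends up in kvmppc_share_page() below. A rough sketch of the guest-side
trigger, assuming a uv_share_page() ucall wrapper along the lines of the
guest-side secure-VM patches (names and signatures here are
illustrative, not part of this patch):

	/*
	 * Illustrative only: share @numpages pages starting at @addr
	 * with the hypervisor, via the ultravisor.
	 */
	static int example_share_with_hypervisor(void *addr, int numpages)
	{
		if (!PAGE_ALIGNED(addr))
			return -EINVAL;

		/*
		 * The UV tears down the secure mapping and issues
		 * H_SVM_PAGE_IN(gpa, H_PAGE_IN_SHARED, PAGE_SHIFT) to the
		 * HV for each page in the range, landing in
		 * kvmppc_share_page() below.
		 */
		return uv_share_page(PHYS_PFN(__pa(addr)), numpages);
	}
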
> ---
>  arch/powerpc/include/asm/hvcall.h |  3 ++
>  arch/powerpc/kvm/book3s_hv_hmm.c  | 66 +++++++++++++++++++++++++++++--
>  2 files changed, 66 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index 2f6b952deb0f..05b8536f6653 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -337,6 +337,9 @@
>  #define H_TLB_INVALIDATE	0xF808
>  #define H_COPY_TOFROM_GUEST	0xF80C
> 
> +/* Flags for H_SVM_PAGE_IN */
> +#define H_PAGE_IN_SHARED        0x1
> +
>  /* Platform-specific hcalls used by the Ultravisor */
>  #define H_SVM_PAGE_IN		0xEF00
>  #define H_SVM_PAGE_OUT		0xEF04
> diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> index cd34323888b6..36562b382e70 100644
> --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> @@ -52,6 +52,7 @@ struct kvmppc_hmm_page_pvt {
>  	unsigned long *rmap;
>  	unsigned int lpid;
>  	unsigned long gpa;
> +	bool skip_page_out;
>  };
> 
>  struct kvmppc_hmm_migrate_args {
> @@ -215,6 +216,53 @@ static const struct migrate_vma_ops kvmppc_hmm_migrate_ops = {
>  	.finalize_and_map = kvmppc_hmm_migrate_finalize_and_map,
>  };
> 
> +/*
> + * Shares the page with HV, thus making it a normal page.
> + *
> + * - If the page is already secure, then provision a new page and share
> + * - If the page is a normal page, share the existing page
> + *
> + * In the former case, uses the HMM fault handler to release the HMM page.
> + */
> +static unsigned long
> +kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
> +{
> +
> +	int ret;
> +	struct page *hmm_page;
> +	struct kvmppc_hmm_page_pvt *pvt;
> +	unsigned long pfn;
> +	unsigned long *rmap;
> +	struct kvm_memory_slot *slot;
> +	unsigned long gfn = gpa >> page_shift;
> +	int srcu_idx;
> +
> +	srcu_idx = srcu_read_lock(&kvm->srcu);
> +	slot = gfn_to_memslot(kvm, gfn);
> +	if (!slot) {
> +		srcu_read_unlock(&kvm->srcu, srcu_idx);
> +		return H_PARAMETER;
> +	}
> +	rmap = &slot->arch.rmap[gfn - slot->base_gfn];
> +	srcu_read_unlock(&kvm->srcu, srcu_idx);
> +
> +	if (kvmppc_is_hmm_pfn(*rmap)) {
> +		hmm_page = pfn_to_page(*rmap & ~KVMPPC_PFN_HMM);
> +		pvt = (struct kvmppc_hmm_page_pvt *)
> +			hmm_devmem_page_get_drvdata(hmm_page);
> +		pvt->skip_page_out = true;
> +	}
> +
> +	pfn = gfn_to_pfn(kvm, gpa >> page_shift);
> +	if (is_error_noslot_pfn(pfn))
> +		return H_PARAMETER;
> +
> +	ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift);
> +	kvm_release_pfn_clean(pfn);
> +
> +	return (ret == U_SUCCESS) ? H_SUCCESS : H_PARAMETER;
> +}
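
To spell out the two cases from the comment above: if the gfn's rmap
entry is tagged as an HMM pfn, the page currently lives in secure
memory, so skip_page_out is set and the subsequent gfn_to_pfn() faults
the page back through the alloc_and_copy handler without a
uv_page_out(); if it is a normal page, gfn_to_pfn() simply resolves it.
Either way the resulting pfn is handed to uv_page_in() with flags = 0,
so the UV maps it as shared. A tiny standalone model of that decision
(userspace C, simplified 64-bit encoding, purely illustrative):

	/*
	 * Toy model of the kvmppc_share_page() decision; the rmap encoding
	 * and the fault-back step are simulated.  Build with:
	 * cc -o share_model share_model.c
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PFN_HMM_FLAG	(1UL << 63)	/* stands in for KVMPPC_PFN_HMM */

	struct gfn_state {
		unsigned long rmap;	/* pfn, possibly HMM-tagged */
		bool skip_page_out;	/* read by the fault handler */
	};

	static void share_page(struct gfn_state *g)
	{
		if (g->rmap & PFN_HMM_FLAG) {
			/* Secure page: tell the fault path not to page out... */
			g->skip_page_out = true;
			/*
			 * ...then touching the page (gfn_to_pfn() in the kernel)
			 * triggers the HMM fault, which releases the device page
			 * without calling uv_page_out().
			 */
			g->rmap &= ~PFN_HMM_FLAG;
		}
		/* Both cases end with uv_page_in(..., flags = 0, ...). */
		printf("pfn %#lx now shared (skip_page_out=%d)\n",
		       g->rmap, g->skip_page_out);
	}

	int main(void)
	{
		struct gfn_state secure = { .rmap = 0x1234 | PFN_HMM_FLAG };
		struct gfn_state normal = { .rmap = 0x5678 };

		share_page(&secure);
		share_page(&normal);
		return 0;
	}
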
> +
>  /*
>   * Move page from normal memory to secure memory.
>   */
> @@ -235,9 +283,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>  	if (page_shift != PAGE_SHIFT)
>  		return H_P3;
> 
> -	if (flags)
> +	if (flags & ~H_PAGE_IN_SHARED)
>  		return H_P2;
> 
> +	if (flags & H_PAGE_IN_SHARED)
> +		return kvmppc_share_page(kvm, gpa, page_shift);
> +
>  	down_read(&kvm->mm->mmap_sem);
>  	srcu_idx = srcu_read_lock(&kvm->srcu);
>  	slot = gfn_to_memslot(kvm, gfn);
> @@ -299,8 +350,17 @@ kvmppc_hmm_fault_migrate_alloc_and_copy(struct vm_area_struct *vma,
>  	       hmm_devmem_page_get_drvdata(spage);
> 
>  	pfn = page_to_pfn(dpage);
> -	ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
> -			  pvt->gpa, 0, PAGE_SHIFT);
> +
> +	/*
> +	 * This same alloc_and_copy() callback is used in two cases:
> +	 * - When HV touches a secure page, for which we do page-out
> +	 * - When a secure page is converted to shared page, we touch
> +	 *   the page to essentially discard the HMM page. In this case we
> +	 *   skip page-out.
> +	 */
> +	if (!pvt->skip_page_out)
> +		ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
> +				  pvt->gpa, 0, PAGE_SHIFT);
>  	if (ret == U_SUCCESS)
>  		*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
>  }
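
To make the shared-page case in that last hunk explicit: when
skip_page_out is set the uv_page_out() is skipped, and ret presumably
defaults to U_SUCCESS at its declaration (not visible in this hunk) so
that the destination pfn still gets set up. The intended flow would then
read roughly like this (sketch only, restating the hunk with the assumed
initialization spelled out):

	int ret = U_SUCCESS;	/* no page-out needed for the shared case */
	...
	if (!pvt->skip_page_out)
		ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
				  pvt->gpa, 0, PAGE_SHIFT);

	if (ret == U_SUCCESS)
		*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;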

