[PATCH v8 3/8] kvmppc: Shared pages support for secure guests
Bharata B Rao
bharata at linux.ibm.com
Tue Sep 10 18:29:41 AEST 2019
A secure guest will share some of its pages with the hypervisor (e.g. virtio
bounce buffers). Add support for sharing pages between the hypervisor and
the ultravisor: a new H_PAGE_IN_SHARED flag to the H_SVM_PAGE_IN hcall
requests that the page be paged in as a shared page (see the guest-side
sketch after the diffstat below).

Once a secure page is converted to a shared page, the device page that
represented the secure page is unmapped from the HV side page tables.
Signed-off-by: Bharata B Rao <bharata at linux.ibm.com>
---
 arch/powerpc/include/asm/hvcall.h  |  3 ++
 arch/powerpc/kvm/book3s_hv_uvmem.c | 65 ++++++++++++++++++++++++++++--
 2 files changed, 65 insertions(+), 3 deletions(-)
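
For context, and not part of this patch: on the guest side, a page is made
shared via the UV_SHARE_PAGE ultracall before the HV is allowed to access
it. A minimal sketch, assuming the uv_share_page() wrapper from
asm/ultravisor.h introduced by the ultravisor support patches;
share_one_page() is a hypothetical helper:

#include <linux/pfn.h>
#include <asm/page.h>
#include <asm/ultravisor.h>

/*
 * Guest-side sketch (hypothetical helper, not part of this patch):
 * ask the Ultravisor to share the page backing 'addr' with the
 * hypervisor, e.g. before using it as a virtio bounce buffer.
 */
static int share_one_page(void *addr)
{
	u64 pfn = PHYS_PFN(__pa(addr));

	/* uv_share_page() takes a starting PFN and a page count */
	return uv_share_page(pfn, 1);
}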
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 2595d0144958..4e98dd992bd1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,6 +342,9 @@
 #define H_TLB_INVALIDATE	0xF808
 #define H_COPY_TOFROM_GUEST	0xF80C
 
+/* Flags for H_SVM_PAGE_IN */
+#define H_PAGE_IN_SHARED	0x1
+
 /* Platform-specific hcalls used by the Ultravisor */
 #define H_SVM_PAGE_IN		0xEF00
 #define H_SVM_PAGE_OUT		0xEF04
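
For reference, gpa, flags and page_shift arrive in gprs 4, 5 and 6 of the
hcall; a sketch of how they reach kvmppc_h_svm_page_in() follows.
handle_h_svm_page_in() is a hypothetical stand-in for the dispatch added
earlier in this series in kvmppc_pseries_do_hcall():

/*
 * Hypothetical stand-in for the H_SVM_PAGE_IN dispatch; the real
 * hook-up lives in kvmppc_pseries_do_hcall() (earlier patch in this
 * series). Shown only to illustrate where 'flags' comes from.
 */
static unsigned long handle_h_svm_page_in(struct kvm_vcpu *vcpu)
{
	unsigned long gpa        = kvmppc_get_gpr(vcpu, 4);
	unsigned long flags      = kvmppc_get_gpr(vcpu, 5); /* may carry H_PAGE_IN_SHARED */
	unsigned long page_shift = kvmppc_get_gpr(vcpu, 6);

	return kvmppc_h_svm_page_in(vcpu->kvm, gpa, flags, page_shift);
}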
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index a1eccb065ba9..bcecb643a730 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -46,6 +46,7 @@ struct kvmppc_uvmem_page_pvt {
 	unsigned long *rmap;
 	unsigned int lpid;
 	unsigned long gpa;
+	bool skip_page_out;
 };
 
 /*
@@ -159,6 +160,53 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 	return ret;
 }
 
+/*
+ * Shares the page with HV, thus making it a normal page.
+ *
+ * - If the page is already secure, then provision a new page and share
+ * - If the page is a normal page, share the existing page
+ *
+ * In the former case, uses the dev_pagemap_ops migrate_to_ram handler
+ * to unmap the device page from QEMU's page tables.
+ */
+static unsigned long
+kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
+{
+
+	int ret = H_PARAMETER;
+	struct page *uvmem_page;
+	struct kvmppc_uvmem_page_pvt *pvt;
+	unsigned long pfn;
+	unsigned long *rmap;
+	struct kvm_memory_slot *slot;
+	unsigned long gfn = gpa >> page_shift;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot)
+		goto out;
+
+	rmap = &slot->arch.rmap[gfn - slot->base_gfn];
+	if (kvmppc_rmap_type(rmap) == KVMPPC_RMAP_UVMEM_PFN) {
+		uvmem_page = pfn_to_page(*rmap & ~KVMPPC_RMAP_UVMEM_PFN);
+		pvt = (struct kvmppc_uvmem_page_pvt *)
+			uvmem_page->zone_device_data;
+		pvt->skip_page_out = true;
+	}
+
+	pfn = gfn_to_pfn(kvm, gfn);
+	if (is_error_noslot_pfn(pfn))
+		goto out;
+
+	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
+		ret = H_SUCCESS;
+	kvm_release_pfn_clean(pfn);
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return ret;
+}
+
 /*
  * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
  */
@@ -177,9 +225,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	if (page_shift != PAGE_SHIFT)
 		return H_P3;
 
-	if (flags)
+	if (flags & ~H_PAGE_IN_SHARED)
 		return H_P2;
 
+	if (flags & H_PAGE_IN_SHARED)
+		return kvmppc_share_page(kvm, gpa, page_shift);
+
 	ret = H_PARAMETER;
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	down_read(&kvm->mm->mmap_sem);
@@ -252,8 +303,16 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
 	pvt = spage->zone_device_data;
 	pfn = page_to_pfn(dpage);
 
-	ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
-			  page_shift);
+	/*
+	 * This function is used in two cases:
+	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
+	 * - When a secure page is converted to shared page, we touch
+	 *   the page to essentially unmap the device page. In this
+	 *   case we skip page-out.
+	 */
+	if (!pvt->skip_page_out)
+		ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
+				  page_shift);
 
 	if (ret == U_SUCCESS)
 		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
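
To summarize the conversion path this patch adds, as an illustrative call
flow (comments only, not new code):

/*
 * UV issues H_SVM_PAGE_IN(gpa, H_PAGE_IN_SHARED, page_shift)
 *  -> kvmppc_h_svm_page_in() sees H_PAGE_IN_SHARED
 *   -> kvmppc_share_page():
 *      - if the gfn currently maps a secure (device) page, set
 *        pvt->skip_page_out on it
 *      - gfn_to_pfn() faults in a normal page; the fault runs the
 *        dev_pagemap_ops migrate_to_ram handler -> kvmppc_svm_page_out(),
 *        which skips UV_PAGE_OUT because skip_page_out is set
 *      - uv_page_in() then maps the normal page for the guest, making
 *        the page shared between HV and the secure guest
 */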
--
2.21.0