[PATCH v3 1/2] powerpc: kvm: pair kvmppc_hv_find_lock_hpte with _unlock_hpte

Liu Ping Fan kernelfans at gmail.com
Fri Nov 8 18:29:53 EST 2013


Highlight the lock/unlock pairing for the reader. (And later it will
be the place to hide the details of disabling preemption.)
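
For illustration, the intended pairing at a call site looks like this
(a minimal sketch modeled on the xlate path below):

	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, valid);
	if (index < 0)
		return -ENOENT;		/* no matching HPTE */
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0];			/* HPTE_V_HVLOCK still set here */
	/* ... read revmap etc. while the HPTE is held locked ... */
	kvmppc_hv_unlock_hpte(hptep, &v);	/* also clears HVLOCK in v */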

Signed-off-by: Liu Ping Fan <pingfank at linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/kvm_book3s.h |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c   |  7 ++-----
 arch/powerpc/kvm/book3s_hv_rm_mmu.c   | 13 ++++++++++---
 3 files changed, 13 insertions(+), 8 deletions(-)
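
For reference, kvmppc_hv_unlock_hpte(hptep, &v) is equivalent to the
open-coded sequence it replaces (sketch):

	v &= ~HPTE_V_HVLOCK;			/* drop the lock bit in the caller's copy */
	asm volatile("lwsync" : : : "memory");	/* order prior accesses before the release */
	hptep[0] = v;				/* publish the unlocked HPTE word */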

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f..a818932 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -129,6 +129,7 @@ extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
 extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
 			struct kvm_vcpu *vcpu, unsigned long addr,
 			unsigned long status);
+extern void kvmppc_hv_unlock_hpte(ulong *hptep, ulong *hpte_val);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 			unsigned long slb_v, unsigned long valid);
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 842f081..97685e7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -479,12 +479,9 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (index < 0)
 		return -ENOENT;
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-	v = hptep[0] & ~HPTE_V_HVLOCK;
+	v = hptep[0];
 	gr = kvm->arch.revmap[index].guest_rpte;
-
-	/* Unlock the HPTE */
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = v;
+	kvmppc_hv_unlock_hpte(hptep, &v);
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c51544..0ff9e91 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -749,6 +749,14 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+void kvmppc_hv_unlock_hpte(unsigned long *hptep, unsigned long *hpte_val)
+{
+	*hpte_val = *hpte_val & ~HPTE_V_HVLOCK;
+	asm volatile("lwsync" : : : "memory");
+	*hptep = *hpte_val;
+}
+EXPORT_SYMBOL(kvmppc_hv_unlock_hpte);
+
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
@@ -863,12 +871,11 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 		return 0;		/* for prot fault, HPTE disappeared */
 	}
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-	v = hpte[0] & ~HPTE_V_HVLOCK;
+	v = hpte[0];
 	r = hpte[1];
 	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
 	gr = rev->guest_rpte;
-
-	unlock_hpte(hpte, v);
+	kvmppc_hv_unlock_hpte(hpte, &v);
 
 	/* For not found, if the HPTE is valid by now, retry the instruction */
 	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
-- 
1.8.1.4