[PATCH 4/4] KVM: PPC: Add hugepage support for IOMMU in-kernel handling
Benjamin Herrenschmidt
benh@kernel.crashing.org
Sun Jun 16 14:46:42 EST 2013
On Wed, 2013-06-05 at 16:11 +1000, Alexey Kardashevskiy wrote:
> @@ -185,7 +186,31 @@ static unsigned long kvmppc_realmode_gpa_to_hpa(struct kvm_vcpu *vcpu,
> unsigned long hva, hpa, pg_size = 0, offset;
> unsigned long gfn = gpa >> PAGE_SHIFT;
> bool writing = gpa & TCE_PCI_WRITE;
> + struct kvmppc_iommu_hugepage *hp;
>
> + /*
> + * Try to find an already used hugepage.
> + * If it is not there, the kvmppc_lookup_pte() will return zero
> + * as it won't do get_page() on a huge page in real mode
> + * and therefore the request will be passed to the virtual mode.
> + */
> + if (tt) {
> + spin_lock(&tt->hugepages_lock);
> + list_for_each_entry(hp, &tt->hugepages, list) {
> + if ((gpa < hp->gpa) || (gpa >= hp->gpa + hp->size))
> + continue;
> +
> + /* Calculate host phys address keeping flags and offset in the page */
> + offset = gpa & (hp->size - 1);
> +
> + /* pte_pfn(pte) should return an address aligned to pg_size */
> + hpa = (pte_pfn(hp->pte) << PAGE_SHIFT) + offset;
> + spin_unlock(&tt->hugepages_lock);
> +
> + return hpa;
> + }
> + spin_unlock(&tt->hugepages_lock);
> + }
Wow... this is run in real mode, right?
spin_lock() and spin_unlock() are a big no-no in real mode. If lockdep
and/or spinlock debugging are enabled and something goes pear-shaped
they are going to bring your whole system down in a blink in quite
horrible ways.
If you are going to do that, you need some kind of custom low-level
lock.
Also, I see that you are basically using a non-ordered list and doing a
linear search in it every time. That's going to COST!
You should really consider a more efficient data structure. You should
also be able to do something that doesn't require locks for readers.
> /* Find a KVM memslot */
> memslot = search_memslots(kvm_memslots(vcpu->kvm), gfn);
> if (!memslot)
> @@ -237,6 +262,10 @@ static long kvmppc_clear_tce_real_mode(struct kvm_vcpu *vcpu,
> if (oldtce & TCE_PCI_WRITE)
> SetPageDirty(page);
>
> + /* Do not put a huge page and continue without error */
> + if (PageCompound(page))
> + continue;
> +
> if (realmode_put_page(page)) {
> ret = H_TOO_HARD;
> break;
> @@ -282,7 +311,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> if (iommu_tce_put_param_check(tbl, ioba, tce))
> return H_PARAMETER;
>
> - hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tce, true);
> + hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt, tce, true);
> if (hpa == ERROR_ADDR) {
> vcpu->arch.tce_reason = H_TOO_HARD;
> return H_TOO_HARD;
> @@ -295,6 +324,11 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> if (unlikely(ret)) {
> struct page *pg = realmode_pfn_to_page(hpa);
> BUG_ON(!pg);
> +
> + /* Do not put a huge page and return an error */
> + if (!PageCompound(pg))
> + return H_HARDWARE;
> +
> if (realmode_put_page(pg)) {
> vcpu->arch.tce_reason = H_HARDWARE;
> return H_TOO_HARD;
> @@ -351,7 +385,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> vcpu->arch.tce_tmp_num = 0;
> vcpu->arch.tce_reason = 0;
>
> - tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu,
> + tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu, NULL,
> tce_list, false);
> if ((unsigned long)tces == ERROR_ADDR)
> return H_TOO_HARD;
> @@ -374,7 +408,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>
> /* Translate TCEs and go get_page */
> for (i = 0; i < npages; ++i) {
> - unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu,
> + unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt,
> vcpu->arch.tce_tmp[i], true);
> if (hpa == ERROR_ADDR) {
> vcpu->arch.tce_tmp_num = i;
Cheers,
Ben.
More information about the Linuxppc-dev
mailing list