[PATCH v7 2/8] KVM: Introduce __kvm_follow_pfn function
Sean Christopherson
seanjc at google.com
Sat Aug 5 08:13:28 AEST 2023
On Tue, Jul 04, 2023, David Stevens wrote:
> From: David Stevens <stevensd at chromium.org>
>
> Introduce __kvm_follow_pfn, which will replace __gfn_to_pfn_memslot.
> __kvm_follow_pfn refactors the old API's arguments into a struct and,
> where possible, combines the boolean arguments into a single flags
> argument.
>
> Signed-off-by: David Stevens <stevensd at chromium.org>
> ---
> include/linux/kvm_host.h |  16 ++++
> virt/kvm/kvm_main.c      | 171 ++++++++++++++++++++++-----------------
> virt/kvm/kvm_mm.h        |   3 +-
> virt/kvm/pfncache.c      |   8 +-
> 4 files changed, 122 insertions(+), 76 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 9d3ac7720da9..ef2763c2b12e 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -97,6 +97,7 @@
> #define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
> #define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
> #define KVM_PFN_ERR_SIGPENDING	(KVM_PFN_ERR_MASK + 3)
> +#define KVM_PFN_ERR_NEEDS_IO	(KVM_PFN_ERR_MASK + 4)
Hmm, ideally KVM_PFN_ERR_NEEDS_IO would be introduced in a separate prep patch,
e.g. by changing "bool *async" to "bool no_wait". At a glance, I can't tell if
that's feasible though, so consider it more of a "wish" than a request.
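If it is feasible, the prep patch would mostly be mechanical churn at the call
sites, something like the below (a sketch only, not even compile tested; whether
every "async" user can be converted this way is exactly the part I haven't
checked):

	/* Before: the callee sets *async when it would have had to wait. */
	bool async = false;

	pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, &async,
				   write_fault, writable, hva);
	if (async) {
		/* caller kicks off an async page fault and retries later */
	}

	/* After: pass no_wait=true and check for the new error code. */
	pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, true /* no_wait */,
				   write_fault, writable, hva);
	if (pfn == KVM_PFN_ERR_NEEDS_IO) {
		/* caller kicks off an async page fault and retries later */
	}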
> @@ -2572,23 +2561,23 @@ static int kvm_try_get_pfn(kvm_pfn_t pfn)
> 	return get_page_unless_zero(page);
> }
>
> -static int hva_to_pfn_remapped(struct vm_area_struct *vma,
> -			       unsigned long addr, bool write_fault,
> -			       bool *writable, kvm_pfn_t *p_pfn)
> +static int hva_to_pfn_remapped(struct vm_area_struct *vma, struct kvm_follow_pfn *foll,
> +			       kvm_pfn_t *p_pfn)
Please wrap. KVM still honors the 80 char soft limit unless there's a reason not
to, and in this case the declaration is already wrapping, i.e.:

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       struct kvm_follow_pfn *foll, kvm_pfn_t *p_pfn)
> @@ -2606,8 +2595,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
> 		goto out;
> 	}
>
> -	if (writable)
> -		*writable = pte_write(*ptep);
> +	foll->writable = pte_write(*ptep) && foll->allow_write_mapping;
Similar to feedback in my other response, don't condition this on try_map_writable
(allow_write_mapping in this version), i.e. just do:

	foll->writable = pte_write(...);
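E.g. the hunk would become something like this (untested; the comment is just my
suggestion for capturing the rationale):

-	if (writable)
-		*writable = pte_write(*ptep);
+	/*
+	 * Report writability unconditionally; callers that didn't request a
+	 * writable mapping can simply ignore foll->writable.
+	 */
+	foll->writable = pte_write(*ptep);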
> 	pfn = pte_pfn(*ptep);
>
> 	/*
> @@ -2652,24 +2640,22 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
> * 2): @write_fault = false && @writable, @writable will tell the caller
> *     whether the mapping is writable.
> */
> -kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
> -		     bool *async, bool write_fault, bool *writable)
> +kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *foll)
> {
> 	struct vm_area_struct *vma;
> 	kvm_pfn_t pfn;
> 	int npages, r;
>
> 	/* we can do it either atomically or asynchronously, not both */
> -	BUG_ON(atomic && async);
> +	BUG_ON(foll->atomic && (foll->flags & FOLL_NOWAIT));
>
> -	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
> +	if (hva_to_pfn_fast(foll, &pfn))
> 		return pfn;
>
> -	if (atomic)
> +	if (foll->atomic)
> 		return KVM_PFN_ERR_FAULT;
>
> -	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
> -				 writable, &pfn);
> +	npages = hva_to_pfn_slow(foll, &pfn);
> 	if (npages == 1)
> 		return pfn;
> 	if (npages == -EINTR)
> @@ -2677,83 +2663,122 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
>
> 	mmap_read_lock(current->mm);
> 	if (npages == -EHWPOISON ||
> -	      (!async && check_user_page_hwpoison(addr))) {
> +	      (!(foll->flags & FOLL_NOWAIT) && check_user_page_hwpoison(foll->hva))) {
Opportunistically align the indentation, as an added bonus that makes the line
length a few chars shorter, i.e.
	if (npages == -EHWPOISON ||
	    (!(foll->flags & FOLL_NOWAIT) && check_user_page_hwpoison(foll->hva))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}