[PATCH v2 10/13] mm/gup: Handle huge pud for follow_pud_mask()

Jason Gunthorpe jgg at nvidia.com
Tue Jan 16 05:49:00 AEDT 2024


On Wed, Jan 03, 2024 at 05:14:20PM +0800, peterx at redhat.com wrote:
> diff --git a/mm/gup.c b/mm/gup.c
> index 63845b3ec44f..760406180222 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -525,6 +525,70 @@ static struct page *no_page_table(struct vm_area_struct *vma,
>  	return NULL;
>  }
>  
> +#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
> +static struct page *follow_huge_pud(struct vm_area_struct *vma,
> +				    unsigned long addr, pud_t *pudp,
> +				    int flags, struct follow_page_context *ctx)
> +{
> +	struct mm_struct *mm = vma->vm_mm;
> +	struct page *page;
> +	pud_t pud = *pudp;
> +	unsigned long pfn = pud_pfn(pud);
> +	int ret;
> +
> +	assert_spin_locked(pud_lockptr(mm, pudp));
> +
> +	if ((flags & FOLL_WRITE) && !pud_write(pud))
> +		return NULL;
> +
> +	if (!pud_present(pud))
> +		return NULL;
> +
> +	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
> +
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> +	if (pud_devmap(pud)) {

Can this use IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) ?

> +		/*
> +		 * device mapped pages can only be returned if the caller
> +		 * will manage the page reference count.
> +		 *
> +		 * At least one of FOLL_GET | FOLL_PIN must be set, so
> +		 * assert that here:
> +		 */
> +		if (!(flags & (FOLL_GET | FOLL_PIN)))
> +			return ERR_PTR(-EEXIST);
> +
> +		if (flags & FOLL_TOUCH)
> +			touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
> +
> +		ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
> +		if (!ctx->pgmap)
> +			return ERR_PTR(-EFAULT);
> +	}
> +#endif	/* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
> +	page = pfn_to_page(pfn);
> +
> +	if (!pud_devmap(pud) && !pud_write(pud) &&
> +	    gup_must_unshare(vma, flags, page))
> +		return ERR_PTR(-EMLINK);
> +
> +	ret = try_grab_page(page, flags);
> +	if (ret)
> +		page = ERR_PTR(ret);
> +	else
> +		ctx->page_mask = HPAGE_PUD_NR - 1;
> +
> +	return page;
> +}
> +#else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
> +static struct page *follow_huge_pud(struct vm_area_struct *vma,
> +				    unsigned long addr, pud_t *pudp,
> +				    int flags, struct follow_page_context *ctx)
> +{
> +	return NULL;
> +}
> +#endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
> +
>  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
>  		pte_t *pte, unsigned int flags)
>  {
> @@ -760,11 +824,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
>  
>  	pudp = pud_offset(p4dp, address);
>  	pud = READ_ONCE(*pudp);
> -	if (pud_none(pud))
> +	if (pud_none(pud) || !pud_present(pud))
>  		return no_page_table(vma, flags, address);

Isn't 'pud_none() || !pud_present()' redundant? A none pud is
non-present, by definition?

> -	if (pud_devmap(pud)) {
> +	if (pud_huge(pud)) {
>  		ptl = pud_lock(mm, pudp);
> -		page = follow_devmap_pud(vma, address, pudp, flags, &ctx->pgmap);
> +		page = follow_huge_pud(vma, address, pudp, flags, ctx);
>  		spin_unlock(ptl);
>  		if (page)
>  			return page;

Otherwise it looks OK to me

Reviewed-by: Jason Gunthorpe <jgg at nvidia.com>

Jason


More information about the Linuxppc-dev mailing list