[PATCH v1 07/29] mm/migrate: rename isolate_movable_page() to isolate_movable_ops_page()

Lorenzo Stoakes lorenzo.stoakes at oracle.com
Tue Jul 1 02:24:44 AEST 2025


On Mon, Jun 30, 2025 at 02:59:48PM +0200, David Hildenbrand wrote:
> ... and start moving back to per-page things that will absolutely not be
> folio things in the future. Add documentation and a comment that the
> remaining folio stuff (lock, refcount) will have to be reworked as well.
>
> While at it, convert the VM_BUG_ON() into a WARN_ON_ONCE() and handle
> it gracefully (relevant with further changes), and convert a
> WARN_ON_ONCE() into a VM_WARN_ON_ONCE_PAGE().
>
> Note that we will leave anything that needs a rework (lock, refcount,
> ->lru) to be using folios for now: that perfectly highlights the
> problematic bits.
>
> Reviewed-by: Zi Yan <ziy at nvidia.com>
> Reviewed-by: Harry Yoo <harry.yoo at oracle.com>
> Signed-off-by: David Hildenbrand <david at redhat.com>

Seems reasonable to me, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes at oracle.com>

> ---
>  include/linux/migrate.h |  4 ++--
>  mm/compaction.c         |  2 +-
>  mm/migrate.c            | 39 +++++++++++++++++++++++++++++----------
>  3 files changed, 32 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index aaa2114498d6d..c0ec7422837bd 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>  		  unsigned long private, enum migrate_mode mode, int reason,
>  		  unsigned int *ret_succeeded);
>  struct folio *alloc_migration_target(struct folio *src, unsigned long private);
> -bool isolate_movable_page(struct page *page, isolate_mode_t mode);
> +bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
>  bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
>
>  int migrate_huge_page_move_mapping(struct address_space *mapping,
> @@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
>  static inline struct folio *alloc_migration_target(struct folio *src,
>  		unsigned long private)
>  	{ return NULL; }
> -static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
>  	{ return false; }
>  static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
>  	{ return false; }
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3925cb61dbb8f..17455c5a4be05 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -1093,7 +1093,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
>  					locked = NULL;
>  				}
>
> -				if (isolate_movable_page(page, mode)) {
> +				if (isolate_movable_ops_page(page, mode)) {
>  					folio = page_folio(page);
>  					goto isolate_success;
>  				}
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 767f503f08758..d4b4a7eefb6bd 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -51,8 +51,26 @@
>  #include "internal.h"
>  #include "swap.h"
>
> -bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +/**
> + * isolate_movable_ops_page - isolate a movable_ops page for migration
> + * @page: The page.
> + * @mode: The isolation mode.
> + *
> + * Try to isolate a movable_ops page for migration. Will fail if the page is
> + * not a movable_ops page, if the page is already isolated for migration
> + * or if the page was just was released by its owner.
> + *
> + * Once isolated, the page cannot get freed until it is either putback
> + * or migrated.
> + *
> + * Returns true if isolation succeeded, otherwise false.
> + */
> +bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
>  {
> +	/*
> +	 * TODO: these pages will not be folios in the future. All
> +	 * folio dependencies will have to be removed.
> +	 */
>  	struct folio *folio = folio_get_nontail_page(page);
>  	const struct movable_operations *mops;
>
> @@ -73,7 +91,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	 * we use non-atomic bitops on newly allocated page flags so
>  	 * unconditionally grabbing the lock ruins page's owner side.
>  	 */
> -	if (unlikely(!__folio_test_movable(folio)))
> +	if (unlikely(!__PageMovable(page)))
>  		goto out_putfolio;
>
>  	/*
> @@ -90,18 +108,19 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	if (unlikely(!folio_trylock(folio)))
>  		goto out_putfolio;
>
> -	if (!folio_test_movable(folio) || folio_test_isolated(folio))
> +	if (!PageMovable(page) || PageIsolated(page))

I wonder, in the wonderful future where PageXXX() always refers to a page, can
we use something less horrible than these macros?
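Something function-style, mirroring the folio_test_*() naming, would already
be an improvement. Purely a hypothetical sketch on my part (none of these
helpers exist today, names made up):

	/* hypothetical page accessors in the folio_test_*() style */
	static inline bool page_test_movable_ops(const struct page *page);
	static inline bool page_test_isolated(const struct page *page);

so that the check above would read:

	if (!page_test_movable_ops(page) || page_test_isolated(page))
		goto out_no_isolated;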

>  		goto out_no_isolated;
>
> -	mops = folio_movable_ops(folio);
> -	VM_BUG_ON_FOLIO(!mops, folio);
> +	mops = page_movable_ops(page);
> +	if (WARN_ON_ONCE(!mops))
> +		goto out_no_isolated;
>
> -	if (!mops->isolate_page(&folio->page, mode))
> +	if (!mops->isolate_page(page, mode))
>  		goto out_no_isolated;
>
>  	/* Driver shouldn't use the isolated flag */
> -	WARN_ON_ONCE(folio_test_isolated(folio));
> -	folio_set_isolated(folio);
> +	VM_WARN_ON_ONCE_PAGE(PageIsolated(page), page);
> +	SetPageIsolated(page);
>  	folio_unlock(folio);
>
>  	return true;
> @@ -175,8 +194,8 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
>  	if (lru)
>  		isolated = folio_isolate_lru(folio);
>  	else
> -		isolated = isolate_movable_page(&folio->page,
> -						ISOLATE_UNEVICTABLE);
> +		isolated = isolate_movable_ops_page(&folio->page,
> +						    ISOLATE_UNEVICTABLE);
>
>  	if (!isolated)
>  		return false;
> --
> 2.49.0
>
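One closing aside for anyone following along: the new kerneldoc makes the
contract pleasantly explicit. If I read it correctly, the usual pattern is to
isolate, attempt migration, then put back whatever wasn't migrated. A
simplified sketch from memory (error handling elided; note the list linkage
still goes through the folio ->lru for now, exactly as the TODO highlights):

	LIST_HEAD(pages);

	if (isolate_movable_ops_page(page, ISOLATE_UNEVICTABLE))
		list_add(&page_folio(page)->lru, &pages);

	/* ... migrate_pages(&pages, ...) ... */

	putback_movable_pages(&pages);	/* return anything not migrated */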

