[PATCH v1 16/29] mm: rename __PageMovable() to page_has_movable_ops()
Lorenzo Stoakes
lorenzo.stoakes at oracle.com
Tue Jul 1 20:59:29 AEST 2025
On Mon, Jun 30, 2025 at 02:59:57PM +0200, David Hildenbrand wrote:
> Let's make it clearer that we are talking about movable_ops pages.
>
> While at it, convert a VM_BUG_ON to a VM_WARN_ON_ONCE_PAGE.
<3
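
For readers following along, the practical difference (as I understand it, both
helpers compile away without CONFIG_DEBUG_VM):

	/* Before: a hard BUG() on a mismatch. */
	VM_BUG_ON(!__PageMovable(page));

	/* After: warn once, dump the offending page, and continue. */
	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);

Much nicer for debugging.
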
>
> Reviewed-by: Zi Yan <ziy at nvidia.com>
> Signed-off-by: David Hildenbrand <david at redhat.com>
Great, love it.
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes at oracle.com>
I noticed that the Simplified Chinese documentation still has references to
__PageMovable(), but again we have to defer to somebody fluent in this of course!

But also in mm/memory_hotplug.c, in the comment in scan_movable_pages():

	/*
	 * PageOffline() pages that are not marked __PageMovable() and

Trivial one, but might be worth fixing that up also?
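Something like the below, perhaps (only sketching the one line with the new
naming, the rest of the comment left untouched):

	 * PageOffline() pages that do not have movable_ops and
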
> ---
> include/linux/migrate.h | 2 +-
> include/linux/page-flags.h | 2 +-
> mm/compaction.c | 7 ++-----
> mm/memory-failure.c | 4 ++--
> mm/memory_hotplug.c | 8 +++-----
> mm/migrate.c | 8 ++++----
> mm/page_alloc.c | 2 +-
> mm/page_isolation.c | 10 +++++-----
> 8 files changed, 19 insertions(+), 24 deletions(-)
>
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 25659a685e2aa..e04035f70e36f 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -115,7 +115,7 @@ static inline void __SetPageMovable(struct page *page,
> static inline
> const struct movable_operations *page_movable_ops(struct page *page)
> {
> - VM_BUG_ON(!__PageMovable(page));
> + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
>
> return (const struct movable_operations *)
> ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 4fe5ee67535b2..c67163b73c5ec 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -750,7 +750,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
> PAGE_MAPPING_MOVABLE;
> }
>
> -static __always_inline bool __PageMovable(const struct page *page)
> +static __always_inline bool page_has_movable_ops(const struct page *page)
> {
> return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
> PAGE_MAPPING_MOVABLE;
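
Side note for anyone reading along: the new name also makes the link to
page_movable_ops() nice and obvious. Purely as illustration (this just restates
the two hunks above), the encoding both helpers rely on is:

	/* movable_ops state lives in page->mapping with PAGE_MAPPING_MOVABLE set. */
	bool movable = ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
		       PAGE_MAPPING_MOVABLE;
	const struct movable_operations *ops = (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
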
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 5c37373017014..41fd6a1fe9a33 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -1056,11 +1056,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
> * Skip any other type of page
> */
> if (!PageLRU(page)) {
> - /*
> - * __PageMovable can return false positive so we need
> - * to verify it under page_lock.
> - */
> - if (unlikely(__PageMovable(page)) &&
> + /* Isolation code will deal with any races. */
> + if (unlikely(page_has_movable_ops(page)) &&
> !PageIsolated(page)) {
> if (locked) {
> unlock_page_lruvec_irqrestore(locked, flags);
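
For anyone wondering what "isolation code" refers to here: that is
isolate_movable_ops_page() in the mm/migrate.c hunk further down, which relies
on the property spelled out in its comment:

	/*
	 * Note that once a page has movable_ops, it will stay that way
	 * until the page was freed.
	 */
	if (unlikely(!page_has_movable_ops(page)))
		goto out_putfolio;

So dropping the "false positive" wording here seems fine to me.
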
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index b91a33fb6c694..9e2cff1999347 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1388,8 +1388,8 @@ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
> if (PageSlab(page))
> return false;
>
> - /* Soft offline could migrate non-LRU movable pages */
> - if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
> + /* Soft offline could migrate movable_ops pages */
> + if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
> return true;
>
> return PageLRU(page) || is_free_buddy_page(page);
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 62d45752f9f44..5fad126949d08 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1739,8 +1739,8 @@ bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
>
> #ifdef CONFIG_MEMORY_HOTREMOVE
> /*
> - * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
> - * non-lru movable pages and hugepages). Will skip over most unmovable
> + * Scan pfn range [start,end) to find movable/migratable pages (LRU and
> + * hugetlb folio, movable_ops pages). Will skip over most unmovable
> * pages (esp., pages that can be skipped when offlining), but bail out on
> * definitely unmovable pages.
> *
> @@ -1759,9 +1759,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
> struct folio *folio;
>
> page = pfn_to_page(pfn);
> - if (PageLRU(page))
> - goto found;
> - if (__PageMovable(page))
> + if (PageLRU(page) || page_has_movable_ops(page))
> goto found;
>
> /*
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 040484230aebc..587af35b7390d 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -94,7 +94,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
> * Note that once a page has movable_ops, it will stay that way
> * until the page was freed.
> */
> - if (unlikely(!__PageMovable(page)))
> + if (unlikely(!page_has_movable_ops(page)))
> goto out_putfolio;
>
> /*
> @@ -111,7 +111,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
> if (unlikely(!folio_trylock(folio)))
> goto out_putfolio;
>
> - VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
> + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
> if (PageIsolated(page))
> goto out_no_isolated;
>
> @@ -153,7 +153,7 @@ static void putback_movable_ops_page(struct page *page)
> */
> struct folio *folio = page_folio(page);
>
> - VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
> + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
> VM_WARN_ON_ONCE_PAGE(!PageIsolated(page), page);
> folio_lock(folio);
> page_movable_ops(page)->putback_page(page);
> @@ -192,7 +192,7 @@ static int migrate_movable_ops_page(struct page *dst, struct page *src,
> {
> int rc = MIGRATEPAGE_SUCCESS;
>
> - VM_WARN_ON_ONCE_PAGE(!__PageMovable(src), src);
> + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
> VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
> rc = page_movable_ops(src)->migrate_page(dst, src, mode);
> if (rc == MIGRATEPAGE_SUCCESS)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 44e56d31cfeb1..a134b9fa9520e 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2005,7 +2005,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
> * migration are movable. But we don't actually try
> * isolating, as that would be expensive.
> */
> - if (PageLRU(page) || __PageMovable(page))
> + if (PageLRU(page) || page_has_movable_ops(page))
> (*num_movable)++;
> pfn++;
> }
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index ece3bfc56bcd5..b97b965b3ed01 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -21,9 +21,9 @@
> * consequently belong to a single zone.
> *
> * PageLRU check without isolation or lru_lock could race so that
> - * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
> - * check without lock_page also may miss some movable non-lru pages at
> - * race condition. So you can't expect this function should be exact.
> + * MIGRATE_MOVABLE block might include unmovable pages. Similarly, pages
> + * with movable_ops can only be identified some time after they were
> + * allocated. So you can't expect this function should be exact.
> *
> * Returns a page without holding a reference. If the caller wants to
> * dereference that page (e.g., dumping), it has to make sure that it
> @@ -133,7 +133,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
> if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
> continue;
>
> - if (__PageMovable(page) || PageLRU(page))
> + if (PageLRU(page) || page_has_movable_ops(page))
> continue;
>
> /*
> @@ -421,7 +421,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn,
> * proper free and split handling for them.
> */
> VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
> - VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
> + VM_WARN_ON_ONCE_PAGE(page_has_movable_ops(page), page);
>
> goto failed;
> }
> --
> 2.49.0
>