[PATCH v2 03/11] mm/migrate_device: Make migrate_device_{pfns,range}() take mpfns
Jordan Niethe
jniethe at nvidia.com
Wed Jan 7 20:18:15 AEDT 2026
A future change will remove device private pages from the physical
address space. This means that device private pages will no longer have
a pfn.
This causes an issue for migrate_device_{pfns,range}(), which take pfn
parameters: whether the device is MEMORY_DEVICE_PRIVATE or
MEMORY_DEVICE_COHERENT affects how that parameter should be
interpreted.
A MIGRATE_PFN flag will be introduced that distinguishes between mpfns
that contain a pfn and mpfns that contain an offset into device private
memory; take advantage of that here.
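For reference, the encoding this relies on is roughly the existing
migrate pfn helpers in include/linux/migrate.h (a sketch only, not part
of this patch):

	/* Encode a raw pfn as an mpfn: pfn in the high bits, flags below. */
	static inline unsigned long migrate_pfn(unsigned long pfn)
	{
		return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
	}

	/* Decode an mpfn back to its struct page, or NULL if not valid. */
	static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
	{
		if (!(mpfn & MIGRATE_PFN_VALID))
			return NULL;
		return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
	}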
Update migrate_device_{pfns,range}() to take an mpfn instead of a pfn.
Update the users of migrate_device_{pfns,range}() to pass in an mpfn.
To support this change, update
dpagemap_devmem_ops::populate_devmem_pfn() to return mpfns instead and
rename it accordingly.
Signed-off-by: Jordan Niethe <jniethe at nvidia.com>
---
v2: New to series
---
drivers/gpu/drm/drm_pagemap.c | 9 +++---
drivers/gpu/drm/nouveau/nouveau_dmem.c | 5 +--
drivers/gpu/drm/xe/xe_svm.c | 9 +++---
include/drm/drm_pagemap.h | 8 ++---
lib/test_hmm.c | 2 +-
mm/migrate_device.c | 45 ++++++++++++++------------
6 files changed, 41 insertions(+), 37 deletions(-)
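The caller-visible change is that the start of the range passed to
migrate_device_range() must now be encoded with migrate_pfn() rather
than passed as a raw pfn, as the nouveau and test_hmm hunks below show.
A minimal sketch of a caller (the pagemap variable is illustrative):

	unsigned long start_pfn = pagemap->range.start >> PAGE_SHIFT;

	/* Before this patch: a raw pfn. */
	migrate_device_range(src_pfns, start_pfn, npages);

	/* After this patch: an mpfn. */
	migrate_device_range(src_pfns, migrate_pfn(start_pfn), npages);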
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 5ddf395847ef..e4c73a9ce68b 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -337,7 +337,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
mmap_assert_locked(mm);
- if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ if (!ops->populate_devmem_mpfn || !ops->copy_to_devmem ||
!ops->copy_to_ram)
return -EOPNOTSUPP;
@@ -390,7 +390,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_finalize;
}
- err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ err = ops->populate_devmem_mpfn(devmem_allocation, npages, migrate.dst);
if (err)
goto err_finalize;
@@ -401,10 +401,9 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_finalize;
for (i = 0; i < npages; ++i) {
- struct page *page = pfn_to_page(migrate.dst[i]);
+ struct page *page = migrate_pfn_to_page(migrate.dst[i]);
pages[i] = page;
- migrate.dst[i] = migrate_pfn(migrate.dst[i]);
drm_pagemap_get_devmem_page(page, zdd);
}
@@ -575,7 +574,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
pagemap_addr = buf + (2 * sizeof(*src) * npages);
pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
- err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ err = ops->populate_devmem_mpfn(devmem_allocation, npages, src);
if (err)
goto err_free;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index a7edcdca9701..bd3f7102c3f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -483,8 +483,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
dma_info = kvcalloc(npages, sizeof(*dma_info), GFP_KERNEL | __GFP_NOFAIL);
- migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
- npages);
+ migrate_device_range(src_pfns,
+ migrate_pfn(chunk->pagemap.range.start >> PAGE_SHIFT),
+ npages);
for (i = 0; i < npages; i++) {
if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e1..260676b0d246 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -5,6 +5,7 @@
#include <drm/drm_drv.h>
+#include <linux/migrate.h>
#include "xe_bo.h"
#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
@@ -681,8 +682,8 @@ static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
return &vram->ttm.mm;
}
-static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn)
+static int xe_svm_populate_devmem_mpfn(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct ttm_resource *res = bo->ttm.resource;
@@ -697,7 +698,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
int i;
for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
- pfn[j++] = block_pfn + i;
+ pfn[j++] = migrate_pfn(block_pfn + i);
}
return 0;
@@ -705,7 +706,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
.devmem_release = xe_svm_devmem_release,
- .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
+ .populate_devmem_mpfn = xe_svm_populate_devmem_mpfn,
.copy_to_devmem = xe_svm_copy_to_devmem,
.copy_to_ram = xe_svm_copy_to_ram,
};
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index f6e7e234c089..0d1d083b778a 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -157,17 +157,17 @@ struct drm_pagemap_devmem_ops {
void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
/**
- * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @populate_devmem_mpfn: Populate device memory migrate PFNs (required for migration)
* @devmem_allocation: device memory allocation
* @npages: Number of pages to populate
- * @pfn: Array of page frame numbers to populate
+ * @mpfn: Array of migrate page frame numbers to populate
*
* Populate device memory page frame numbers (PFN).
*
* Return: 0 on success, a negative error code on failure.
*/
- int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn);
+ int (*populate_devmem_mpfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *mpfn);
/**
* @copy_to_devmem: Copy to device memory (required for migration)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 7e5248404d00..a6ff292596f3 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1389,7 +1389,7 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
- migrate_device_range(src_pfns, start_pfn, npages);
+ migrate_device_range(src_pfns, migrate_pfn(start_pfn), npages);
for (i = 0; i < npages; i++) {
struct page *dpage, *spage;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 1a2067f830da..a2baaa2a81f9 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -1354,11 +1354,11 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
}
EXPORT_SYMBOL(migrate_vma_finalize);
-static unsigned long migrate_device_pfn_lock(unsigned long pfn)
+static unsigned long migrate_device_pfn_lock(unsigned long mpfn)
{
struct folio *folio;
- folio = folio_get_nontail_page(pfn_to_page(pfn));
+ folio = folio_get_nontail_page(migrate_pfn_to_page(mpfn));
if (!folio)
return 0;
@@ -1367,13 +1367,14 @@ static unsigned long migrate_device_pfn_lock(unsigned long pfn)
return 0;
}
- return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
+ return mpfn | MIGRATE_PFN_MIGRATE;
}
/**
* migrate_device_range() - migrate device private pfns to normal memory.
- * @src_pfns: array large enough to hold migrating source device private pfns.
- * @start: starting pfn in the range to migrate.
+ * @src_mpfns: array large enough to hold migrating source device private
+ * migrate pfns.
+ * @start: starting migrate pfn in the range to migrate.
* @npages: number of pages to migrate.
*
* migrate_vma_setup() is similar in concept to migrate_vma_setup() except that
@@ -1389,28 +1390,29 @@ static unsigned long migrate_device_pfn_lock(unsigned long pfn)
* allocate destination pages and start copying data from the device to CPU
* memory before calling migrate_device_pages().
*/
-int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+int migrate_device_range(unsigned long *src_mpfns, unsigned long start,
unsigned long npages)
{
- unsigned long i, j, pfn;
+ unsigned long i, j, mpfn;
- for (pfn = start, i = 0; i < npages; pfn++, i++) {
- struct page *page = pfn_to_page(pfn);
+ for (mpfn = start, i = 0; i < npages; i++) {
+ struct page *page = migrate_pfn_to_page(mpfn);
struct folio *folio = page_folio(page);
unsigned int nr = 1;
- src_pfns[i] = migrate_device_pfn_lock(pfn);
+ src_mpfns[i] = migrate_device_pfn_lock(mpfn);
nr = folio_nr_pages(folio);
if (nr > 1) {
- src_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ src_mpfns[i] |= MIGRATE_PFN_COMPOUND;
for (j = 1; j < nr; j++)
- src_pfns[i+j] = 0;
+ src_mpfns[i+j] = 0;
i += j - 1;
- pfn += j - 1;
+ mpfn += (j - 1) << MIGRATE_PFN_SHIFT;
}
+ mpfn += 1 << MIGRATE_PFN_SHIFT;
}
- migrate_device_unmap(src_pfns, npages, NULL);
+ migrate_device_unmap(src_mpfns, npages, NULL);
return 0;
}
@@ -1418,32 +1420,33 @@ EXPORT_SYMBOL(migrate_device_range);
/**
* migrate_device_pfns() - migrate device private pfns to normal memory.
- * @src_pfns: pre-popluated array of source device private pfns to migrate.
+ * @src_mpfns: pre-populated array of source device private migrate pfns to
+ * migrate.
* @npages: number of pages to migrate.
*
* Similar to migrate_device_range() but supports non-contiguous pre-popluated
* array of device pages to migrate.
*/
-int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)
+int migrate_device_pfns(unsigned long *src_mpfns, unsigned long npages)
{
unsigned long i, j;
for (i = 0; i < npages; i++) {
- struct page *page = pfn_to_page(src_pfns[i]);
+ struct page *page = migrate_pfn_to_page(src_mpfns[i]);
struct folio *folio = page_folio(page);
unsigned int nr = 1;
- src_pfns[i] = migrate_device_pfn_lock(src_pfns[i]);
+ src_mpfns[i] = migrate_device_pfn_lock(src_mpfns[i]);
nr = folio_nr_pages(folio);
if (nr > 1) {
- src_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ src_mpfns[i] |= MIGRATE_PFN_COMPOUND;
for (j = 1; j < nr; j++)
- src_pfns[i+j] = 0;
+ src_mpfns[i+j] = 0;
i += j - 1;
}
}
- migrate_device_unmap(src_pfns, npages, NULL);
+ migrate_device_unmap(src_mpfns, npages, NULL);
return 0;
}
--
2.34.1