[PATCH v2 4/5] mm/vmalloc: Enable mapping of huge pages at pte level in vmalloc
Christophe Leroy
christophe.leroy at csgroup.eu
Wed May 12 15:01:01 AEST 2021
On some architectures, such as powerpc, huge pages can be mapped at
pte level.

Enable this in vmalloc.

For that, architectures can provide arch_vmap_pte_supported_shift(),
which returns the page shift to use when mapping a given allocation
size at pte level.
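
As an illustration only (not part of this patch), an architecture
override could look like the minimal sketch below, assuming a
hypothetical platform with 16k and 512k huge pages at pte level and
the usual pattern of defining the macro before the inline in the
architecture's asm/vmalloc.h:

	#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
	static inline int arch_vmap_pte_supported_shift(unsigned long size)
	{
		/* Hypothetical sizes: pick the largest pte-level huge page that fits. */
		if (size >= SZ_512K)
			return 19;	/* 512k pages */
		if (size >= SZ_16K)
			return 14;	/* 16k pages */
		return PAGE_SHIFT;	/* fall back to normal pages */
	}

The generic default added below simply returns PAGE_SHIFT, so
architectures that do not provide an override keep the current
behaviour.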
Signed-off-by: Christophe Leroy <christophe.leroy at csgroup.eu>
---
 include/linux/vmalloc.h |  7 +++++++
 mm/vmalloc.c            | 13 +++++++------
 2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 13c9b19ec923..bf0a1b7a824e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -112,6 +112,13 @@ static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, uns
 }
 #endif
 
+#ifndef arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+	return PAGE_SHIFT;
+}
+#endif
+
 /*
  * Highlevel APIs for driver use
  */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 783d23b8c2f7..3de5291c20cd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2895,8 +2895,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		return NULL;
 	}
 
-	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
-			arch_vmap_pmd_supported(prot)) {
+	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
 		unsigned long size_per_node;
 
 		/*
@@ -2909,11 +2908,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		size_per_node = size;
 		if (node == NUMA_NO_NODE)
 			size_per_node /= num_online_nodes();
-		if (size_per_node >= PMD_SIZE) {
+		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
 			shift = PMD_SHIFT;
-			align = max(real_align, 1UL << shift);
-			size = ALIGN(real_size, 1UL << shift);
-		}
+		else
+			shift = arch_vmap_pte_supported_shift(size_per_node);
+
+		align = max(real_align, 1UL << shift);
+		size = ALIGN(real_size, 1UL << shift);
 	}
 
 again:
--
2.25.0