[PATCH v4 27/45] powerpc/8xx: Only 8M pages are hugepte pages now
Christophe Leroy
christophe.leroy at csgroup.eu
Tue May 19 15:49:10 AEST 2020
512k pages are now standard pages, so only 8M pages are hugepte pages.

Normal page tables no longer go through hugepd allocation and freeing,
and the hugepte helpers can be simplified accordingly (illustrated in
the sketches below).
Signed-off-by: Christophe Leroy <christophe.leroy at csgroup.eu>
---
arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 7 +++----
arch/powerpc/mm/hugetlbpage.c | 16 +++-------------
2 files changed, 6 insertions(+), 17 deletions(-)
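
For readers following along: the hunks below hard-code values that
used to be computed, because only one hugepd geometry is left on 8xx.
Here is a minimal standalone C sketch of the arithmetic, assuming
constants that mirror pte-8xx.h and the 8xx page definitions at the
time (treat the exact values as assumptions, not part of this patch):

#include <assert.h>

#define PGDIR_SHIFT     22      /* one PGD entry covers 4M */
#define PAGE_SHIFT_8M   23
#define SZ_4M           (1UL << 22)
#define _PMD_PAGE_MASK  0x000c
#define _PMD_PAGE_8M    0x000c
#define _PMD_PAGE_512K  0x0004

int main(void)
{
	/* The old hugepd_shift() decoded the page size from the hugepd
	 * value: 512k -> (0x4 >> 1) + 17 = 19, 8M -> (0xc >> 1) + 17 = 23.
	 * With 512k pages handled as standard pages, the result is
	 * always PAGE_SHIFT_8M, so the helper can just return it. */
	assert(((_PMD_PAGE_512K & _PMD_PAGE_MASK) >> 1) + 17 == 19);
	assert(((_PMD_PAGE_8M   & _PMD_PAGE_MASK) >> 1) + 17 == PAGE_SHIFT_8M);

	/* Likewise, pdshift is now always PGDIR_SHIFT, so the generic
	 * mask in hugepte_offset() equals the constant SZ_4M mask: an
	 * 8M page spans two PGD entries of 4M each, and the hugepte
	 * index is taken within one 4M-covering hugepd. */
	assert(((1UL << PGDIR_SHIFT) - 1) == (SZ_4M - 1));

	return 0;
}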
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index 785437323576..1c7d4693a78e 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
- return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+ return PAGE_SHIFT_8M;
}
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned int pdshift)
{
- unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+ unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
return hugepd_page(hpd) + idx;
}
@@ -32,8 +32,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
}
static inline int check_and_get_huge_psize(int shift)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 38bad839e608..cfacd364c7aa 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -54,24 +54,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (pshift >= pdshift) {
cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift);
- new = NULL;
- } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
- cachep = NULL;
- num_hugepd = 1;
- new = pte_alloc_one(mm);
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
- new = NULL;
}
- if (!cachep && !new) {
+ if (!cachep) {
WARN_ONCE(1, "No page table cache created for hugetlb tables");
return -ENOMEM;
}
- if (cachep)
- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -102,10 +95,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
*hpdp = __hugepd(0);
- if (cachep)
- kmem_cache_free(cachep, new);
- else
- pte_free(mm, new);
+ kmem_cache_free(cachep, new);
} else {
kmemleak_ignore(new);
}
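
A quick note on the remaining allocation path, as a sketch rather than
a claim about the final code: for the 8xx 8M case, pshift (23) is
always greater than pdshift (PGDIR_SHIFT, 22), so __hugepte_alloc()
always reaches kmem_cache_alloc() and the pte_alloc_one()/pte_free()
special case can go away. The shift arithmetic in a standalone C
snippet (constants are assumptions mirroring the 8xx definitions):

#include <stdio.h>

#define PDSHIFT    22	/* PGDIR_SHIFT: one PGD entry covers 4M */
#define PSHIFT_8M  23	/* the only remaining hugepte page size */

int main(void)
{
	/* One 8M page is mapped by writing the same hugepd into
	 * num_hugepd consecutive PGD entries (2 x 4M = 8M), which is
	 * why the error path above clears num_hugepd entries but
	 * frees only a single allocation back to the cache. */
	unsigned int num_hugepd = 1u << (PSHIFT_8M - PDSHIFT);

	printf("num_hugepd = %u\n", num_hugepd);	/* prints 2 */
	return 0;
}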
--
2.25.0