[RFC PATCH v1 4/6] mm: Introduce arch_in_lazy_mmu_mode()
Ryan Roberts
ryan.roberts at arm.com
Sat May 31 00:04:42 AEST 2025
Introduce a new arch_in_lazy_mmu_mode() API, which returns true if the
calling context is currently in lazy mmu mode and false otherwise. Each
arch that supports lazy mmu mode must provide an implementation of this
API.
The API will shortly be used to prevent accidental lazy mmu mode nesting
when performing an allocation, and will additionally be used to ensure
that the pte modification vs TLB flush ordering does not get
inadvertently swapped.
Signed-off-by: Ryan Roberts <ryan.roberts at arm.com>
---
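Not part of the patch itself, but for illustration, a minimal sketch of
how a later caller might consult the new API to avoid nesting lazy mmu
sections; the function name example_update_ptes() and its loop body are
hypothetical, and it assumes the surrounding file already includes
<linux/pgtable.h>:

static void example_update_ptes(pte_t *ptep, pte_t pte, unsigned int nr)
{
	/* Only enter lazy mmu mode if the caller has not already done so. */
	bool was_lazy = arch_in_lazy_mmu_mode();

	if (!was_lazy)
		arch_enter_lazy_mmu_mode();

	/* pte updates here may be batched/deferred by the arch. */
	for (; nr; nr--, ptep++)
		set_pte(ptep, pte);

	/* Only leave if we entered; an enclosing section stays active. */
	if (!was_lazy)
		arch_leave_lazy_mmu_mode();
}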
arch/arm64/include/asm/pgtable.h | 8 ++++++++
.../powerpc/include/asm/book3s/64/tlbflush-hash.h | 15 +++++++++++++++
arch/sparc/include/asm/tlbflush_64.h | 1 +
arch/sparc/mm/tlb.c | 12 ++++++++++++
arch/x86/include/asm/paravirt.h | 5 +++++
arch/x86/include/asm/paravirt_types.h | 1 +
arch/x86/kernel/paravirt.c | 6 ++++++
arch/x86/xen/mmu_pv.c | 6 ++++++
include/linux/pgtable.h | 1 +
9 files changed, 55 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5285757ee0c1..add75dee49f5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -119,6 +119,14 @@ static inline void arch_leave_lazy_mmu_mode(void)
clear_thread_flag(TIF_LAZY_MMU);
}
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ if (in_interrupt())
+ return false;
+
+ return test_thread_flag(TIF_LAZY_MMU);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 146287d9580f..4123a9da32cc 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -57,6 +57,21 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define arch_flush_lazy_mmu_mode() do {} while (0)
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch;
+ bool active;
+
+ if (radix_enabled())
+ return false;
+
+ batch = get_cpu_ptr(&ppc64_tlb_batch);
+ active = batch->active;
+ put_cpu_ptr(&ppc64_tlb_batch);
+
+ return active;
+}
+
extern void hash__tlbiel_all(unsigned int action);
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 8b8cdaa69272..204bc957df9e 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -45,6 +45,7 @@ void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)
+bool arch_in_lazy_mmu_mode(void);
/* Local cpu only. */
void __flush_tlb_all(void);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index a35ddcca5e76..83ab4ba4f4fb 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -69,6 +69,18 @@ void arch_leave_lazy_mmu_mode(void)
preempt_enable();
}
+bool arch_in_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb;
+ bool active;
+
+ tb = get_cpu_ptr(&tlb_batch);
+ active = tb->active;
+ put_cpu_ptr(&tlb_batch);
+
+ return active;
+}
+
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec, unsigned int hugepage_shift)
{
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b5e59a7ba0d0..c7ea3ccb8a41 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -542,6 +542,11 @@ static inline void arch_flush_lazy_mmu_mode(void)
PVOP_VCALL0(mmu.lazy_mode.flush);
}
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ return PVOP_CALL0(bool, mmu.lazy_mode.in);
+}
+
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 37a8627d8277..41001ca9d010 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -46,6 +46,7 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
+ bool (*in)(void);
} __no_randomize_layout;
#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab3e172dcc69..9af1a04a47fd 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -106,6 +106,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
+
+static noinstr bool paravirt_retfalse(void)
+{
+ return false;
+}
#endif
struct pv_info pv_info = {
@@ -228,6 +233,7 @@ struct paravirt_patch_template pv_ops = {
.enter = paravirt_nop,
.leave = paravirt_nop,
.flush = paravirt_nop,
+ .in = paravirt_retfalse,
},
.mmu.set_fixmap = native_set_fixmap,
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2a4a8deaf612..74f7a8537911 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2147,6 +2147,11 @@ static void xen_flush_lazy_mmu(void)
preempt_enable();
}
+static bool xen_in_lazy_mmu(void)
+{
+ return xen_get_lazy_mode() == XEN_LAZY_MMU;
+}
+
static void __init xen_post_allocator_init(void)
{
pv_ops.mmu.set_pte = xen_set_pte;
@@ -2230,6 +2235,7 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.enter = xen_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
.flush = xen_flush_lazy_mmu,
+ .in = xen_in_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b50447ef1c92..580d9971f435 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -235,6 +235,7 @@ static inline int pmd_dirty(pmd_t pmd)
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#define arch_flush_lazy_mmu_mode() do {} while (0)
+#define arch_in_lazy_mmu_mode() false
#endif
#ifndef pte_batch_hint
--
2.43.0