[PATCH v2 1/2] powerpc/64s: move machine check SLB flushing to mm/slb.c
Mahesh Jagannath Salgaonkar
mahesh at linux.vnet.ibm.com
Mon Aug 13 14:27:33 AEST 2018
On 08/10/2018 12:12 PM, Nicholas Piggin wrote:
> The machine check code that flushes and restores bolted segments in
> real mode belongs in mm/slb.c. This will also be used by pseries
> machine check and idle code in future changes.
>
> Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
>
> Since v1:
> - Restore the test for slb_shadow (mpe)
> ---
> arch/powerpc/include/asm/book3s/64/mmu-hash.h | 3 ++
> arch/powerpc/kernel/mce_power.c | 26 +++++--------
> arch/powerpc/mm/slb.c | 39 +++++++++++++++++++
> 3 files changed, 51 insertions(+), 17 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
> index 2f74bdc805e0..d4e398185b3a 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
> @@ -497,6 +497,9 @@ extern void hpte_init_native(void);
>
> extern void slb_initialize(void);
> extern void slb_flush_and_rebolt(void);
> +extern void slb_flush_all_realmode(void);
> +extern void __slb_restore_bolted_realmode(void);
> +extern void slb_restore_bolted_realmode(void);
>
> extern void slb_vmalloc_update(void);
> extern void slb_set_size(u16 size);
> diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
> index d6756af6ec78..3497c8329c1d 100644
> --- a/arch/powerpc/kernel/mce_power.c
> +++ b/arch/powerpc/kernel/mce_power.c
> @@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
> #ifdef CONFIG_PPC_BOOK3S_64
> static void flush_and_reload_slb(void)
> {
> - struct slb_shadow *slb;
> - unsigned long i, n;
> -
> /* Invalidate all SLBs */
> - asm volatile("slbmte %0,%0; slbia" : : "r" (0));
> + slb_flush_all_realmode();
>
> #ifdef CONFIG_KVM_BOOK3S_HANDLER
> /*
> @@ -76,22 +73,17 @@ static void flush_and_reload_slb(void)
> if (get_paca()->kvm_hstate.in_guest)
> return;
> #endif
> -
> - /* For host kernel, reload the SLBs from shadow SLB buffer. */
> - slb = get_slb_shadow();
> - if (!slb)
> + if (early_radix_enabled())
> return;
Would we ever get an MCE for SLB errors when radix is enabled?
>
> - n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
> -
> - /* Load up the SLB entries from shadow SLB */
> - for (i = 0; i < n; i++) {
> - unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
> - unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
> +	/*
> +	 * This probably shouldn't happen, but it is possible this is
> +	 * called in early boot before SLB shadows are allocated.
> +	 */
> + if (!get_slb_shadow())
> + return;
Any reason you added the above check here instead of in mm/slb.c?
Should we move it inside slb_restore_bolted_realmode()? I think
mm/slb.c is the right place for this check; it would also save the
pseries machine check code from making the check explicitly.
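
Something like the below, as a rough (untested) sketch that just folds
the existing check from mce_power.c into the helper:

	void slb_restore_bolted_realmode(void)
	{
		/*
		 * May be called in early boot before the SLB shadow
		 * area is allocated, so bail out quietly in that case.
		 */
		if (!get_slb_shadow())
			return;

		__slb_restore_bolted_realmode();
		get_paca()->slb_cache_ptr = 0;
	}

Then flush_and_reload_slb() and the pseries machine check code could
call it unconditionally.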
Thanks,
-Mahesh.
>
> - rb = (rb & ~0xFFFul) | i;
> - asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
> - }
> + slb_restore_bolted_realmode();
> }
> #endif
>
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index cb796724a6fc..0b095fa54049 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
> : "memory" );
> }
>
> +/*
> + * Insert bolted entries into SLB (which may not be empty, so don't clear
> + * slb_cache_ptr).
> + */
> +void __slb_restore_bolted_realmode(void)
> +{
> + struct slb_shadow *p = get_slb_shadow();
> + enum slb_index index;
> +
> + /* No isync needed because realmode. */
> + for (index = 0; index < SLB_NUM_BOLTED; index++) {
> + asm volatile("slbmte %0,%1" :
> + : "r" (be64_to_cpu(p->save_area[index].vsid)),
> + "r" (be64_to_cpu(p->save_area[index].esid)));
> + }
> +}
> +
> +/*
> + * Insert the bolted entries into an empty SLB.
> + * This is not the same as rebolt because the bolted segments are not
> + * changed, just loaded from the shadow area.
> + */
> +void slb_restore_bolted_realmode(void)
> +{
> + __slb_restore_bolted_realmode();
> + get_paca()->slb_cache_ptr = 0;
> +}
> +
> +/*
> + * This flushes all SLB entries including 0, so it must be realmode.
> + */
> +void slb_flush_all_realmode(void)
> +{
> + asm volatile("slbmte %0,%0; slbia" : : "r" (0));
> +}
> +
> static void __slb_flush_and_rebolt(void)
> {
> /* If you change this make sure you change SLB_NUM_BOLTED
>