[PATCH 4/4] powerpc/perf: MMCR0 control for PMU registers under PMCC=00
Athira Rajeev
atrajeev at linux.vnet.ibm.com
Wed Nov 11 15:33:09 AEDT 2020
PowerISA v3.1 introduces a new control bit (PMCCEXT) for restricting
access to group B PMU registers in problem state when
MMCR0 PMCC=0b00. This patch adds support for the MMCR0 PMCCEXT bit
on power10 by enabling it during boot and during the PMU
event enable/disable operations when MMCR0 PMCC=0b00.
Signed-off-by: Athira Rajeev <atrajeev at linux.vnet.ibm.com>
---
arch/powerpc/include/asm/reg.h | 1 +
arch/powerpc/kernel/cpu_setup_power.S | 2 ++
arch/powerpc/kernel/dt_cpu_ftrs.c | 1 +
arch/powerpc/perf/core-book3s.c | 16 ++++++++++++++++
4 files changed, 20 insertions(+)
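For context, the rule the core-book3s.c hunks below implement can be
written as a standalone C sketch. This is illustrative only, not the
kernel code: the function names and the PPMU_ARCH_31 value are made up
here; the MMCR0_* masks match reg.h.

	#include <stdio.h>

	#define MMCR0_FC	0x80000000UL	/* freeze counters */
	#define MMCR0_PMCC	0x000c0000UL	/* PMC control field */
	#define MMCR0_PMCCEXT	0x00000200UL	/* added by this patch */
	#define PPMU_ARCH_31	0x00000400UL	/* hypothetical flag value */

	/* disable path: freeze, and always restrict group B on p10 */
	static unsigned long pmu_disable_mmcr0(unsigned long val, unsigned long flags)
	{
		val |= MMCR0_FC;
		if (flags & PPMU_ARCH_31)
			val |= MMCR0_PMCCEXT;
		return val;
	}

	/* enable path: restrict group B only while PMCC=0b00 */
	static unsigned long pmu_enable_mmcr0(unsigned long mmcr0, unsigned long flags)
	{
		if ((flags & PPMU_ARCH_31) && !(mmcr0 & MMCR0_PMCC))
			mmcr0 |= MMCR0_PMCCEXT;
		return mmcr0;
	}

	int main(void)
	{
		printf("%#lx %#lx\n",
		       pmu_disable_mmcr0(0, PPMU_ARCH_31),	/* 0x80000200 */
		       pmu_enable_mmcr0(0, PPMU_ARCH_31));	/* 0x200 */
		return 0;
	}

With PMCCEXT set and PMCC=0b00, problem state can no longer read the
group B registers (SIER, SIAR, SDAR, MMCR1); pre-v3.1 parts treat the
bit as reserved, which is why both sites are gated on PPMU_ARCH_31.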
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f877a57..cba9965 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -864,6 +864,7 @@
#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
#define MMCR0_PMCC 0x000c0000UL /* PMC control */
+#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */
#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/
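A quick sanity check on the new constant, illustrative and not part of
the patch: per PowerISA v3.1, PMCCEXT is MMCR0 bit 54 in IBM (MSB0)
numbering, which for a 64-bit SPR is LSB0 bit 63 - 54 = 9, i.e. 0x200:

	#include <stdio.h>

	#define MMCR0_PMCCEXT 0x00000200UL	/* value from the hunk above */

	int main(void)
	{
		/* IBM bit 54 of a 64-bit register == 1UL << (63 - 54) */
		printf("%s\n", MMCR0_PMCCEXT == (1UL << (63 - 54)) ? "ok" : "mismatch");
		return 0;	/* prints "ok" */
	}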
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 704e8b9..8fc8b72 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -249,4 +249,6 @@ __init_PMU_ISA31:
mtspr SPRN_MMCR3,r5
LOAD_REG_IMMEDIATE(r5, MMCRA_BHRB_DISABLE)
mtspr SPRN_MMCRA,r5
+ LOAD_REG_IMMEDIATE(r5, MMCR0_PMCCEXT)
+ mtspr SPRN_MMCR0,r5
blr
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 1098863..9d07965 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -454,6 +454,7 @@ static void init_pmu_power10(void)
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
+ mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
}
static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
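The PMCC field that gates PMCCEXT is a 2-bit field (mask 0x000c0000UL
in reg.h, LSB0 bits 19:18); PMCCEXT only takes effect for PMCC=0b00.
A hypothetical decode helper, illustrative only (the mmcr0_pmcc name
does not exist in the kernel):

	#include <stdio.h>

	#define MMCR0_PMCC	0x000c0000UL	/* mask from reg.h */
	#define MMCR0_PMCC_U6	0x00080000UL	/* PMCC=0b10: PMC1-6 user R/W */

	/* hypothetical helper: extract the PMCC field, 0b00..0b11 */
	static unsigned int mmcr0_pmcc(unsigned long mmcr0)
	{
		return (unsigned int)((mmcr0 & MMCR0_PMCC) >> 18);
	}

	int main(void)
	{
		printf("%u %u\n", mmcr0_pmcc(0), mmcr0_pmcc(MMCR0_PMCC_U6)); /* 0 2 */
		return 0;
	}

This is why power_pmu_enable() below checks !(mmcr0 & MMCR0_PMCC)
rather than comparing against a value: a zero field is exactly
PMCC=0b00.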
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 08643cb..f328bc0 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -95,6 +95,7 @@ struct cpu_hw_events {
#define SPRN_SIER3 0
#define MMCRA_SAMPLE_ENABLE 0
#define MMCRA_BHRB_DISABLE 0
+#define MMCR0_PMCCEXT 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
@@ -1242,6 +1243,9 @@ static void power_pmu_disable(struct pmu *pmu)
val |= MMCR0_FC;
val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
MMCR0_FC56);
+ /* Set mmcr0 PMCCEXT for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+ val |= MMCR0_PMCCEXT;
/*
* The barrier is to make sure the mtspr has been
@@ -1449,6 +1453,18 @@ static void power_pmu_enable(struct pmu *pmu)
mmcr0 = ebb_switch_in(ebb, cpuhw);
+ /*
+ * Set MMCR0 (PMCCEXT) for p10 when
+ * MMCR0 PMCC=0b00, to restrict problem
+ * state access to group B registers.
+ */
+ if (ppmu->flags & PPMU_ARCH_31) {
+ if (!(mmcr0 & MMCR0_PMCC)) {
+ cpuhw->mmcr.mmcr0 |= MMCR0_PMCCEXT;
+ mmcr0 |= MMCR0_PMCCEXT;
+ }
+ }
+
mb();
if (cpuhw->bhrb_users)
ppmu->config_bhrb(cpuhw->bhrb_filter);
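One design note on the enable-path guard above: a self-monitoring (EBB)
task typically runs with MMCR0_PMCC_U6, i.e. PMCC=0b10, so the
!(mmcr0 & MMCR0_PMCC) test is false and PMCCEXT stays clear for it;
only the PMCC=0b00 configuration is locked down. A small check of that
guard (illustrative; the EBB association is my reading of
ebb_switch_in(), not something stated in this patch):

	#include <stdio.h>

	#define MMCR0_PMCC	0x000c0000UL
	#define MMCR0_PMCC_U6	0x00080000UL	/* PMCC=0b10 */
	#define MMCR0_PMCCEXT	0x00000200UL

	int main(void)
	{
		unsigned long mmcr0 = MMCR0_PMCC_U6;	/* e.g. after ebb_switch_in() */

		if (!(mmcr0 & MMCR0_PMCC))		/* false here: PMCC != 0b00 */
			mmcr0 |= MMCR0_PMCCEXT;

		printf("PMCCEXT %s\n", (mmcr0 & MMCR0_PMCCEXT) ? "set" : "clear");
		return 0;				/* prints "PMCCEXT clear" */
	}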
--
1.8.3.1