[Skiboot] [RFC PATCH 5/6] xive/p9: Add statistics for HW procedures

Cédric Le Goater clg at kaod.org
Fri Sep 18 02:35:42 AEST 2020


Common XIVE HW procedures are cache updates and synchronizations ensuring
that pending interrupts have reached their event queues. These procedures
can run frequently in some scenarios. Collect statistics for them and
expose the results through a debug handler that can be read from Linux.
Writing "reset" to the handler clears the statistics.

Signed-off-by: Cédric Le Goater <clg at kaod.org>
---
 hw/xive.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 98 insertions(+), 9 deletions(-)
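
Note for reviewers: the stat_init()/stat_call()/stat_printf() helpers come
from <stat.h>, introduced earlier in this series and not shown in this
patch. As a rough illustration only (struct stat_example, now_usecs() and
the *_example names below are made up for this sketch and are not the
skiboot API), a wrapper of this kind could count invocations and flag
calls exceeding a time limit along these lines:

  #include <stdint.h>
  #include <stdio.h>
  #include <time.h>

  struct stat_example {
          const char *name;
          uint64_t count;         /* number of calls                    */
          uint64_t over_limit;    /* calls slower than max_usecs        */
          uint64_t max_usecs;     /* threshold, cf. XIVE_STAT_MAX_TIME  */
  };

  static uint64_t now_usecs(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
  }

  /*
   * Evaluate 'call', account for it in 's' and return its result.
   * Uses a GCC statement expression, as skiboot is built with GCC.
   */
  #define stat_call_example(call, s)                              \
  ({                                                              \
          uint64_t _t0 = now_usecs();                             \
          int64_t _rc = (call);                                   \
          uint64_t _dt = now_usecs() - _t0;                       \
          (s)->count++;                                           \
          if (_dt > (s)->max_usecs)                               \
                  (s)->over_limit++;                              \
          _rc;                                                    \
  })

  /* Format one counter line into 'buf', as a debug read handler would */
  static int stat_printf_example(struct stat_example *s, char *buf,
                                 size_t size)
  {
          return snprintf(buf, size, "%-20s calls=%llu over_limit=%llu\n",
                          s->name, (unsigned long long)s->count,
                          (unsigned long long)s->over_limit);
  }

A call site then looks like stat_call_example(__xive_sync(x),
&x->stat[XIVE_SYNC]), which is the pattern the patch below applies to the
cache scrub, sync and cache kill procedures.
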

diff --git a/hw/xive.c b/hw/xive.c
index b13beb575ba1..abdb2115a1a2 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -20,6 +20,7 @@
 #include <phys-map.h>
 #include <p9_stop_api.H>
 #include <opal-debug.h>
+#include <stat.h>
 
 /* Always notify from EQ to VP (no EOI on EQs). Will speed up
  * EOIs at the expense of potentially higher powerbus traffic.
@@ -357,6 +358,32 @@ static inline void log_print(struct xive_cpu_state *xs __unused) { }
 
 #endif /* XIVE_PERCPU_LOG */
 
+/*
+ * Statistics
+ */
+enum {
+	XIVE_IVC_SCRUB,
+	XIVE_VPC_SCRUB,
+	XIVE_VPC_SCRUB_CLEAN,
+	XIVE_EQC_SCRUB,
+	XIVE_SYNC,
+	XIVE_SYNC_NOLOCK,
+	XIVE_VC_CACHE_KILL,
+	XIVE_PC_CACHE_KILL,
+	XIVE_STAT_LAST,
+};
+
+static const char *xive_stat_names[] = {
+	"XIVE_IVC_SCRUB",
+	"XIVE_VPC_SCRUB",
+	"XIVE_VPC_SCRUB_CLEAN",
+	"XIVE_EQC_SCRUB",
+	"XIVE_SYNC",
+	"XIVE_SYNC_NOLOCK",
+	"XIVE_VC_CACHE_KILL",
+	"XIVE_PC_CACHE_KILL",
+};
+
 struct xive {
 	uint32_t	chip_id;
 	uint32_t	block_id;
@@ -463,6 +490,8 @@ struct xive {
 
 	/* In memory queue overflow */
 	void		*q_ovf;
+
+	struct stat	stat[XIVE_STAT_LAST];
 };
 
 #define XIVE_CAN_STORE_EOI(x) XIVE_STORE_EOI_ENABLED
@@ -1202,26 +1231,34 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
 	return 0;
 }
 
-static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
+static int64_t __xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
 {
 	/* IVC has no "want_inval" bit, it always invalidates */
 	return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
 }
+#define xive_ivc_scrub(x, b, i)					\
+	stat_call(__xive_ivc_scrub(x, b, i),  &x->stat[XIVE_IVC_SCRUB])
 
-static int64_t xive_vpc_scrub(struct xive *x, uint64_t block, uint64_t idx)
+static int64_t __xive_vpc_scrub(struct xive *x, uint64_t block, uint64_t idx)
 {
 	return __xive_cache_scrub(x, xive_cache_vpc, block, idx, false, false);
 }
+#define xive_vpc_scrub(x, b, i)						\
+	stat_call(__xive_vpc_scrub(x, b, i),  &x->stat[XIVE_VPC_SCRUB])
 
-static int64_t xive_vpc_scrub_clean(struct xive *x, uint64_t block, uint64_t idx)
+static int64_t __xive_vpc_scrub_clean(struct xive *x, uint64_t block, uint64_t idx)
 {
 	return __xive_cache_scrub(x, xive_cache_vpc, block, idx, true, false);
 }
+#define xive_vpc_scrub_clean(x, b, i)						\
+	stat_call(__xive_vpc_scrub_clean(x, b, i),  &x->stat[XIVE_VPC_SCRUB_CLEAN])
 
-static int64_t xive_eqc_scrub(struct xive *x, uint64_t block, uint64_t idx)
+static int64_t __xive_eqc_scrub(struct xive *x, uint64_t block, uint64_t idx)
 {
 	return __xive_cache_scrub(x, xive_cache_eqc, block, idx, false, false);
 }
+#define xive_eqc_scrub(x, b, i)					\
+	stat_call(__xive_eqc_scrub(x, b, i),  &x->stat[XIVE_EQC_SCRUB])
 
 static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
 				  uint64_t block, uint64_t idx,
@@ -2280,13 +2317,11 @@ static void xive_update_irq_mask(struct xive_src *s, uint32_t idx, bool masked)
 	in_be64(mmio_base + offset);
 }
 
-static int64_t xive_sync(struct xive *x)
+static int64_t __xive_sync_nolock(struct xive *x)
 {
 	uint64_t r;
 	void *p;
 
-	lock(&x->lock);
-
 	/* Second 2K range of second page */
 	p = x->ic_base + (1 << x->ic_shift) + 0x800;
 
@@ -2316,10 +2351,20 @@ static int64_t xive_sync(struct xive *x)
 	/* Workaround HW issue, read back before allowing a new sync */
 	xive_regr(x, VC_GLOBAL_CONFIG);
 
+	return 0;
+}
+#define xive_sync_nolock(x)						\
+	stat_call(__xive_sync_nolock(x), &x->stat[XIVE_SYNC_NOLOCK])
+
+static int64_t __xive_sync(struct xive *x)
+{
+	lock(&x->lock);
+	xive_sync_nolock(x);
 	unlock(&x->lock);
 
 	return 0;
 }
+#define xive_sync(x) stat_call(__xive_sync(x), &x->stat[XIVE_SYNC])
 
 static int64_t __xive_set_irq_config(struct irq_source *is, uint32_t girq,
 				     uint64_t vp, uint8_t prio, uint32_t lirq,
@@ -2586,6 +2631,16 @@ void xive_register_ipi_source(uint32_t base, uint32_t count, void *data,
 			       flags, false, data, ops);
 }
 
+#define XIVE_STAT_MAX_TIME 50 /* usecs */
+
+static void xive_stat_init(struct xive *x)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(x->stat); i++)
+		stat_init(&x->stat[i], xive_stat_names[i], XIVE_STAT_MAX_TIME);
+}
+
 static struct xive *init_one_xive(struct dt_node *np)
 {
 	struct xive *x;
@@ -2695,6 +2750,8 @@ static struct xive *init_one_xive(struct dt_node *np)
 			       false, NULL, NULL);
 
 
+	xive_stat_init(x);
+
 	return x;
  fail:
 	xive_err(x, "Initialization failed...\n");
@@ -4357,7 +4414,7 @@ static void xive_cleanup_cpu_tima(struct cpu_thread *c)
 	xive_regw(x, PC_TCTXT_INDIR0, 0);
 }
 
-static int64_t xive_vc_ind_cache_kill(struct xive *x, uint64_t type)
+static int64_t __xive_vc_ind_cache_kill(struct xive *x, uint64_t type)
 {
 	uint64_t val;
 
@@ -4378,8 +4435,10 @@ static int64_t xive_vc_ind_cache_kill(struct xive *x, uint64_t type)
 	}
 	return 0;
 }
+#define xive_vc_ind_cache_kill(x, type) \
+	stat_call(__xive_vc_ind_cache_kill(x, type), &x->stat[XIVE_VC_CACHE_KILL])
 
-static int64_t xive_pc_ind_cache_kill(struct xive *x)
+static int64_t __xive_pc_ind_cache_kill(struct xive *x)
 {
 	uint64_t val;
 
@@ -4399,6 +4458,8 @@ static int64_t xive_pc_ind_cache_kill(struct xive *x)
 	}
 	return 0;
 }
+#define xive_pc_ind_cache_kill(x) \
+	stat_call(__xive_pc_ind_cache_kill(x), &x->stat[XIVE_PC_CACHE_KILL])
 
 static void xive_cleanup_vp_ind(struct xive *x)
 {
@@ -5380,6 +5441,29 @@ static int xive_perf_read(struct opal_debug *d, void *buf, uint64_t size)
 	return n;
 }
 
+static int xive_stat_read(struct opal_debug *d, void *buf, uint64_t size)
+{
+	struct xive *x = d->private;
+	int n = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(x->stat); i++)
+		n += stat_printf(&x->stat[i], buf + n, size - n);
+	return n;
+}
+
+static int xive_stat_write(struct opal_debug *d, void *buf, uint64_t size)
+{
+	struct xive *x = d->private;
+
+	if (!strncmp(buf, "reset", size)) {
+		xive_stat_init(x);
+		return OPAL_SUCCESS;
+	} else {
+		return OPAL_PARAMETER;
+	}
+}
+
 static const struct opal_debug_ops xive_ivt_ops = {
 	.read = xive_ivt_read,
 };
@@ -5395,6 +5479,10 @@ static const struct opal_debug_ops xive_vpt_ops = {
 static const struct opal_debug_ops xive_perf_ops = {
 	.read = xive_perf_read,
 };
+static const struct opal_debug_ops xive_stat_ops = {
+	.read = xive_stat_read,
+	.write = xive_stat_write,
+};
 
 static const struct {
 	const char *name;
@@ -5405,6 +5493,7 @@ static const struct {
 	{ "xive-esc",	&xive_esc_ops,  },
 	{ "xive-vpt",	&xive_vpt_ops,  },
 	{ "xive-perf",	&xive_perf_ops, },
+	{ "xive-stat",	&xive_stat_ops, },
 };
 
 static void xive_init_debug(struct xive *x)
-- 
2.25.4


