[Skiboot] [PATCH 11/32] xive: Implement cache watch and use it for EQs

Benjamin Herrenschmidt benh at kernel.crashing.org
Tue Nov 22 13:13:13 AEDT 2016


We need to do cache-coherent updates of the EQs when modifying the
escalation interrupts embedded in them. Use the cache watch facility for that.
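
For reference, the watch sequence implemented below boils down to the
following (condensed EQC-only sketch; the VPC variant, the "full" watch
flag and error handling are omitted, and the variable names are purely
illustrative):

	do {
		/* Point the watch at the target EQ (block/index) */
		__xive_regw(x, VC_EQC_CWATCH_SPEC, X_VC_EQC_CWATCH_SPEC,
			    SETFIELD(VC_EQC_CWATCH_BLOCKID, idx, block), NULL);

		/* Reading DAT0 populates the watch with the current entry */
		dat0 = __xive_regr(x, VC_EQC_CWATCH_DAT0,
				   X_VC_EQC_CWATCH_DAT0, NULL);

		/* Update the dword(s) of interest, writing DAT0 last (here
		 * unmodified) since the DAT0 write commits the update
		 */
		__xive_regw(x, VC_EQC_CWATCH_DAT2, X_VC_EQC_CWATCH_DAT2,
			    new_dw2, NULL);
		__xive_regw(x, VC_EQC_CWATCH_DAT0, X_VC_EQC_CWATCH_DAT0,
			    dat0, NULL);

		/* Re-read the spec and retry if another user raced us */
		status = __xive_regr(x, VC_EQC_CWATCH_SPEC,
				     X_VC_EQC_CWATCH_SPEC, NULL);
	} while ((status & VC_EQC_CWATCH_FULL) &&
		 (status & VC_EQC_CWATCH_CONFLICT));

The escalation IVE lives in dword 2 of its EQ, so the caller below only
needs a "light" (partial) watch of that single dword. A full rewrite of an
entire EQ would instead pass the whole 4-dword entry, along the lines of
the hypothetical call xive_eqc_cache_update(x, blk, idx, 0, 4, &eq, false).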

Signed-off-by: Benjamin Herrenschmidt <benh at kernel.crashing.org>
---
 hw/xive.c      | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
 include/xive.h |  36 ++++++++++++++++++++
 2 files changed, 135 insertions(+), 7 deletions(-)

diff --git a/hw/xive.c b/hw/xive.c
index 7df699d..5932319 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -686,6 +686,8 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
 		mreg = PC_VPC_SCRUB_MASK;
 		mregx = X_PC_VPC_SCRUB_MASK;
 		break;
+	default:
+		return OPAL_PARAMETER;
 	}
 	if (ctype == xive_cache_vpc) {
 		mval = PC_SCRUB_BLOCK_ID | PC_SCRUB_OFFSET;
@@ -719,6 +721,86 @@ static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
 	return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
 }
 
+static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
+				  uint64_t block, uint64_t idx,
+				  uint32_t start_dword, uint32_t dword_count,
+				  void *new_data, bool light_watch)
+{
+	uint64_t sreg, sregx, dreg0, dreg0x;
+	uint64_t dval0, sval, status;
+	int64_t i;
+
+	switch (ctype) {
+	case xive_cache_eqc:
+		sreg = VC_EQC_CWATCH_SPEC;
+		sregx = X_VC_EQC_CWATCH_SPEC;
+		dreg0 = VC_EQC_CWATCH_DAT0;
+		dreg0x = X_VC_EQC_CWATCH_DAT0;
+		sval = SETFIELD(VC_EQC_CWATCH_BLOCKID, idx, block);
+		break;
+	case xive_cache_vpc:
+		sreg = PC_VPC_CWATCH_SPEC;
+		sregx = X_PC_VPC_CWATCH_SPEC;
+		dreg0 = PC_VPC_CWATCH_DAT0;
+		dreg0x = X_PC_VPC_CWATCH_DAT0;
+		sval = SETFIELD(PC_VPC_CWATCH_BLOCKID, idx, block);
+		break;
+	default:
+		return OPAL_PARAMETER;
+	}
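+
+	/* In both cases above, "idx" is passed as the base value to
+	 * SETFIELD, so it already occupies the low-order OFFSET bits of
+	 * the spec value; only the block number needs to be inserted.
+	 */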
+
+	/* The full bit is in the same position for EQC and VPC */
+	if (!light_watch)
+		sval |= VC_EQC_CWATCH_FULL;
+
+	do {
+		/* Write the cache watch spec */
+		__xive_regw(x, sreg, sregx, sval, NULL);
+
+		/* Load data0 register to populate the watch */
+		dval0 = __xive_regr(x, dreg0, dreg0x, NULL);
+
+		/* Write the words into the watch facility. We write in reverse
+		 * order in case word 0 is part of it as it must be the last
+		 * one written.
+		 */
+	for (i = start_dword + dword_count - 1; i >= start_dword; i--) {
+		uint64_t dw = ((uint64_t *)new_data)[i - start_dword];
+		__xive_regw(x, dreg0 + i * 8, dreg0x + i, dw, NULL);
+		}
+
+		/* Write data0 register to trigger the update if word 0 wasn't
+		 * written above
+		 */
+		if (start_dword > 0)
+			__xive_regw(x, dreg0, dreg0x, dval0, NULL);
+
+		/* This may not be necessary for light updates (it's possible
+		 * that a sync is sufficient, TBD). Ensure the above is
+		 * complete and check the status of the watch.
+		 */
+		status = __xive_regr(x, sreg, sregx, NULL);
+
+		/* XXX Add timeout ? */
+
+		/* Bits FULL and CONFLICT are in the same position in
+		 * EQC and VPC
+		 */
+	} while((status & VC_EQC_CWATCH_FULL) &&
+		(status & VC_EQC_CWATCH_CONFLICT));
+
+	return 0;
+}
+
+static int64_t xive_eqc_cache_update(struct xive *x, uint64_t block,
+				     uint64_t idx, uint32_t start_dword,
+				     uint32_t dword_count, void *new_data,
+				     bool light_watch)
+{
+	return __xive_cache_watch(x, xive_cache_eqc, block, idx,
+				  start_dword, dword_count,
+				  new_data, light_watch);
+}
+
 static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
 {
 	/* Set VC version */
@@ -1518,6 +1600,7 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 	struct xive_ive *ive;
 	uint32_t eq_blk, eq_idx;
 	bool is_escalation = GIRQ_IS_ESCALATION(isn);
+	uint64_t new_ive;
 
 	/* Find XIVE on which the IVE resides */
 	x = xive_from_isn(isn);
@@ -1534,15 +1617,17 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 
 	lock(&x->lock);
 
+	/* Read existing IVE */
+	new_ive = ive->w;
+
 	/* Are we masking ? */
 	if (prio == 0xff) {
 		/* Masking, just set the M bit */
 		if (!is_escalation)
-			ive->w |= IVE_MASKED;
+			new_ive |= IVE_MASKED;
 
 		xive_vdbg(x, "ISN %x masked !\n", isn);
 	} else {
-		uint64_t new_ive;
 
 		/* Unmasking, re-target the IVE. First find the EQ
 		 * corresponding to the target
@@ -1560,15 +1645,22 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 		new_ive = ive->w & ~IVE_MASKED;
 		new_ive = SETFIELD(IVE_EQ_BLOCK, new_ive, eq_blk);
 		new_ive = SETFIELD(IVE_EQ_INDEX, new_ive, eq_idx);
-		sync();
-		ive->w = new_ive;
 
 		xive_vdbg(x,"ISN %x routed to eq %x/%x IVE=%016llx !\n",
-		  isn, eq_blk, eq_idx, new_ive);
+			  isn, eq_blk, eq_idx, new_ive);
 	}
 
-	/* Scrub IVE from cache */
-	xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
+	/* Updating the cache differs between real IVEs and escalation
+	 * IVEs inside an EQ
+	 */
+	if (is_escalation) {
+		xive_eqc_cache_update(x, x->chip_id, GIRQ_TO_IDX(isn),
+				      2, 1, &new_ive, true);
+	} else {
+		sync();
+		ive->w = new_ive;
+		xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
+	}
 
 	unlock(&x->lock);
 	return true;
diff --git a/include/xive.h b/include/xive.h
index ba48601..a6dc7be 100644
--- a/include/xive.h
+++ b/include/xive.h
@@ -107,6 +107,28 @@
 #define  PC_SCRUB_WANT_INVAL	PPC_BIT(2)
 #define  PC_SCRUB_BLOCK_ID	PPC_BITMASK(27,31)
 #define  PC_SCRUB_OFFSET	PPC_BITMASK(45,63)
+#define X_PC_VPC_CWATCH_SPEC	0x167
+#define PC_VPC_CWATCH_SPEC	0x738
+#define  PC_VPC_CWATCH_CONFLICT	PPC_BIT(0)
+#define  PC_VPC_CWATCH_FULL	PPC_BIT(8)
+#define  PC_VPC_CWATCH_BLOCKID	PPC_BITMASK(27,31)
+#define  PC_VPC_CWATCH_OFFSET	PPC_BITMASK(45,63)
+#define X_PC_VPC_CWATCH_DAT0	0x168
+#define PC_VPC_CWATCH_DAT0	0x740
+#define X_PC_VPC_CWATCH_DAT1	0x169
+#define PC_VPC_CWATCH_DAT1	0x748
+#define X_PC_VPC_CWATCH_DAT2	0x16a
+#define PC_VPC_CWATCH_DAT2	0x750
+#define X_PC_VPC_CWATCH_DAT3	0x16b
+#define PC_VPC_CWATCH_DAT3	0x758
+#define X_PC_VPC_CWATCH_DAT4	0x16c
+#define PC_VPC_CWATCH_DAT4	0x760
+#define X_PC_VPC_CWATCH_DAT5	0x16d
+#define PC_VPC_CWATCH_DAT5	0x768
+#define X_PC_VPC_CWATCH_DAT6	0x16e
+#define PC_VPC_CWATCH_DAT6	0x770
+#define X_PC_VPC_CWATCH_DAT7	0x16f
+#define PC_VPC_CWATCH_DAT7	0x778
 
 /* VC0 register offsets */
 #define X_VC_GLOBAL_CONFIG	0x200
@@ -144,6 +166,20 @@
 #define VC_EQC_SCRUB_TRIG	0x910
 #define X_VC_EQC_SCRUB_MASK	0x213
 #define VC_EQC_SCRUB_MASK	0x918
+#define X_VC_EQC_CWATCH_SPEC	0x215
+#define VC_EQC_CWATCH_SPEC	0x928
+#define  VC_EQC_CWATCH_CONFLICT	PPC_BIT(0)
+#define  VC_EQC_CWATCH_FULL	PPC_BIT(8)
+#define  VC_EQC_CWATCH_BLOCKID	PPC_BITMASK(28,31)
+#define  VC_EQC_CWATCH_OFFSET	PPC_BITMASK(40,63)
+#define X_VC_EQC_CWATCH_DAT0	0x216
+#define VC_EQC_CWATCH_DAT0	0x930
+#define X_VC_EQC_CWATCH_DAT1	0x217
+#define VC_EQC_CWATCH_DAT1	0x938
+#define X_VC_EQC_CWATCH_DAT2	0x218
+#define VC_EQC_CWATCH_DAT2	0x940
+#define X_VC_EQC_CWATCH_DAT3	0x219
+#define VC_EQC_CWATCH_DAT3	0x948
 #define X_VC_IVC_SCRUB_TRIG	0x222
 #define VC_IVC_SCRUB_TRIG	0x990
 #define X_VC_IVC_SCRUB_MASK	0x223
-- 
2.7.4


