[Skiboot] [PATCH 3/5] slw: Add Power9 idle states to power-mgt dt node

Shreyas B. Prabhu shreyas at linux.vnet.ibm.com
Thu Apr 21 23:48:00 AEST 2016


POWER ISA v3 defines a new idle processor core mechanism. In summary,
 a) a new instruction named stop is added. This instruction replaces
	instructions like nap, sleep and rvwinkle.
 b) a new per-thread SPR named PSSCR is added which controls the behavior
	of the stop instruction. This SPR subsumes PMICR. (A rough usage
	sketch follows below.)
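
For illustration only (not part of this patch): entering one of these
states amounts to merging the per-state PSSCR value into the SPR under
the advertised mask and then executing stop. A minimal C sketch, assuming
SPR number 855 (0x357) for PSSCR per ISA v3.0 and an assembler that
recognizes the stop mnemonic (older binutils may need the raw opcode):

#include <stdint.h>

#define SPR_PSSCR	855	/* 0x357; assumed PSSCR SPR number (ISA v3.0) */

static inline void enter_stop_state(uint64_t psscr_val, uint64_t psscr_mask)
{
	uint64_t psscr;

	/* Keep whatever lies outside the advertised mask untouched;
	 * the masks in this patch cover only the requested-level field. */
	asm volatile("mfspr %0,%1" : "=r"(psscr) : "i"(SPR_PSSCR));
	psscr = (psscr & ~psscr_mask) | (psscr_val & psscr_mask);
	asm volatile("mtspr %0,%1" : : "i"(SPR_PSSCR), "r"(psscr));

	/* Request the state; deep states lose context and need restore
	 * on wakeup. */
	asm volatile("stop" : : : "memory");
}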

This patch adds the supported idle states to the power-mgt device tree node.

It also introduces ibm,cpu-idle-state-psscr and
ibm,cpu-idle-state-psscr-mask entries, which expose the value to be
written to the PSSCR to enter a given stop state. These entries replace
POWER8's counterparts ibm,cpu-idle-state-pmicr and
ibm,cpu-idle-state-pmicr-mask.
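
The two new properties are parallel arrays: entry i of
ibm,cpu-idle-state-psscr pairs with entry i of
ibm,cpu-idle-state-psscr-mask (and with entry i of the existing names,
flags, latency and residency arrays). A rough sketch of how a consumer
might fold them into its own idle-state table, assuming the arrays have
already been read out of the device tree and converted to host byte
order (the stop_state struct and build_stop_table() helper here are
hypothetical, not an existing API):

#include <stdint.h>
#include <stddef.h>

/* Flag bits as defined by this patch in include/opal-api.h */
#define OPAL_PM_STOP_INST_FAST	0x00100000
#define OPAL_PM_STOP_INST_DEEP	0x00200000

/* Hypothetical per-state record a consumer might keep */
struct stop_state {
	uint32_t flags;
	uint64_t psscr_val;
	uint64_t psscr_mask;
	int	 deep;		/* full context lost; needs restore */
};

static size_t build_stop_table(struct stop_state *out, size_t nr,
			       const uint32_t *flags,
			       const uint64_t *psscr,
			       const uint64_t *psscr_mask)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		out[i].flags	  = flags[i];
		out[i].psscr_val  = psscr[i];
		out[i].psscr_mask = psscr_mask[i];
		out[i].deep	  = !!(flags[i] & OPAL_PM_STOP_INST_DEEP);
	}
	return nr;
}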

Signed-off-by: Shreyas B. Prabhu <shreyas at linux.vnet.ibm.com>
---
 hw/slw.c           | 171 ++++++++++++++++++++++++++++++++++++++++++-----------
 include/opal-api.h |  10 ++++
 2 files changed, 147 insertions(+), 34 deletions(-)

diff --git a/hw/slw.c b/hw/slw.c
index 68b61ea80ccd..ccaea827378a 100644
--- a/hw/slw.c
+++ b/hw/slw.c
@@ -397,8 +397,10 @@ struct cpu_idle_states {
 	u32 latency_ns;
 	u32 residency_ns;
 	u32 flags;
-	u64 pmicr;
-	u64 pmicr_mask;
+	/* Register value/mask used to select different idle states.
+	 * PMICR in POWER8 and PSSCR in POWER9 */
+	u64 pm_ctrl_reg_val;
+	u64 pm_ctrl_reg_mask;
 };
 
 /* Flag definitions */
@@ -435,8 +437,8 @@ static struct cpu_idle_states power7_cpu_idle_states[] = {
 		       | 0*OPAL_PM_SLEEP_ENABLED \
 		       | 0*OPAL_PM_WINKLE_ENABLED \
 		       | 0*IDLE_USE_PMICR,
-		.pmicr = 0,
-		.pmicr_mask = 0 },
+		.pm_ctrl_reg_val = 0,
+		.pm_ctrl_reg_mask = 0 },
 };
 
 static struct cpu_idle_states power8_cpu_idle_states[] = {
@@ -451,8 +453,8 @@ static struct cpu_idle_states power8_cpu_idle_states[] = {
 		       | 0*IDLE_LOSE_FULL_CONTEXT \
 		       | 1*OPAL_PM_NAP_ENABLED \
 		       | 0*IDLE_USE_PMICR,
-		.pmicr = 0,
-		.pmicr_mask = 0 },
+		.pm_ctrl_reg_val = 0,
+		.pm_ctrl_reg_mask = 0 },
 	{ /* fast sleep (with workaround) */
 		.name = "fastsleep_",
 		.latency_ns = 40000,
@@ -465,8 +467,8 @@ static struct cpu_idle_states power8_cpu_idle_states[] = {
 		       | 1*OPAL_PM_SLEEP_ENABLED_ER1 \
 		       | 0*IDLE_USE_PMICR, /* Not enabled until deep
 						states are available */
-		.pmicr = IDLE_FASTSLEEP_PMICR,
-		.pmicr_mask = IDLE_SLEEP_PMICR_MASK },
+		.pm_ctrl_reg_val = IDLE_FASTSLEEP_PMICR,
+		.pm_ctrl_reg_mask = IDLE_SLEEP_PMICR_MASK },
 	{ /* Winkle */
 		.name = "winkle",
 		.latency_ns = 10000000,
@@ -485,10 +487,89 @@ static struct cpu_idle_states power8_cpu_idle_states[] = {
 		       | 1*OPAL_PM_WINKLE_ENABLED \
 		       | 0*IDLE_USE_PMICR, /* Currently choosing deep vs
 						fast via EX_PM_GP1 reg */
-		.pmicr = 0,
-		.pmicr_mask = 0 },
+		.pm_ctrl_reg_val = 0,
+		.pm_ctrl_reg_mask = 0 },
 };
 
+static struct cpu_idle_states power9_cpu_idle_states[] = {
+	{
+		.name = "stop0",
+		.latency_ns = 300,
+		.residency_ns = 3000,
+		.flags = 0*IDLE_DEC_STOP \
+		       | 0*IDLE_TB_STOP  \
+		       | 0*IDLE_LOSE_USER_CONTEXT \
+		       | 0*IDLE_LOSE_HYP_CONTEXT \
+		       | 0*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_FAST,
+		.pm_ctrl_reg_val = 0,
+		.pm_ctrl_reg_mask = 0xF },
+	{
+		.name = "stop1",
+		.latency_ns = 5000,
+		.residency_ns = 50000,
+		.flags = 0*IDLE_DEC_STOP \
+		       | 0*IDLE_TB_STOP  \
+		       | 1*IDLE_LOSE_USER_CONTEXT \
+		       | 0*IDLE_LOSE_HYP_CONTEXT \
+		       | 0*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_FAST,
+		.pm_ctrl_reg_val = 1,
+		.pm_ctrl_reg_mask = 0xF },
+	{
+		.name = "stop2",
+		.latency_ns = 10000,
+		.residency_ns = 100000,
+		.flags = 0*IDLE_DEC_STOP \
+		       | 0*IDLE_TB_STOP  \
+		       | 1*IDLE_LOSE_USER_CONTEXT \
+		       | 0*IDLE_LOSE_HYP_CONTEXT \
+		       | 0*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_FAST,
+		.pm_ctrl_reg_val = 2,
+		.pm_ctrl_reg_mask = 0xF },
+
+	{
+		.name = "stop4",
+		.latency_ns = 100000,
+		.residency_ns = 1000000,
+		.flags = 1*IDLE_DEC_STOP \
+		       | 1*IDLE_TB_STOP  \
+		       | 1*IDLE_LOSE_USER_CONTEXT \
+		       | 1*IDLE_LOSE_HYP_CONTEXT \
+		       | 1*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_DEEP,
+		.pm_ctrl_reg_val = 4,
+		.pm_ctrl_reg_mask = 0xF },
+
+	{
+		.name = "stop8",
+		.latency_ns = 2000000,
+		.residency_ns = 20000000,
+		.flags = 1*IDLE_DEC_STOP \
+		       | 1*IDLE_TB_STOP  \
+		       | 1*IDLE_LOSE_USER_CONTEXT \
+		       | 1*IDLE_LOSE_HYP_CONTEXT \
+		       | 1*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_DEEP,
+		.pm_ctrl_reg_val = 0x8,
+		.pm_ctrl_reg_mask = 0xF },
+
+
+	{
+		.name = "stop11",
+		.latency_ns = 10000000,
+		.residency_ns = 100000000,
+		.flags = 1*IDLE_DEC_STOP \
+		       | 1*IDLE_TB_STOP  \
+		       | 1*IDLE_LOSE_USER_CONTEXT \
+		       | 1*IDLE_LOSE_HYP_CONTEXT \
+		       | 1*IDLE_LOSE_FULL_CONTEXT \
+		       | 1*OPAL_PM_STOP_INST_DEEP,
+		.pm_ctrl_reg_val = 0xB,
+		.pm_ctrl_reg_mask = 0xF },
+
+};
 /* Add device tree properties to describe idle states */
 void add_cpu_idle_state_properties(void)
 {
@@ -499,6 +580,7 @@ void add_cpu_idle_state_properties(void)
 
 	bool can_sleep = true;
 	bool has_slw = true;
+	bool has_stop_inst = false;
 	u8 i;
 
 	u32 supported_states_mask;
@@ -508,8 +590,8 @@ void add_cpu_idle_state_properties(void)
 	u32 *latency_ns_buf;
 	u32 *residency_ns_buf;
 	u32 *flags_buf;
-	u64 *pmicr_buf;
-	u64 *pmicr_mask_buf;
+	u64 *pm_ctrl_reg_val_buf;
+	u64 *pm_ctrl_reg_mask_buf;
 
 	/* Variables to track buffer length */
 	u8 name_buf_len;
@@ -538,7 +620,13 @@ void add_cpu_idle_state_properties(void)
 	 */
 	chip = next_chip(NULL);
 	assert(chip);
-	if (chip->type == PROC_CHIP_P8_MURANO ||
+	if (chip->type == PROC_CHIP_P9_NIMBUS ||
+	    chip->type == PROC_CHIP_P9_CUMULUS) {
+		states = power9_cpu_idle_states;
+		nr_states = ARRAY_SIZE(power9_cpu_idle_states);
+		has_stop_inst = true;
+	}
+	else if (chip->type == PROC_CHIP_P8_MURANO ||
 	    chip->type == PROC_CHIP_P8_VENICE ||
 	    chip->type == PROC_CHIP_P8_NAPLES) {
 		const struct dt_property *p;
@@ -586,8 +674,8 @@ void add_cpu_idle_state_properties(void)
 	latency_ns_buf	=  (u32 *) malloc(nr_states * sizeof(u32));
 	residency_ns_buf=  (u32 *) malloc(nr_states * sizeof(u32));
 	flags_buf	=  (u32 *) malloc(nr_states * sizeof(u32));
-	pmicr_buf	=  (u64 *) malloc(nr_states * sizeof(u64));
-	pmicr_mask_buf	=  (u64 *) malloc(nr_states * sizeof(u64));
+	pm_ctrl_reg_val_buf	=  (u64 *) malloc(nr_states * sizeof(u64));
+	pm_ctrl_reg_mask_buf	=  (u64 *) malloc(nr_states * sizeof(u64));
 
 	name_buf_len = 0;
 	num_supported_idle_states = 0;
@@ -597,13 +685,18 @@ void add_cpu_idle_state_properties(void)
 	 * set. Use this to only add supported idle states to the
 	 * device-tree
 	 */
-	supported_states_mask = OPAL_PM_NAP_ENABLED;
-	if (can_sleep)
-		supported_states_mask |= OPAL_PM_SLEEP_ENABLED |
-					OPAL_PM_SLEEP_ENABLED_ER1;
-	if (has_slw)
+	if (has_stop_inst) {
+		supported_states_mask = OPAL_PM_STOP_INST_FAST;
+		if (has_slw)
+			supported_states_mask |= OPAL_PM_STOP_INST_DEEP;
+	} else {
+		supported_states_mask = OPAL_PM_NAP_ENABLED;
+		if (can_sleep)
+			supported_states_mask |= OPAL_PM_SLEEP_ENABLED |
+						OPAL_PM_SLEEP_ENABLED_ER1;
+		if (has_slw)
 		supported_states_mask |= OPAL_PM_WINKLE_ENABLED;
-
+	}
 	for (i = 0; i < nr_states; i++) {
 		/* For each state, check if it is one of the supported states. */
 		if (states[i].flags & supported_states_mask) {
@@ -623,11 +716,11 @@ void add_cpu_idle_state_properties(void)
 			*flags_buf = cpu_to_fdt32(states[i].flags);
 			flags_buf++;
 
-			*pmicr_buf = cpu_to_fdt64(states[i].pmicr);
-			pmicr_buf++;
+			*pm_ctrl_reg_val_buf = cpu_to_fdt64(states[i].pm_ctrl_reg_val);
+			pm_ctrl_reg_val_buf++;
 
-			*pmicr_mask_buf = cpu_to_fdt64(states[i].pmicr);
-			pmicr_mask_buf++;
+			*pm_ctrl_reg_mask_buf = cpu_to_fdt64(states[i].pm_ctrl_reg_mask);
+			pm_ctrl_reg_mask_buf++;
 
 			/* Increment buffer length trackers */
 			name_buf_len += strlen(states[i].name) + 1;
@@ -640,9 +733,8 @@ void add_cpu_idle_state_properties(void)
 	latency_ns_buf -= num_supported_idle_states;
 	residency_ns_buf -= num_supported_idle_states;
 	flags_buf -= num_supported_idle_states;
-	pmicr_buf -= num_supported_idle_states;
-	pmicr_mask_buf -= num_supported_idle_states;
-
+	pm_ctrl_reg_val_buf -= num_supported_idle_states;
+	pm_ctrl_reg_mask_buf -= num_supported_idle_states;
 	/* Create dt properties with the buffer content */
 	dt_add_property(power_mgt, "ibm,cpu-idle-state-names", name_buf,
 			name_buf_len* sizeof(char));
@@ -652,18 +744,29 @@ void add_cpu_idle_state_properties(void)
 			residency_ns_buf, num_supported_idle_states * sizeof(u32));
 	dt_add_property(power_mgt, "ibm,cpu-idle-state-flags", flags_buf,
 			num_supported_idle_states * sizeof(u32));
-	dt_add_property(power_mgt, "ibm,cpu-idle-state-pmicr", pmicr_buf,
-			num_supported_idle_states * sizeof(u64));
-	dt_add_property(power_mgt, "ibm,cpu-idle-state-pmicr-mask",
-			pmicr_mask_buf, num_supported_idle_states * sizeof(u64));
 
+	if (has_stop_inst) {
+		dt_add_property(power_mgt, "ibm,cpu-idle-state-psscr",
+				pm_ctrl_reg_val_buf,
+				num_supported_idle_states * sizeof(u64));
+		dt_add_property(power_mgt, "ibm,cpu-idle-state-psscr-mask",
+				pm_ctrl_reg_mask_buf,
+				num_supported_idle_states * sizeof(u64));
+	} else {
+		dt_add_property(power_mgt, "ibm,cpu-idle-state-pmicr",
+				pm_ctrl_reg_val_buf,
+				num_supported_idle_states * sizeof(u64));
+		dt_add_property(power_mgt, "ibm,cpu-idle-state-pmicr-mask",
+				pm_ctrl_reg_mask_buf,
+				num_supported_idle_states * sizeof(u64));
+	}
 	assert(alloced_name_buf == name_buf);
 	free(alloced_name_buf);
 	free(latency_ns_buf);
 	free(residency_ns_buf);
 	free(flags_buf);
-	free(pmicr_buf);
-	free(pmicr_mask_buf);
+	free(pm_ctrl_reg_val_buf);
+	free(pm_ctrl_reg_mask_buf);
 }
 
 #ifdef __HAVE_LIBPORE__
diff --git a/include/opal-api.h b/include/opal-api.h
index 369aa93276fb..8a6dae06f33c 100644
--- a/include/opal-api.h
+++ b/include/opal-api.h
@@ -175,6 +175,16 @@
 #define OPAL_PM_WINKLE_ENABLED		0x00040000
 #define OPAL_PM_SLEEP_ENABLED_ER1	0x00080000 /* with workaround */
 
+/*
+ * Flags for stop states. Use 2 bits to distinguish between
+ * deep and fast states. Deep states result in full context
+ * loss thereby requiring slw to partially restore state
+ * whereas fast state can function without the presence of
+ * slw.
+ */
+#define OPAL_PM_STOP_INST_FAST		0x00100000
+#define OPAL_PM_STOP_INST_DEEP		0x00200000
+
 #ifndef __ASSEMBLY__
 
 /* Other enums */
-- 
1.9.3


