[Skiboot] [PATCH V2 1/2] occ: Add support for OPAL-OCC command/response interface

Cyril Bur cyrilbur at gmail.com
Wed Jun 14 16:29:52 AEST 2017


On Tue, 2017-06-13 at 16:34 +0530, Shilpasri G Bhat wrote:
> This patch adds support for a shared memory based command/response
> interface between OCC and OPAL. In HOMER, there is an OPAL command
> buffer and an OCC response buffer which is used to send inband
> commands to OCC.
> 
> The OPAL-OCC command/response sequence is as follows:
> 
> 1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
>    bit in OPAL command flag are set to zero. If yes then proceed with
>    below steps to send a command to OCC.
> 2. Write the command value, request ID and command specific data
>    to the OPAL command buffer.
> 3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
>    flag to indicate command is ready.
> 4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
>    is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
>    bit.
> 5. OCC will process the command and write the response to the OCC response
>    buffer, set the 'Rsp Ready' bit in the response flag and send an
>    interrupt.
> 6. OPAL will receive the interrupt and queue the response to the host.
> 
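
Just so we're on the same page, here is my shorthand of the OPAL side of
that handshake. This is a simplified reviewer sketch using the flag names
the patch defines further down - it is not the patch code itself, and the
command data copy is omitted:

/*
 * Simplified sketch of steps 1-6 above, not the patch code.  'cmd' and
 * 'rsp' are the per-chip command/response buffers in HOMER; flag values
 * are the ones the patch defines (OPAL_CMD_READY, OCC_IN_PROGRESS,
 * OCC_RSP_READY).
 */
static int occ_send_cmd_sketch(struct opal_command_buffer *cmd,
			       struct occ_response_buffer *rsp,
			       u8 op, u8 token)
{
	/* 1. both sides must be idle before we touch the buffer */
	if ((rsp->flag & OCC_IN_PROGRESS) || (cmd->flag & OPAL_CMD_READY))
		return OPAL_OCC_BUSY;

	/* 2. fill in the command (command data copy omitted here) */
	cmd->cmd = op;
	cmd->request_id = token;

	/* 3. clear the response flag, then mark the command ready */
	rsp->flag = 0;
	cmd->flag = OPAL_CMD_READY;

	/*
	 * 4-5. OCC polls the command flag every 4ms, sets OCC_IN_PROGRESS,
	 *      processes the command, writes the response, sets
	 *      OCC_RSP_READY and raises an interrupt.
	 * 6.   The interrupt handler (handle_occ_rsp() below) queues the
	 *      response to the host.
	 */
	return OPAL_ASYNC_COMPLETION;
}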

So I'm not sure about the pending-response list (rsp_msg_list) you've got
going on. If I've read it right, it is there to cope with opal_queue_msg()
failing - how likely is that? opal_queue_msg() is already a queue, so
you've got a queue that queues before a queue; it feels like there is one
too many queues.

If you're expecting all the OCC events to really overwhelm the queue inside
opal_queue_msg(), then perhaps we should consider making that one larger
rather than adding another queue in front of it.
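
To make that concrete, this is roughly the shape I'd expect the response
path to collapse to if the pool behind opal_queue_msg() were simply sized
so that one outstanding response per chip can never fail to queue. Sketch
only, untested, and the "pool is big enough" part is exactly the assumption
in question:

/*
 * Reviewer's sketch, not patch code: assumes opal_queue_msg() never runs
 * out of message slots for one in-flight OCC response per chip.
 * opal_queue_msg() and OPAL_MSG_ASYNC_COMP are the same calls the patch
 * already uses; only the "no fallback needed" premise is new.
 */
static void send_occ_rsp_msg(u8 token, int rc)
{
	int ret;

	ret = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc);
	if (ret)
		prlog(PR_ERR, "OCC: Failed to queue OCC response, rc = %d\n",
		      ret);
	/* No rsp_msg_list, no rsp_msg_lock, no consumed callback to chain. */
}

That gets rid of the extra list, the lock and the re-queue dance in the
consumed callback entirely.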

Other than that, looks good.

> Signed-off-by: Shilpasri G Bhat <shilpa.bhat at linux.vnet.ibm.com>
> ---
> Changes from V1:
> - Replace (u8 *) with __be64 for cdata and rdata
> - Use nr_occs iterate over per-chip data
> - Check the command on retry
> - s/OCC_STATE_CHAR/OCC_STATE_CHARACTERIZATION
> 
>  hw/occ.c           | 465 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  include/opal-api.h |  82 +++++++++-
>  2 files changed, 542 insertions(+), 5 deletions(-)
> 
> diff --git a/hw/occ.c b/hw/occ.c
> index bc3abc7..bb5f321 100644
> --- a/hw/occ.c
> +++ b/hw/occ.c
> @@ -120,6 +120,87 @@ struct occ_pstate_table {
>  } __packed;
>  
>  /**
> + * OPAL-OCC Command Response Interface
> + *
> + * OPAL-OCC Command Buffer
> + *
> + * ---------------------------------------------------------------------
> + * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
> + * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> + * ---------------------------------------------------------------------
> + * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
> + * |								       |
> + * ---------------------------------------------------------------------
> + *
> + * OPAL Command Flag
> + *
> + * -----------------------------------------------------------------
> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> + * -----------------------------------------------------------------
> + * |Cmd    |       |       |       |       |       |       |       |
> + * |Ready  |	   |	   |	   |	   |	   |	   |	   |
> + * -----------------------------------------------------------------
> + *
> + * struct opal_command_buffer -	Defines the layout of OPAL command buffer
> + * @flag:			Provides general status of the command
> + * @request_id:			Token to identify request
> + * @cmd:			Command sent
> + * @data_size:			Command data length
> + * @data:			Command specific data
> + * @spare:			Unused byte
> + */
> +struct opal_command_buffer {
> +	u8 flag;
> +	u8 request_id;
> +	u8 cmd;
> +	u8 spare;
> +	u16 data_size;
> +	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
> +} __packed;
> +
> +/**
> + * OPAL-OCC Response Buffer
> + *
> + * ---------------------------------------------------------------------
> + * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
> + * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> + * ---------------------------------------------------------------------
> + * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
> + * |								       |
> + * ---------------------------------------------------------------------
> + *
> + * OCC Response Flag
> + *
> + * -----------------------------------------------------------------
> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> + * -----------------------------------------------------------------
> + * |       |       |       |       |       |       |OCC in  | Rsp  |
> + * |       |	   |	   |	   |	   |	   |progress|Ready |
> + * -----------------------------------------------------------------
> + *
> + * struct occ_response_buffer -	Defines the layout of OCC response buffer
> + * @flag:			Provides general status of the response
> + * @request_id:			Token to identify request
> + * @cmd:			Command requested
> + * @status:			Indicates success/failure status of
> + *				the command
> + * @data_size:			Response data length
> + * @data:			Response specific data
> + */
> +struct occ_response_buffer {
> +	u8 flag;
> +	u8 request_id;
> +	u8 cmd;
> +	u8 status;
> +	u16 data_size;
> +	u8 data[MAX_OCC_RSP_DATA_LENGTH];
> +} __packed;
> +
> +/**
>   * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
>   *
>   * struct occ_dynamic_data -	Contains runtime attributes
> @@ -136,6 +217,8 @@ struct occ_pstate_table {
>   * @max_pwr_cap:		Maximum allowed system power cap in Watts
>   * @cur_pwr_cap:		Current system power cap
>   * @spare/reserved:		Unused data
> + * @cmd:			Opal Command Buffer
> + * @rsp:			OCC Response Buffer
>   */
>  struct occ_dynamic_data {
>  	u8 occ_state;
> @@ -151,7 +234,9 @@ struct occ_dynamic_data {
>  	u16 min_pwr_cap;
>  	u16 max_pwr_cap;
>  	u16 cur_pwr_cap;
> -	u64 reserved;
> +	u8 pad[112];
> +	struct opal_command_buffer cmd;
> +	struct occ_response_buffer rsp;
>  } __packed;
>  
>  static bool occ_reset;
> @@ -843,6 +928,375 @@ done:
>  	unlock(&occ_lock);
>  }
>  
> +#define OCC_RSP_READY		0x01
> +#define OCC_IN_PROGRESS		0x02
> +#define OPAL_CMD_READY		0x80
> +
> +enum occ_state {
> +	OCC_STATE_NOT_RUNNING		= 0x00,
> +	OCC_STATE_STANDBY		= 0x01,
> +	OCC_STATE_OBSERVATION		= 0x02,
> +	OCC_STATE_ACTIVE		= 0x03,
> +	OCC_STATE_SAFE			= 0x04,
> +	OCC_STATE_CHARACTERIZATION	= 0x05,
> +};
> +
> +enum occ_role {
> +	OCC_ROLE_SLAVE		= 0x0,
> +	OCC_ROLE_MASTER		= 0x1,
> +};
> +
> +enum occ_cmd_value {
> +	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
> +	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
> +	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
> +	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
> +	OCC_CMD_VALUE_SELECT_SENSOR_GROUPS	= 0xD3,
> +};
> +
> +struct opal_occ_cmd_info {
> +	enum	occ_cmd_value value;
> +	int	timeout_ms;
> +	u16	state_mask;
> +	u8	role_mask;
> +};
> +
> +static struct opal_occ_cmd_info occ_cmds[] = {
> +	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHARACTERIZATION),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHARACTERIZATION),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +	{	OCC_CMD_VALUE_SET_POWER_CAP,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHARACTERIZATION),
> +		PPC_BIT8(OCC_ROLE_MASTER)
> +	},
> +	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHARACTERIZATION),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +	{	OCC_CMD_VALUE_SELECT_SENSOR_GROUPS,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHARACTERIZATION),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +};
> +
> +static struct cmd_interface {
> +	struct lock queue_lock;
> +	struct timer timeout;
> +	struct opal_command_buffer *cmd;
> +	struct occ_response_buffer *rsp;
> +	struct opal_occ_cmd_rsp_msg *msg;
> +	u32 id;
> +	enum occ_cmd prev_cmd;
> +	u8 occ_role;
> +	u8 *occ_state;
> +	bool cmd_in_progress;
> +} *chips;
> +
> +struct occ_rsp_msg {
> +	struct list_node link;
> +	u8 token;
> +	int rc;
> +};
> +
> +static LIST_HEAD(rsp_msg_list);
> +static struct lock rsp_msg_lock = LOCK_UNLOCKED;
> +static bool rsp_msg_in_use;
> +static int nr_occs;
> +
> +static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < nr_occs; i++)
> +		if (chips[i].id == chip_id)
> +			return &chips[i];
> +
> +	return NULL;
> +}
> +
> +static inline bool occ_in_progress(struct cmd_interface *chip)
> +{
> +	return (chip->rsp->flag == OCC_IN_PROGRESS);
> +}
> +
> +static int write_occ_cmd(struct cmd_interface *chip, bool retry)
> +{
> +	struct opal_command_buffer *cmd = chip->cmd;
> +	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
> +
> +	if (!retry && occ_in_progress(chip)) {
> +		chip->cmd_in_progress = false;
> +		return OPAL_OCC_BUSY;
> +	}
> +
> +	cmd->flag = chip->rsp->flag = 0;
> +	cmd->cmd = occ_cmds[msg->cmd].value;
> +	cmd->request_id = msg->request_id;
> +	cmd->data_size = msg->cdata_size;
> +	memcpy(&cmd->data, (u8 *)msg->cdata, msg->cdata_size);
> +	cmd->flag = OPAL_CMD_READY;
> +
> +	schedule_timer(&chip->timeout,
> +		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
> +	chip->prev_cmd = msg->cmd;
> +
> +	return OPAL_ASYNC_COMPLETION;
> +}
> +
> +static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
> +				bool retry)
> +{
> +	struct cmd_interface *chip;
> +	int rc;
> +
> +	if (!msg || !opal_addr_valid((u8 *)msg->cdata) ||
> +	    !opal_addr_valid((u8 *)msg->rdata))
> +		return OPAL_PARAMETER;
> +
> +	if (msg->cmd >= OCC_CMD_LAST)
> +		return OPAL_UNSUPPORTED;
> +
> +	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
> +		return OPAL_PARAMETER;
> +
> +	chip = get_chip_cmd_interface(chip_id);
> +	if (!chip)
> +		return OPAL_PARAMETER;
> +
> +	if (retry && msg->cmd != chip->prev_cmd)
> +		return OPAL_PARAMETER;
> +
> +	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
> +		return OPAL_PARAMETER;
> +
> +	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
> +		return OPAL_OCC_INVALID_STATE;
> +
> +	lock(&chip->queue_lock);
> +	if (chip->cmd_in_progress) {
> +		rc = OPAL_OCC_BUSY;
> +		goto out;
> +	}
> +
> +	chip->msg = msg;
> +	chip->cmd_in_progress = true;
> +	rc = write_occ_cmd(chip, retry);
> +out:
> +	unlock(&chip->queue_lock);
> +	return rc;
> +}
> +
> +static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
> +					 struct opal_occ_cmd_rsp_msg *msg)
> +{
> +	return ((cmd->cmd == occ_cmds[msg->cmd].value) &&
> +		(cmd->request_id == msg->request_id) &&
> +		(cmd->data_size == msg->cdata_size));
> +}
> +
> +static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
> +				 struct occ_response_buffer *rsp)
> +{
> +	if (cmd->cmd != rsp->cmd) {
> +		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response, "
> +		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
> +		return false;
> +	}
> +
> +	if (cmd->request_id != rsp->request_id) {
> +		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response, "
> +		      "rsp->request_id = %d cmd->request_id = %d\n",
> +		      rsp->request_id, cmd->request_id);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +static void occ_rsp_msg_consumed(void *data __unused);
> +
> +static inline int queue_occ_rsp_msg(int token, int rc)
> +{
> +	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
> +			      token, rc);
> +}
> +
> +static void occ_rsp_msg_consumed(void *data __unused)
> +{
> +	struct occ_rsp_msg *item;
> +
> +	lock(&rsp_msg_lock);
> +
> +	/* Queue next message */
> +	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
> +	if (!item) {
> +		rsp_msg_in_use = false;
> +		goto exit;
> +	}
> +
> +	if (!queue_occ_rsp_msg(item->token, item->rc)) {
> +		rsp_msg_in_use = true;
> +		free(item);
> +	} else {
> +		rsp_msg_in_use = false;
> +		list_add(&rsp_msg_list, &item->link);
> +	}
> +exit:
> +	unlock(&rsp_msg_lock);
> +}
> +
> +static void send_occ_rsp_msg(u8 token, int rc)
> +{
> +	struct occ_rsp_msg *item;
> +
> +	lock(&rsp_msg_lock);
> +	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
> +		rsp_msg_in_use = true;
> +		goto out;
> +	}
> +
> +	item = malloc(sizeof(*item));
> +	if (!item)
> +		goto out;
> +
> +	item->token = token;
> +	item->rc = rc;
> +	item->link.next = item->link.prev = NULL;
> +	list_add_tail(&rsp_msg_list, &item->link);
> +out:
> +	unlock(&rsp_msg_lock);
> +}
> +
> +static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
> +				    uint64_t now __unused)
> +{
> +	struct cmd_interface *chip = data;
> +
> +	lock(&chip->queue_lock);
> +	if (!chip->cmd_in_progress)
> +		goto exit;
> +
> +	chip->cmd_in_progress = false;
> +	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
> +exit:
> +	unlock(&chip->queue_lock);
> +}
> +
> +static void read_occ_rsp(struct occ_response_buffer *rsp,
> +			 struct opal_occ_cmd_rsp_msg *msg)
> +{
> +	/* Copy response to host buffer */
> +	msg->status = rsp->status;
> +	msg->rdata_size = rsp->data_size;
> +	memcpy((u8 *)msg->rdata, rsp->data, rsp->data_size);
> +
> +	/* Clear the OCC response flag */
> +	rsp->flag = 0;
> +}
> +
> +static void handle_occ_rsp(uint32_t chip_id)
> +{
> +	struct cmd_interface *chip;
> +	struct opal_command_buffer *cmd;
> +	struct occ_response_buffer *rsp;
> +	struct opal_occ_cmd_rsp_msg *msg;
> +
> +	chip = get_chip_cmd_interface(chip_id);
> +	if (!chip)
> +		return;
> +
> +	cmd = chip->cmd;
> +	rsp = chip->rsp;
> +	msg = chip->msg;
> +
> +	/* Read the response */
> +	if (rsp->flag != OCC_RSP_READY)
> +		return;
> +	lock(&chip->queue_lock);
> +	if (!chip->cmd_in_progress)
> +		goto exit;
> +
> +	chip->cmd_in_progress = false;
> +	cancel_timer(&chip->timeout);
> +	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
> +	    !check_occ_rsp(cmd, rsp)) {
> +		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
> +		goto exit;
> +	}
> +
> +	read_occ_rsp(chip->rsp, msg);
> +	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
> +exit:
> +	unlock(&chip->queue_lock);
> +}
> +
> +static void occ_cmd_interface_init(void)
> +{
> +	struct dt_node *power_mgt;
> +	struct occ_dynamic_data *data;
> +	struct occ_pstate_table *pdata;
> +	struct proc_chip *chip;
> +	int i = 0;
> +
> +	chip = next_chip(NULL);
> +	pdata = get_occ_pstate_table(chip);
> +	if (pdata->version != 0x90)
> +		return;
> +
> +	for_each_chip(chip)
> +		nr_occs++;
> +
> +	chips = malloc(sizeof(*chips) * nr_occs);
> +	assert(chips);
> +
> +	for_each_chip(chip) {
> +		pdata = get_occ_pstate_table(chip);
> +		data = get_occ_dynamic_data(chip);
> +		chips[i].id = chip->id;
> +		chips[i].occ_role = pdata->v9.occ_role;
> +		chips[i].occ_state = &data->occ_state;
> +		chips[i].cmd = &data->cmd;
> +		chips[i].rsp = &data->rsp;
> +		init_lock(&chips[i].queue_lock);
> +		chips[i].cmd_in_progress = false;
> +		chips[i].prev_cmd = OCC_CMD_LAST;
> +		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
> +			   &chips[i]);
> +		i++;
> +	}
> +
> +	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
> +	if (!power_mgt) {
> +		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
> +		free(chips);
> +		return;
> +	}
> +
> +	dt_add_property_string(power_mgt, "compatible",
> +			       "ibm,opal-occ-cmd-rsp-interface");
> +	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
> +}
> +
>  /* CPU-OCC PState init */
>  /* Called after OCC init on P8 and P9 */
>  void occ_pstates_init(void)
> @@ -910,6 +1364,9 @@ void occ_pstates_init(void)
>  		chip->throttle = 0;
>  	opal_add_poller(occ_throttle_poll, NULL);
>  	occ_pstates_initialized = true;
> +
> +	/* Init OPAL-OCC command-response interface */
> +	occ_cmd_interface_init();
>  }
>  
>  struct occ_load_req {
> @@ -1408,8 +1865,10 @@ void occ_p9_interrupt(uint32_t chip_id)
>  	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
>  		prd_tmgt_interrupt(chip_id);
>  
> -	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
> +	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
>  		occ_throttle_poll(NULL);
> +		handle_occ_rsp(chip_id);
> +	}
>  
>  	if (ireg & OCB_OCI_OCIMISC_IRQ_I2C)
>  		p9_i2c_bus_owner_change(chip_id);
> @@ -1434,5 +1893,3 @@ void occ_fsp_init(void)
>  	if (fsp_present())
>  		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
>  }
> -
> -
> diff --git a/include/opal-api.h b/include/opal-api.h
> index 37af5f7..030a283 100644
> --- a/include/opal-api.h
> +++ b/include/opal-api.h
> @@ -55,6 +55,10 @@
>  #define OPAL_XSCOM_CTR_OFFLINED	-30
>  #define OPAL_XIVE_PROVISIONING	-31
>  #define OPAL_XIVE_FREE_ACTIVE	-32
> +#define OPAL_OCC_INVALID_STATE	-33
> +#define OPAL_OCC_BUSY		-34
> +#define OPAL_OCC_CMD_TIMEOUT	-35
> +#define OPAL_OCC_CMD_MISMATCH	-36
>  
>  /* API Tokens (in r0) */
>  #define OPAL_INVALID_CALL		       -1
> @@ -204,7 +208,8 @@
>  #define OPAL_NPU_INIT_CONTEXT			146
>  #define OPAL_NPU_DESTROY_CONTEXT		147
>  #define OPAL_NPU_MAP_LPAR			148
> -#define OPAL_LAST				148
> +#define OPAL_OCC_COMMAND			149
> +#define OPAL_LAST				149
>  
>  /* Device tree flags */
>  
> @@ -1021,6 +1026,81 @@ typedef struct oppanel_line {
>  	__be64 line_len;
>  } oppanel_line_t;
>  
> +enum occ_cmd {
> +	OCC_CMD_AMESTER_PASS_THRU = 0,
> +	OCC_CMD_CLEAR_SENSOR_DATA,
> +	OCC_CMD_SET_POWER_CAP,
> +	OCC_CMD_SET_POWER_SHIFTING_RATIO,
> +	OCC_CMD_SELECT_SENSOR_GROUPS,
> +	OCC_CMD_LAST
> +};
> +
> +enum occ_cmd_data_length {
> +	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> +	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
> +	OCC_CMD_DL_SET_POWER_CAP		= 2,
> +	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
> +	OCC_CMD_DL_SELECT_SENSOR_GROUPS		= 2,
> +};
> +
> +enum occ_rsp_data_length {
> +	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> +	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
> +	OCC_RSP_DL_SET_POWER_CAP		= 2,
> +	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
> +	OCC_RSP_DL_SELECT_SENSOR_GROUPS		= 2,
> +};
> +
> +enum occ_sensor_limit_group {
> +	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
> +	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
> +	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
> +};
> +
> +enum occ_sensor_group_mask {
> +	OCC_SENSOR_GROUP_MASK_PERFORMANCE	= 6,
> +	OCC_SENSOR_GROUP_MASK_POWER		= 8,
> +	OCC_SENSOR_GROUP_MASK_FREQUENCY		= 9,
> +	OCC_SENSOR_GROUP_MASK_TIME		= 10,
> +	OCC_SENSOR_GROUP_MASK_UTILIZATION	= 11,
> +	OCC_SENSOR_GROUP_MASK_TEMPERATURE	= 12,
> +	OCC_SENSOR_GROUP_MASK_VOLTAGE		= 13,
> +	OCC_SENSOR_GROUP_MASK_CURRENT		= 14,
> +};
> +
> +#define MAX_OPAL_CMD_DATA_LENGTH	4090
> +#define MAX_OCC_RSP_DATA_LENGTH		8698
> +
> +enum occ_response_status {
> +	OCC_SUCCESS			= 0x00,
> +	OCC_INVALID_COMMAND		= 0x11,
> +	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
> +	OCC_INVALID_DATA		= 0x13,
> +	OCC_INTERNAL_ERROR		= 0x15,
> +};
> +
> +struct opal_occ_cmd_rsp_msg {
> +	__be64 cdata;
> +	__be64 rdata;
> +	__be16 cdata_size;
> +	__be16 rdata_size;
> +	u8 cmd;
> +	u8 request_id;
> +	u8 status;
> +};
> +
> +struct opal_occ_cmd_data {
> +	__be16 size;
> +	u8 cmd;
> +	u8 data[];
> +};
> +
> +struct opal_occ_rsp_data {
> +	__be16 size;
> +	u8 status;
> +	u8 data[];
> +};
> +
>  enum opal_prd_msg_type {
>  	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
>  	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */

