[Skiboot] [PATCH V5 01/10] opal/errorlog : Modification as per coding guidelines to make the code more legible
Mukesh Ojha
mukesh02 at linux.vnet.ibm.com
Sun Aug 21 18:47:19 AEST 2016
Some modifications related to typos, alignment, and letter-case mismatches
to add more clarity to the code.
Signed-off-by: Mukesh Ojha <mukesh02 at linux.vnet.ibm.com>
---
Changes in V5:
- Removes the changes (V4 1/6) related to redundant line removal, since it
is up to the individual developer's choice whether to keep them to make the
code cleaner.
Changes in V4:
- V3 2/6 becomes V4 1/6.
- Some modifications in core/errorlog.c.
- Changes are rebased on master.
Changes in V3:
- Some more corrections moved from 3/6 and 6/6 to this patch.
Changes in V2:
- New Patch.
core/errorlog.c | 22 +++---
hw/fsp/fsp-elog-read.c | 178 ++++++++++++++++++++++++------------------------
hw/fsp/fsp-elog-write.c | 106 +++++++++++++++-------------
hw/ipmi/ipmi-sel.c | 77 +++++++++++----------
4 files changed, 200 insertions(+), 183 deletions(-)
diff --git a/core/errorlog.c b/core/errorlog.c
index c34251b..179e09f 100644
--- a/core/errorlog.c
+++ b/core/errorlog.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 IBM Corp.
+/* Copyright 2013-2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,12 +27,12 @@
/*
* Maximum number buffers that are pre-allocated
* to hold elogs that are reported on Sapphire and
- * powernv.
+ * PowerNV.
*/
#define ELOG_WRITE_MAX_RECORD 64
-
-/* Platform Log ID as per the spec */
+/* Platform log id as per the spec */
static uint32_t sapphire_elog_id = 0xB0000000;
+
/* Reserved for future use */
/* static uint32_t powernv_elog_id = 0xB1000000; */
@@ -54,6 +54,7 @@ static struct errorlog *get_write_buffer(int opal_event_severity)
buf = pool_get(&elog_pool, POOL_HIGH);
else
buf = pool_get(&elog_pool, POOL_NORMAL);
+
unlock(&elog_lock);
return buf;
}
@@ -132,8 +133,10 @@ void log_commit(struct errorlog *elog)
rc = platform.elog_commit(elog);
if (rc)
prerror("ELOG: Platform commit error %d\n", rc);
+
return;
}
+
opal_elog_complete(elog, false);
}
@@ -156,7 +159,6 @@ void log_append_data(struct errorlog *buf, unsigned char *data, uint16_t size)
/* Step through user sections to find latest dump section */
buffer = buf->user_data_dump;
n_sections = buf->user_section_count;
-
if (!n_sections) {
prerror("ELOG: User section invalid\n");
return;
@@ -170,7 +172,6 @@ void log_append_data(struct errorlog *buf, unsigned char *data, uint16_t size)
section = (struct elog_user_data_section *)buffer;
buffer += section->size;
memcpy(buffer, data, size);
-
section->size += size;
buf->user_section_size += size;
}
@@ -209,9 +210,9 @@ void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
prerror("%s", err_msg);
buf = opal_elog_create(e_info, 0);
- if (buf == NULL)
+ if (buf == NULL) {
prerror("ELOG: Error getting buffer to log error\n");
- else {
+ } else {
log_append_data(buf, err_msg, strlen(err_msg));
log_commit(buf);
}
@@ -219,8 +220,9 @@ void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
int elog_init(void)
{
- /* pre-allocate memory for records */
- if (pool_init(&elog_pool, sizeof(struct errorlog), ELOG_WRITE_MAX_RECORD, 1))
+ /* Pre-allocate memory for records */
+ if (pool_init(&elog_pool, sizeof(struct errorlog),
+ ELOG_WRITE_MAX_RECORD, 1))
return OPAL_RESOURCE;
elog_available = true;
diff --git a/hw/fsp/fsp-elog-read.c b/hw/fsp/fsp-elog-read.c
index a980281..351b3bd 100644
--- a/hw/fsp/fsp-elog-read.c
+++ b/hw/fsp/fsp-elog-read.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 IBM Corp.
+/* Copyright 2013-2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,44 +16,40 @@
/*
- * This code will enable retrieving of error log from fsp->sapphire
- * in sequence.
- * Here, FSP would send next log only when sapphire sends a new
- * log notification response to FSP. On Completion of reading
- * the log from FSP, OPAL_EVENT_ERROR_LOG_AVAIL is signaled.
- * This will remain raised until a call to opal_elog_read()
- * is made and OPAL_SUCCESS is returned, upon which.
- * the operation is complete and the event is cleared.
- * This is READ action from FSP.
+ * This code will enable retrieving of error log from FSP -> Sapphire in
+ * sequence.
+ * Here, FSP would send next log only when Sapphire sends a new log notification
+ * response to FSP. On Completion of reading the log from FSP,
+ * OPAL_EVENT_ERROR_LOG_AVAIL is signaled. This will remain raised until a call
+ * to opal_elog_read() is made and OPAL_SUCCESS is returned. Upon which, the
+ * operation is complete and the event is cleared. This is READ action from FSP.
*/
/*
* Design of READ error log :
- * When we receive a new error log entry notificatiion from FSP,
- * we queue it into the "pending" list.
- * If the "pending" list is not empty, then we start the fetching log from FSP.
+ * When we receive a new error log entry notification from FSP, we queue it into
+ * the "pending" list. If the "pending" list is not empty, then we start
+ * fetching log from FSP.
*
- * When Linux reads a log entry, we dequeue it from the "pending" list
- * and enqueue it to another "processed" list. At this point, if the
- * "pending" list is not empty, we continue to fetch the next log.
+ * When Linux reads a log entry, we dequeue it from the "pending" list and
+ * enqueue it to another "processed" list. At this point, if the "pending"
+ * list is not empty, we continue to fetch the next log.
*
- * When Linux calls opal_resend_pending_logs(), we fetch the log
- * corresponding to the head of the pending list and move it to the
- * processed list, and continue this process this until the pending list is
- * empty. If the pending list was empty earlier and is currently non-empty, we
- * initiate an error log fetch.
+ * When Linux calls opal_resend_pending_logs(), we fetch the log corresponding
+ * to the head of the pending list and move it to the processed list, and
+ * continue this process until the pending list is empty. If the pending list
+ * was empty earlier and is currently non-empty, we initiate an error log fetch.
*
* When Linux acks an error log, we remove it from processed list.
*/
-#include <skiboot.h>
-#include <fsp.h>
-#include <cpu.h>
-#include <lock.h>
#include <errno.h>
-#include <psi.h>
+#include <fsp.h>
#include <fsp-elog.h>
+#include <lock.h>
#include <opal-api.h>
+#include <psi.h>
+#include <skiboot.h>
/*
* Maximum number of entries that are pre-allocated
@@ -61,7 +57,7 @@
*/
#define ELOG_READ_MAX_RECORD 128
-/* structure to maintain log-id,log-size, pending and processed list */
+/* Structure to maintain log-id, log-size, pending and processed list. */
struct fsp_log_entry {
uint32_t log_id;
size_t log_size;
@@ -71,30 +67,29 @@ struct fsp_log_entry {
static LIST_HEAD(elog_read_pending);
static LIST_HEAD(elog_read_processed);
static LIST_HEAD(elog_read_free);
-
/*
- * lock is used to protect overwriting of processed and pending list
- * and also used while updating state of each log
+ * Lock is used to protect overwriting of processed and pending list
+ * and also used while updating state of each log.
*/
static struct lock elog_read_lock = LOCK_UNLOCKED;
-/* log buffer to copy FSP log for READ */
-#define ELOG_READ_BUFFER_SIZE 0x00004000
+#define ELOG_READ_BUFFER_SIZE 0x00004000
+/* Log buffer to copy FSP log for read */
static void *elog_read_buffer;
static uint32_t elog_head_id; /* FSP entry ID */
-static size_t elog_head_size; /* actual FSP log size */
-static uint32_t elog_read_retries; /* bad response status count */
+static size_t elog_head_size; /* Actual FSP log size */
+static uint32_t elog_read_retries; /* Bad response status count */
/* Initialize the state of the log */
static enum elog_head_state elog_read_from_fsp_head_state = ELOG_STATE_NONE;
static bool elog_enabled = false;
-/* Need forward declaration because of Circular dependency */
+/* Need forward declaration because of circular dependency. */
static void fsp_elog_queue_fetch(void);
/*
- * check the response message for mbox acknowledgment
+ * Check the response message for mbox acknowledgement
* command send to FSP.
*/
static void fsp_elog_ack_complete(struct fsp_msg *msg)
@@ -103,14 +98,14 @@ static void fsp_elog_ack_complete(struct fsp_msg *msg)
val = (msg->resp->word1 >> 8) & 0xff;
if (val != 0)
- prerror("ELOG: Acknowledgment error\n");
+ prerror("ELOG: Acknowledgement error\n");
+
fsp_freemsg(msg);
}
-/* send Error Log PHYP Acknowledgment to FSP with entry ID */
+/* Send error log PHYP acknowledgement to FSP with entry ID. */
static int64_t fsp_send_elog_ack(uint32_t log_id)
{
-
struct fsp_msg *ack_msg;
ack_msg = fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK, 1, log_id);
@@ -118,23 +113,24 @@ static int64_t fsp_send_elog_ack(uint32_t log_id)
prerror("ELOG: Failed to allocate ack message\n");
return OPAL_INTERNAL_ERROR;
}
+
if (fsp_queue_msg(ack_msg, fsp_elog_ack_complete)) {
fsp_freemsg(ack_msg);
ack_msg = NULL;
prerror("ELOG: Error queueing elog ack complete\n");
return OPAL_INTERNAL_ERROR;
}
+
return OPAL_SUCCESS;
}
-/* retrive error log from FSP with TCE for the data transfer */
+/* Retrieve error log from FSP with TCE for the data transfer. */
static void fsp_elog_check_and_fetch_head(void)
{
if (!elog_enabled)
return;
lock(&elog_read_lock);
-
if (elog_read_from_fsp_head_state != ELOG_STATE_NONE ||
list_empty(&elog_read_pending)) {
unlock(&elog_read_lock);
@@ -142,7 +138,6 @@ static void fsp_elog_check_and_fetch_head(void)
}
elog_read_retries = 0;
-
/* Start fetching first entry from the pending list */
fsp_elog_queue_fetch();
unlock(&elog_read_lock);
@@ -163,14 +158,14 @@ void elog_set_head_state(bool opal_logs, enum elog_head_state state)
fsp_logs_state = state;
if (fsp_logs_state == ELOG_STATE_FETCHED_DATA ||
- opal_logs_state == ELOG_STATE_FETCHED_DATA)
+ opal_logs_state == ELOG_STATE_FETCHED_DATA)
opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL,
OPAL_EVENT_ERROR_LOG_AVAIL);
else
opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, 0);
}
-/* this function should be called with the lock held */
+/* This function should be called with the lock held. */
static inline void fsp_elog_set_head_state(enum elog_head_state state)
{
elog_set_head_state(false, state);
@@ -178,17 +173,17 @@ static inline void fsp_elog_set_head_state(enum elog_head_state state)
}
/*
- * when we try maximum time of fetching log from fsp
+ * When we try the maximum time of fetching log from FSP
* we call following function to delete log from the
- * pending list and update the state to fetch next log
+ * pending list and update the state to fetch next log.
*
- * this function should be called with the lock held
+ * This function should be called with the lock held.
*/
static void fsp_elog_fetch_failure(uint8_t fsp_status)
{
struct fsp_log_entry *log_data;
- /* read top list and delete the node */
+ /* Read top list and delete the node */
log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
if (!log_data) {
/**
@@ -204,8 +199,8 @@ static void fsp_elog_fetch_failure(uint8_t fsp_status)
list_add(&elog_read_free, &log_data->link);
prerror("ELOG: received invalid data: %x FSP status: 0x%x\n",
log_data->log_id, fsp_status);
-
}
+
fsp_elog_set_head_state(ELOG_STATE_NONE);
}
@@ -213,12 +208,10 @@ static void fsp_elog_fetch_failure(uint8_t fsp_status)
static void fsp_elog_read_complete(struct fsp_msg *read_msg)
{
uint8_t val;
- /*struct fsp_log_entry *log_data;*/
lock(&elog_read_lock);
val = (read_msg->resp->word1 >> 8) & 0xff;
fsp_freemsg(read_msg);
-
if (elog_read_from_fsp_head_state == ELOG_STATE_REJECTED) {
fsp_elog_set_head_state(ELOG_STATE_NONE);
goto elog_read_out;
@@ -232,14 +225,15 @@ static void fsp_elog_read_complete(struct fsp_msg *read_msg)
case FSP_STATUS_DMA_ERROR:
if (elog_read_retries++ < MAX_RETRIES) {
/*
- * for a error response value from FSP, we try to
+ * For an error response value from FSP, we try to
* send fetch sp data mbox command again for three
* times if response from FSP is still not valid
- * we send generic error response to fsp.
+ * we send generic error response to FSP.
*/
fsp_elog_queue_fetch();
break;
}
+
fsp_elog_fetch_failure(val);
break;
@@ -254,7 +248,7 @@ elog_read_out:
fsp_elog_check_and_fetch_head();
}
-/* read error log from FSP through mbox commands */
+/* Read error log from FSP through mbox commands */
static void fsp_elog_queue_fetch(void)
{
int rc;
@@ -270,14 +264,14 @@ static void fsp_elog_queue_fetch(void)
* was empty.
*/
prlog(PR_ERR, "%s: Inconsistent internal list state !\n",
- __func__);
+ __func__);
fsp_elog_set_head_state(ELOG_STATE_NONE);
return;
}
+
fsp_elog_set_head_state(ELOG_STATE_FETCHING);
elog_head_id = entry->log_id;
elog_head_size = entry->log_size;
-
rc = fsp_fetch_data_queue(flags, FSP_DATASET_ERRLOG, elog_head_id,
0, (void *)PSI_DMA_ERRLOG_READ_BUF,
&elog_head_size, fsp_elog_read_complete);
@@ -287,13 +281,13 @@ static void fsp_elog_queue_fetch(void)
}
}
-/* opal interface for powernv to read log size and log ID from sapphire */
+/* OPAL interface for PowerNV to read log size and log ID from Sapphire. */
static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
uint64_t *opal_elog_size, uint64_t *elog_type)
{
struct fsp_log_entry *log_data;
- /* copy type of the error log */
+ /* Copy type of the error log */
*elog_type = ELOG_TYPE_PEL;
/* Check if any OPAL log needs to be reported to the host */
@@ -305,6 +299,7 @@ static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
unlock(&elog_read_lock);
return OPAL_WRONG_STATE;
}
+
log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
if (!log_data) {
/**
@@ -319,6 +314,7 @@ static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
unlock(&elog_read_lock);
return OPAL_WRONG_STATE;
}
+
*opal_elog_id = log_data->log_id;
*opal_elog_size = log_data->log_size;
fsp_elog_set_head_state(ELOG_STATE_HOST_INFO);
@@ -326,20 +322,20 @@ static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
return OPAL_SUCCESS;
}
-/* opal interface for powernv to read log from sapphire */
+/* OPAL interface for PowerNV to read log from Sapphire. */
static int64_t fsp_opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
uint64_t opal_elog_id)
{
int size = opal_elog_size;
struct fsp_log_entry *log_data;
-
- /* Check if any OPAL log needs to be reported to the host */
+ /* Check if any OPAL log needs to be reported to the PowerNV */
if (opal_elog_read(buffer, opal_elog_size, opal_elog_id))
return OPAL_SUCCESS;
+
/*
* Read top entry from list.
- * as we know always top record of the list is fetched from FSP
+ * As we know always top record of the list is fetched from FSP
*/
lock(&elog_read_lock);
if (elog_read_from_fsp_head_state != ELOG_STATE_HOST_INFO) {
@@ -375,23 +371,22 @@ static int64_t fsp_opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
memcpy((void *)buffer, elog_read_buffer, size);
/*
- * once log is read from linux move record from pending
+ * Once log is read from linux move record from pending
* to processed list and delete record from pending list
- * and change state of the log to fetch next record
+ * and change state of the log to fetch next record.
*/
list_del(&log_data->link);
list_add(&elog_read_processed, &log_data->link);
fsp_elog_set_head_state(ELOG_STATE_NONE);
unlock(&elog_read_lock);
-
- /* read error log from FSP */
+ /* Read error log from FSP */
fsp_elog_check_and_fetch_head();
return OPAL_SUCCESS;
}
-/* set state of the log head before fetching the log */
+/* Set state of the log head before fetching the log. */
static void elog_reject_head(void)
{
if (elog_read_from_fsp_head_state == ELOG_STATE_FETCHING)
@@ -400,7 +395,7 @@ static void elog_reject_head(void)
fsp_elog_set_head_state(ELOG_STATE_NONE);
}
-/* opal Interface for powernv to send ack to fsp with log ID */
+/* OPAL interface for PowerNV to send ack to FSP with log ID */
static int64_t fsp_opal_elog_ack(uint64_t ack_id)
{
int rc = 0;
@@ -415,22 +410,26 @@ static int64_t fsp_opal_elog_ack(uint64_t ack_id)
prerror("ELOG: failed to send acknowledgement: %d\n", rc);
return rc;
}
+
lock(&elog_read_lock);
list_for_each_safe(&elog_read_processed, record, next_record, link) {
if (record->log_id != ack_id)
continue;
+
list_del(&record->link);
list_add(&elog_read_free, &record->link);
unlock(&elog_read_lock);
return rc;
}
+
list_for_each_safe(&elog_read_pending, record, next_record, link) {
if (record->log_id != ack_id)
continue;
- /* It means host has sent ACK without reading actual data.
+ /*
+ * It means PowerNV has sent ACK without reading actual data.
* Because of this elog_read_from_fsp_head_state may be
* stuck in wrong state (ELOG_STATE_HOST_INFO) and not able
- * to send remaining ELOGs to host. Hence reset ELOG state
+ * to send remaining ELOGs to PowerNV. Hence reset ELOG state
* and start sending remaining ELOGs.
*/
list_del(&record->link);
@@ -440,14 +439,14 @@ static int64_t fsp_opal_elog_ack(uint64_t ack_id)
fsp_elog_check_and_fetch_head();
return rc;
}
- unlock(&elog_read_lock);
+ unlock(&elog_read_lock);
return OPAL_PARAMETER;
}
/*
- * once linux kexec's it ask to resend all logs which
- * are not acknowledged from linux
+ * Once Linux kexec's it asks to resend all logs which
+ * are not acknowledged from Linux.
*/
static void fsp_opal_resend_pending_logs(void)
{
@@ -457,7 +456,7 @@ static void fsp_opal_resend_pending_logs(void)
elog_enabled = true;
unlock(&elog_read_lock);
- /* Check if any Sapphire logs are pending */
+ /* Check if any Sapphire logs are pending. */
opal_resend_pending_logs();
lock(&elog_read_lock);
@@ -479,7 +478,7 @@ static void fsp_opal_resend_pending_logs(void)
fsp_elog_check_and_fetch_head();
}
-/* Disable ELOG event flag until host is ready to receive event */
+/* Disable ELOG event flag until PowerNV is ready to receive event */
static bool opal_kexec_elog_notify(void *data __unused)
{
lock(&elog_read_lock);
@@ -491,7 +490,7 @@ static bool opal_kexec_elog_notify(void *data __unused)
return true;
}
-/* fsp elog notify function */
+/* FSP elog notify function */
static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
{
int rc = 0;
@@ -499,7 +498,6 @@ static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
uint32_t log_id;
uint32_t log_size;
-
if (cmd_sub_mod != FSP_CMD_ERRLOG_NOTIFICATION)
return false;
@@ -516,30 +514,30 @@ static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
log_id, log_size);
}
- /* take a lock until we take out the node from elog_read_free */
+ /* Take a lock until we take out the node from elog_read_free */
lock(&elog_read_lock);
if (!list_empty(&elog_read_free)) {
- /* Create a new entry in the pending list */
+ /* Create a new entry in the pending list. */
record = list_pop(&elog_read_free, struct fsp_log_entry, link);
record->log_id = log_id;
record->log_size = log_size;
list_add_tail(&elog_read_pending, &record->link);
unlock(&elog_read_lock);
- /* Send response back to FSP for a new elog notify message */
+ /* Send response back to FSP for a new elog notify message. */
rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
1, log_id), fsp_freemsg);
if (rc)
prerror("ELOG: Failed to queue errlog notification"
" response: %d\n", rc);
- /* read error log from FSP */
+ /* Read error log from FSP */
fsp_elog_check_and_fetch_head();
} else {
printf("ELOG: Log entry 0x%08x discarded\n", log_id);
- /* unlock if elog_read_free is empty */
+ /* Unlock if elog_read_free is empty. */
unlock(&elog_read_lock);
rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
@@ -547,9 +545,10 @@ static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
if (rc)
prerror("ELOG: Failed to queue errlog notification"
" response: %d\n", rc);
+
/*
- * if list is full with max record then we
- * send discarded by phyp (condition full) ack to FSP.
+ * If list is full with max record then we send discarded by
+ * phyp (condition full) ack to FSP.
*
* At some point in the future, we'll get notified again.
* This is largely up to FSP as to when they tell us about
@@ -583,13 +582,14 @@ static int init_elog_read_free_list(uint32_t num_entries)
list_add_tail(&elog_read_free, &entry->link);
entry++;
}
+
return 0;
out_err:
return -ENOMEM;
}
-/* fsp elog read init function */
+/* FSP elog read init function */
void fsp_elog_read_init(void)
{
int val = 0;
@@ -607,18 +607,18 @@ void fsp_elog_read_init(void)
fsp_tce_map(PSI_DMA_ERRLOG_READ_BUF, elog_read_buffer,
PSI_DMA_ERRLOG_READ_BUF_SZ);
- /* pre allocate memory for 128 record */
+ /* Pre allocate memory for 128 record */
val = init_elog_read_free_list(ELOG_READ_MAX_RECORD);
if (val != 0)
return;
- /* register Eror log Class D2 */
+ /* Register error log class D2 */
fsp_register_client(&fsp_get_elog_notify, FSP_MCLASS_ERR_LOG);
- /* Register for sync on host reboot call */
+ /* Register for sync on PowerNV reboot call */
opal_add_host_sync_notifier(opal_kexec_elog_notify, NULL);
- /* register opal Interface */
+ /* Register OPAL interface */
opal_register(OPAL_ELOG_READ, fsp_opal_elog_read, 3);
opal_register(OPAL_ELOG_ACK, fsp_opal_elog_ack, 1);
opal_register(OPAL_ELOG_RESEND, fsp_opal_resend_pending_logs, 0);
diff --git a/hw/fsp/fsp-elog-write.c b/hw/fsp/fsp-elog-write.c
index f38c236..2de4ea3 100644
--- a/hw/fsp/fsp-elog-write.c
+++ b/hw/fsp/fsp-elog-write.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 IBM Corp.
+/* Copyright 2013-2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,24 +16,23 @@
/*
- * This code will enable generation and pushing of error log
- * from powernv, sapphire to FSP
- * Critical events from sapphire that needs to be reported
- * will be pushed on to FSP after converting the
- * error log to Platform Error Log (PEL) format.
- * This is termed as WRITE action to FSP.
+ * This code will enable generation and pushing of error log from Sapphire
+ * to FSP.
+ * Critical events from Sapphire that needs to be reported will be pushed
+ * on to FSP after converting the error log to Platform Error Log (PEL) format.
+ * This is termed as write action to FSP.
*/
-#include <skiboot.h>
-#include <fsp.h>
#include <cpu.h>
-#include <lock.h>
#include <errno.h>
+#include <fsp.h>
#include <fsp-elog.h>
-#include <timebase.h>
+#include <lock.h>
+#include <opal-api.h>
#include <pel.h>
#include <pool.h>
-#include <opal-api.h>
+#include <skiboot.h>
+#include <timebase.h>
static LIST_HEAD(elog_write_to_fsp_pending);
static LIST_HEAD(elog_write_to_host_pending);
@@ -43,8 +42,8 @@ static struct lock elog_write_lock = LOCK_UNLOCKED;
static struct lock elog_panic_write_lock = LOCK_UNLOCKED;
static struct lock elog_write_to_host_lock = LOCK_UNLOCKED;
-/* log buffer to copy FSP log for READ */
#define ELOG_WRITE_TO_FSP_BUFFER_SIZE 0x00004000
+/* Log buffer to copy OPAL log for write to FSP. */
static void *elog_write_to_fsp_buffer;
#define ELOG_PANIC_WRITE_BUFFER_SIZE 0x00004000
@@ -59,7 +58,7 @@ static uint32_t elog_write_retries;
static uint32_t elog_plid_fsp_commit = -1;
static enum elog_head_state elog_write_to_host_head_state = ELOG_STATE_NONE;
-/* Need forward declaration because of Circular dependency */
+/* Need forward declaration because of circular dependency */
static int opal_send_elog_to_fsp(void);
static void remove_elog_head_entry(void)
@@ -73,11 +72,13 @@ static void remove_elog_head_entry(void)
if (head->plid == elog_plid_fsp_commit) {
entry = list_pop(&elog_write_to_fsp_pending,
struct errorlog, link);
- opal_elog_complete(entry, elog_write_retries < MAX_RETRIES);
+ opal_elog_complete(entry,
+ elog_write_retries < MAX_RETRIES);
/* Reset the counter */
elog_plid_fsp_commit = -1;
}
}
+
elog_write_retries = 0;
unlock(&elog_write_lock);
}
@@ -91,14 +92,14 @@ static void opal_fsp_write_complete(struct fsp_msg *read_msg)
switch (val) {
case FSP_STATUS_SUCCESS:
- remove_elog_head_entry();
- break;
-
+ remove_elog_head_entry();
+ break;
default:
if (elog_write_retries++ >= MAX_RETRIES) {
remove_elog_head_entry();
prerror("ELOG: Error in writing to FSP (0x%x)!\n", val);
}
+
break;
}
@@ -106,7 +107,7 @@ static void opal_fsp_write_complete(struct fsp_msg *read_msg)
prerror("ELOG: Error sending elog to FSP !\n");
}
-/* write PEL format hex dump of the log to FSP */
+/* Write PEL format hex dump of the log to FSP */
static int64_t fsp_opal_elog_write(size_t opal_elog_size)
{
struct fsp_msg *elog_msg;
@@ -117,12 +118,14 @@ static int64_t fsp_opal_elog_write(size_t opal_elog_size)
prerror("ELOG: Failed to create message for WRITE to FSP\n");
return OPAL_INTERNAL_ERROR;
}
+
if (fsp_queue_msg(elog_msg, opal_fsp_write_complete)) {
fsp_freemsg(elog_msg);
elog_msg = NULL;
prerror("FSP: Error queueing elog update\n");
return OPAL_INTERNAL_ERROR;
}
+
return OPAL_SUCCESS;
}
@@ -160,13 +163,13 @@ bool opal_elog_info(uint64_t *opal_elog_id, uint64_t *opal_elog_size)
rc = true;
}
}
+
unlock(&elog_write_to_host_lock);
return rc;
}
static void opal_commit_elog_in_host(void)
{
-
struct errorlog *buf;
lock(&elog_write_to_host_lock);
@@ -175,14 +178,14 @@ static void opal_commit_elog_in_host(void)
buf = list_top(&elog_write_to_host_pending,
struct errorlog, link);
buf->log_size = create_pel_log(buf,
- (char *)elog_write_to_host_buffer,
- ELOG_WRITE_TO_HOST_BUFFER_SIZE);
+ (char *)elog_write_to_host_buffer,
+ ELOG_WRITE_TO_HOST_BUFFER_SIZE);
fsp_elog_write_set_head_state(ELOG_STATE_FETCHED_DATA);
}
+
unlock(&elog_write_to_host_lock);
}
-
bool opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
uint64_t opal_elog_id)
{
@@ -198,6 +201,7 @@ bool opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
unlock(&elog_write_to_host_lock);
return rc;
}
+
if ((opal_elog_id != log_data->plid) &&
(opal_elog_size != log_data->log_size)) {
unlock(&elog_write_to_host_lock);
@@ -206,12 +210,12 @@ bool opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
memcpy((void *)buffer, elog_write_to_host_buffer,
opal_elog_size);
-
list_del(&log_data->link);
list_add(&elog_write_to_host_processed, &log_data->link);
fsp_elog_write_set_head_state(ELOG_STATE_NONE);
rc = true;
}
+
unlock(&elog_write_to_host_lock);
opal_commit_elog_in_host();
return rc;
@@ -226,9 +230,10 @@ bool opal_elog_ack(uint64_t ack_id)
lock(&elog_write_to_host_lock);
if (!list_empty(&elog_write_to_host_processed)) {
list_for_each_safe(&elog_write_to_host_processed, record,
- next_record, link) {
+ next_record, link) {
if (record->plid != ack_id)
continue;
+
list_del(&record->link);
opal_elog_complete(record, true);
rc = true;
@@ -237,14 +242,15 @@ bool opal_elog_ack(uint64_t ack_id)
if ((!rc) && (!list_empty(&elog_write_to_host_pending))) {
log_data = list_top(&elog_write_to_host_pending,
- struct errorlog, link);
+ struct errorlog, link);
if (ack_id == log_data->plid)
fsp_elog_write_set_head_state(ELOG_STATE_NONE);
list_for_each_safe(&elog_write_to_host_pending, record,
- next_record, link) {
+ next_record, link) {
if (record->plid != ack_id)
continue;
+
list_del(&record->link);
opal_elog_complete(record, true);
rc = true;
@@ -253,6 +259,7 @@ bool opal_elog_ack(uint64_t ack_id)
return rc;
}
}
+
unlock(&elog_write_to_host_lock);
return rc;
}
@@ -267,6 +274,7 @@ void opal_resend_pending_logs(void)
struct errorlog, link);
list_add_tail(&elog_write_to_host_pending, &record->link);
}
+
fsp_elog_write_set_head_state(ELOG_STATE_NONE);
unlock(&elog_write_to_host_lock);
opal_commit_elog_in_host();
@@ -277,9 +285,9 @@ static int opal_send_elog_to_fsp(void)
struct errorlog *head;
int rc = OPAL_SUCCESS;
- /* Convert entry to PEL
- * and push it down to FSP. We wait for the ack from
- * FSP.
+ /*
+ * Convert entry to PEL and push it down to FSP.
+ * Then we wait for the ack from FSP.
*/
lock(&elog_write_lock);
if (!list_empty(&elog_write_to_fsp_pending)) {
@@ -287,12 +295,13 @@ static int opal_send_elog_to_fsp(void)
struct errorlog, link);
elog_plid_fsp_commit = head->plid;
head->log_size = create_pel_log(head,
- (char *)elog_write_to_fsp_buffer,
- ELOG_WRITE_TO_FSP_BUFFER_SIZE);
+ (char *)elog_write_to_fsp_buffer,
+ ELOG_WRITE_TO_FSP_BUFFER_SIZE);
rc = fsp_opal_elog_write(head->log_size);
unlock(&elog_write_lock);
return rc;
}
+
unlock(&elog_write_lock);
return rc;
}
@@ -325,12 +334,13 @@ static int opal_push_logs_sync_to_fsp(struct errorlog *buf)
rc = (elog_msg->resp->word1 >> 8) & 0xff;
fsp_freemsg(elog_msg);
}
- unlock(&elog_panic_write_lock);
+ unlock(&elog_panic_write_lock);
if (rc != OPAL_SUCCESS)
opal_elog_complete(buf, false);
else
opal_elog_complete(buf, true);
+
return rc;
}
@@ -358,6 +368,7 @@ int elog_fsp_commit(struct errorlog *buf)
rc = opal_send_elog_to_fsp();
return rc;
}
+
list_add_tail(&elog_write_to_fsp_pending, &buf->link);
unlock(&elog_write_lock);
return rc;
@@ -365,7 +376,6 @@ int elog_fsp_commit(struct errorlog *buf)
static void elog_append_write_to_host(struct errorlog *buf)
{
-
lock(&elog_write_to_host_lock);
if (list_empty(&elog_write_to_host_pending)) {
list_add(&elog_write_to_host_pending, &buf->link);
@@ -386,36 +396,36 @@ static void elog_timeout_poll(void *data __unused)
if (list_empty(&elog_write_to_fsp_pending)) {
unlock(&elog_write_lock);
return;
- } else {
- head = list_top(&elog_write_to_fsp_pending,
- struct errorlog, link);
- now = mftb();
- if ((tb_compare(now, head->elog_timeout) == TB_AAFTERB) ||
+ }
+
+ head = list_top(&elog_write_to_fsp_pending, struct errorlog, link);
+ now = mftb();
+ if ((tb_compare(now, head->elog_timeout) == TB_AAFTERB) ||
(tb_compare(now, head->elog_timeout) == TB_AEQUALB)) {
- entry = list_pop(&elog_write_to_fsp_pending,
- struct errorlog, link);
- unlock(&elog_write_lock);
- elog_append_write_to_host(entry);
- } else
- unlock(&elog_write_lock);
+ entry = list_pop(&elog_write_to_fsp_pending,
+ struct errorlog, link);
+ unlock(&elog_write_lock);
+ elog_append_write_to_host(entry);
+ } else {
+ unlock(&elog_write_lock);
}
}
-/* fsp elog init function */
+/* FSP elog init function */
void fsp_elog_write_init(void)
{
if (!fsp_present())
return;
elog_panic_write_buffer = memalign(TCE_PSIZE,
- ELOG_PANIC_WRITE_BUFFER_SIZE);
+ ELOG_PANIC_WRITE_BUFFER_SIZE);
if (!elog_panic_write_buffer) {
prerror("FSP: could not allocate ELOG_PANIC_WRITE_BUFFER!\n");
return;
}
elog_write_to_fsp_buffer = memalign(TCE_PSIZE,
- ELOG_WRITE_TO_FSP_BUFFER_SIZE);
+ ELOG_WRITE_TO_FSP_BUFFER_SIZE);
if (!elog_write_to_fsp_buffer) {
prerror("FSP: could not allocate ELOG_WRITE_BUFFER!\n");
return;
diff --git a/hw/ipmi/ipmi-sel.c b/hw/ipmi/ipmi-sel.c
index d26059b..369cebc 100644
--- a/hw/ipmi/ipmi-sel.c
+++ b/hw/ipmi/ipmi-sel.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 IBM Corp.
+/* Copyright 2013-2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -146,14 +146,14 @@ void ipmi_sel_init(void)
memset(&ipmi_sel_panic_msg, 0, sizeof(struct ipmi_sel_panic_msg));
ipmi_sel_panic_msg.msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
- IPMI_RESERVE_SEL, ipmi_elog_poll,
- NULL, NULL, IPMI_MAX_REQ_SIZE, 2);
+ IPMI_RESERVE_SEL, ipmi_elog_poll,
+ NULL, NULL, IPMI_MAX_REQ_SIZE, 2);
}
/*
- * Allocate IPMI message
- * For normal event, allocate memory using ipmi_mkmsg and for PANIC
- * event, use pre-allocated buffer.
+ * Allocate IPMI message:
+ * For normal event, allocate memory using ipmi_mkmsg and for PANIC
+ * event, use pre-allocated buffer.
*/
static struct ipmi_msg *ipmi_sel_alloc_msg(struct errorlog *elog_buf)
{
@@ -175,13 +175,12 @@ static struct ipmi_msg *ipmi_sel_alloc_msg(struct errorlog *elog_buf)
ipmi_sel_panic_msg.busy = true;
unlock(&ipmi_sel_panic_msg.lock);
- ipmi_init_msg(msg, IPMI_DEFAULT_INTERFACE,
- IPMI_RESERVE_SEL, ipmi_elog_poll,
- elog_buf, IPMI_MAX_REQ_SIZE, 2);
+ ipmi_init_msg(msg, IPMI_DEFAULT_INTERFACE, IPMI_RESERVE_SEL,
+ ipmi_elog_poll, elog_buf, IPMI_MAX_REQ_SIZE, 2);
} else {
msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_RESERVE_SEL,
- ipmi_elog_poll, elog_buf,
- NULL, IPMI_MAX_REQ_SIZE, 2);
+ ipmi_elog_poll, elog_buf, NULL,
+ IPMI_MAX_REQ_SIZE, 2);
}
return msg;
@@ -196,6 +195,7 @@ static void ipmi_sel_free_msg(struct ipmi_msg *msg)
} else {
ipmi_free_msg(msg);
}
+
msg = NULL;
}
@@ -277,15 +277,15 @@ static void ipmi_log_sel_event_error(struct ipmi_msg *msg)
static void ipmi_log_sel_event_complete(struct ipmi_msg *msg)
{
- prlog(PR_INFO, "SEL: New event logged [ID : %x%x]\n",
- msg->data[1], msg->data[0]);
+ prlog(PR_INFO, "SEL: New event logged [ID : %x%x]\n", msg->data[1],
+ msg->data[0]);
ipmi_sel_free_msg(msg);
}
/* Log SEL event with eSEL record ID */
-static void ipmi_log_sel_event(struct ipmi_msg *msg,
- uint8_t event_severity, uint16_t esel_record_id)
+static void ipmi_log_sel_event(struct ipmi_msg *msg, uint8_t event_severity,
+ uint16_t esel_record_id)
{
/* Fill required SEL event fields */
ipmi_update_sel_record(event_severity, esel_record_id);
@@ -335,23 +335,23 @@ static void ipmi_elog_poll(struct ipmi_msg *msg)
size_t req_size;
ipmi_init_esel_record();
-
if (msg->cmd == IPMI_CMD(IPMI_RESERVE_SEL)) {
first = true;
reservation_id = msg->data[0];
reservation_id |= msg->data[1] << 8;
if (!reservation_id) {
- /* According to specification we should never
+ /*
+ * According to specification we should never
* get here, but just in case we do we cancel
- * sending the message. */
+ * sending the message.
+ */
prerror("Invalid reservation id");
opal_elog_complete(elog_buf, false);
ipmi_sel_free_msg(msg);
return;
}
- pel_size = create_pel_log(elog_buf,
- pel_buf, IPMI_MAX_PEL_SIZE);
+ pel_size = create_pel_log(elog_buf, pel_buf, IPMI_MAX_PEL_SIZE);
esel_size = pel_size + sizeof(struct sel_record);
esel_index = 0;
record_id = 0;
@@ -362,9 +362,11 @@ static void ipmi_elog_poll(struct ipmi_msg *msg)
/* Start or continue the IPMI_PARTIAL_ADD_SEL */
if (esel_index >= esel_size) {
- /* We're all done. Invalidate the resevation id to
+ /*
+	 * We're all done. Invalidate the reservation id to
* ensure we get an error if we cut in on another eSEL
- * message. */
+ * message.
+ */
reservation_id = 0;
esel_index = 0;
@@ -396,14 +398,14 @@ static void ipmi_elog_poll(struct ipmi_msg *msg)
if (first) {
first = false;
- memcpy(&msg->data[ESEL_HDR_SIZE],
- &sel_record, sizeof(struct sel_record));
+ memcpy(&msg->data[ESEL_HDR_SIZE], &sel_record,
+ sizeof(struct sel_record));
esel_index = sizeof(struct sel_record);
msg->req_size = esel_index + ESEL_HDR_SIZE;
} else {
pel_index = esel_index - sizeof(struct sel_record);
- memcpy(&msg->data[ESEL_HDR_SIZE],
- &pel_buf[pel_index], msg->req_size - ESEL_HDR_SIZE);
+ memcpy(&msg->data[ESEL_HDR_SIZE], &pel_buf[pel_index],
+ msg->req_size - ESEL_HDR_SIZE);
esel_index += msg->req_size - ESEL_HDR_SIZE;
}
@@ -416,25 +418,27 @@ int ipmi_elog_commit(struct errorlog *elog_buf)
struct ipmi_msg *msg;
/* Only log events that needs attention */
- if (elog_buf->event_severity < OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT ||
- elog_buf->elog_origin != ORG_SAPPHIRE) {
+ if (elog_buf->event_severity <
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT ||
+ elog_buf->elog_origin != ORG_SAPPHIRE) {
prlog(PR_INFO, "dropping non severe PEL event\n");
opal_elog_complete(elog_buf, true);
return 0;
}
- /* We pass a large request size in to mkmsg so that we have a
+ /*
+ * We pass a large request size in to mkmsg so that we have a
* large enough allocation to reuse the message to pass the
- * PEL data via a series of partial add commands. */
+ * PEL data via a series of partial add commands.
+ */
msg = ipmi_sel_alloc_msg(elog_buf);
if (!msg) {
opal_elog_complete(elog_buf, false);
return OPAL_RESOURCE;
}
- msg->error = ipmi_elog_error;
+ msg->error = ipmi_elog_error;
msg->req_size = 0;
-
if (elog_buf->event_severity == OPAL_ERROR_PANIC)
ipmi_queue_msg_sync(msg);
else
@@ -491,6 +495,7 @@ static void sel_power(uint8_t power)
} else {
opal_queue_msg(OPAL_MSG_SHUTDOWN, NULL, NULL, SOFT_OFF);
}
+
break;
case SOFT_REBOOT:
prlog(PR_NOTICE, "Soft reboot requested\n");
@@ -501,6 +506,7 @@ static void sel_power(uint8_t power)
} else {
opal_queue_msg(OPAL_MSG_SHUTDOWN, NULL, NULL, SOFT_REBOOT);
}
+
break;
default:
prlog(PR_WARNING, "requested bad power state: %02x\n",
@@ -510,7 +516,7 @@ static void sel_power(uint8_t power)
static uint32_t occ_sensor_id_to_chip(uint8_t sensor, uint32_t *chip)
{
- /* todo: lookup sensor ID node in the DT, and map to a chip id */
+	/* TODO: Lookup sensor ID node in the DT, and map to a chip ID */
(void)sensor;
*chip = 0;
return 0;
@@ -553,9 +559,8 @@ void ipmi_parse_sel(struct ipmi_msg *msg)
msg->resp_size, sel.netfun, sel.cmd);
/* Only accept OEM SEL messages */
- if (sel.id[0] != SEL_OEM_ID_0 ||
- sel.id[1] != SEL_OEM_ID_1 ||
- sel.type != SEL_RECORD_TYPE_OEM) {
+ if (sel.id[0] != SEL_OEM_ID_0 || sel.id[1] != SEL_OEM_ID_1 ||
+ sel.type != SEL_RECORD_TYPE_OEM) {
prlog(PR_WARNING, "unknown SEL %02x%02x (type %02x)\n",
sel.id[0], sel.id[1], sel.type);
return;
--
2.7.4
More information about the Skiboot
mailing list