[Skiboot] [PATCH 1/2] ipmi/opal: Provide ipmi_dequeue_msg() wrapper and invoke it on completion

Neelesh Gupta neelegup at linux.vnet.ibm.com
Thu Jun 25 03:10:32 AEST 2015


Provide an ipmi_dequeue_msg() wrapper around the backend dequeue
function. Clients are now responsible for calling this interface
when their command completes.
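
For clients, this means the completion callback now owns the dequeue
step before the message is freed or re-queued. A minimal sketch of such
a handler, assuming a hypothetical my_cmd_complete() callback
(ipmi_dequeue_msg() and ipmi_free_msg() are the real interfaces touched
by this patch):

static void my_cmd_complete(struct ipmi_msg *msg)
{
	/* Remove the message from the backend queue first... */
	ipmi_dequeue_msg(msg);

	/* ...then release it (or re-queue it instead, as ipmi-fru does). */
	ipmi_free_msg(msg);
}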

Signed-off-by: Neelesh Gupta <neelegup at linux.vnet.ibm.com>
---
 core/ipmi.c             |   25 +++++++++++++++++++++++--
 hw/bt.c                 |   27 ++++++++++++---------------
 hw/ipmi/ipmi-fru.c      |    5 +++++
 hw/ipmi/ipmi-opal.c     |    2 ++
 hw/ipmi/ipmi-rtc.c      |    1 +
 hw/ipmi/ipmi-sel.c      |    4 ++++
 hw/ipmi/ipmi-watchdog.c |    1 +
 include/ipmi.h          |    3 +++
 8 files changed, 51 insertions(+), 17 deletions(-)

diff --git a/core/ipmi.c b/core/ipmi.c
index bee2987..1fbe56a 100644
--- a/core/ipmi.c
+++ b/core/ipmi.c
@@ -59,9 +59,15 @@ void ipmi_init_msg(struct ipmi_msg *msg, int interface,
 	msg->user_data = user_data;
 }
 
+static void ipmi_msg_done(struct ipmi_msg *msg)
+{
+	ipmi_dequeue_msg(msg);
+	ipmi_free_msg(msg);
+}
+
 struct ipmi_msg *ipmi_mkmsg_simple(uint32_t code, void *req_data, size_t req_size)
 {
-	return ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, code, ipmi_free_msg, NULL,
+	return ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, code, ipmi_msg_done, NULL,
 			  req_data, req_size, 0);
 }
 
@@ -83,7 +89,7 @@ struct ipmi_msg *ipmi_mkmsg(int interface, uint32_t code,
 		      resp_size);
 
 	/* Commands are free to over ride this if they want to handle errors */
-	msg->error = ipmi_free_msg;
+	msg->error = ipmi_msg_done;
 
 	if (req_data)
 		memcpy(msg->data, req_data, req_size);
@@ -119,6 +125,19 @@ int ipmi_queue_msg(struct ipmi_msg *msg)
 	return msg->backend->queue_msg(msg);
 }
 
+int ipmi_dequeue_msg(struct ipmi_msg *msg)
+{
+	if (!ipmi_present())
+		return OPAL_HARDWARE;
+
+	if (!msg) {
+		prerror("%s: Attempting to dequeue NULL message\n", __func__);
+		return OPAL_PARAMETER;
+	}
+
+	return msg->backend->dequeue_msg(msg);
+}
+
 void ipmi_cmd_done(uint8_t cmd, uint8_t netfn, uint8_t cc, struct ipmi_msg *msg)
 {
 	msg->cc = cc;
@@ -175,6 +194,7 @@ static void ipmi_read_event_complete(struct ipmi_msg *msg)
 	/* Handle power control & PNOR handshake events */
 	ipmi_parse_sel(msg);
 
+	ipmi_dequeue_msg(msg);
 	ipmi_free_msg(msg);
 }
 
@@ -182,6 +202,7 @@ static void ipmi_get_message_flags_complete(struct ipmi_msg *msg)
 {
 	uint8_t flags = msg->data[0];
 
+	ipmi_dequeue_msg(msg);
 	ipmi_free_msg(msg);
 
 	prlog(PR_DEBUG, "IPMI Get Message Flags: %02x\n", flags);
diff --git a/hw/bt.c b/hw/bt.c
index 0c89d9f..9aa86b9 100644
--- a/hw/bt.c
+++ b/hw/bt.c
@@ -133,17 +133,6 @@ static inline void bt_set_state(enum bt_states next_state)
 	bt.state = next_state;
 }
 
-/* Must be called with bt.lock held */
-static void bt_msg_del(struct bt_msg *bt_msg)
-{
-	list_del(&bt_msg->link);
-	bt.queue_len--;
-	unlock(&bt.lock);
-	ipmi_cmd_done(bt_msg->ipmi_msg.cmd, bt_msg->ipmi_msg.netfn + (1 << 2),
-		      IPMI_TIMEOUT_ERR, &bt_msg->ipmi_msg);
-	lock(&bt.lock);
-}
-
 static void bt_init_interface(void)
 {
 	/* Clear interrupt condition & enable irq */
@@ -275,8 +264,6 @@ static void bt_get_resp(void)
 
 	bt_set_state(BT_STATE_IDLE);
 
-	list_del(&bt_msg->link);
-	bt.queue_len--;
 	unlock(&bt.lock);
 
 	/*
@@ -312,7 +299,12 @@ static void bt_expire_old_msg(void)
 			bt_outb(BT_CTRL_H2B_ATN, BT_CTRL);
 		} else {
 			BT_ERR(bt_msg, "Timeout sending message");
-			bt_msg_del(bt_msg);
+
+			unlock(&bt.lock);
+			ipmi_cmd_done(bt_msg->ipmi_msg.cmd,
+				      bt_msg->ipmi_msg.netfn + (1 << 2),
+				      IPMI_TIMEOUT_ERR, &bt_msg->ipmi_msg);
+			lock(&bt.lock);
 
 			/* Timing out a message is inherently racy as the BMC
 			   may start writing just as we decide to kill the
@@ -409,7 +401,12 @@ static void bt_add_msg(struct bt_msg *bt_msg)
 		bt_msg = list_tail(&bt.msgq, struct bt_msg, link);
 		assert(bt_msg);
 		BT_ERR(bt_msg, "Removed from queue");
-		bt_msg_del(bt_msg);
+
+		unlock(&bt.lock);
+		ipmi_cmd_done(bt_msg->ipmi_msg.cmd,
+			      bt_msg->ipmi_msg.netfn + (1 << 2),
+			      IPMI_TIMEOUT_ERR, &bt_msg->ipmi_msg);
+		lock(&bt.lock);
 	}
 }
 
diff --git a/hw/ipmi/ipmi-fru.c b/hw/ipmi/ipmi-fru.c
index ddcb3d6..f356c8b 100644
--- a/hw/ipmi/ipmi-fru.c
+++ b/hw/ipmi/ipmi-fru.c
@@ -177,6 +177,11 @@ static void fru_write_complete(struct ipmi_msg *msg)
 	u8 write_count = msg->data[0];
 	u16 offset;
 
+	/* Dequeue the message, but don't free immediately as we may queue it
+	 * up again
+	 */
+	ipmi_dequeue_msg(msg);
+
 	msg->data[WRITE_INDEX] += write_count;
 	msg->data[REMAINING] -= write_count;
 	if (msg->data[REMAINING] == 0)
diff --git a/hw/ipmi/ipmi-opal.c b/hw/ipmi/ipmi-opal.c
index 237f8c0..7fa05ed 100644
--- a/hw/ipmi/ipmi-opal.c
+++ b/hw/ipmi/ipmi-opal.c
@@ -27,6 +27,7 @@ static struct list_head msgq = LIST_HEAD_INIT(msgq);
 
 static void opal_send_complete(struct ipmi_msg *msg)
 {
+	ipmi_dequeue_msg(msg);
 	lock(&msgq_lock);
 	list_add_tail(&msgq, &msg->link);
 	opal_update_pending_evt(ipmi_backend->opal_event_ipmi_recv,
@@ -111,6 +112,7 @@ static int64_t opal_ipmi_recv(uint64_t interface,
 
 	/* Add one as the completion code is returned in the message data */
 	*msg_len = msg->resp_size + sizeof(struct opal_ipmi_msg) + 1;
+
 	ipmi_free_msg(msg);
 
 	return OPAL_SUCCESS;
diff --git a/hw/ipmi/ipmi-rtc.c b/hw/ipmi/ipmi-rtc.c
index 01fb3e1..afe55d2 100644
--- a/hw/ipmi/ipmi-rtc.c
+++ b/hw/ipmi/ipmi-rtc.c
@@ -42,6 +42,7 @@ static void get_sel_time_complete(struct ipmi_msg *msg)
 	gmtime_r(&time, &tm);
 	rtc_cache_update(&tm);
 	time_status = updated;
+	ipmi_dequeue_msg(msg);
 	ipmi_free_msg(msg);
 }
 
diff --git a/hw/ipmi/ipmi-sel.c b/hw/ipmi/ipmi-sel.c
index 7007f83..a227547 100644
--- a/hw/ipmi/ipmi-sel.c
+++ b/hw/ipmi/ipmi-sel.c
@@ -66,6 +66,8 @@ struct oem_sel {
 
 static void ipmi_elog_error(struct ipmi_msg *msg)
 {
+	ipmi_dequeue_msg(msg);
+
 	if (msg->cc == IPMI_LOST_ARBITRATION_ERR)
 		/* Retry due to SEL erase */
 		ipmi_queue_msg(msg);
@@ -103,6 +105,8 @@ static void ipmi_elog_poll(struct ipmi_msg *msg)
 	struct errorlog *elog_buf = (struct errorlog *) msg->user_data;
 	size_t req_size;
 
+	ipmi_dequeue_msg(msg);
+
 	if (msg->cmd == IPMI_CMD(IPMI_RESERVE_SEL)) {
 		reservation_id = msg->data[0];
 		reservation_id |= msg->data[1] << 8;
diff --git a/hw/ipmi/ipmi-watchdog.c b/hw/ipmi/ipmi-watchdog.c
index 498d4c6..658314a 100644
--- a/hw/ipmi/ipmi-watchdog.c
+++ b/hw/ipmi/ipmi-watchdog.c
@@ -54,6 +54,7 @@ static void ipmi_wdt_complete(struct ipmi_msg *msg)
 		schedule_timer(&wdt_timer, msecs_to_tb(
 				       (WDT_TIMEOUT - WDT_MARGIN)*100));
 
+	ipmi_dequeue_msg(msg);
 	ipmi_free_msg(msg);
 }
 
diff --git a/include/ipmi.h b/include/ipmi.h
index 77e4b9a..36b4344 100644
--- a/include/ipmi.h
+++ b/include/ipmi.h
@@ -207,6 +207,9 @@ int ipmi_queue_msg_head(struct ipmi_msg *msg);
  * messages callback has been called. */
 void ipmi_queue_msg_sync(struct ipmi_msg *msg);
 
+/* Dequeue a message that was previously queued with ipmi_queue_msg() */
+int ipmi_dequeue_msg(struct ipmi_msg *msg);
+
 /* Process a completed message */
 void ipmi_cmd_done(uint8_t cmd, uint8_t netfn, uint8_t cc, struct ipmi_msg *msg);
 


