[Cbe-oss-dev] [PATCH 7/10 v2] MARS: workload queue api fix

Yuji Mano Yuji.Mano at am.sony.com
Sat Aug 30 11:24:22 EST 2008


This modifies the internal workload queue API in the host-side library. When
scheduling a new workload, the workload model may need to initialize parts of
the workload context once it has been reserved in an available slot of the
workload queue.

To allow the workload queue to internally handle its own mutex protection of
each queue block, the workload queue APIs mars_workload_queue_add,
mars_workload_queue_remove, and mars_workload_queue_schedule are each split
into 3 separate stages: *_begin, *_end, and *_cancel.

*_begin reserves a slot in the workload queue, atomically sets its state to
"ADDING", "REMOVING", or "SCHEDULING" according to the call, and returns a
pointer to the reserved workload context.

Between the *_begin and *_end calls, the caller can modify the workload
context as necessary.

*_cancel is provided so that, if the caller encounters an error between the
*_begin and *_end calls, it can cancel the add/remove/schedule process without
calling *_end.

This patch also removes the unnecessary 'count' member from the workload queue
header.

Signed-off-by: Yuji Mano <yuji.mano at am.sony.com>

---
v2:
 - complete change in implementation from v1 patch
 - update patch description to explain the new implementation

 include/common/mars/mars_workload_types.h |   12 -
 include/host/mars/mars_workload_queue.h   |   32 +++-
 src/host/lib/mars_task.c                  |  120 +++++++++-------
 src/host/lib/mars_workload_queue.c        |  219 +++++++++++++++++++++---------
 src/mpu/kernel/mars_kernel_scheduler.c    |    4 
 5 files changed, 257 insertions(+), 130 deletions(-)

--- a/include/common/mars/mars_workload_types.h
+++ b/include/common/mars/mars_workload_types.h
@@ -50,10 +50,13 @@ extern "C" {
 #define MARS_WORKLOAD_TYPE_TASK			0x01	/* workload type */
 
 #define MARS_WORKLOAD_STATE_NONE		0x00	/* workload undefined */
-#define MARS_WORKLOAD_STATE_READY		0x01	/* ready to schedule */
-#define MARS_WORKLOAD_STATE_WAITING		0x02	/* waiting for sync */
-#define MARS_WORKLOAD_STATE_RUNNING		0x03	/* currently running */
-#define MARS_WORKLOAD_STATE_FINISHED		0x04	/* not allow schedule */
+#define MARS_WORKLOAD_STATE_ADDING		0x01	/* adding now */
+#define MARS_WORKLOAD_STATE_REMOVING		0x02	/* removing now */
+#define MARS_WORKLOAD_STATE_SCHEDULING		0x03	/* scheduling now */
+#define MARS_WORKLOAD_STATE_READY		0x04	/* ready to schedule */
+#define MARS_WORKLOAD_STATE_WAITING		0x05	/* waiting for sync */
+#define MARS_WORKLOAD_STATE_RUNNING		0x06	/* currently running */
+#define MARS_WORKLOAD_STATE_FINISHED		0x07	/* not allow schedule */
 
 #define MARS_WORKLOAD_PRIORITY_MIN		0x00	/* minimum priority */
 #define MARS_WORKLOAD_PRIORITY_MAX		0xff	/* maximum priority */
@@ -84,7 +87,6 @@ struct mars_workload_context {
 struct mars_workload_queue_header {
 	uint64_t queue_ea;
 	uint64_t context_ea;
-	uint32_t count;
 	uint8_t flag;
 } __attribute__((aligned(MARS_WORKLOAD_QUEUE_HEADER_ALIGN)));
 
--- a/include/host/mars/mars_workload_queue.h
+++ b/include/host/mars/mars_workload_queue.h
@@ -46,16 +46,30 @@ extern "C" {
 
 int workload_queue_initialize(struct mars_workload_queue *queue);
 int workload_queue_finalize(struct mars_workload_queue *queue);
-int workload_queue_add(struct mars_workload_queue *queue, uint16_t *id,
-			struct mars_workload_context *workload, uint8_t type);
-int workload_queue_remove(struct mars_workload_queue *queue, uint16_t id,
-			struct mars_workload_context *workload);
-int workload_queue_context(struct mars_workload_queue *queue, uint16_t id,
+
+int workload_queue_add_begin(struct mars_workload_queue *queue, uint16_t *id,
+				uint8_t type,
+				struct mars_workload_context **workload);
+int workload_queue_add_end(struct mars_workload_queue *queue, uint16_t id);
+int workload_queue_add_cancel(struct mars_workload_queue *queue, uint16_t id);
+
+int workload_queue_remove_begin(struct mars_workload_queue *queue, uint16_t id,
+				struct mars_workload_context **workload);
+int workload_queue_remove_end(struct mars_workload_queue *queue, uint16_t id);
+int workload_queue_remove_cancel(struct mars_workload_queue *queue,
+				uint16_t id);
+
+int workload_queue_schedule_begin(struct mars_workload_queue *queue,
+				uint16_t id, uint8_t priority,
+				struct mars_workload_context **workload);
+int workload_queue_schedule_end(struct mars_workload_queue *queue, uint16_t id);
+int workload_queue_schedule_cancel(struct mars_workload_queue *queue,
+				uint16_t id);
+
+int workload_queue_wait(struct mars_workload_queue *queue, uint16_t id,
+			struct mars_workload_context **workload);
+int workload_queue_try_wait(struct mars_workload_queue *queue, uint16_t id,
 			struct mars_workload_context **workload);
-int workload_queue_schedule(struct mars_workload_queue *queue, uint16_t id,
-			uint8_t priority);
-int workload_queue_wait(struct mars_workload_queue *queue, uint16_t id);
-int workload_queue_try_wait(struct mars_workload_queue *queue, uint16_t id);
 int workload_queue_signal_send(struct mars_workload_queue *queue, uint16_t id);
 
 #if defined(__cplusplus)
--- a/src/host/lib/mars_task.c
+++ b/src/host/lib/mars_task.c
@@ -54,56 +54,67 @@ int mars_task_initialize(struct mars_con
 	MARS_CHECK_RET(params, MARS_ERROR_NULL);
 
 	int ret;
-	struct mars_task_context task;
+	struct mars_task_context *task;
 
 	/* initialize task context */
-	task.id.mars_context_ea = (uint64_t)(uintptr_t)mars;
-
-	/* parse the elf parameter */
-	ret = mars_elf_parse(params->elf_image, &task.exec, &task.exec_size,
-				&task.bss_size, &task.vaddr, &task.entry);
-	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
+	id->mars_context_ea = (uint64_t)(uintptr_t)mars;
 
 	/* copy the task name into task id */
 	if (params->name) {
 		MARS_CHECK_RET(strlen(params->name) < MARS_TASK_NAME_LEN_MAX,
 				MARS_ERROR_PARAMS);
-		strcpy((char *)&task.id.name, params->name);
+		strcpy((char *)&id->name, params->name);
 	}
 
+	/* begin process to add the task to the workload queue */
+	ret = workload_queue_add_begin(mars->workload_queue, &id->workload_id,
+				MARS_WORKLOAD_TYPE_TASK,
+				(struct mars_workload_context **)&task);
+	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
+
+	/* initialize task specific id */
+	memcpy(&task->id, id, sizeof(struct mars_task_id));
+
+	/* parse the elf parameter */
+	ret = mars_elf_parse(params->elf_image, &task->exec, &task->exec_size,
+				&task->bss_size, &task->vaddr, &task->entry);
+	MARS_CHECK_CLEANUP_RET(ret == MARS_SUCCESS,
+			workload_queue_add_cancel(mars->workload_queue,
+				id->workload_id),
+			ret);
+
 	/* allocate the task context area if specified */
 	if (params->context_save_size) {
-		MARS_CHECK_RET(params->context_save_size <=
-			MARS_TASK_CONTEXT_SAVE_SIZE_MAX, MARS_ERROR_PARAMS);
+		MARS_CHECK_CLEANUP_RET(params->context_save_size <=
+			MARS_TASK_CONTEXT_SAVE_SIZE_MAX,
+			workload_queue_add_cancel(mars->workload_queue,
+				id->workload_id),
+			MARS_ERROR_PARAMS);
 
-		task.context_save_size =
+		task->context_save_size =
 			params->context_save_size;
-		task.context_save_area = (uint64_t)(uintptr_t)
+		task->context_save_area = (uint64_t)(uintptr_t)
 			memalign(MARS_TASK_CONTEXT_SAVE_ALIGN,
-				task.context_save_size);
+				task->context_save_size);
 
-		MARS_CHECK_RET(task.context_save_area, MARS_ERROR_MEMORY);
+		MARS_CHECK_CLEANUP_RET(task->context_save_area,
+			workload_queue_add_cancel(mars->workload_queue,
+				id->workload_id),
+			MARS_ERROR_MEMORY);
 	} else {
-		task.context_save_size = 0;
-		task.context_save_area = 0;
+		task->context_save_size = 0;
+		task->context_save_area = 0;
 	}
 
-	mars_mutex_lock(mars->mutex);
-
-	/* add the task to the workload queue */
-	ret = workload_queue_add(mars->workload_queue, &task.id.workload_id,
-				(struct mars_workload_context *)&task,
-				MARS_WORKLOAD_TYPE_TASK);
+	/* end process to add the task to the workload queue */
+	ret = workload_queue_add_end(mars->workload_queue, id->workload_id);
 	MARS_CHECK_CLEANUP_RET(ret == MARS_SUCCESS,
-				free((void *)task.context_save_area), ret);
-	MARS_PRINT_TASK_CONTEXT(&task);
-
-	/* copy the task id into return id */
-	*id = task.id;
-
-	mars_mutex_unlock(mars->mutex);
+			workload_queue_add_cancel(mars->workload_queue,
+				id->workload_id),
+			ret);
 
 	MARS_PRINT("Initialize Task Context %d\n", task->id.workload_id);
+	MARS_PRINT_TASK_CONTEXT(task);
 
 	return MARS_SUCCESS;
 }
@@ -115,23 +126,23 @@ int mars_task_finalize(struct mars_task_
 	MARS_CHECK_RET(id->workload_id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int ret;
+	struct mars_task_context *task;
 	struct mars_context *mars = (struct mars_context *)id->mars_context_ea;
-	struct mars_task_context task;
 
-	mars_mutex_lock(mars->mutex);
-
-	/* remove the task from the workload queue */
-	ret = workload_queue_remove(mars->workload_queue, id->workload_id,
-				(struct mars_workload_context *)&task);
+	/* begin process to remove the task from the workload queue */
+	ret = workload_queue_remove_begin(mars->workload_queue, id->workload_id,
+				(struct mars_workload_context **)&task);
 	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
 	/* free the allocated task context area if it has one */
-	if (task.context_save_size)
-		free((void *)task.context_save_area);
+	if (task->context_save_size)
+		free((void *)task->context_save_area);
 
-	mars_mutex_unlock(mars->mutex);
+	/* end process to remove the task from the workload queue */
+	ret = workload_queue_remove_end(mars->workload_queue, id->workload_id);
+	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
-	MARS_PRINT("Finalize Task Context %d\n", task.id.workload_id);
+	MARS_PRINT("Finalize Task Context %d\n", id->workload_id);
 
 	return MARS_SUCCESS;
 }
@@ -145,14 +156,13 @@ int mars_task_schedule(struct mars_task_
 	MARS_CHECK_RET(id->workload_id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int ret;
+	struct mars_task_context *task;
 	struct mars_context *mars = (struct mars_context *)id->mars_context_ea;
-	struct mars_task_context *task = NULL;
 
-	mars_mutex_lock(mars->mutex);
-
-	/* get workload context pointer from the workload queue */
-	ret = workload_queue_context(mars->workload_queue, id->workload_id,
-				(struct mars_workload_context **)&task);
+	/* begin process to schedule the workload in the workload queue */
+	ret = workload_queue_schedule_begin(mars->workload_queue,
+					id->workload_id, priority,
+					(struct mars_workload_context **)&task);
 	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
 	/* initialize task specific context variables */
@@ -160,14 +170,12 @@ int mars_task_schedule(struct mars_task_
 	if (args)
 		memcpy(&task->args, args, sizeof(struct mars_task_args));
 
-	/* schedule the workload in the workload queue */
-	ret = workload_queue_schedule(mars->workload_queue, id->workload_id,
-				priority);
+	/* end process to schedule the workload in the workload queue */
+	ret = workload_queue_schedule_end(mars->workload_queue,
+					id->workload_id);
 	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
-	mars_mutex_unlock(mars->mutex);
-
-	MARS_PRINT("Schedule Task Context %d\n", task->id.workload_id);
+	MARS_PRINT("Schedule Task Context %d\n", id->workload_id);
 
 	return MARS_SUCCESS;
 }
@@ -179,9 +187,12 @@ int mars_task_wait(struct mars_task_id *
 	MARS_CHECK_RET(id->workload_id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int ret;
+	struct mars_task_context *task;
 	struct mars_context *mars = (struct mars_context *)id->mars_context_ea;
 
-	ret = workload_queue_wait(mars->workload_queue, id->workload_id);
+	/* blocking wait for workload completion */
+	ret = workload_queue_wait(mars->workload_queue, id->workload_id,
+				(struct mars_workload_context **)&task);
 	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
 	return MARS_SUCCESS;
@@ -194,9 +205,12 @@ int mars_task_try_wait(struct mars_task_
 	MARS_CHECK_RET(id->workload_id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int ret;
+	struct mars_task_context *task;
 	struct mars_context *mars = (struct mars_context *)id->mars_context_ea;
 
-	ret = workload_queue_try_wait(mars->workload_queue, id->workload_id);
+	/* non-blocking wait for workload completion */
+	ret = workload_queue_try_wait(mars->workload_queue, id->workload_id,
+				(struct mars_workload_context **)&task);
 	MARS_CHECK_RET(ret == MARS_SUCCESS, ret);
 
 	return MARS_SUCCESS;
--- a/src/host/lib/mars_workload_queue.c
+++ b/src/host/lib/mars_workload_queue.c
@@ -52,7 +52,6 @@ int workload_queue_initialize(struct mar
 
 	queue->header.queue_ea = (uint64_t)(uintptr_t)queue;
 	queue->header.context_ea = (uint64_t)(uintptr_t)&queue->context;
-	queue->header.count = 0;
 	queue->header.flag = MARS_FLAG_NONE;
 
 	return MARS_SUCCESS;
@@ -61,68 +60,109 @@ int workload_queue_initialize(struct mar
 int workload_queue_finalize(struct mars_workload_queue *queue)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
-	MARS_CHECK_RET(!queue->header.count, MARS_ERROR_STATE);
+
+	int block;
+	int index;
+
+	uint16_t id = 0;
+
+	/* check for any tasks left in workload queue */
+	while (id < MARS_WORKLOAD_MAX) {
+		block = id / MARS_WORKLOAD_PER_BLOCK;
+		index = id % MARS_WORKLOAD_PER_BLOCK;
+
+		if (queue->block[block].bits[index].state !=
+			MARS_WORKLOAD_STATE_NONE)
+			break;
+
+		id++;
+	}
+
+	/* fail if any workload is still left in the queue */
+	MARS_CHECK_RET(id == MARS_WORKLOAD_MAX, MARS_ERROR_STATE);
 
 	queue->header.flag = MARS_FLAG_EXIT;
 
 	return MARS_SUCCESS;
 }
 
-int workload_queue_add(struct mars_workload_queue *queue, uint16_t *id,
-			struct mars_workload_context *workload, uint8_t type)
+int workload_queue_add_begin(struct mars_workload_queue *queue, uint16_t *id,
+				uint8_t type,
+				struct mars_workload_context **workload)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
-	MARS_CHECK_RET(workload, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id, MARS_ERROR_NULL);
 	MARS_CHECK_RET(type == MARS_WORKLOAD_TYPE_TASK, MARS_ERROR_PARAMS);
-	MARS_CHECK_RET(queue->header.count < MARS_WORKLOAD_MAX,
-			MARS_ERROR_LIMIT);
 
 	int block = 0;
 	int index = 0;
 
-	/* search through workload queue for open slot */
-	if (queue->header.count) {
-		int i;
-		int j;
-
-		for (i = 0; i < MARS_WORKLOAD_NUM_BLOCKS; i++) {
-			for (j = 0; j < MARS_WORKLOAD_PER_BLOCK; j++) {
-				if (queue->block[i].bits[j].state ==
-					MARS_WORKLOAD_STATE_NONE) {
-					block = i;
-					index = j;
-					i = MARS_WORKLOAD_NUM_BLOCKS;
-					j = MARS_WORKLOAD_PER_BLOCK;
-				}
-			}
-		}
-	}
+	*id = 0;
+
+	/* find first available empty slot */
+	while (*id < MARS_WORKLOAD_MAX) {
+		block = *id / MARS_WORKLOAD_PER_BLOCK;
+		index = *id % MARS_WORKLOAD_PER_BLOCK;
+
+		if (queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_NONE)
+			break;
 
-	/* set return id of added workload context */
-	*id = MARS_WORKLOAD_PER_BLOCK * block + index;
+		(*id)++;
+	}
 
-	/* copy workload context into workload queue */
-	memcpy(&queue->context[*id], workload,
-		sizeof(struct mars_workload_context));
+	/* no empty slot found - workload queue is full */
+	MARS_CHECK_RET(*id < MARS_WORKLOAD_MAX, MARS_ERROR_LIMIT);
 
-	/* update workload queue header info */
+	/* set type and set state to adding */
 	queue->block[block].bits[index].type = type;
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_ADDING;
+
+	/* if requested set workload context pointer to return */
+	if (workload)
+		*workload = &queue->context[*id];
+
+	return MARS_SUCCESS;
+}
+
+int workload_queue_add_end(struct mars_workload_queue *queue, uint16_t id)
+{
+	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
+	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
+
+	int block = id / MARS_WORKLOAD_PER_BLOCK;
+	int index = id % MARS_WORKLOAD_PER_BLOCK;
+
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_ADDING, MARS_ERROR_STATE);
+
+	/* set state to finished */
 	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
-	queue->block[block].bits[index].priority = MARS_WORKLOAD_PRIORITY_MIN;
-	queue->block[block].bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
-	queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
-	queue->block[block].bits[index].wait = MARS_WORKLOAD_ID_NONE;
-	queue->header.count++;
 
 	return MARS_SUCCESS;
 }
 
-int workload_queue_remove(struct mars_workload_queue *queue, uint16_t id,
-			struct mars_workload_context *workload)
+int workload_queue_add_cancel(struct mars_workload_queue *queue, uint16_t id)
+{
+	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
+	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
+
+	int block = id / MARS_WORKLOAD_PER_BLOCK;
+	int index = id % MARS_WORKLOAD_PER_BLOCK;
+
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_ADDING, MARS_ERROR_STATE);
+
+	/* set state back to none state */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_NONE;
+
+	return MARS_SUCCESS;
+}
+
+int workload_queue_remove_begin(struct mars_workload_queue *queue, uint16_t id,
+				struct mars_workload_context **workload)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
-	MARS_CHECK_RET(workload, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int block = id / MARS_WORKLOAD_PER_BLOCK;
@@ -131,42 +171,53 @@ int workload_queue_remove(struct mars_wo
 	MARS_CHECK_RET(queue->block[block].bits[index].state ==
 			MARS_WORKLOAD_STATE_FINISHED, MARS_ERROR_STATE);
 
-	/* copy workload context out from workload queue */
-	memcpy(workload, &queue->context[id],
-		sizeof(struct mars_workload_context));
+	/* set state to removing */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_REMOVING;
+
+	/* if requested set workload context pointer to return */
+	if (workload)
+		*workload = &queue->context[id];
+
+	return MARS_SUCCESS;
+}
+
+int workload_queue_remove_end(struct mars_workload_queue *queue, uint16_t id)
+{
+	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
+	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
+
+	int block = id / MARS_WORKLOAD_PER_BLOCK;
+	int index = id % MARS_WORKLOAD_PER_BLOCK;
+
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_REMOVING, MARS_ERROR_STATE);
 
-	/* update workload queue info */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_NONE;
 	queue->block[block].bits[index].type = MARS_WORKLOAD_TYPE_NONE;
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_NONE;
-	queue->block[block].bits[index].priority = MARS_WORKLOAD_PRIORITY_MIN;
-	queue->block[block].bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
-	queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
-	queue->block[block].bits[index].wait = MARS_WORKLOAD_ID_NONE;
-	queue->header.count--;
 
 	return MARS_SUCCESS;
 }
 
-int workload_queue_context(struct mars_workload_queue *queue, uint16_t id,
-			struct mars_workload_context **workload)
+int workload_queue_remove_cancel(struct mars_workload_queue *queue, uint16_t id)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
-	MARS_CHECK_RET(workload, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
 
 	int block = id / MARS_WORKLOAD_PER_BLOCK;
 	int index = id % MARS_WORKLOAD_PER_BLOCK;
 
-	MARS_CHECK_RET(queue->block[block].bits[index].state !=
-			MARS_WORKLOAD_STATE_NONE, MARS_ERROR_STATE);
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_REMOVING, MARS_ERROR_STATE);
 
-	*workload = &queue->context[id];
+	/* set state back to finished */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
 
 	return MARS_SUCCESS;
 }
 
-int workload_queue_schedule(struct mars_workload_queue *queue, uint16_t id,
-			uint8_t priority)
+int workload_queue_schedule_begin(struct mars_workload_queue *queue,
+				uint16_t id, uint8_t priority,
+				struct mars_workload_context **workload)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
@@ -177,16 +228,57 @@ int workload_queue_schedule(struct mars_
 	MARS_CHECK_RET(queue->block[block].bits[index].state ==
 			MARS_WORKLOAD_STATE_FINISHED, MARS_ERROR_STATE);
 
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_READY;
+	/* reset workload queue bits and set state to scheduling */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_SCHEDULING;
 	queue->block[block].bits[index].priority = priority;
 	queue->block[block].bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
 	queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
 	queue->block[block].bits[index].wait = MARS_WORKLOAD_ID_NONE;
 
+	/* if requested set workload context pointer to return */
+	if (workload)
+		*workload = &queue->context[id];
+
+	return MARS_SUCCESS;
+}
+
+int workload_queue_schedule_end(struct mars_workload_queue *queue, uint16_t id)
+{
+	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
+	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
+
+	int block = id / MARS_WORKLOAD_PER_BLOCK;
+	int index = id % MARS_WORKLOAD_PER_BLOCK;
+
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_SCHEDULING, MARS_ERROR_STATE);
+
+	/* set state to ready */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_READY;
+
+	return MARS_SUCCESS;
+}
+
+int workload_queue_schedule_cancel(struct mars_workload_queue *queue,
+				uint16_t id)
+{
+	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
+	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
+
+	int block = id / MARS_WORKLOAD_PER_BLOCK;
+	int index = id % MARS_WORKLOAD_PER_BLOCK;
+
+	MARS_CHECK_RET(queue->block[block].bits[index].state ==
+			MARS_WORKLOAD_STATE_SCHEDULING, MARS_ERROR_STATE);
+
+	/* set state back to finished */
+	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
+
 	return MARS_SUCCESS;
 }
 
-int workload_queue_wait(struct mars_workload_queue *queue, uint16_t id)
+int workload_queue_wait(struct mars_workload_queue *queue, uint16_t id,
+			struct mars_workload_context **workload)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
@@ -201,10 +293,15 @@ int workload_queue_wait(struct mars_work
 		MARS_WORKLOAD_STATE_FINISHED)
 		sched_yield();
 
+	/* if requested set workload context pointer to return */
+	if (workload)
+		*workload = &queue->context[id];
+
 	return MARS_SUCCESS;
 }
 
-int workload_queue_try_wait(struct mars_workload_queue *queue, uint16_t id)
+int workload_queue_try_wait(struct mars_workload_queue *queue, uint16_t id,
+			struct mars_workload_context **workload)
 {
 	MARS_CHECK_RET(queue, MARS_ERROR_NULL);
 	MARS_CHECK_RET(id < MARS_WORKLOAD_MAX, MARS_ERROR_PARAMS);
@@ -219,6 +316,10 @@ int workload_queue_try_wait(struct mars_
 		MARS_WORKLOAD_STATE_FINISHED)
 		return MARS_ERROR_BUSY;
 
+	/* if requested set workload context pointer to return */
+	if (workload)
+		*workload = &queue->context[id];
+
 	return MARS_SUCCESS;
 }
 
--- a/src/mpu/kernel/mars_kernel_scheduler.c
+++ b/src/mpu/kernel/mars_kernel_scheduler.c
@@ -271,10 +271,6 @@ int scheduler(void)
 	if (queue_header.flag == MARS_FLAG_EXIT)
 		return MARS_KERNEL_STATUS_EXIT;
 
-	/* queue empty so return idle*/
-	if (!queue_header.count)
-		return MARS_KERNEL_STATUS_IDLE;
-
 	/* reserve next workload to run or return idle status if none found */
 	status = reserve_workload();
 





