[Cbe-oss-dev] [PATCH 04/17] MARS/core: Host API: use EAs
Yuji Mano
yuji.mano at am.sony.com
Wed Nov 26 14:39:48 EST 2008
From: Kazunori Asayama <asayama at sm.sony.co.jp>
Change the type of MARS object references in the host API from pointers to uint64_t effective addresses (EAs).
Signed-off-by: Kazunori Asayama <asayama at sm.sony.co.jp>
---
core/include/host/mars/mutex.h | 20 +-
core/include/host/mars/workload_queue.h | 10 -
core/src/host/lib/context.c | 5
core/src/host/lib/context_internal_types.h | 2
core/src/host/lib/mutex.c | 31 ++--
core/src/host/lib/workload_queue.c | 200 +++++++++++++++++------------
6 files changed, 157 insertions(+), 111 deletions(-)
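
For reviewers: a minimal, hypothetical caller-side sketch of the mutex interface change (illustration only, not part of this patch; error handling omitted). The signatures match the header changes below.

	uint64_t mutex_ea;
	int ret;

	/* before: mars_mutex_create(struct mars_mutex **mutex)      */
	/* after:  the mutex is referred to by its 64-bit EA instead */
	ret = mars_mutex_create(&mutex_ea);
	if (ret == MARS_SUCCESS) {
		mars_mutex_lock(mutex_ea);
		/* ... critical section ... */
		mars_mutex_unlock(mutex_ea);
		mars_mutex_destroy(mutex_ea);
	}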
--- a/core/include/host/mars/mutex.h
+++ b/core/include/host/mars/mutex.h
@@ -57,13 +57,13 @@ extern "C" {
* This function creates a mutex instance that can be locked or unlocked
* from both host and MPU to restrict concurrent accesses.
*
- * \param[in] mutex - address of pointer to mutex instance
+ * \param[in] mutex_ea - pointer to 64-bit address of mutex instance
* \return
* MARS_SUCCESS - successfully created mutex
* \n MARS_ERROR_NULL - null pointer is specified
* \n MARS_ERROR_MEMORY - instance not aligned properly
*/
-int mars_mutex_create(struct mars_mutex **mutex);
+int mars_mutex_create(uint64_t *mutex_ea);
/**
* \ingroup group_mars_mutex
@@ -71,13 +71,13 @@ int mars_mutex_create(struct mars_mutex
*
* This function destroys a mutex instance.
*
- * \param[in] mutex - address of pointer to mutex instance
+ * \param[in] mutex_ea - 64-bit address of mutex instance
* \return
* MARS_SUCCESS - successfully destroyed mutex
* \n MARS_ERROR_NULL - null pointer is specified
* \n MARS_ERROR_ALIGN - instance not aligned properly
*/
-int mars_mutex_destroy(struct mars_mutex *mutex);
+int mars_mutex_destroy(uint64_t mutex_ea);
/**
* \ingroup group_mars_mutex
@@ -86,13 +86,13 @@ int mars_mutex_destroy(struct mars_mutex
* This function resets a mutex instance and forces it into an unlocked state
* regardless of whether it is locked or unlocked.
*
- * \param[in] mutex - address of pointer to mutex instance
+ * \param[in] mutex_ea - 64-bit address of mutex instance
* \return
* MARS_SUCCESS - successfully reset mutex
* \n MARS_ERROR_NULL - null pointer is specified
* \n MARS_ERROR_ALIGN - instance not aligned properly
*/
-int mars_mutex_reset(struct mars_mutex *mutex);
+int mars_mutex_reset(uint64_t mutex_ea);
/**
* \ingroup group_mars_mutex
@@ -100,13 +100,13 @@ int mars_mutex_reset(struct mars_mutex *
*
* This function locks a mutex and blocks other requests to lock it.
*
- * \param[in] mutex - pointer to mutex instance
+ * \param[in] mutex_ea - 64-bit address of mutex instance
* \return
* MARS_SUCCESS - successfully locked mutex
* \n MARS_ERROR_NULL - null pointer is specified
* \n MARS_ERROR_ALIGN - instance not aligned properly
*/
-int mars_mutex_lock(struct mars_mutex *mutex);
+int mars_mutex_lock(uint64_t mutex_ea);
/**
* \ingroup group_mars_mutex
@@ -114,14 +114,14 @@ int mars_mutex_lock(struct mars_mutex *m
*
* This function unlocks a previously locked mutex to allow other lock requests.
*
- * \param[in] mutex - pointer to mutex instance
+ * \param[in] mutex_ea - 64-bit address of mutex instance
* \return
* MARS_SUCCESS - successfully unlocked mutex
* \n MARS_ERROR_NULL - null pointer is specified
* \n MARS_ERROR_ALIGN - instance not aligned properly
* \n MARS_ERROR_STATE - instance not in locked state
*/
-int mars_mutex_unlock(struct mars_mutex *mutex);
+int mars_mutex_unlock(uint64_t mutex_ea);
#if defined(__cplusplus)
}
--- a/core/include/host/mars/workload_queue.h
+++ b/core/include/host/mars/workload_queue.h
@@ -52,7 +52,7 @@ int mars_workload_queue_exit(struct mars
int mars_workload_queue_add_begin(struct mars_context *mars,
uint16_t *id,
- struct mars_workload_context **workload);
+ uint64_t *workload_ea);
int mars_workload_queue_add_end(struct mars_context *mars,
uint16_t id);
int mars_workload_queue_add_cancel(struct mars_context *mars,
@@ -60,7 +60,7 @@ int mars_workload_queue_add_cancel(struc
int mars_workload_queue_remove_begin(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload);
+ uint64_t *workload_ea);
int mars_workload_queue_remove_end(struct mars_context *mars,
uint16_t id);
int mars_workload_queue_remove_cancel(struct mars_context *mars,
@@ -68,7 +68,7 @@ int mars_workload_queue_remove_cancel(st
int mars_workload_queue_schedule_begin(struct mars_context *mars,
uint16_t id, uint8_t priority,
- struct mars_workload_context **workload);
+ uint64_t *workload_ea);
int mars_workload_queue_schedule_end(struct mars_context *mars,
uint16_t id);
int mars_workload_queue_schedule_cancel(struct mars_context *mars,
@@ -76,10 +76,10 @@ int mars_workload_queue_schedule_cancel(
int mars_workload_queue_wait(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload);
+ uint64_t *workload_ea);
int mars_workload_queue_try_wait(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload);
+ uint64_t *workload_ea);
int mars_workload_queue_signal_send(struct mars_context *mars,
uint16_t id);
--- a/core/src/host/lib/context.c
+++ b/core/src/host/lib/context.c
@@ -117,8 +117,7 @@ static int mpu_contexts_create(struct ma
params->kernel_id = i;
params->mars_context_ea =
mars_ptr_to_ea(mars);
- params->workload_queue_ea =
- mars_ptr_to_ea(mars->workload_queue);
+ params->workload_queue_ea = mars->workload_queue_ea;
ret = pthread_create(&mars->mpu_context_threads[i], NULL,
mpu_context_thread, params);
@@ -299,7 +298,7 @@ int mars_context_destroy(struct mars_con
}
/* destroy workload queue */
- if (mars->workload_queue) {
+ if (mars->workload_queue_ea) {
ret = mars_workload_queue_destroy(mars);
if (ret != MARS_SUCCESS)
goto error;
--- a/core/src/host/lib/context_internal_types.h
+++ b/core/src/host/lib/context_internal_types.h
@@ -45,7 +45,7 @@ struct mars_context {
/* parameters for the MARS kernel */
struct mars_kernel_params *kernel_params;
/* process queue where process requests are added */
- struct mars_workload_queue *workload_queue;
+ uint64_t workload_queue_ea;
/* array of mpu context threads */
pthread_t *mpu_context_threads;
/* num of mpu context threads */
--- a/core/src/host/lib/mutex.c
+++ b/core/src/host/lib/mutex.c
@@ -43,11 +43,11 @@
#include "mars/error.h"
#include "mars/mutex.h"
-int mars_mutex_create(struct mars_mutex **mutex_ret)
+int mars_mutex_create(uint64_t *mutex_ea_ret)
{
struct mars_mutex *mutex;
- if (!mutex_ret)
+ if (!mutex_ea_ret)
return MARS_ERROR_NULL;
mutex = (struct mars_mutex *)mars_ea_memalign(
@@ -58,24 +58,26 @@ int mars_mutex_create(struct mars_mutex
mutex->lock = MARS_MUTEX_UNLOCKED;
__lwsync();
- *mutex_ret = mutex;
+ *mutex_ea_ret = mars_ptr_to_ea(mutex);
return MARS_SUCCESS;
}
-int mars_mutex_destroy(struct mars_mutex *mutex)
+int mars_mutex_destroy(uint64_t mutex_ea)
{
- if (!mutex)
+ if (!mutex_ea)
return MARS_ERROR_NULL;
- mars_ea_free(mutex);
+ mars_ea_free(mars_ea_to_ptr(mutex_ea));
return MARS_SUCCESS;
}
-int mars_mutex_reset(struct mars_mutex *mutex)
+int mars_mutex_reset(uint64_t mutex_ea)
{
- if (!mutex)
+ struct mars_mutex *mutex = mars_ea_to_ptr(mutex_ea);
+
+ if (!mutex_ea)
return MARS_ERROR_NULL;
mutex->lock = MARS_MUTEX_UNLOCKED;
@@ -84,10 +86,13 @@ int mars_mutex_reset(struct mars_mutex *
return MARS_SUCCESS;
}
-int mars_mutex_lock(struct mars_mutex *mutex)
+int mars_mutex_lock(uint64_t mutex_ea)
{
- if (!mutex)
+ struct mars_mutex *mutex = mars_ea_to_ptr(mutex_ea);
+
+ if (!mutex_ea) {
return MARS_ERROR_NULL;
+ }
do {
do {
@@ -99,9 +104,11 @@ int mars_mutex_lock(struct mars_mutex *m
return MARS_SUCCESS;
}
-int mars_mutex_unlock(struct mars_mutex *mutex)
+int mars_mutex_unlock(uint64_t mutex_ea)
{
- if (!mutex)
+ struct mars_mutex *mutex = mars_ea_to_ptr(mutex_ea);
+
+ if (!mutex_ea)
return MARS_ERROR_NULL;
if (mutex->lock != MARS_MUTEX_LOCKED)
return MARS_ERROR_STATE;
--- a/core/src/host/lib/workload_queue.c
+++ b/core/src/host/lib/workload_queue.c
@@ -50,16 +50,30 @@
#include "kernel_internal_types.h"
#include "workload_internal_types.h"
+static inline uint64_t workload_queue_block_ea(uint64_t queue_ea, int block)
+{
+ return queue_ea +
+ offsetof(struct mars_workload_queue, block) +
+ sizeof(struct mars_workload_queue_block) * block;
+}
+
+static inline uint64_t workload_queue_workload_ea(uint64_t context_ea, int workload_id)
+{
+ return context_ea +
+ sizeof(struct mars_workload_context) * workload_id;
+}
+
int mars_workload_queue_create(struct mars_context *mars)
{
struct mars_workload_queue *queue;
int block;
int index;
+ uint64_t queue_ea;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (mars->workload_queue)
+ if (mars->workload_queue_ea)
return MARS_ERROR_STATE;
/* allocate workload instance */
@@ -68,14 +82,16 @@ int mars_workload_queue_create(struct ma
if (!queue)
return MARS_ERROR_MEMORY;
+ queue_ea = mars_ptr_to_ea(queue);
+
/* initialize workload queue header */
queue->header.flag = MARS_WORKLOAD_QUEUE_FLAG_NONE;
- queue->header.queue_ea = mars_ptr_to_ea(queue);
+ queue->header.queue_ea = queue_ea;
queue->header.context_ea = mars_ptr_to_ea(&queue->context);
/* initialize workload queue blocks */
for (block = 0; block < MARS_WORKLOAD_NUM_BLOCKS; block++) {
- mars_mutex_reset((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_reset(workload_queue_block_ea(queue_ea, block));
for (index = 0; index < MARS_WORKLOAD_PER_BLOCK; index++) {
uint64_t *bits = &queue->block[block].bits[index];
@@ -94,7 +110,7 @@ int mars_workload_queue_create(struct ma
}
/* set the workload queue instance in the mars context */
- mars->workload_queue = queue;
+ mars->workload_queue_ea = queue_ea;
return MARS_SUCCESS;
}
@@ -109,10 +125,10 @@ int mars_workload_queue_destroy(struct m
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
- queue = mars->workload_queue;
+ queue = mars_ea_to_ptr(mars->workload_queue_ea);
/* check for any workloads left in workload queue */
while (id < MARS_WORKLOAD_MAX) {
@@ -130,7 +146,7 @@ int mars_workload_queue_destroy(struct m
mars_ea_free(queue);
/* set the workload queue to NULL for error checking */
- mars->workload_queue = NULL;
+ mars->workload_queue_ea = 0;
return MARS_SUCCESS;
}
@@ -142,10 +158,10 @@ int mars_workload_queue_exit(struct mars
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
- queue = mars->workload_queue;
+ queue = mars_ea_to_ptr(mars->workload_queue_ea);
queue->header.flag = MARS_WORKLOAD_QUEUE_FLAG_EXIT;
@@ -154,9 +170,10 @@ int mars_workload_queue_exit(struct mars
int mars_workload_queue_add_begin(struct mars_context *mars,
uint16_t *id,
- struct mars_workload_context **workload)
+ uint64_t *workload_ea)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block = 0;
int index = 0;
uint64_t *bits;
@@ -164,14 +181,15 @@ int mars_workload_queue_add_begin(struct
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (!id)
return MARS_ERROR_NULL;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* get bits from workload queue block */
bits = &queue->block[block].bits[index];
@@ -186,31 +204,31 @@ int mars_workload_queue_add_begin(struct
index = 0;
mars_mutex_unlock(
- (struct mars_mutex *)&queue->block[block]);
+ workload_queue_block_ea(queue_ea, block));
if (++block == MARS_WORKLOAD_NUM_BLOCKS)
return MARS_ERROR_LIMIT;
mars_mutex_lock(
- (struct mars_mutex *)&queue->block[block]);
+ workload_queue_block_ea(queue_ea, block));
}
bits = &queue->block[block].bits[index];
}
/* check for valid state */
if (MARS_BITS_GET(bits, STATE) != MARS_WORKLOAD_STATE_NONE) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
/* set state to adding */
MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_ADDING);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
/* if requested set workload context pointer to return */
- if (workload)
- *workload = &queue->context[*id];
+ if (workload_ea)
+ *workload_ea = workload_queue_workload_ea(queue->header.context_ea, *id);
return MARS_SUCCESS;
}
@@ -219,6 +237,7 @@ int mars_workload_queue_add_end(struct m
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
uint64_t *bits;
@@ -226,32 +245,33 @@ int mars_workload_queue_add_end(struct m
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* get bits from workload queue block */
bits = &queue->block[block].bits[index];
/* check for valid state */
if (MARS_BITS_GET(bits, STATE) != MARS_WORKLOAD_STATE_ADDING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
/* reset workload queue bits and set state to finished state */
MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_FINISHED);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
@@ -260,29 +280,31 @@ int mars_workload_queue_add_cancel(struc
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_ADDING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -290,39 +312,41 @@ int mars_workload_queue_add_cancel(struc
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_NONE);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
int mars_workload_queue_remove_begin(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload)
+ uint64_t *workload_ea)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_FINISHED) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -330,11 +354,11 @@ int mars_workload_queue_remove_begin(str
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_REMOVING);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
/* if requested set workload context pointer to return */
- if (workload)
- *workload = &queue->context[id];
+ if (workload_ea)
+ *workload_ea = workload_queue_workload_ea(queue->header.context_ea, id);
return MARS_SUCCESS;
}
@@ -343,29 +367,31 @@ int mars_workload_queue_remove_end(struc
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_REMOVING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -373,7 +399,7 @@ int mars_workload_queue_remove_end(struc
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_NONE);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
@@ -382,29 +408,31 @@ int mars_workload_queue_remove_cancel(st
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_REMOVING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -412,39 +440,41 @@ int mars_workload_queue_remove_cancel(st
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_FINISHED);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
int mars_workload_queue_schedule_begin(struct mars_context *mars,
uint16_t id, uint8_t priority,
- struct mars_workload_context **workload)
+ uint64_t *workload_ea)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_FINISHED) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -460,11 +490,11 @@ int mars_workload_queue_schedule_begin(s
MARS_BITS_SET(&queue->block[block].bits[index], WAIT_ID,
MARS_WORKLOAD_ID_NONE);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
/* if requested set workload context pointer to return */
- if (workload)
- *workload = &queue->context[id];
+ if (workload_ea)
+ *workload_ea = workload_queue_workload_ea(queue->header.context_ea, id);
return MARS_SUCCESS;
}
@@ -473,29 +503,31 @@ int mars_workload_queue_schedule_end(str
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_SCHEDULING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -503,7 +535,7 @@ int mars_workload_queue_schedule_end(str
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_READY);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
@@ -512,29 +544,31 @@ int mars_workload_queue_schedule_cancel(
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_SCHEDULING) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -542,28 +576,30 @@ int mars_workload_queue_schedule_cancel(
MARS_BITS_SET(&queue->block[block].bits[index], STATE,
MARS_WORKLOAD_STATE_FINISHED);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
int mars_workload_queue_wait(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload)
+ uint64_t *workload_ea)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
@@ -579,29 +615,31 @@ int mars_workload_queue_wait(struct mars
}
/* if requested set workload context pointer to return */
- if (workload)
- *workload = &queue->context[id];
+ if (workload_ea)
+ *workload_ea = workload_queue_workload_ea(queue->header.context_ea, id);
return MARS_SUCCESS;
}
int mars_workload_queue_try_wait(struct mars_context *mars,
uint16_t id,
- struct mars_workload_context **workload)
+ uint64_t *workload_ea)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
@@ -618,8 +656,8 @@ int mars_workload_queue_try_wait(struct
return MARS_ERROR_BUSY;
/* if requested set workload context pointer to return */
- if (workload)
- *workload = &queue->context[id];
+ if (workload_ea)
+ *workload_ea = workload_queue_workload_ea(queue->header.context_ea, id);
return MARS_SUCCESS;
}
@@ -628,29 +666,31 @@ int mars_workload_queue_signal_send(stru
uint16_t id)
{
struct mars_workload_queue *queue;
+ uint64_t queue_ea;
int block;
int index;
/* check function params */
if (!mars)
return MARS_ERROR_NULL;
- if (!mars->workload_queue)
+ if (!mars->workload_queue_ea)
return MARS_ERROR_STATE;
if (id >= MARS_WORKLOAD_MAX)
return MARS_ERROR_PARAMS;
- queue = mars->workload_queue;
+ queue_ea = mars->workload_queue_ea;
+ queue = mars_ea_to_ptr(queue_ea);
/* calculate block/index from id */
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_lock(workload_queue_block_ea(queue_ea, block));
/* check for valid state */
if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
MARS_WORKLOAD_STATE_NONE) {
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_ERROR_STATE;
}
@@ -658,7 +698,7 @@ int mars_workload_queue_signal_send(stru
MARS_BITS_SET(&queue->block[block].bits[index], SIGNAL,
MARS_WORKLOAD_SIGNAL_ON);
- mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
+ mars_mutex_unlock(workload_queue_block_ea(queue_ea, block));
return MARS_SUCCESS;
}
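
A similar hedged sketch of host-side use of the updated workload queue interface (hypothetical caller code, not part of this patch): the EA returned by the *_begin calls can be converted back to a host pointer with mars_ea_to_ptr(), as the library itself does internally.

	uint16_t id;
	uint64_t workload_ea;
	struct mars_workload_context *workload;
	int ret;

	ret = mars_workload_queue_add_begin(mars, &id, &workload_ea);
	if (ret == MARS_SUCCESS) {
		/* on the host, the EA maps back to a local pointer */
		workload = mars_ea_to_ptr(workload_ea);
		/* ... initialize the workload context ... */
		ret = mars_workload_queue_add_end(mars, id);
	}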