[Cbe-oss-dev] [PATCH 02/11] MARS: Replace workload queue block bit fields
Yuji Mano
yuji.mano at am.sony.com
Fri Sep 12 05:34:27 EST 2008
This replaces the use of C bit fields for the workload queue block bits with
explicit bitwise shift/mask operations on a plain uint64_t. Bit field layout
is implementation-defined, so open-coded shifts and masks are more portable
across compilers and ABIs.
Signed-off-by: Yuji Mano <yuji.mano at am.sony.com>
---
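Note (not part of the patch): below is a minimal, self-contained sketch of how
the new MARS_BITS_GET/MARS_BITS_SET accessors pack and unpack fields in the
64-bit block word. The shifts, masks, casts and macro bodies mirror the
definitions added to mars_workload_types.h in this patch; the main() driver,
the reduced set of fields and the example values are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define MARS_BITS_SHIFT_STATE          56
#define MARS_BITS_SHIFT_PRIORITY       48

#define MARS_BITS_MASK_STATE           0x0f00000000000000ULL
#define MARS_BITS_MASK_PRIORITY        0x00ff000000000000ULL

#define MARS_BITS_CAST_STATE           uint8_t
#define MARS_BITS_CAST_PRIORITY        uint8_t

#define MARS_BITS_GET(bits, name) (MARS_BITS_CAST_##name) \
	((*(bits) & MARS_BITS_MASK_##name) >> MARS_BITS_SHIFT_##name)

#define MARS_BITS_SET(bits, name, val) \
	(*(bits)) = ((*(bits) & ~MARS_BITS_MASK_##name) | \
	((uint64_t)(val) << MARS_BITS_SHIFT_##name))

int main(void)
{
	uint64_t bits = 0;

	/* pack a state and a priority into the single 64-bit word */
	MARS_BITS_SET(&bits, STATE, 0x3);
	MARS_BITS_SET(&bits, PRIORITY, 0x7f);

	/* unpack them again; each GET masks out its field and shifts it down */
	printf("state=%u priority=%u bits=0x%016llx\n",
	       MARS_BITS_GET(&bits, STATE),
	       MARS_BITS_GET(&bits, PRIORITY),
	       (unsigned long long)bits);

	return 0;
}

Each field therefore occupies a fixed bit range of the word regardless of
compiler, which is the portability gain over the previous bit field struct.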
include/common/mars/mars_workload_types.h | 53 +++++++++--
src/host/lib/mars_workload_queue.c | 134 ++++++++++++++++++------------
src/mpu/kernel/mars_kernel_scheduler.c | 62 ++++++++-----
src/mpu/kernel/mars_kernel_workload.c | 47 ++++++----
4 files changed, 193 insertions(+), 103 deletions(-)
--- a/include/common/mars/mars_workload_types.h
+++ b/include/common/mars/mars_workload_types.h
@@ -76,7 +76,46 @@ extern "C" {
#define MARS_WORKLOAD_QUEUE_ALIGN 128 /* align to 128 bytes */
#define MARS_WORKLOAD_QUEUE_HEADER_ALIGN 128 /* align to 128 bytes */
#define MARS_WORKLOAD_QUEUE_BLOCK_ALIGN 128 /* align to 128 bytes */
-#define MARS_WORKLOAD_QUEUE_BLOCK_BITS_ALIGN 8 /* align to 8 bytes */
+
+/*
+ * MARS workload queue block bits
+ * ----------------------------------------------------------------------------
+ * |[63...60]|[59...56]|[55....48]|[ 47 ]|[46....32]|[31.....16]|[15......0]|
+ * ----------------------------------------------------------------------------
+ * | 4-bits | 4-bits | 8-bits | 1-bit | 15-bits | 16-bits | 16-bits |
+ * ----------------------------------------------------------------------------
+ * | TYPE | STATE | PRIORITY | SIGNAL | RESERVED | WAIT_ID | COUNTER |
+ * ----------------------------------------------------------------------------
+ */
+#define MARS_BITS_SIZE 64
+
+#define MARS_BITS_SHIFT_TYPE 60
+#define MARS_BITS_SHIFT_STATE 56
+#define MARS_BITS_SHIFT_PRIORITY 48
+#define MARS_BITS_SHIFT_SIGNAL 47
+#define MARS_BITS_SHIFT_WAIT_ID 16
+#define MARS_BITS_SHIFT_COUNTER 0
+
+#define MARS_BITS_MASK_TYPE 0xf000000000000000ULL
+#define MARS_BITS_MASK_STATE 0x0f00000000000000ULL
+#define MARS_BITS_MASK_PRIORITY 0x00ff000000000000ULL
+#define MARS_BITS_MASK_SIGNAL 0x0000800000000000ULL
+#define MARS_BITS_MASK_WAIT_ID 0x00000000ffff0000ULL
+#define MARS_BITS_MASK_COUNTER 0x000000000000ffffULL
+
+#define MARS_BITS_CAST_TYPE uint8_t
+#define MARS_BITS_CAST_STATE uint8_t
+#define MARS_BITS_CAST_PRIORITY uint8_t
+#define MARS_BITS_CAST_SIGNAL uint8_t
+#define MARS_BITS_CAST_WAIT_ID uint16_t
+#define MARS_BITS_CAST_COUNTER uint16_t
+
+#define MARS_BITS_GET(bits, name) (MARS_BITS_CAST_##name) \
+ ((*(bits) & MARS_BITS_MASK_##name) >> MARS_BITS_SHIFT_##name)
+
+#define MARS_BITS_SET(bits, name, val) \
+ (*bits) = ((*(bits) & ~MARS_BITS_MASK_##name) | \
+ ((uint64_t)(val) << MARS_BITS_SHIFT_##name))
/* mars workload context */
struct mars_workload_context {
@@ -90,21 +129,11 @@ struct mars_workload_queue_header {
uint8_t flag;
} __attribute__((aligned(MARS_WORKLOAD_QUEUE_HEADER_ALIGN)));
-/* 8 byte workload queue block bits structure */
-struct mars_workload_queue_block_bits {
- uint64_t type:4;
- uint64_t state:4;
- uint64_t priority:8;
- uint64_t signal:1;
- uint64_t wait:16;
- uint64_t counter:16;
-} __attribute__((aligned(MARS_WORKLOAD_QUEUE_BLOCK_BITS_ALIGN)));
-
/* 128 byte workload queue block structure */
struct mars_workload_queue_block {
uint32_t lock;
uint32_t pad;
- struct mars_workload_queue_block_bits bits[MARS_WORKLOAD_PER_BLOCK];
+ uint64_t bits[MARS_WORKLOAD_PER_BLOCK];
} __attribute__((aligned(MARS_WORKLOAD_QUEUE_BLOCK_ALIGN)));
/* mars workload queue structure */
--- a/src/host/lib/mars_workload_queue.c
+++ b/src/host/lib/mars_workload_queue.c
@@ -63,12 +63,20 @@ int workload_queue_initialize(struct mar
mars_mutex_lock((struct mars_mutex *)p);
for (index = 0; index < MARS_WORKLOAD_PER_BLOCK; index++) {
- p->bits[index].type = MARS_WORKLOAD_TYPE_NONE;
- p->bits[index].state = MARS_WORKLOAD_STATE_NONE;
- p->bits[index].priority = MARS_WORKLOAD_PRIORITY_MIN;
- p->bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
- p->bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
- p->bits[index].wait = MARS_WORKLOAD_ID_NONE;
+ uint64_t *bits = &p->bits[index];
+
+ MARS_BITS_SET(bits, TYPE,
+ MARS_WORKLOAD_TYPE_NONE);
+ MARS_BITS_SET(bits, STATE,
+ MARS_WORKLOAD_STATE_NONE);
+ MARS_BITS_SET(bits, PRIORITY,
+ MARS_WORKLOAD_PRIORITY_MIN);
+ MARS_BITS_SET(bits, COUNTER,
+ MARS_WORKLOAD_COUNTER_MIN);
+ MARS_BITS_SET(bits, SIGNAL,
+ MARS_WORKLOAD_SIGNAL_OFF);
+ MARS_BITS_SET(bits, WAIT_ID,
+ MARS_WORKLOAD_ID_NONE);
}
mars_mutex_unlock((struct mars_mutex *)p);
@@ -91,7 +99,7 @@ int workload_queue_finalize(struct mars_
block = id / MARS_WORKLOAD_PER_BLOCK;
index = id % MARS_WORKLOAD_PER_BLOCK;
- if (queue->block[block].bits[index].state !=
+ if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_NONE)
break;
@@ -117,12 +125,13 @@ int workload_queue_add_begin(struct mars
int block = 0;
int index = 0;
+ uint64_t *bits = &queue->block[block].bits[index];
+
*id = 0;
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- while (queue->block[block].bits[index].state !=
- MARS_WORKLOAD_STATE_NONE) {
+ while (MARS_BITS_GET(bits, STATE) != MARS_WORKLOAD_STATE_NONE) {
(*id)++;
index++;
if (index == MARS_WORKLOAD_PER_BLOCK) {
@@ -137,16 +146,17 @@ int workload_queue_add_begin(struct mars
mars_mutex_lock(
(struct mars_mutex *)&queue->block[block]);
}
+ bits = &queue->block[block].bits[index];
}
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(bits, STATE) == MARS_WORKLOAD_STATE_NONE,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set type and set state to adding */
- queue->block[block].bits[index].type = type;
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_ADDING;
+ MARS_BITS_SET(bits, TYPE, type);
+ MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_ADDING);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -165,15 +175,17 @@ int workload_queue_add_end(struct mars_w
int block = id / MARS_WORKLOAD_PER_BLOCK;
int index = id % MARS_WORKLOAD_PER_BLOCK;
+ uint64_t *bits = &queue->block[block].bits[index];
+
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_ADDING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(bits, STATE) == MARS_WORKLOAD_STATE_ADDING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* reset workload queue bits and set state to finished state */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
+ MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_FINISHED);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -190,13 +202,15 @@ int workload_queue_add_cancel(struct mar
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_ADDING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_ADDING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state back to none state */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_NONE;
+ MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+ MARS_WORKLOAD_STATE_NONE);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -214,13 +228,15 @@ int workload_queue_remove_begin(struct m
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_FINISHED,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_FINISHED,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state to removing */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_REMOVING;
+ MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+ MARS_WORKLOAD_STATE_REMOVING);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -241,13 +257,15 @@ int workload_queue_remove_end(struct mar
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_REMOVING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_REMOVING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state to none */
- queue->block[block].bits[index].type = MARS_WORKLOAD_TYPE_NONE;
+ MARS_BITS_SET(&queue->block[block].bits[index], TYPE,
+ MARS_WORKLOAD_TYPE_NONE);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -264,13 +282,15 @@ int workload_queue_remove_cancel(struct
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_REMOVING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_REMOVING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state back to finished */
- queue->block[block].bits[index].type = MARS_WORKLOAD_STATE_FINISHED;
+ MARS_BITS_SET(&queue->block[block].bits[index], TYPE,
+ MARS_WORKLOAD_STATE_FINISHED);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -289,17 +309,23 @@ int workload_queue_schedule_begin(struct
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_FINISHED,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_FINISHED,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* reset workload queue bits and set state to scheduling */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_SCHEDULING;
- queue->block[block].bits[index].priority = priority;
- queue->block[block].bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
- queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
- queue->block[block].bits[index].wait = MARS_WORKLOAD_ID_NONE;
+ MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+ MARS_WORKLOAD_STATE_SCHEDULING);
+ MARS_BITS_SET(&queue->block[block].bits[index], PRIORITY,
+ priority);
+ MARS_BITS_SET(&queue->block[block].bits[index], COUNTER,
+ MARS_WORKLOAD_COUNTER_MIN);
+ MARS_BITS_SET(&queue->block[block].bits[index], SIGNAL,
+ MARS_WORKLOAD_SIGNAL_OFF);
+ MARS_BITS_SET(&queue->block[block].bits[index], WAIT_ID,
+ MARS_WORKLOAD_ID_NONE);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -320,13 +346,15 @@ int workload_queue_schedule_end(struct m
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_SCHEDULING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_SCHEDULING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state to ready */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_READY;
+ MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+ MARS_WORKLOAD_STATE_READY);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -344,13 +372,15 @@ int workload_queue_schedule_cancel(struc
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
- MARS_WORKLOAD_STATE_SCHEDULING,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_SCHEDULING,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
/* set state back to finished */
- queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
+ MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+ MARS_WORKLOAD_STATE_FINISHED);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
@@ -366,10 +396,11 @@ int workload_queue_wait(struct mars_work
int block = id / MARS_WORKLOAD_PER_BLOCK;
int index = id % MARS_WORKLOAD_PER_BLOCK;
- while (queue->block[block].bits[index].state !=
+ while (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_FINISHED) {
- MARS_CHECK_RET(queue->block[block].bits[index].state !=
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE)
+ != MARS_WORKLOAD_STATE_NONE,
MARS_ERROR_STATE);
sched_yield();
}
@@ -390,11 +421,12 @@ int workload_queue_try_wait(struct mars_
int block = id / MARS_WORKLOAD_PER_BLOCK;
int index = id % MARS_WORKLOAD_PER_BLOCK;
- MARS_CHECK_RET(queue->block[block].bits[index].state !=
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_NONE,
MARS_ERROR_STATE);
- if (queue->block[block].bits[index].state !=
+ if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
MARS_WORKLOAD_STATE_FINISHED)
return MARS_ERROR_BUSY;
@@ -415,12 +447,14 @@ int workload_queue_signal_send(struct ma
mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
- MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state !=
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_NONE,
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
MARS_ERROR_STATE);
- queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_ON;
+ MARS_BITS_SET(&queue->block[block].bits[index], SIGNAL,
+ MARS_WORKLOAD_SIGNAL_ON);
mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
--- a/src/mpu/kernel/mars_kernel_scheduler.c
+++ b/src/mpu/kernel/mars_kernel_scheduler.c
@@ -73,34 +73,38 @@ static int search_block(int block)
* and pick the workload that has been waiting the longest
*/
for (i = 0; i < MARS_WORKLOAD_PER_BLOCK; i++) {
- struct mars_workload_queue_block_bits *bits
- = &queue_block.bits[i];
+ uint64_t *bits = &queue_block.bits[i];
+ uint8_t signal = MARS_BITS_GET(bits, SIGNAL);
+ uint8_t priority = MARS_BITS_GET(bits, PRIORITY);
+ uint16_t wait_id = MARS_BITS_GET(bits, WAIT_ID);
+ uint16_t counter = MARS_BITS_GET(bits, COUNTER);
- switch (bits->state) {
+ switch (MARS_BITS_GET(bits, STATE)) {
case MARS_WORKLOAD_STATE_READY:
/* priority greater than max priority so select */
- if ((int)bits->priority > max_priority) {
+ if ((int)priority > max_priority) {
index = i;
- max_count = bits->counter;
- max_priority = bits->priority;
+ max_count = counter;
+ max_priority = priority;
/* priority equal and wait counter greater so select */
- } else if ((int)bits->priority == max_priority &&
- (int)bits->counter > max_count) {
+ } else if ((int)priority == max_priority &&
+ (int)counter > max_count) {
index = i;
- max_count = bits->counter;
+ max_count = counter;
}
/* increment wait counter without overflowing */
- if (bits->counter < MARS_WORKLOAD_COUNTER_MAX)
- bits->counter++;
+ if (counter < MARS_WORKLOAD_COUNTER_MAX)
+ MARS_BITS_SET(bits, COUNTER, counter + 1);
break;
case MARS_WORKLOAD_STATE_WAITING:
/* waiting for workload to finish so check status */
- if (bits->wait != MARS_WORKLOAD_ID_NONE) {
+ if (wait_id != MARS_WORKLOAD_ID_NONE) {
struct mars_workload_queue_block wait_block;
struct mars_workload_queue_block *p_wait_block;
+ uint8_t wait_state;
- int bl = bits->wait / MARS_WORKLOAD_PER_BLOCK;
- int id = bits->wait % MARS_WORKLOAD_PER_BLOCK;
+ int bl = wait_id / MARS_WORKLOAD_PER_BLOCK;
+ int id = wait_id % MARS_WORKLOAD_PER_BLOCK;
/* check if workload id is in the same block */
if (block != bl) {
@@ -113,16 +117,24 @@ static int search_block(int block)
p_wait_block = &queue_block;
}
+ wait_state =
+ MARS_BITS_GET(&p_wait_block->bits[id],
+ STATE);
+
/* check if workload is finished and reset */
- if (p_wait_block->bits[id].state ==
+ if (wait_state ==
MARS_WORKLOAD_STATE_FINISHED) {
- bits->wait = MARS_WORKLOAD_ID_NONE;
- bits->state = MARS_WORKLOAD_STATE_READY;
+ MARS_BITS_SET(bits, WAIT_ID,
+ MARS_WORKLOAD_ID_NONE);
+ MARS_BITS_SET(bits, STATE,
+ MARS_WORKLOAD_STATE_READY);
}
/* waiting for signal so check signal bit and reset */
- } else if (bits->signal == MARS_WORKLOAD_SIGNAL_ON) {
- bits->signal = MARS_WORKLOAD_SIGNAL_OFF;
- bits->state = MARS_WORKLOAD_STATE_READY;
+ } else if (signal == MARS_WORKLOAD_SIGNAL_ON) {
+ MARS_BITS_SET(bits, SIGNAL,
+ MARS_WORKLOAD_SIGNAL_OFF);
+ MARS_BITS_SET(bits, STATE,
+ MARS_WORKLOAD_STATE_READY);
i--;
}
break;
@@ -148,10 +160,12 @@ static int reserve_block(int block)
index = search_block(block);
if (index >= 0) {
/* update the current state of the workload */
- queue_block.bits[index].state = MARS_WORKLOAD_STATE_RUNNING;
+ MARS_BITS_SET(&queue_block.bits[index], STATE,
+ MARS_WORKLOAD_STATE_RUNNING);
/* reset the counter for reserved workload */
- queue_block.bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
+ MARS_BITS_SET(&queue_block.bits[index], COUNTER,
+ MARS_WORKLOAD_COUNTER_MIN);
}
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
@@ -168,7 +182,7 @@ static void release_block(int block, int
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
/* update current workload state in workload queue block */
- queue_block.bits[index].state = workload_state;
+ MARS_BITS_SET(&queue_block.bits[index], STATE, workload_state);
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
}
@@ -191,7 +205,7 @@ int reserve_workload(void)
/* set global workload info based on workload block and index */
workload_index = MARS_WORKLOAD_PER_BLOCK * block + index;
- workload_type = queue_block.bits[index].type;
+ workload_type = MARS_BITS_GET(&queue_block.bits[index], TYPE);
workload_ea = queue_header.context_ea +
workload_index * sizeof(struct mars_workload_context);
--- a/src/mpu/kernel/mars_kernel_workload.c
+++ b/src/mpu/kernel/mars_kernel_workload.c
@@ -139,14 +139,15 @@ int workload_schedule(uint16_t workload_
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
/* make sure workload is in the correct state */
- if (queue_block.bits[index].state != MARS_WORKLOAD_STATE_FINISHED) {
+ if (MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_FINISHED) {
mars_mutex_unlock_put(block_ea,
(struct mars_mutex *)&queue_block);
return MARS_ERROR_STATE;
}
/* get information of workload to schedule */
- schedule_workload_type = queue_block.bits[index].type;
+ schedule_workload_type = MARS_BITS_GET(&queue_block.bits[index], TYPE);
schedule_workload_ea = queue_header.context_ea +
workload_id * sizeof(struct mars_workload_context);
@@ -170,11 +171,16 @@ int workload_schedule(uint16_t workload_
sizeof(struct mars_workload_context),
MARS_DMA_TAG);
- queue_block.bits[index].state = MARS_WORKLOAD_STATE_READY;
- queue_block.bits[index].priority = priority;
- queue_block.bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
- queue_block.bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
- queue_block.bits[index].wait = MARS_WORKLOAD_ID_NONE;
+ MARS_BITS_SET(&queue_block.bits[index], STATE,
+ MARS_WORKLOAD_STATE_READY);
+ MARS_BITS_SET(&queue_block.bits[index], PRIORITY,
+ priority);
+ MARS_BITS_SET(&queue_block.bits[index], COUNTER,
+ MARS_WORKLOAD_COUNTER_MIN);
+ MARS_BITS_SET(&queue_block.bits[index], SIGNAL,
+ MARS_WORKLOAD_SIGNAL_OFF);
+ MARS_BITS_SET(&queue_block.bits[index], WAIT_ID,
+ MARS_WORKLOAD_ID_NONE);
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
@@ -196,14 +202,15 @@ int workload_wait(uint16_t workload_id)
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
/* make sure workload is initialized */
- if (queue_block.bits[index].state == MARS_WORKLOAD_STATE_NONE) {
+ if (MARS_BITS_GET(&queue_block.bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_NONE) {
mars_mutex_unlock_put(block_ea,
(struct mars_mutex *)&queue_block);
return MARS_ERROR_STATE;
}
/* set the workload id to wait for */
- queue_block.bits[index].wait = workload_id;
+ MARS_BITS_SET(&queue_block.bits[index], WAIT_ID, workload_id);
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
@@ -224,15 +231,17 @@ int workload_try_wait(uint16_t workload_
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
- MARS_CHECK_CLEANUP_RET(queue_block.bits[index].state !=
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_NONE,
mars_mutex_unlock_put(block_ea,
(struct mars_mutex *)&queue_block),
MARS_ERROR_STATE);
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
- if (queue_block.bits[index].state != MARS_WORKLOAD_STATE_FINISHED)
+ if (MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_FINISHED)
return MARS_ERROR_BUSY;
return MARS_SUCCESS;
@@ -253,14 +262,16 @@ int workload_signal_send(uint16_t worklo
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
/* make sure workload is initialized */
- if (queue_block.bits[index].state == MARS_WORKLOAD_STATE_NONE) {
+ if (MARS_BITS_GET(&queue_block.bits[index], STATE) ==
+ MARS_WORKLOAD_STATE_NONE) {
mars_mutex_unlock_put(block_ea,
(struct mars_mutex *)&queue_block);
return MARS_ERROR_STATE;
}
/* set the workload signal */
- queue_block.bits[index].signal = MARS_WORKLOAD_SIGNAL_ON;
+ MARS_BITS_SET(&queue_block.bits[index], SIGNAL,
+ MARS_WORKLOAD_SIGNAL_ON);
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
@@ -295,8 +306,9 @@ int workload_signal_try_wait(void)
mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
- MARS_CHECK_CLEANUP_RET(queue_block.bits[index].state !=
- MARS_WORKLOAD_STATE_NONE,
+ MARS_CHECK_CLEANUP_RET(
+ MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+ MARS_WORKLOAD_STATE_NONE,
mars_mutex_unlock_put(block_ea,
(struct mars_mutex *)&queue_block),
MARS_ERROR_STATE);
@@ -304,7 +316,8 @@ int workload_signal_try_wait(void)
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
/* return busy if task has not received signal */
- if (queue_block.bits[index].signal != MARS_WORKLOAD_SIGNAL_ON)
+ if (MARS_BITS_GET(&queue_block.bits[index], SIGNAL) !=
+ MARS_WORKLOAD_SIGNAL_ON)
return MARS_ERROR_BUSY;
return MARS_SUCCESS;