[Cbe-oss-dev] [PATCH 06/22]MARS/base: minor consistency cleanup
Yuji Mano
yuji.mano at am.sony.com
Fri Mar 20 07:54:03 EST 2009
This patch cleans up some of the inconsistencies between using sizeof() and the
explicit size defines.
Signed-off-by: Yuji Mano <yuji.mano at am.sony.com>
---
base/src/common/kernel_internal_types.h | 2 +-
base/src/common/workload_internal_types.h | 4 +++-
base/src/host/lib/context.c | 10 +++++-----
base/src/host/lib/mutex_cell.c | 5 ++---
base/src/host/lib/workload_queue.c | 22 +++++++++++-----------
base/src/mpu/kernel/kernel.c | 29 ++++++++++++++---------------
6 files changed, 36 insertions(+), 36 deletions(-)
--- a/base/src/common/kernel_internal_types.h
+++ b/base/src/common/kernel_internal_types.h
@@ -57,8 +57,8 @@
#define MARS_KERNEL_DMA_TAG 31
-#define MARS_KERNEL_PARAMS_ALIGN 128
#define MARS_KERNEL_PARAMS_SIZE 128
+#define MARS_KERNEL_PARAMS_ALIGN 128
/* mars kernel syscalls */
struct mars_kernel_syscalls {
--- a/base/src/common/workload_internal_types.h
+++ b/base/src/common/workload_internal_types.h
@@ -68,9 +68,11 @@
#define MARS_WORKLOAD_NUM_BLOCKS 50 /* total blocks */
#define MARS_WORKLOAD_MAX 750 /* blocks * wl/block */
+#define MARS_WORKLOAD_QUEUE_SIZE 198528 /* size 198528 bytes */
#define MARS_WORKLOAD_QUEUE_ALIGN 128 /* align to 128 bytes */
-#define MARS_WORKLOAD_QUEUE_HEADER_ALIGN 128 /* align to 128 bytes */
#define MARS_WORKLOAD_QUEUE_HEADER_SIZE 128 /* size of 128 bytes */
+#define MARS_WORKLOAD_QUEUE_HEADER_ALIGN 128 /* align to 128 bytes */
+#define MARS_WORKLOAD_QUEUE_BLOCK_SIZE 128 /* size to 128 bytes */
#define MARS_WORKLOAD_QUEUE_BLOCK_ALIGN 128 /* align to 128 bytes */
#define MARS_WORKLOAD_QUEUE_FLAG_NONE 0x0 /* no flag set */
--- a/base/src/host/lib/context.c
+++ b/base/src/host/lib/context.c
@@ -81,13 +81,13 @@ static int kernel_params_init(struct mar
struct mars_kernel_params *params =
mars_ea_work_area_get(params_ea,
MARS_KERNEL_PARAMS_ALIGN,
- sizeof(struct mars_kernel_params));
+ MARS_KERNEL_PARAMS_SIZE);
if (!params)
return MARS_ERROR_MEMORY;
/* zero kernel params */
- memset(params, 0, sizeof(struct mars_kernel_params));
+ memset(params, 0, MARS_KERNEL_PARAMS_SIZE);
params->kernel_id = kernel_id;
params->mars_context_ea = mars_ptr_to_ea(mars);
@@ -95,7 +95,7 @@ static int kernel_params_init(struct mar
params->callback_queue_ea = mars->callback_queue_ea;
/* update params on EA */
- mars_ea_put(params_ea, params, sizeof(struct mars_kernel_params));
+ mars_ea_put(params_ea, params, MARS_KERNEL_PARAMS_SIZE);
mars_ea_sync();
return MARS_SUCCESS;
@@ -109,7 +109,7 @@ static int mpu_contexts_create(struct ma
/* create threads for each mpu context */
for (i = mars->mpu_context_count; i < num_mpus; i++) {
uint64_t params_ea = mars->kernel_params_ea +
- sizeof(struct mars_kernel_params) * i;
+ MARS_KERNEL_PARAMS_SIZE * i;
/* initialize kernel params for current mpu context */
ret = kernel_params_init(mars, params_ea, i);
@@ -205,7 +205,7 @@ int mars_context_create(struct mars_cont
/* allocate kernel params */
mars->kernel_params_ea = mars_ea_memalign(
MARS_KERNEL_PARAMS_ALIGN,
- sizeof(struct mars_kernel_params) * num_mpus_max);
+ MARS_KERNEL_PARAMS_SIZE * num_mpus_max);
if (!mars->kernel_params_ea) {
ret = MARS_ERROR_MEMORY;
goto error_malloc_kernel_params;
--- a/base/src/host/lib/mutex_cell.c
+++ b/base/src/host/lib/mutex_cell.c
@@ -64,8 +64,7 @@ int mars_mutex_create(uint64_t *mutex_ea
if (!mutex_ea_ret)
return MARS_ERROR_NULL;
- mutex_ea = mars_ea_memalign(
- MARS_MUTEX_ALIGN, sizeof(struct mars_mutex));
+ mutex_ea = mars_ea_memalign(MARS_MUTEX_ALIGN, MARS_MUTEX_SIZE);
if (!mutex_ea)
return MARS_ERROR_MEMORY;
@@ -169,7 +168,7 @@ int mars_mutex_lock_get(uint64_t mutex_e
ret = mars_mutex_lock(mutex_ea);
if (ret != MARS_SUCCESS)
return ret;
- mars_ea_get(mutex_ea, mutex, sizeof(struct mars_mutex));
+ mars_ea_get(mutex_ea, mutex, MARS_MUTEX_SIZE);
return MARS_SUCCESS;
#else /* !MARS_ENABLE_DISCRETE_SHARED_MEMORY */
(void)mutex; /* ignored */
--- a/base/src/host/lib/workload_queue.c
+++ b/base/src/host/lib/workload_queue.c
@@ -56,14 +56,14 @@ static inline uint64_t get_workload_ea(u
offsetof(struct mars_workload_queue_header,
context_ea));
- return context_ea + sizeof(struct mars_workload_context) * workload_id;
+ return context_ea + MARS_WORKLOAD_CONTEXT_SIZE * workload_id;
}
static inline uint64_t get_block_ea(uint64_t queue_ea, int block)
{
return queue_ea +
offsetof(struct mars_workload_queue, block) +
- sizeof(struct mars_workload_queue_block) * block;
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE * block;
}
static inline uint64_t get_block_bits_ea(uint64_t block_ea, int index)
@@ -176,8 +176,8 @@ static void init_header(uint64_t queue_e
/* prepare work area for queue header */
queue = mars_ea_work_area_get(queue_ea,
- MARS_WORKLOAD_QUEUE_ALIGN,
- sizeof(struct mars_workload_queue_header));
+ MARS_WORKLOAD_QUEUE_HEADER_ALIGN,
+ MARS_WORKLOAD_QUEUE_HEADER_SIZE);
/* initialize workload queue header */
queue->header.flag = MARS_WORKLOAD_QUEUE_FLAG_NONE;
@@ -195,7 +195,7 @@ static void init_header(uint64_t queue_e
queue->header.bits[block] = bits;
/* update queue header on EA */
- mars_ea_put(queue_ea, queue, sizeof(struct mars_workload_queue_header));
+ mars_ea_put(queue_ea, queue, MARS_WORKLOAD_QUEUE_HEADER_SIZE);
/* reset mutex portion of queue header */
mars_mutex_reset(queue_ea);
@@ -207,13 +207,13 @@ static void init_block(uint64_t block_ea
struct mars_workload_queue_block *block =
mars_ea_work_area_get(block_ea,
MARS_WORKLOAD_QUEUE_BLOCK_ALIGN,
- sizeof(struct mars_workload_queue_block));
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE);
for (index = 1; index < MARS_WORKLOAD_PER_BLOCK; index++)
block->bits[index] = initial_bits;
/* update queue block on EA */
- mars_ea_put(block_ea, block, sizeof(struct mars_workload_queue_block));
+ mars_ea_put(block_ea, block, MARS_WORKLOAD_QUEUE_BLOCK_SIZE);
/* reset mutex portion of queue block */
mars_mutex_reset(block_ea);
@@ -249,7 +249,7 @@ int mars_workload_queue_create(struct ma
/* allocate workload instance */
queue_ea = mars_ea_memalign(MARS_WORKLOAD_QUEUE_ALIGN,
- sizeof(struct mars_workload_queue));
+ MARS_WORKLOAD_QUEUE_SIZE);
if (!queue_ea)
return MARS_ERROR_MEMORY;
@@ -274,7 +274,7 @@ static int is_block_empty(uint64_t block
struct mars_workload_queue_block *block =
mars_ea_work_area_get(block_ea,
MARS_WORKLOAD_QUEUE_BLOCK_ALIGN,
- sizeof(struct mars_workload_queue_block));
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE);
/* get the workload queue block from shared memory */
mars_ea_get(block_ea, block, sizeof(struct mars_workload_queue_block));
@@ -405,12 +405,12 @@ static int alloc_block(uint64_t block_ea
struct mars_workload_queue_block *block =
mars_ea_work_area_get(block_ea,
MARS_WORKLOAD_QUEUE_BLOCK_ALIGN,
- sizeof(struct mars_workload_queue_block));
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE);
mars_mutex_lock(block_ea);
/* get the workload queue block from shared memory */
- mars_ea_get(block_ea, block, sizeof(struct mars_workload_queue_block));
+ mars_ea_get(block_ea, block, MARS_WORKLOAD_QUEUE_BLOCK_SIZE);
/* check status */
for (index = 1; index < MARS_WORKLOAD_PER_BLOCK; index++) {
--- a/base/src/mpu/kernel/kernel.c
+++ b/base/src/mpu/kernel/kernel.c
@@ -135,8 +135,7 @@ static struct mars_workload_context *get
static uint64_t get_workload_ea(uint16_t id)
{
- return queue_header.context_ea +
- id * sizeof(struct mars_workload_context);
+ return queue_header.context_ea + MARS_WORKLOAD_CONTEXT_SIZE * id;
}
static struct mars_workload_context *get_workload_by_id(uint16_t id)
@@ -149,7 +148,7 @@ static struct mars_workload_context *get
/* get the workload context from workload queue */
dma_get(&ret_workload, get_workload_ea(id),
- sizeof(struct mars_workload_context), MARS_KERNEL_DMA_TAG);
+ MARS_WORKLOAD_CONTEXT_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
return &ret_workload;
@@ -159,7 +158,7 @@ static uint64_t get_block_ea(int block)
{
return queue_header.queue_ea +
offsetof(struct mars_workload_queue, block) +
- sizeof(struct mars_workload_queue_block) * block;
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE * block;
}
static uint64_t get_block_bits(uint16_t id)
@@ -442,7 +441,7 @@ static void schedule_begin_callback(uint
/* get the workload context from workload queue */
dma_get(&schedule_workload, get_workload_ea(id),
- sizeof(struct mars_workload_context), MARS_KERNEL_DMA_TAG);
+ MARS_WORKLOAD_CONTEXT_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
}
@@ -477,7 +476,7 @@ static void schedule_end_callback(uint16
/* put the workload context into workload queue */
dma_put((void *)&schedule_workload, get_workload_ea(id),
- sizeof(struct mars_workload_context), MARS_KERNEL_DMA_TAG);
+ MARS_WORKLOAD_CONTEXT_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
/* update queue header bits */
@@ -684,7 +683,7 @@ static int search_block(int block, int r
/* fetch the necessary block */
dma_get(wait_block, get_block_ea(bl),
- sizeof(wait_block),
+ MARS_WORKLOAD_QUEUE_BLOCK_SIZE,
MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
} else {
@@ -775,7 +774,7 @@ static int workload_reserve(void)
/* get the workload queue header */
dma_get(&queue_header, kernel_params.workload_queue_ea,
- sizeof(struct mars_workload_queue_header), MARS_KERNEL_DMA_TAG);
+ MARS_WORKLOAD_QUEUE_HEADER_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
/* return exit status if exit flag is set from host */
@@ -825,8 +824,8 @@ static int workload_reserve(void)
workload_module = (struct mars_workload_module *)&workload;
/* get the workload context code from workload queue */
- dma_get(&workload, workload_ea, sizeof(struct mars_workload_context),
- MARS_KERNEL_DMA_TAG);
+ dma_get(&workload, workload_ea,
+ MARS_WORKLOAD_CONTEXT_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
return MARS_KERNEL_STATUS_BUSY;
@@ -839,8 +838,8 @@ static void workload_release(void)
uint64_t block_ea = get_block_ea(block);
/* put the workload context into workload queue */
- dma_put(&workload, workload_ea, sizeof(struct mars_workload_context),
- MARS_KERNEL_DMA_TAG);
+ dma_put(&workload, workload_ea,
+ MARS_WORKLOAD_CONTEXT_SIZE, MARS_KERNEL_DMA_TAG);
dma_wait(MARS_KERNEL_DMA_TAG);
/* lock the queue block */
@@ -866,13 +865,13 @@ static void workload_module_load(void)
workload_module_is_cached =
!(kernel_memcmp(&cached_workload_module, workload_module,
- sizeof(struct mars_workload_module)));
+ MARS_WORKLOAD_MODULE_SIZE));
/* only reload the readonly text segment if different from cached */
if (!workload_module_is_cached) {
/* store the current cached workload module ea */
kernel_memcpy(&cached_workload_module, workload_module,
- sizeof(struct mars_workload_module));
+ MARS_WORKLOAD_MODULE_SIZE);
/* load the text into mpu storage from host storage */
dma_get((void *)workload_module->text_vaddr,
@@ -939,7 +938,7 @@ static void scheduler_idle_wait(void)
/* check if queue header has been modified since we last fetched it */
if (!kernel_memcmp(&queue_header, cur_queue_header,
- sizeof(struct mars_workload_queue_header))) {
+ MARS_WORKLOAD_QUEUE_HEADER_SIZE)) {
/* wait until queue header is modified */
spu_read_event_status();
spu_write_event_ack(MFC_LLR_LOST_EVENT);
More information about the cbe-oss-dev
mailing list