[Cbe-oss-dev] [PATCH 06/10] MARS: Add support for shared MARS context
Yuji Mano
yuji.mano at am.sony.com
Fri Oct 3 04:31:33 EST 2008
This changes the mars_context_create API by adding a flag parameter that
specifies whether to create a new non-shared MARS context instance or to
return the instance of the globally shared MARS context. Each MARS context
now keeps an internal reference count, so the instance is only freed once
mars_context_destroy has been called on its last remaining reference.
Signed-off-by: Yuji Mano <yuji.mano at am.sony.com>
---
include/host/mars/mars_context.h | 5 +-
src/host/lib/mars_context.c | 86 +++++++++++++++++++++++++++++++++------
2 files changed, 78 insertions(+), 13 deletions(-)
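
For reference, a caller of the changed API would now look roughly like the
sketch below. This is only an illustration, not part of the patch: the include
path and the wrapper function are assumed, and the shared parameter is passed
as a plain 0/1 value since the patch defines no named flag constants.

#include <mars/mars_context.h>	/* assumed install path for this header */

int run_on_shared_context(void)
{
	struct mars_context *mars;
	int ret;

	/* request the globally shared MARS context (shared = 1);
	 * num_mpus = 0 means "use all available MPUs" */
	ret = mars_context_create(&mars, 0, 1);
	if (ret != MARS_SUCCESS)
		return ret;

	/* ... create and schedule MARS workloads here ... */

	/* drop this reference; the context is freed only when the
	 * last holder calls mars_context_destroy() */
	return mars_context_destroy(mars);
}
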
--- a/include/host/mars/mars_context.h
+++ b/include/host/mars/mars_context.h
@@ -63,6 +63,8 @@ struct mars_context {
pthread_t *mpu_context_threads;
/* num of mpu context threads */
uint32_t mpu_context_count;
+ /* reference count */
+ uint32_t reference_count;
};
#if defined(__cplusplus)
@@ -84,6 +86,7 @@ extern "C" {
*
* \param[out] mars - address of pointer to MARS context
* \param[in] num_mpus - number of mpus utilized by MARS context
+ * \param[in] shared - specifies if context is shared or not
* \return
* MARS_SUCCESS - successfully created MARS context
* \n MARS_ERROR_NULL - null pointer specified
@@ -91,7 +94,7 @@ extern "C" {
* \n MARS_ERROR_MEMORY - not enough memory
* \n MARS_ERROR_INTERNAL - some internal error occurred
*/
-int mars_context_create(struct mars_context **mars, uint32_t num_mpus);
+int mars_context_create(struct mars_context **mars, uint32_t num_mpus, uint8_t shared);
/**
* \ingroup group_mars_context
--- a/src/host/lib/mars_context.c
+++ b/src/host/lib/mars_context.c
@@ -47,6 +47,8 @@
#include "mars/mars_debug.h"
extern struct spe_program_handle mars_kernel_entry;
+static struct mars_context *mars_shared_context;
+static pthread_mutex_t mars_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *mpu_context_thread(void *arg)
{
@@ -79,13 +81,8 @@ static int mpu_contexts_create(struct ma
int ret;
unsigned int i;
- /* allocate mpu context thread array */
- mars->mpu_context_threads = (pthread_t *)
- malloc(sizeof(pthread_t) * num_mpus);
- MARS_CHECK_RET(mars->mpu_context_threads, MARS_ERROR_MEMORY);
-
/* create threads for each mpu context */
- for (i = 0; i < num_mpus; i++) {
+ for (i = mars->mpu_context_count; i < num_mpus; i++) {
struct mars_kernel_params *params = &mars->kernel_params[i];
params->id = i;
@@ -122,13 +119,11 @@ static int mpu_contexts_destroy(struct m
MARS_CHECK_RET(!ret && !p_ret, MARS_ERROR_INTERNAL);
}
- /* free mpu context thread array */
- free(mars->mpu_context_threads);
-
return MARS_SUCCESS;
}
-int mars_context_create(struct mars_context **mars_ret, uint32_t num_mpus)
+int mars_context_create(struct mars_context **mars_ret, uint32_t num_mpus,
+ uint8_t shared)
{
MARS_CHECK_RET(mars_ret, MARS_ERROR_NULL);
@@ -147,36 +142,83 @@ int mars_context_create(struct mars_cont
if (!num_mpus)
num_mpus = num_mpus_max;
+ /* lock mutex */
+ ret = pthread_mutex_lock(&mars_mutex);
+ MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+
+ /* shared context requested */
+ if (shared && mars_shared_context) {
+ /* create any extra mpu contexts necessary */
+ ret = mpu_contexts_create(mars_shared_context, num_mpus);
+		MARS_CHECK_CLEANUP_RET(ret == MARS_SUCCESS,
+			pthread_mutex_unlock(&mars_mutex),
+			ret);
+
+ /* increment shared context reference count */
+ mars_shared_context->reference_count++;
+
+ /* return the shared context */
+ *mars_ret = mars_shared_context;
+
+ /* unlock mutex */
+ ret = pthread_mutex_unlock(&mars_mutex);
+ MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+
+ return MARS_SUCCESS;
+ }
+
/* allocate */
mars = malloc(sizeof(struct mars_context));
- MARS_CHECK_RET(mars, MARS_ERROR_MEMORY);
+ MARS_CHECK_CLEANUP_RET(mars,
+ pthread_mutex_unlock(&mars_mutex),
+ MARS_ERROR_MEMORY);
/* zero context */
memset(mars, 0, sizeof(struct mars_context));
+ /* increment reference count */
+ mars->reference_count++;
+
/* allocate kernel params */
mars->kernel_params = (struct mars_kernel_params *)
memalign(MARS_KERNEL_PARAMS_ALIGN,
- sizeof(struct mars_kernel_params) * num_mpus);
+ sizeof(struct mars_kernel_params) * num_mpus_max);
MARS_CHECK_CLEANUP_RET(mars->kernel_params,
+ pthread_mutex_unlock(&mars_mutex);
+ mars_context_destroy(mars),
+ MARS_ERROR_MEMORY);
+
+ /* allocate mpu context thread array */
+ mars->mpu_context_threads = (pthread_t *)
+ malloc(sizeof(pthread_t) * num_mpus_max);
+ MARS_CHECK_CLEANUP_RET(mars->mpu_context_threads,
+ pthread_mutex_unlock(&mars_mutex);
mars_context_destroy(mars),
MARS_ERROR_MEMORY);
/* create workload queue */
ret = workload_queue_create(&mars->workload_queue);
MARS_CHECK_CLEANUP_RET(ret == MARS_SUCCESS,
+ pthread_mutex_unlock(&mars_mutex);
mars_context_destroy(mars),
ret);
/* create mpu contexts */
ret = mpu_contexts_create(mars, num_mpus);
MARS_CHECK_CLEANUP_RET(ret == MARS_SUCCESS,
+ pthread_mutex_unlock(&mars_mutex);
mars_context_destroy(mars),
ret);
+ /* set the shared context pointer */
+ if (shared)
+ mars_shared_context = mars;
+
/* return mars context pointer */
*mars_ret = mars;
+ /* unlock mutex */
+ ret = pthread_mutex_unlock(&mars_mutex);
+ MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+
return MARS_SUCCESS;
}
@@ -186,6 +228,17 @@ int mars_context_destroy(struct mars_con
int ret;
+ /* lock mutex */
+ ret = pthread_mutex_lock(&mars_mutex);
+ MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+
+ /* decrement reference count */
+ mars->reference_count--;
+
+	/* reference count is not 0 so unlock mutex and return */
+	if (mars->reference_count) {
+		ret = pthread_mutex_unlock(&mars_mutex);
+		MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+		return MARS_SUCCESS;
+	}
+
/* destroy mpu contexts */
if (mars->mpu_context_count) {
ret = mpu_contexts_destroy(mars);
@@ -199,9 +252,18 @@ int mars_context_destroy(struct mars_con
}
/* free allocated memory */
+ free(mars->mpu_context_threads);
free(mars->kernel_params);
free(mars);
+ /* check if it is the shared context pointer and set to NULL */
+ if (mars == mars_shared_context)
+ mars_shared_context = NULL;
+
+ /* unlock mutex */
+ ret = pthread_mutex_unlock(&mars_mutex);
+ MARS_CHECK_RET(ret == 0, MARS_ERROR_INTERNAL);
+
MARS_PRINT("Destroyed MARS Context (%p)\n", mars);
return MARS_SUCCESS;
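
To make the reference-counting behaviour concrete, the snippet below (again
only an illustration, not part of the patch; error checking is omitted for
brevity) shows two users requesting the shared context. Both receive the same
instance, and the underlying context is torn down only on the final destroy:

struct mars_context *a, *b;

mars_context_create(&a, 0, 1);	/* creates the shared context, refcount = 1 */
mars_context_create(&b, 0, 1);	/* returns the same context, refcount = 2 */
/* a == b at this point */

mars_context_destroy(b);	/* refcount = 1, nothing is freed yet */
mars_context_destroy(a);	/* refcount = 0, MPU contexts and memory are freed */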