[Cbe-oss-dev] [PATCH 04/17] MARS/core: Wrap host mutex

Yuji Mano yuji.mano at am.sony.com
Wed Dec 3 13:58:49 EST 2008


From: Kazunori Asayama <asayama at sm.sony.co.jp>

Wrap system mutex functions

This patch wraps mutex functions provided by the system, to prepare
for hybrid system support.

Signed-off-by: Kazunori Asayama <asayama at sm.sony.co.jp>
---
 core/src/host/lib/Makefile.am              |    1 
 core/src/host/lib/context.c                |   34 ++++++--------
 core/src/host/lib/context_internal_types.h |   10 ++++
 core/src/host/lib/host_mutex.c             |   68 +++++++++++++++++++++++++++++
 4 files changed, 94 insertions(+), 19 deletions(-)

--- a/core/src/host/lib/Makefile.am
+++ b/core/src/host/lib/Makefile.am
@@ -96,6 +96,7 @@ libmars_core_la_SOURCES = \
 	alloc.c \
 	context.c \
 	ea.c \
+	host_mutex.c \
 	mpu.c \
 	mutex.c \
 	workload_queue.c
--- a/core/src/host/lib/context.c
+++ b/core/src/host/lib/context.c
@@ -36,7 +36,6 @@
  */
 
 #include <string.h>
-#include <pthread.h>
 #include <ppu_intrinsics.h>
 
 #include "config.h"
@@ -50,7 +49,6 @@
 #include "kernel_internal_types.h"
 
 static struct mars_context *mars_shared_context;
-static pthread_mutex_t mars_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 uint32_t mars_get_ticks(void)
 {
@@ -167,9 +165,9 @@ int mars_context_create(struct mars_cont
 		num_mpus = num_mpus_max;
 
 	/* lock mutex */
-	ret = pthread_mutex_lock(&mars_mutex);
-	if (ret)
-		return MARS_ERROR_INTERNAL;
+	ret = mars_host_mutex_lock(&mars_shared_context_lock);
+	if (ret != MARS_SUCCESS)
+		return ret;
 
 	/* shared context requested */
 	if (shared && mars_shared_context) {
@@ -236,15 +234,13 @@ int mars_context_create(struct mars_cont
 
 done:
 	/* unlock mutex */
-	ret = pthread_mutex_unlock(&mars_mutex);
-	if (ret) {
-		ret = MARS_ERROR_INTERNAL;
-		goto error_pthread_mutex_unlock;
-	}
+	ret = mars_host_mutex_unlock(&mars_shared_context_lock);
+	if (ret != MARS_SUCCESS)
+		goto error_shared_context_unlock;
 
 	return MARS_SUCCESS;
 
-error_pthread_mutex_unlock:
+error_shared_context_unlock:
 	mpu_contexts_destroy(mars);
 error_mpu_contexts_create:
 	mars_workload_queue_destroy(mars);
@@ -255,7 +251,7 @@ error_malloc_mpu_contexts:
 error_malloc_kernel_params:
 	mars_free(mars);
 error:
-	pthread_mutex_unlock(&mars_mutex);
+	mars_host_mutex_unlock(&mars_shared_context_lock);
 
 	return ret;
 }
@@ -269,9 +265,9 @@ int mars_context_destroy(struct mars_con
 		return MARS_ERROR_NULL;
 
 	/* lock mutex */
-	ret = pthread_mutex_lock(&mars_mutex);
-	if (ret)
-		return MARS_ERROR_INTERNAL;
+	ret = mars_host_mutex_lock(&mars_shared_context_lock);
+	if (ret != MARS_SUCCESS)
+		return ret;
 
 	/* decrement reference count */
 	mars->reference_count--;
@@ -305,14 +301,14 @@ int mars_context_destroy(struct mars_con
 
 done:
 	/* unlock mutex */
-	ret = pthread_mutex_unlock(&mars_mutex);
-	if (ret)
-		return MARS_ERROR_INTERNAL;
+	ret = mars_host_mutex_unlock(&mars_shared_context_lock);
+	if (ret != MARS_SUCCESS)
+		return ret;
 
 	return MARS_SUCCESS;
 
 error:
-	pthread_mutex_unlock(&mars_mutex);
+	mars_host_mutex_unlock(&mars_shared_context_lock);
 
 	return ret;
 }
--- a/core/src/host/lib/context_internal_types.h
+++ b/core/src/host/lib/context_internal_types.h
@@ -48,6 +48,10 @@
 typedef pthread_t mars_mpu_context_t;
 #endif
 
+#ifdef HAVE_LIBPTHREAD
+typedef pthread_mutex_t mars_host_mutex_t;
+#endif
+
 struct mars_context {
 	/* parameters for the MARS kernel */
 	uint64_t kernel_params_ea;
@@ -65,4 +69,10 @@ int mars_mpu_max(int *num);
 int mars_mpu_run(mars_mpu_context_t *mpu, uint64_t params_ea);
 int mars_mpu_wait(mars_mpu_context_t *mpu);
 
+
+extern mars_host_mutex_t mars_shared_context_lock;
+
+int mars_host_mutex_lock(mars_host_mutex_t *mutex);
+int mars_host_mutex_unlock(mars_host_mutex_t *mutex);
+
 #endif
--- /dev/null
+++ b/core/src/host/lib/host_mutex.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Sony Corporation of America
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this Library and associated documentation files (the
+ * "Library"), to deal in the Library without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Library, and to
+ * permit persons to whom the Library is furnished to do so, subject to
+ * the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be
+ *  included in all copies or substantial portions of the Library.
+ *
+ *  If you modify the Library, you may copy and distribute your modified
+ *  version of the Library in object code or as an executable provided
+ *  that you also do one of the following:
+ *
+ *   Accompany the modified version of the Library with the complete
+ *   corresponding machine-readable source code for the modified version
+ *   of the Library; or,
+ *
+ *   Accompany the modified version of the Library with a written offer
+ *   for a complete machine-readable copy of the corresponding source
+ *   code of the modified version of the Library.
+ *
+ *
+ * THE LIBRARY IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * LIBRARY OR THE USE OR OTHER DEALINGS IN THE LIBRARY.
+ */
+
+
+#include "config.h"
+
+#include "mars/error.h"
+
+#include "context_internal_types.h"
+
+/* global locks */
+pthread_mutex_t mars_shared_context_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int mars_host_mutex_lock(mars_host_mutex_t *mutex)
+{
+	int ret;
+
+	ret = pthread_mutex_lock(mutex);
+	if (ret)
+		return MARS_ERROR_INTERNAL;
+
+	return MARS_SUCCESS;
+}
+
+int mars_host_mutex_unlock(mars_host_mutex_t *mutex)
+{
+	int ret;
+
+	ret = pthread_mutex_unlock(mutex);
+	if (ret)
+		return MARS_ERROR_INTERNAL;
+
+	return MARS_SUCCESS;
+}






More information about the cbe-oss-dev mailing list