[Skiboot] [PATCH 1/5 v3] core: Move free-list locking to a separate per-region lock

Jeremy Kerr jk at ozlabs.org
Tue May 12 20:12:20 AEST 2015


Currently, we have a single lock for the entire mem_region system; this
protects both the global region list and the allocations from each
region.

This means we can't allocate memory while traversing the global region
list, as any malloc/realloc/free will try to acquire mem_region_lock a
second time.

This change separates the locking into two distinct roles: we keep
mem_region_lock to protect the regions list, and introduce a per-region
lock (free_list_lock) to protect allocations from each region's
free_list.

We can then remove the open-coded invocations of mem_alloc that we had
used to avoid malloc() because of the locking issue above.
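
As a sketch of the lock ordering this imposes (a hypothetical helper,
mirroring the second-pass loop in the mem_region.c hunk below):

	/* Hypothetical: allocate from the first region that can
	 * satisfy the request. mem_region_lock is taken first to walk
	 * the list; each region's free_list_lock is taken, and
	 * released, inside it. */
	static void *alloc_from_any_region(size_t size,
					   const char *location)
	{
		struct mem_region *region;
		void *p = NULL;

		lock(&mem_region_lock);
		list_for_each(&regions, region, list) {
			lock(&region->free_list_lock);
			p = mem_alloc(region, size, sizeof(long),
				      location);
			unlock(&region->free_list_lock);
			if (p)
				break;
		}
		unlock(&mem_region_lock);

		return p;
	}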

Signed-off-by: Jeremy Kerr <jk at ozlabs.org>

---
 core/malloc.c                   |   12 ++++-----
 core/mem_region.c               |   39 +++++++++++++++++---------------
 core/test/run-malloc-speed.c    |    2 -
 core/test/run-malloc.c          |   28 +++++++++++-----------
 core/test/run-mem_region.c      |    2 -
 core/test/run-mem_region_init.c |    2 -
 include/mem_region.h            |    3 ++
 7 files changed, 47 insertions(+), 41 deletions(-)

diff --git a/core/malloc.c b/core/malloc.c
index 9e715f6..669cc59 100644
--- a/core/malloc.c
+++ b/core/malloc.c
@@ -25,9 +25,9 @@ void *__memalign(size_t blocksize, size_t bytes, const char *location)
 {
 	void *p;
 
-	lock(&mem_region_lock);
+	lock(&skiboot_heap.free_list_lock);
 	p = mem_alloc(&skiboot_heap, bytes, blocksize, location);
-	unlock(&mem_region_lock);
+	unlock(&skiboot_heap.free_list_lock);
 
 	return p;
 }
@@ -39,9 +39,9 @@ void *__malloc(size_t bytes, const char *location)
 
 void __free(void *p, const char *location)
 {
-	lock(&mem_region_lock);
+	lock(&skiboot_heap.free_list_lock);
 	mem_free(&skiboot_heap, p, location);
-	unlock(&mem_region_lock);
+	unlock(&skiboot_heap.free_list_lock);
 }
 
 void *__realloc(void *ptr, size_t size, const char *location)
@@ -56,7 +56,7 @@ void *__realloc(void *ptr, size_t size, const char *location)
 	if (!ptr)
 		return __malloc(size, location);
 
-	lock(&mem_region_lock);
+	lock(&skiboot_heap.free_list_lock);
 	if (mem_resize(&skiboot_heap, ptr, size, location)) {
 		newptr = ptr;
 	} else {
@@ -70,7 +70,7 @@ void *__realloc(void *ptr, size_t size, const char *location)
 			mem_free(&skiboot_heap, ptr, location);
 		}
 	}
-	unlock(&mem_region_lock);
+	unlock(&skiboot_heap.free_list_lock);
 	return newptr;
 }
 
diff --git a/core/mem_region.c b/core/mem_region.c
index d7c677d..8adb036 100644
--- a/core/mem_region.c
+++ b/core/mem_region.c
@@ -30,6 +30,19 @@
 #define POISON_MEM_REGION_WITH	0x99
 #define POISON_MEM_REGION_LIMIT 1*1024*1024*1024
 
+/* Locking: The mem_region_lock protects the regions list from concurrent
+ * updates. Additions to, or removals from, the region list must be done
+ * with this lock held. This is typically done when we're establishing
+ * the memory & reserved regions.
+ *
+ * Each region has a lock (region->free_list_lock) to protect the free list
+ * from concurrent modification. This lock is used when we're allocating
+ * memory out of a specific region.
+ *
+ * If both locks are needed (e.g., __local_alloc, where we need to find a region,
+ * then allocate from it), the mem_region_lock must be acquired before (and
+ * released after) the per-region lock.
+ */
 struct lock mem_region_lock = LOCK_UNLOCKED;
 
 static struct list_head regions = LIST_HEAD_INIT(regions);
@@ -542,9 +555,7 @@ static struct mem_region *new_region(const char *name,
 {
 	struct mem_region *region;
 
-	/* Avoid lock recursion, call mem_alloc directly. */
-	region = mem_alloc(&skiboot_heap,
-			   sizeof(*region), __alignof__(*region), __location__);
+	region = malloc(sizeof(*region));
 	if (!region)
 		return NULL;
 
@@ -554,6 +565,7 @@ static struct mem_region *new_region(const char *name,
 	region->mem_node = mem_node;
 	region->type = type;
 	region->free_list.n.next = NULL;
+	init_lock(&region->free_list_lock);
 
 	return region;
 }
@@ -629,8 +641,7 @@ static bool add_region(struct mem_region *region)
 		assert(r->start == region->start);
 		assert(r->len == region->len);
 		list_del_from(&regions, &r->list);
-		/* We already hold mem_region lock */
-		mem_free(&skiboot_heap, r, __location__);
+		free(r);
 	}
 
 	/* Finally, add in our own region. */
@@ -695,7 +706,9 @@ restart:
 		}
 
 		/* Second pass, match anything */
+		lock(&region->free_list_lock);
 		p = mem_alloc(region, size, align, location);
+		unlock(&region->free_list_lock);
 		if (p)
 			break;
 	}
@@ -809,10 +822,7 @@ void mem_region_init(void)
 			char *name;
 
 			len = strlen(names->prop + n) + 1;
-
-			name = mem_alloc(&skiboot_heap, len,
-					__alignof__(*name), __location__);
-			memcpy(name, names->prop + n, len);
+			name = strdup(names->prop + n);
 
 			region = new_region(name,
 					dt_get_number(range, 2),
@@ -930,15 +940,8 @@ void mem_region_add_dt_reserved(void)
 		ranges_len += 2 * sizeof(uint64_t);
 	}
 
-	/* Allocate property data with mem_alloc; malloc() acquires
-	 * mem_region_lock */
-	names = mem_alloc(&skiboot_heap, names_len,
-			__alignof__(*names), __location__);
-	ranges = mem_alloc(&skiboot_heap, ranges_len,
-			__alignof__(*ranges), __location__);
-
-	name = names;
-	range = ranges;
+	name = names = malloc(names_len);
+	range = ranges = malloc(ranges_len);
 
 	printf("Reserved regions:\n");
 	/* Second pass: populate property data */
diff --git a/core/test/run-malloc-speed.c b/core/test/run-malloc-speed.c
index 18c0de9..713a74b 100644
--- a/core/test/run-malloc-speed.c
+++ b/core/test/run-malloc-speed.c
@@ -90,7 +90,7 @@ int main(void)
 		       + skiboot_heap.len);
 	}
 	assert(mem_check(&skiboot_heap));
-	assert(mem_region_lock.lock_val == 0);
+	assert(skiboot_heap.free_list_lock.lock_val == 0);
 	free(region_start(&skiboot_heap));
 	real_free(p);
 	return 0;
diff --git a/core/test/run-malloc.c b/core/test/run-malloc.c
index bbd2a48..723cb10 100644
--- a/core/test/run-malloc.c
+++ b/core/test/run-malloc.c
@@ -92,45 +92,45 @@ int main(void)
 		assert(p);
 		assert(p > (char *)test_heap);
 		assert(p + (1ULL << i) <= (char *)test_heap + TEST_HEAP_SIZE);
-		assert(!mem_region_lock.lock_val);
+		assert(!skiboot_heap.free_list_lock.lock_val);
 		free(p);
-		assert(!mem_region_lock.lock_val);
+		assert(!skiboot_heap.free_list_lock.lock_val);
 		assert(heap_empty());
 	}
 
 	/* Realloc as malloc. */
-	mem_region_lock.lock_val = 0;
+	skiboot_heap.free_list_lock.lock_val = 0;
 	p = realloc(NULL, 100);
 	assert(p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 
 	/* Realloc as free. */
 	p = realloc(p, 0);
 	assert(!p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	assert(heap_empty());
 
 	/* Realloc longer. */
 	p = realloc(NULL, 100);
 	assert(p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	p2 = realloc(p, 200);
 	assert(p2 == p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	free(p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	assert(heap_empty());
 
 	/* Realloc shorter. */
-	mem_region_lock.lock_val = 0;
+	skiboot_heap.free_list_lock.lock_val = 0;
 	p = realloc(NULL, 100);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	assert(p);
 	p2 = realloc(p, 1);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	assert(p2 == p);
 	free(p);
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 	assert(heap_empty());
 
 	/* zalloc failure */
@@ -152,7 +152,7 @@ int main(void)
 	assert(p2[63] == 'c');
 	free(p2);
 	assert(heap_empty());
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 
 	/* Realloc with failure to allocate new size */
 	p2 = malloc(TEST_HEAP_SIZE - sizeof(struct alloc_hdr)*2);
@@ -176,7 +176,7 @@ int main(void)
 	free(p);
 	free(p4);
 	assert(heap_empty());
-	assert(!mem_region_lock.lock_val);
+	assert(!skiboot_heap.free_list_lock.lock_val);
 
 	real_free(test_heap);
 	return 0;
diff --git a/core/test/run-mem_region.c b/core/test/run-mem_region.c
index db1ca02..e27459b 100644
--- a/core/test/run-mem_region.c
+++ b/core/test/run-mem_region.c
@@ -244,7 +244,7 @@ int main(void)
 		list_del(&r->list);
 		mem_free(&skiboot_heap, r, __location__);
 	}
-	assert(mem_region_lock.lock_val == 0);
+	assert(skiboot_heap.free_list_lock.lock_val == 0);
 	__free(test_heap, "");
 	return 0;
 }
diff --git a/core/test/run-mem_region_init.c b/core/test/run-mem_region_init.c
index bb606ef..7624057 100644
--- a/core/test/run-mem_region_init.c
+++ b/core/test/run-mem_region_init.c
@@ -177,7 +177,7 @@ int main(void)
 		}
 		assert(mem_check(&skiboot_heap));
 	}
-	assert(mem_region_lock.lock_val == 0);
+	assert(skiboot_heap.free_list_lock.lock_val == 0);
 	real_free(heap);
 	return 0;
 }
diff --git a/include/mem_region.h b/include/mem_region.h
index d6dd296..e5cf0f6 100644
--- a/include/mem_region.h
+++ b/include/mem_region.h
@@ -19,6 +19,8 @@
 #include <ccan/list/list.h>
 #include <stdint.h>
 
+#include <lock.h>
+
 enum mem_region_type {
 	/* ranges allocatable by mem_alloc: this will be most of memory */
 	REGION_SKIBOOT_HEAP,
@@ -41,6 +43,7 @@ struct mem_region {
 	struct dt_node *mem_node;
 	enum mem_region_type type;
 	struct list_head free_list;
+	struct lock free_list_lock;
 };
 
 extern struct lock mem_region_lock;
