[PATCH] ioremap kmallocs inside spinlocks - review request

John Rose johnrose at austin.ibm.com
Wed Jan 28 05:06:54 EST 2004


Anton, Team-

Good catch — there are two such uses of kmalloc in that file.  Thoughts
on the patch below?  The vm_structs that might be required are
preallocated inside im_get_area() and im_get_free_area(), so that the
kmalloc calls are moved outside the spinlock.

The nastiness of this patch is that zero, one, or both of the allocated
vm_structs are freed in different functions from the ones in which they
were allocated.  The other option is to report back to im_get_area() and
im_get_free_area() about which of the new regions were actually used,
and free the unused ones there.  I think I like my way better than that.
OR the easiest option is a one-line change within the spinlock from
GFP_KERNEL to GFP_ATOMIC.  :)  But I understand that this is
discouraged.

Comments welcome, thanks-
John

diff -Nru a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
--- a/arch/ppc64/mm/imalloc.c	Tue Jan 27 11:55:38 2004
+++ b/arch/ppc64/mm/imalloc.c	Tue Jan 27 11:55:38 2004
@@ -102,112 +102,111 @@
 }

 static struct vm_struct * split_im_region(unsigned long v_addr,
-		unsigned long size, struct vm_struct *parent)
+		unsigned long size, struct vm_struct *parent,
+		struct vm_struct *new_vm1, struct vm_struct *new_vm2)
 {
-	struct vm_struct *vm1 = NULL;
-	struct vm_struct *vm2 = NULL;
-	struct vm_struct *new_vm = NULL;
+	struct vm_struct *requested_vm = NULL;

-	vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
-	if (vm1	== NULL) {
-		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-		return NULL;
-	}
-
 	if (v_addr == (unsigned long) parent->addr) {
-	        /* Use existing parent vm_struct to represent child, allocate
-		 * new one for the remainder of parent range
+	        /* Use existing parent vm_struct to represent child, use
+		 * first new one for the remainder of parent range, free
+		 * second new one
 		 */
-		vm1->size = parent->size - size;
-		vm1->addr = (void *) (v_addr + size);
-		vm1->next = parent->next;
+		new_vm1->size = parent->size - size;
+		new_vm1->addr = (void *) (v_addr + size);
+		new_vm1->next = parent->next;

 		parent->size = size;
-		parent->next = vm1;
-		new_vm = parent;
+		parent->next = new_vm1;
+		requested_vm = parent;
+
+		kfree(new_vm2);
 	} else if (v_addr + size == (unsigned long) parent->addr +
 			parent->size) {
-		/* Allocate new vm_struct to represent child, use existing
-		 * parent one for remainder of parent range
+		/* Use first new vm_struct to represent child, use existing
+		 * parent one for remainder of parent range, free second new
+		 * vm_struct
 		 */
-		vm1->size = size;
-		vm1->addr = (void *) v_addr;
-		vm1->next = parent->next;
-		new_vm = vm1;
+		new_vm1->size = size;
+		new_vm1->addr = (void *) v_addr;
+		new_vm1->next = parent->next;
+		requested_vm = new_vm1;

 		parent->size -= size;
-		parent->next = vm1;
+		parent->next = new_vm1;
+
+		kfree(new_vm2);
 	} else {
-	        /* Allocate two new vm_structs for the new child and
+	        /* Use two new vm_structs for the new child and
 		 * uppermost remainder, and use existing parent one for the
 		 * lower remainder of parent range
 		 */
-		vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
-		if (vm2 == NULL) {
-			printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-			kfree(vm1);
-			return NULL;
-		}
-
-		vm1->size = size;
-		vm1->addr = (void *) v_addr;
-		vm1->next = vm2;
-		new_vm = vm1;
-
-		vm2->size = ((unsigned long) parent->addr + parent->size) -
-				(v_addr + size);
-		vm2->addr = (void *) v_addr + size;
-		vm2->next = parent->next;
+
+		new_vm1->size = size;
+		new_vm1->addr = (void *) v_addr;
+		new_vm1->next = new_vm2;
+		requested_vm = new_vm1;
+
+		new_vm2->size = ((unsigned long) parent->addr + parent->size)
+			 		- (v_addr + size);
+		new_vm2->addr = (void *) v_addr + size;
+		new_vm2->next = parent->next;

 		parent->size = v_addr - (unsigned long) parent->addr;
-		parent->next = vm1;
+		parent->next = new_vm1;
 	}

-	return new_vm;
+	return requested_vm;
 }

 static struct vm_struct * __add_new_im_area(unsigned long req_addr,
-					    unsigned long size)
+					    unsigned long size,
+					    struct vm_struct *new_vm)
 {
-	struct vm_struct **p, *tmp, *area;
+	struct vm_struct **p, *tmp;

 	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
 		if (req_addr + size <= (unsigned long)tmp->addr)
 			break;
 	}

-	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
-	if (!area)
-		return NULL;
-	area->flags = 0;
-	area->addr = (void *)req_addr;
-	area->size = size;
-	area->next = *p;
-	*p = area;
+	new_vm->flags = 0;
+	new_vm->addr = (void *)req_addr;
+	new_vm->size = size;
+	new_vm->next = *p;
+	*p = new_vm;

-	return area;
+	return new_vm;
 }

 static struct vm_struct * __im_get_area(unsigned long req_addr,
 					unsigned long size,
-					int criteria)
+					int criteria,
+					struct vm_struct *new_vm1,
+					struct vm_struct *new_vm2)
 {
 	struct vm_struct *tmp;
 	int status;

 	status = im_region_status(req_addr, size, &tmp);
 	if ((criteria & status) == 0) {
+		kfree(new_vm1);
+		kfree(new_vm2);
 		return NULL;
 	}

 	switch (status) {
 	case IM_REGION_UNUSED:
-		tmp = __add_new_im_area(req_addr, size);
+		tmp = __add_new_im_area(req_addr, size, new_vm1);
+		kfree(new_vm2);
 		break;
 	case IM_REGION_SUBSET:
-		tmp = split_im_region(req_addr, size, tmp);
+		tmp = split_im_region(req_addr, size, tmp, new_vm1,
+					new_vm2);
 		break;
 	case IM_REGION_EXISTS:
+		kfree(new_vm1);
+		kfree(new_vm2);
 		break;
 	default:
 		printk(KERN_ERR "%s() unexpected imalloc region status\n",
@@ -221,8 +220,27 @@
 struct vm_struct * im_get_free_area(unsigned long size)
 {
 	struct vm_struct *area;
+	struct vm_struct *new_vm1;
+	struct vm_struct *new_vm2;
 	unsigned long addr;

+	/* Allocate new vm_structs here to avoid kmalloc inside spinlock.
+	 * If not used, these will be freed in __im_get_area() or
+	 * split_im_region().
+	 */
+	new_vm1 = (struct vm_struct *) kmalloc(sizeof(*new_vm1), GFP_KERNEL);
+	if (!new_vm1) {
+		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
+		return NULL;
+	}
+
+	new_vm2 = (struct vm_struct *) kmalloc(sizeof(*new_vm2), GFP_KERNEL);
+	if (!new_vm2) {
+		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
+		kfree(new_vm1);
+		return NULL;
+	}
+
 	write_lock(&imlist_lock);
 	if (get_free_im_addr(size, &addr)) {
 		printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
@@ -231,7 +249,7 @@
 		goto next_im_done;
 	}

-	area = __im_get_area(addr, size, IM_REGION_UNUSED);
+	area = __im_get_area(addr, size, IM_REGION_UNUSED, new_vm1, new_vm2);
 	if (area == NULL) {
 		printk(KERN_ERR
 		       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
@@ -246,9 +264,28 @@
 		int criteria)
 {
 	struct vm_struct *area;
+	struct vm_struct *new_vm1;
+	struct vm_struct *new_vm2;
+
+	/* Allocate new vm_structs here to avoid kmalloc inside spinlock.
+	 * If not used, these will be freed in __im_get_area() or
+	 * split_im_region().
+	 */
+	new_vm1 = (struct vm_struct *) kmalloc(sizeof(*new_vm1), GFP_KERNEL);
+	if (!new_vm1) {
+		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
+		return NULL;
+	}
+
+	new_vm2 = (struct vm_struct *) kmalloc(sizeof(*new_vm2), GFP_KERNEL);
+	if (!new_vm2) {
+		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
+		kfree(new_vm1);
+		return NULL;
+	}

 	write_lock(&imlist_lock);
-	area = __im_get_area(v_addr, size, criteria);
+	area = __im_get_area(v_addr, size, criteria, new_vm1, new_vm2);
 	write_unlock(&imlist_lock);
 	return area;
 }


** Sent via the linuxppc64-dev mail list. See http://lists.linuxppc.org/





More information about the Linuxppc64-dev mailing list