malloc: fix race condition and other bugs in the no-mmu malloc

Fixes multiple race conditions on the mmb list. This is done by
making the mmb_heap_lock a recursive lock and extending the
regular heap_lock to cover the mmb heap handling.
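
For context: the malloc.c hunk below switches __malloc_mmb_heap_lock to the
GNU recursive-mutex initializer. A recursive lock lets the thread that already
holds it acquire it again, which the message suggests is needed once mmb heap
operations can re-enter their own lock (for instance, if allocating an mmb
descriptor forces the mmb heap itself to grow and record another block). A
minimal standalone sketch of that behaviour, using plain glibc-style pthreads
rather than uClibc internals:

    /* Sketch only, not uClibc code: the same thread re-acquires a
       recursive mutex where an ordinary mutex would deadlock.  */
    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

    static void inner (void)
    {
      pthread_mutex_lock (&lock);   /* second acquisition by the same thread */
      puts ("re-entered");
      pthread_mutex_unlock (&lock);
    }

    int main (void)
    {
      pthread_mutex_lock (&lock);   /* first acquisition */
      inner ();                     /* fine here; deadlocks with a normal mutex */
      pthread_mutex_unlock (&lock);
      return 0;
    }

Built with -pthread, the program prints "re-entered" and exits instead of
deadlocking in inner().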

Also moves the new_mmb allocation up, before the mmb list is
walked to find the insertion point. Previously, if the mmb heap
itself ran out and had to be extended while the record for a
just-extended regular heap was being inserted, the mmb list could
end up corrupted.
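
The fix reorders the steps: allocate the descriptor first, then walk the list,
so that any list insertions triggered by the allocation itself happen before
the insertion point is captured. A simplified sketch of that pattern, with
hypothetical type and helper names rather than the uClibc sources:

    #include <stdlib.h>

    struct mmb { struct mmb *next; void *mem; size_t size; };

    static struct mmb *mmapped_blocks;   /* kept sorted by ->mem */

    /* Stand-in for allocating from the descriptor heap; in the real code
       this call may itself grow that heap and insert a record into the
       list below, so it must run before the list is walked.  */
    static struct mmb *alloc_descriptor (void)
    {
      return malloc (sizeof (struct mmb));
    }

    static void record_block (void *block, size_t block_size)
    {
      struct mmb *prev_mmb, *m;
      struct mmb *new_mmb = alloc_descriptor ();   /* allocate first */

      for (prev_mmb = 0, m = mmapped_blocks; m; prev_mmb = m, m = m->next)
        if ((char *) block < (char *) m->mem)
          break;

      new_mmb->next = m;
      new_mmb->mem = block;
      new_mmb->size = block_size;
      if (prev_mmb)
        prev_mmb->next = new_mmb;
      else
        mmapped_blocks = new_mmb;
    }

    int main (void)
    {
      record_block (malloc (32), 32);
      record_block (malloc (64), 64);
      return 0;
    }

In the patched malloc.c the same reordering appears with
__malloc_mmapped_blocks and malloc_from_heap on __malloc_mmb_heap.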

Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>

Freeman Wang, 15 years ago
commit 23528282b7

 libc/stdlib/malloc/free.c   | 6 +++---
 libc/stdlib/malloc/malloc.c | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -177,14 +177,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 	      /* Start searching again from the end of this block.  */
 	      start = mmb_end;
 
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
 	      __heap_unlock (heap_lock);
 
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 

diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -46,7 +46,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -149,19 +149,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -175,6 +175,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb,
 			    (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);
 	}
     }
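
Reading the two malloc.c hunks together, the broken-munmap branch now keeps
heap_lock held across the whole mmb bookkeeping step and releases it only
after the new record is linked in. Roughly, abridged from the hunks above
with unrelated context elided:

    	  /* heap_lock is already held at this point.  */
    	  mem = __heap_alloc (heap, &size);

    #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
    	  /* Allocate the descriptor first; only then find where it goes.  */
    	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);

    	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
    	       mmb;
    	       prev_mmb = mmb, mmb = mmb->next)
    	    if (block < mmb->mem)
    	      break;

    	  new_mmb->next = mmb;
    	  new_mmb->mem = block;
    	  new_mmb->size = block_size;
    	  /* ... link new_mmb into __malloc_mmapped_blocks ... */
    #endif
    	  __heap_unlock (heap_lock);   /* moved after the mmb bookkeeping */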