
Fix bug 4994: hangs on read(). I have tested the patch extensively on ARM/LT.old.
Thank you, Chase Douglas, for reporting it and for the patch.

Khem Raj 17 years ago
parent commit 6494060312
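
The bug itself is not reproduced in the commit, but the failure mode this change guards against is the classic one: a thread holds a malloc lock at the instant another thread calls fork(), the child inherits that lock in a locked state, and its first allocation blocks forever. A minimal sketch of that scenario (illustrative only, assuming a linuxthreads-style libc without the fork-time locking added below; none of these names come from the patch):

/* Sketch of the failure mode: one thread keeps the allocator lock busy
   while the main thread forks; without atfork-style handling the child
   can deadlock the first time it calls malloc().  Build with -pthread. */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static void *hammer(void *arg)
{
	for (;;)
		free(malloc(64));	/* keeps the malloc lock contended */
	return arg;
}

int main(void)
{
	pthread_t t;
	pid_t pid;
	int i;

	pthread_create(&t, NULL, hammer, NULL);

	for (i = 0; i < 1000; i++) {
		pid = fork();
		if (pid == 0) {
			/* Child: may inherit the malloc lock in a locked
			   state and hang here on an unpatched library.  */
			free(malloc(64));
			_exit(0);
		}
		waitpid(pid, NULL, 0);
	}
	return 0;
}

With the ptfork.c change below, __fork() takes the malloc locks in the parent before calling __libc_fork() and reinitializes them in the child, so the child's allocator always starts in a consistent state.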

+ 1 - 1
libc/stdlib/malloc-simple/alloc.c

@@ -116,7 +116,7 @@ void free(void *ptr)
 #ifdef L_memalign
 
 #include <bits/uClibc_mutex.h>
-__UCLIBC_MUTEX_STATIC(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
 #define __MALLOC_LOCK		__UCLIBC_MUTEX_LOCK(__malloc_lock)
 #define __MALLOC_UNLOCK		__UCLIBC_MUTEX_UNLOCK(__malloc_lock)
 

+ 11 - 11
libc/stdlib/malloc/free.c

@@ -22,7 +22,7 @@ libc_hidden_proto(sbrk)
 #include "heap.h"
 #include "heap.h"
 
 
 static void
 static void
-free_to_heap (void *mem, struct heap *heap)
+free_to_heap (void *mem, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
 {
 {
   size_t size;
   size_t size;
   struct heap_free_area *fa;
   struct heap_free_area *fa;
@@ -39,7 +39,7 @@ free_to_heap (void *mem, struct heap *heap)
   size = MALLOC_SIZE (mem);
   mem = MALLOC_BASE (mem);
 
-  __heap_lock (heap);
+  __pthread_mutex_lock (heap_lock);
 
   /* Put MEM back in the heap, and get the free-area it was placed in.  */
   fa = __heap_free (heap, mem, size);
@@ -48,7 +48,7 @@ free_to_heap (void *mem, struct heap *heap)
      unmapped.  */
   if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
     /* Nope, nothing left to do, just release the lock.  */
-    __heap_unlock (heap);
+    __pthread_mutex_unlock (heap_lock);
   else
     /* Yup, try to unmap FA.  */
     {
@@ -81,7 +81,7 @@ free_to_heap (void *mem, struct heap *heap)
 	  MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
 			start, end, end - start);
 	  __malloc_unlock_sbrk ();
-	  __heap_unlock (heap);
+	  __pthread_mutex_unlock (heap_lock);
 	  return;
 	}
 #endif
@@ -108,7 +108,7 @@ free_to_heap (void *mem, struct heap *heap)
 #ifdef MALLOC_USE_SBRK
 
       /* Release the heap lock; we're still holding the sbrk lock.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
       /* Lower the brk.  */
       sbrk (start - end);
       /* Release the sbrk lock too; now we hold no locks.  */
@@ -172,15 +172,15 @@ free_to_heap (void *mem, struct heap *heap)
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
-	      __heap_unlock (heap);
+              __pthread_mutex_unlock (heap_lock);
 
 	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap);
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
-	      __heap_lock (heap);
+              __pthread_mutex_lock (heap_lock);
 
 #  ifdef __UCLIBC_HAS_THREADS__
 	      /* In a multi-threaded program, it's possible that PREV_MMB has
@@ -213,7 +213,7 @@ free_to_heap (void *mem, struct heap *heap)
 	}
 
       /* Finally release the lock for good.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
 
       MALLOC_MMB_DEBUG_INDENT (-1);
 
@@ -243,7 +243,7 @@ free_to_heap (void *mem, struct heap *heap)
 	}
 
       /* Release the heap lock before we do the system call.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
 
       if (unmap_end > unmap_start)
 	/* Finally, actually unmap the memory.  */
@@ -260,5 +260,5 @@ free_to_heap (void *mem, struct heap *heap)
 void
 free (void *mem)
 {
-  free_to_heap (mem, &__malloc_heap);
+  free_to_heap (mem, __malloc_heap, &__malloc_heap_lock);
 }

+ 16 - 40
libc/stdlib/malloc/heap.h

@@ -29,32 +29,12 @@
 #define HEAP_GRANULARITY	(__alignof__ (HEAP_GRANULARITY_TYPE))
 
 
-/* A heap is a collection of memory blocks, from which smaller blocks
-   of memory can be allocated.  */
-struct heap
-{
-  /* A list of memory in the heap available for allocation.  */
-  struct heap_free_area *free_areas;
-
-#ifdef HEAP_USE_LOCKING
-  /* A lock that can be used by callers to control access to the heap.
-     The heap code _does not_ use this lock, it's merely here for the
-     convenience of users!  */
-  pthread_mutex_t lock;
-#endif
-};
-
 /* The HEAP_INIT macro can be used as a static initializer for a heap
    variable.  The HEAP_INIT_WITH_FA variant is used to initialize a heap
    with an initial static free-area; its argument FA should be declared
    using HEAP_DECLARE_STATIC_FREE_AREA.  */
-#ifdef HEAP_USE_LOCKING
-# define HEAP_INIT 		{ 0, PTHREAD_MUTEX_INITIALIZER }
-# define HEAP_INIT_WITH_FA(fa)	{ &fa._fa, PTHREAD_MUTEX_INITIALIZER }
-#else
-# define HEAP_INIT 		{ 0 }
-# define HEAP_INIT_WITH_FA(fa) 	{ &fa._fa }
-#endif
+# define HEAP_INIT 		0
+# define HEAP_INIT_WITH_FA(fa)	&fa._fa
 
 /* A free-list area `header'.  These are actually stored at the _ends_ of
    free areas (to make allocating from the beginning of the area simpler),
@@ -129,27 +109,23 @@ extern int __heap_debug;
 #endif
 
 /* Output a text representation of HEAP to stderr, labelling it with STR.  */
-extern void __heap_dump (struct heap *heap, const char *str);
+extern void __heap_dump (struct heap_free_area *heap, const char *str);
 
 /* Do some consistency checks on HEAP.  If they fail, output an error
    message to stderr, and exit.  STR is printed with the failure message.  */
-extern void __heap_check (struct heap *heap, const char *str);
-
-
-#define __heap_lock(heap)	__pthread_mutex_lock (&(heap)->lock)
-#define __heap_unlock(heap)	__pthread_mutex_unlock (&(heap)->lock)
+extern void __heap_check (struct heap_free_area *heap, const char *str);
 
 
 /* Delete the free-area FA from HEAP.  */
 static __inline__ void
-__heap_delete (struct heap *heap, struct heap_free_area *fa)
+__heap_delete (struct heap_free_area *heap, struct heap_free_area *fa)
 {
   if (fa->next)
     fa->next->prev = fa->prev;
   if (fa->prev)
     fa->prev->next = fa->next;
   else
-    heap->free_areas = fa->next;
+    heap = fa->next;
 }
 
 
@@ -157,7 +133,7 @@ __heap_delete (struct heap *heap, struct heap_free_area *fa)
    HEAP.  PREV and NEXT may be 0; if PREV is 0, FA is installed as the
    first free-area.  */
 static __inline__ void
-__heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
+__heap_link_free_area (struct heap_free_area *heap, struct heap_free_area *fa,
 		       struct heap_free_area *prev,
 		       struct heap_free_area *next)
 {
@@ -167,7 +143,7 @@ __heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
   if (prev)
     prev->next = fa;
   else
-    heap->free_areas = fa;
+    heap = fa;
   if (next)
     next->prev = fa;
 }
@@ -176,14 +152,14 @@ __heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
    PREV may be 0, in which case FA is installed as the first free-area (but
    FA may not be 0).  */
 static __inline__ void
-__heap_link_free_area_after (struct heap *heap,
+__heap_link_free_area_after (struct heap_free_area *heap,
 			     struct heap_free_area *fa,
 			     struct heap_free_area *prev)
 {
   if (prev)
     prev->next = fa;
   else
-    heap->free_areas = fa;
+    heap = fa;
   fa->prev = prev;
 }
 
@@ -192,7 +168,7 @@ __heap_link_free_area_after (struct heap *heap,
    PREV and NEXT may be 0; if PREV is 0, MEM is installed as the first
    free-area.  */
 static __inline__ struct heap_free_area *
-__heap_add_free_area (struct heap *heap, void *mem, size_t size,
+__heap_add_free_area (struct heap_free_area *heap, void *mem, size_t size,
 		      struct heap_free_area *prev,
 		      struct heap_free_area *next)
 {
@@ -210,7 +186,7 @@ __heap_add_free_area (struct heap *heap, void *mem, size_t size,
 /* Allocate SIZE bytes from the front of the free-area FA in HEAP, and
    return the amount actually allocated (which may be more than SIZE).  */
 static __inline__ size_t
-__heap_free_area_alloc (struct heap *heap,
+__heap_free_area_alloc (struct heap_free_area *heap,
 			struct heap_free_area *fa, size_t size)
 {
   size_t fa_size = fa->size;
@@ -234,16 +210,16 @@ __heap_free_area_alloc (struct heap *heap,
 /* Allocate and return a block at least *SIZE bytes long from HEAP.
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
-extern void *__heap_alloc (struct heap *heap, size_t *size);
+extern void *__heap_alloc (struct heap_free_area *heap, size_t *size);
 
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
-extern size_t __heap_alloc_at (struct heap *heap, void *mem, size_t size);
+extern size_t __heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size);
 
 /* Return the memory area MEM of size SIZE to HEAP.
    Returns the heap free area into which the memory was placed.  */
-extern struct heap_free_area *__heap_free (struct heap *heap,
+extern struct heap_free_area *__heap_free (struct heap_free_area *heap,
 					   void *mem, size_t size);
 
 /* Return true if HEAP contains absolutely no memory.  */
-#define __heap_is_empty(heap) (! (heap)->free_areas)
+#define __heap_is_empty(heap) (! (heap))
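
After this header change a heap is no longer a struct that bundles the free-area list with a pthread_mutex_t; it is simply the head pointer of the free-area list, and serialization becomes the caller's job through a separate malloc_mutex_t, as in the free.c hunk above and the malloc.c hunk below. A self-contained toy model of that convention follows; the names (struct free_area, toy_heap, toy_heap_lock, toy_first_fit) are hypothetical and only mirror the idea, not the uClibc code.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct heap_free_area: a doubly linked free list. */
struct free_area {
	size_t size;
	struct free_area *next;
	struct free_area *prev;
};

/* The "heap" is only the list head; the lock lives beside it, not inside. */
static struct free_area *toy_heap;
static pthread_mutex_t toy_heap_lock = PTHREAD_MUTEX_INITIALIZER;

/* First-fit walk from the head, like `for (fa = heap; fa; fa = fa->next)'. */
static size_t toy_first_fit(size_t size)
{
	struct free_area *fa;
	size_t found = 0;

	pthread_mutex_lock(&toy_heap_lock);	/* caller-managed lock */
	for (fa = toy_heap; fa; fa = fa->next)
		if (fa->size >= size) {
			found = fa->size;
			break;
		}
	pthread_mutex_unlock(&toy_heap_lock);
	return found;
}

int main(void)
{
	struct free_area a = { 32, NULL, NULL };
	struct free_area b = { 128, NULL, &a };

	a.next = &b;
	toy_heap = &a;			/* no wrapper struct, no embedded lock */
	printf("empty=%d first_fit(64)=%zu\n", toy_heap == NULL, toy_first_fit(64));
	return 0;
}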

+ 2 - 2
libc/stdlib/malloc/heap_alloc.c

@@ -20,7 +20,7 @@
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
 void *
-__heap_alloc (struct heap *heap, size_t *size)
+__heap_alloc (struct heap_free_area *heap, size_t *size)
 {
   struct heap_free_area *fa;
   size_t _size = *size;
@@ -36,7 +36,7 @@ __heap_alloc (struct heap *heap, size_t *size)
   HEAP_DEBUG (heap, "before __heap_alloc");
 
   /* Look for a free area that can contain _SIZE bytes.  */
-  for (fa = heap->free_areas; fa; fa = fa->next)
+  for (fa = heap; fa; fa = fa->next)
     if (fa->size >= _size)
       {
 	/* Found one!  */

+ 2 - 2
libc/stdlib/malloc/heap_alloc_at.c

@@ -19,7 +19,7 @@
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
 size_t
-__heap_alloc_at (struct heap *heap, void *mem, size_t size)
+__heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size)
 {
   struct heap_free_area *fa;
   size_t alloced = 0;
@@ -29,7 +29,7 @@ __heap_alloc_at (struct heap *heap, void *mem, size_t size)
   HEAP_DEBUG (heap, "before __heap_alloc_at");
 
   /* Look for a free area that can contain SIZE bytes.  */
-  for (fa = heap->free_areas; fa; fa = fa->next)
+  for (fa = heap; fa; fa = fa->next)
     {
       void *fa_mem = HEAP_FREE_AREA_START (fa);
       if (fa_mem <= mem)

+ 2 - 2
libc/stdlib/malloc/heap_free.c

 @@ -18,7 +18,7 @@
 
 /* Return the block of memory at MEM, of size SIZE, to HEAP.  */
 struct heap_free_area *
-__heap_free (struct heap *heap, void *mem, size_t size)
+__heap_free (struct heap_free_area *heap, void *mem, size_t size)
 {
   struct heap_free_area *fa, *prev_fa;
   void *end = (char *)mem + size;
@@ -32,7 +32,7 @@ __heap_free (struct heap *heap, void *mem, size_t size)
      in the free-list when it becomes fragmented and long.  [A better
      implemention would use a balanced tree or something for the free-list,
      though that bloats the code-size and complexity quite a bit.]  */
-  for (prev_fa = 0, fa = heap->free_areas; fa; prev_fa = fa, fa = fa->next)
+  for (prev_fa = 0, fa = heap; fa; prev_fa = fa, fa = fa->next)
     if (unlikely (HEAP_FREE_AREA_END (fa) >= mem))
       break;
 

+ 11 - 9
libc/stdlib/malloc/malloc.c

@@ -26,7 +26,8 @@ libc_hidden_proto(sbrk)
 /* The malloc heap.  We provide a bit of initial static space so that
    programs can do a little mallocing without mmaping in more space.  */
 HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
-struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
 /* A lock protecting our use of sbrk.  */
@@ -43,12 +44,13 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
    them from the main heap, but that tends to cause heap fragmentation in
    annoying ways.  */
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
-struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
 
 static void *
-malloc_from_heap (size_t size, struct heap *heap)
+malloc_from_heap (size_t size, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
 {
   void *mem;
 
@@ -57,12 +59,12 @@ malloc_from_heap (size_t size, struct heap *heap)
   /* Include extra space to record the size of the allocated block.  */
   size += MALLOC_HEADER_SIZE;
 
-  __heap_lock (heap);
+  __pthread_mutex_lock (heap_lock);
 
   /* First try to get memory that's already in our heap.  */
   mem = __heap_alloc (heap, &size);
 
-  __heap_unlock (heap);
+  __pthread_mutex_unlock (heap_lock);
 
   if (unlikely (! mem))
     /* We couldn't allocate from the heap, so grab some more
@@ -126,7 +128,7 @@ malloc_from_heap (size_t size, struct heap *heap)
 			(long)block, (long)block + block_size, block_size);
 
 	  /* Get back the heap lock.  */
-	  __heap_lock (heap);
+	  __pthread_mutex_lock (heap_lock);
 
 	  /* Put BLOCK into the heap.  */
 	  __heap_free (heap, block, block_size);
@@ -136,7 +138,7 @@ malloc_from_heap (size_t size, struct heap *heap)
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap);
+	  __pthread_mutex_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
@@ -148,7 +150,7 @@ malloc_from_heap (size_t size, struct heap *heap)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -207,7 +209,7 @@ malloc (size_t size)
   if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
     goto oom;
 
-  mem = malloc_from_heap (size, &__malloc_heap);
+  mem = malloc_from_heap (size, __malloc_heap, &__malloc_heap_lock);
   if (unlikely (!mem))
     {
     oom:

+ 5 - 1
libc/stdlib/malloc/malloc.h

 @@ -221,4 +221,8 @@ extern void __malloc_debug_printf (int indent, const char *fmt, ...);
 
 
 /* The malloc heap.  */
-extern struct heap __malloc_heap;
+extern struct heap_free_area *__malloc_heap;
+extern malloc_mutex_t __malloc_heap_lock;
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+extern malloc_mutex_t __malloc_mmb_heap_lock;
+#endif

+ 1 - 1
libc/stdlib/malloc/memalign.c

@@ -36,7 +36,7 @@ memalign (size_t alignment, size_t size)
 {
   void *mem, *base;
   unsigned long tot_addr, tot_end_addr, addr, end_addr;
-  struct heap *heap = &__malloc_heap;
+  struct heap_free_area *heap = __malloc_heap;
 
   /* Make SIZE something we like.  */
   size = HEAP_ADJUST_SIZE (size);

+ 6 - 6
libc/stdlib/malloc/realloc.c

@@ -59,9 +59,9 @@ realloc (void *mem, size_t new_size)
     {
       size_t extra = new_size - size;
 
-      __heap_lock (&__malloc_heap);
-      extra = __heap_alloc_at (&__malloc_heap, base_mem + size, extra);
-      __heap_unlock (&__malloc_heap);
+      __pthread_mutex_lock (&__malloc_heap_lock);
+      extra = __heap_alloc_at (__malloc_heap, base_mem + size, extra);
+      __pthread_mutex_unlock (&__malloc_heap_lock);
 
       if (extra)
 	/* Record the changed size.  */
@@ -82,9 +82,9 @@ realloc (void *mem, size_t new_size)
   else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
     /* Shrink the block.  */
     {
-      __heap_lock (&__malloc_heap);
-      __heap_free (&__malloc_heap, base_mem + new_size, size - new_size);
-      __heap_unlock (&__malloc_heap);
+      __pthread_mutex_lock (&__malloc_heap_lock);
+      __heap_free (__malloc_heap, base_mem + new_size, size - new_size);
+      __pthread_mutex_unlock (&__malloc_heap_lock);
       MALLOC_SET_SIZE (base_mem, new_size);
     }
 

+ 49 - 1
libpthread/linuxthreads.old/ptfork.c

 @@ -20,6 +20,7 @@
 
 #ifdef __ARCH_USE_MMU__
 
+#include <bits/uClibc_mutex.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -36,6 +37,16 @@ static struct handler_list * pthread_atfork_prepare = NULL;
 static struct handler_list * pthread_atfork_parent = NULL;
 static struct handler_list * pthread_atfork_child = NULL;
 
+#ifdef __MALLOC__
+__UCLIBC_MUTEX_EXTERN(__malloc_heap_lock);
+__UCLIBC_MUTEX_EXTERN(__malloc_sbrk_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+__UCLIBC_MUTEX_EXTERN(__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+__UCLIBC_MUTEX_EXTERN(__malloc_lock);
+#endif
+
 static void pthread_insert_list(struct handler_list ** list,
                                 void (*handler)(void),
                                 struct handler_list * newlist,
@@ -78,6 +89,10 @@ static __inline__ void pthread_call_handlers(struct handler_list * list)
   for (/*nothing*/; list != NULL; list = list->next) (list->handler)();
 }
 
+void __pthread_once_fork_prepare(void);
+void __pthread_once_fork_child(void);
+void __pthread_once_fork_parent(void);
+
 extern __typeof(fork) __libc_fork;
 
 pid_t __fork(void) attribute_hidden;
@@ -90,14 +105,47 @@ pid_t __fork(void)
   prepare = pthread_atfork_prepare;
   child = pthread_atfork_child;
   parent = pthread_atfork_parent;
-  __pthread_mutex_unlock(&pthread_atfork_lock);
   pthread_call_handlers(prepare);
+
+  __pthread_once_fork_prepare();
+#ifdef __MALLOC__
+  __pthread_mutex_lock(&__malloc_sbrk_lock);
+  __pthread_mutex_lock(&__malloc_heap_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+  __pthread_mutex_lock(&__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+  __pthread_mutex_lock(&__malloc_lock);
+#endif
+
   pid = __libc_fork();
   if (pid == 0) {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+    __libc_lock_init_recursive(__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+    __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
+#endif
+    __libc_lock_init_adaptive(__malloc_heap_lock);
+    __libc_lock_init(__malloc_sbrk_lock);
+#endif
+    __libc_lock_init_adaptive(pthread_atfork_lock);
     __pthread_reset_main_thread();
     __fresetlockfiles();
+    __pthread_once_fork_child();
     pthread_call_handlers(child);
   } else {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+    __pthread_mutex_unlock(&__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+    __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
+#endif
+    __pthread_mutex_unlock(&__malloc_heap_lock);
+    __pthread_mutex_unlock(&__malloc_sbrk_lock);
+#endif
+    __pthread_mutex_unlock(&pthread_atfork_lock);
+    __pthread_once_fork_parent();
     pthread_call_handlers(parent);
   }
   return pid;
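
The __fork() hunk above applies, inside linuxthreads itself, the same discipline an application would express with pthread_atfork(): acquire the lock before fork(), release it in the parent, and reinitialize it in the child, which starts out single-threaded. A small self-contained illustration of that pattern around an application-level lock (illustrative only, not code from this patch; the names are hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Quiesce the lock before fork() so the child never inherits it locked. */
static void before_fork(void) { pthread_mutex_lock(&big_lock); }
/* Parent: simply drop the lock again and carry on. */
static void in_parent(void)   { pthread_mutex_unlock(&big_lock); }
/* Child: it is single-threaded now, so reinitialize rather than unlock. */
static void in_child(void)    { pthread_mutex_init(&big_lock, NULL); }

int main(void)
{
	pid_t pid;

	pthread_atfork(before_fork, in_parent, in_child);

	pid = fork();
	if (pid == 0) {
		pthread_mutex_lock(&big_lock);	/* safe: freshly initialized */
		pthread_mutex_unlock(&big_lock);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	puts("parent continues with a consistent lock");
	return 0;
}

The patch does the equivalent for __malloc_heap_lock, __malloc_sbrk_lock and, on broken-munmap targets, __malloc_mmb_heap_lock, except that it locks and reinitializes them directly inside __fork() rather than registering handlers.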

+ 31 - 7
libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h

@@ -30,7 +30,7 @@
 /* Mutex type.  */
 #if defined(_LIBC) || defined(_IO_MTSAFE_IO)
 typedef pthread_mutex_t __libc_lock_t;
-typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
+typedef pthread_mutex_t __libc_lock_recursive_t;
 # ifdef __USE_UNIX98
 typedef pthread_rwlock_t __libc_rwlock_t;
 # else
@@ -132,15 +132,39 @@ typedef pthread_key_t __libc_key_t;
 #define __libc_rwlock_init(NAME) \
   (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
 
+/* Same as last but this time we initialize an adaptive mutex.  */
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
+#define __libc_lock_init_adaptive(NAME) \
+  ({									      \
+    (NAME).__m_count = 0;						      \
+    (NAME).__m_owner = NULL;						      \
+    (NAME).__m_kind = PTHREAD_MUTEX_ADAPTIVE_NP;			      \
+    (NAME).__m_lock.__status = 0;					      \
+    (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT;			      \
+    0; })
+#else
+#define __libc_lock_init_adaptive(NAME) \
+  do {									      \
+    if (__pthread_mutex_init != NULL)					      \
+      {									      \
+	pthread_mutexattr_t __attr;					      \
+	__pthread_mutexattr_init (&__attr);				      \
+	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_ADAPTIVE_NP);     \
+	__pthread_mutex_init (&(NAME), &__attr);			      \
+	__pthread_mutexattr_destroy (&__attr);				      \
+      }									      \
+  } while (0);
+#endif
+
 /* Same as last but this time we initialize a recursive mutex.  */
 #if defined _LIBC && !defined NOT_IN_libc && defined SHARED
 #define __libc_lock_init_recursive(NAME) \
   ({									      \
-    (NAME).mutex.__m_count = 0;						      \
-    (NAME).mutex.__m_owner = NULL;					      \
-    (NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP;			      \
-    (NAME).mutex.__m_lock.__status = 0;					      \
-    (NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT;		      \
+    (NAME).__m_count = 0;						      \
+    (NAME).__m_owner = NULL;					      \
+    (NAME).__m_kind = PTHREAD_MUTEX_RECURSIVE_NP;			      \
+    (NAME).__m_lock.__status = 0;					      \
+    (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT;		      \
     0; })
 #else
 #define __libc_lock_init_recursive(NAME) \
@@ -150,7 +174,7 @@ typedef pthread_key_t __libc_key_t;
 	pthread_mutexattr_t __attr;					      \
 	__pthread_mutexattr_init (&__attr);				      \
 	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
-	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
+	__pthread_mutex_init (&(NAME), &__attr);			      \
 	__pthread_mutexattr_destroy (&__attr);				      \
       }									      \
   } while (0);
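
For reference, the fallback branch of the new __libc_lock_init_adaptive macro is just the public mutex-attribute sequence for PTHREAD_MUTEX_ADAPTIVE_NP (a GNU/linuxthreads extension). Written as ordinary application code, under the assumption that the extension is available, it looks roughly like the sketch below (illustrative only, not uClibc source; init_adaptive_lock is a hypothetical name):

#define _GNU_SOURCE
#include <pthread.h>

static pthread_mutex_t lock;

int init_adaptive_lock(void)
{
	pthread_mutexattr_t attr;
	int rc;

	if (pthread_mutexattr_init(&attr) != 0)
		return -1;
	/* Request the adaptive mutex kind, as the macro's fallback branch does. */
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	if (rc == 0)
		rc = pthread_mutex_init(&lock, &attr);
	pthread_mutexattr_destroy(&attr);
	return rc;
}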