
* Add support for uClinux's broken munmap, contingent on
__UCLIBC_UCLINUX_BROKEN_MUNMAP__ (which is currently not defined anywhere).
This makes other cases a tiny bit less efficient too.
* Move the malloc lock into the heap structure (locking is still done
at the malloc level though, not by the heap functions).
* Initialize the malloc heap to contain a tiny initial static free-area so
that programs that do only a little allocation never need to call mmap
(see the sketch after this list).
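
A minimal sketch of the new initialization pattern for that last point, using
the macros this commit adds (the 256-byte size is simply the value malloc.c
below picks, not a requirement):

    /* Reserve a small static block and hand it to the heap as its first
       free area, so trivial programs never need to mmap (or sbrk) at all.  */
    HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
    struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);

When HEAP_USE_LOCKING is defined, HEAP_INIT_WITH_FA also statically
initializes the mutex that now lives inside struct heap.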

Miles Bader, 21 years ago (commit 306eedf9ae)
4 changed files with 264 additions and 29 deletions:
  1. libc/stdlib/malloc/free.c (+110 -10)
  2. libc/stdlib/malloc/heap.h (+45 -1)
  3. libc/stdlib/malloc/malloc.c (+68 -18)
  4. libc/stdlib/malloc/malloc.h (+41 -0)

libc/stdlib/malloc/free.c (+110 -10)

@@ -19,12 +19,11 @@
 #include "heap.h"
 
 
-void
-free (void *mem)
+static void
+free_to_heap (void *mem, struct heap *heap)
 {
   size_t size;
   struct heap_free_area *fa;
-  struct heap *heap = &__malloc_heap;
 
   /* Check for special cases.  */
   if (unlikely (! mem))
@@ -38,7 +37,7 @@ free (void *mem)
   size = MALLOC_SIZE (mem);
   mem = MALLOC_BASE (mem);
 
-  __malloc_lock ();
+  __heap_lock (heap);
 
   /* Put MEM back in the heap, and get the free-area it was placed in.  */
   fa = __heap_free (heap, mem, size);
@@ -47,15 +46,20 @@ free (void *mem)
      unmapped.  */
   if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
     /* Nope, nothing left to do, just release the lock.  */
-    __malloc_unlock ();
+    __heap_unlock (heap);
   else
     /* Yup, try to unmap FA.  */
     {
       unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
       unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
 #ifndef MALLOC_USE_SBRK
+# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+      struct malloc_mmb *mmb, *prev_mmb;
+      unsigned long mmb_start, mmb_end;
+# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
       unsigned long unmap_start, unmap_end;
-#endif
+# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+#endif /* !MALLOC_USE_SBRK */
 
 #ifdef MALLOC_USE_SBRK
       /* Get the sbrk lock so that the two possible calls to sbrk below
@@ -75,7 +79,7 @@ free (void *mem)
 	  MALLOC_DEBUG ("  not unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
 			start, end, end - start);
 	  __malloc_unlock_sbrk ();
-	  __malloc_unlock ();
+	  __heap_unlock (heap);
 	  return;
 	}
 #endif
@@ -102,7 +106,7 @@ free (void *mem)
 #ifdef MALLOC_USE_SBRK
 
       /* Release the main lock; we're still holding the sbrk lock.  */
-      __malloc_unlock ();
+      __heap_unlock (heap);
       /* Lower the brk.  */
       sbrk (start - end);
       /* Release the sbrk lock too; now we hold no locks.  */
@@ -110,6 +114,94 @@ free (void *mem)
 
 #else /* !MALLOC_USE_SBRK */
 
+# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+      /* With uClinux's broken munmap we may only munmap blocks exactly
+	 as we got them from mmap, so scan through our list of mmapped
+	 blocks and unmap, in address order, those that lie inside the
+	 region being freed.  */
+
+      MALLOC_MMB_DEBUG ("  walking mmb list for region 0x%x[%d]...\n", start, end - start);
+
+      prev_mmb = 0;
+      mmb = __malloc_mmapped_blocks;
+      while (mmb
+	     && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
+		 <= end))
+	{
+	  MALLOC_MMB_DEBUG ("    considering mmb at 0x%x: 0x%x[%d]\n",
+			    (unsigned)mmb, mmb_start, mmb_end - mmb_start);
+
+	  if (mmb_start >= start
+	      /* If the space between START and MMB_START is non-zero, but
+		 too small to return to the heap, we can't unmap MMB.  */
+	      && (start == mmb_start
+		  || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
+	    {
+	      struct malloc_mmb *next_mmb = mmb->next;
+
+	      if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
+		/* There's too little space left at the end to deallocate
+		   this block, so give up.  */
+		break;
+
+	      MALLOC_MMB_DEBUG ("      unmapping mmb at 0x%x: 0x%x[%d]\n",
+				(unsigned)mmb, mmb_start, mmb_end - mmb_start);
+
+	      if (mmb_start != start)
+		/* We're going to unmap a part of the heap that begins after
+		   start, so put the intervening region back into the heap.  */
+		{
+		  MALLOC_MMB_DEBUG ("        putting intervening region back into heap: 0x%x[%d]\n",
+				    start, mmb_start - start);
+		  __heap_free (heap, (void *)start, mmb_start - start);
+		}
+
+	      /* Unlink MMB from the list.  */
+	      if (prev_mmb)
+		prev_mmb->next = next_mmb;
+	      else
+		__malloc_mmapped_blocks = next_mmb;
+
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap);
+
+	      /* Do the actual munmap.  */
+	      __heap_unlock (heap);
+	      munmap ((void *)mmb_start, mmb_end - mmb_start);
+	      __heap_lock (heap);
+
+	      /* Start searching again from the end of that block.  */
+	      start = mmb_end;
+
+#  ifdef __UCLIBC_HAS_THREADS__
+	      /* In a multi-threaded program, it's possible that PREV_MMB has
+		 been invalidated by another thread when we released the
+		 heap lock to do the munmap system call, so just start over
+		 from the beginning of the list.  It sucks, but oh well;
+		 it's probably not worth the bother to do better.  */
+	      prev_mmb = 0;
+	      mmb = __malloc_mmapped_blocks;
+#  else
+	      mmb = next_mmb;
+#  endif
+	    }
+	  else
+	    {
+	      prev_mmb = mmb;
+	      mmb = mmb->next;
+	    }
+	}
+
+      if (start != end)
+	/* Hmm, well there's something we couldn't unmap, so put it back
+	   into the heap.  */
+	{
+	  MALLOC_MMB_DEBUG ("    putting tail region back into heap: 0x%x[%d]\n",
+			    start, end - start);
+	  __heap_free (heap, (void *)start, end - start);
+	}
+
+# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
       /* MEM/LEN may not be page-aligned, so we have to page-align them,
 	 and return any left-over bits on the end to the heap.  */
       unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
@@ -133,13 +225,21 @@ free (void *mem)
 	  __heap_free (heap, (void *)unmap_end, end - unmap_end);
 	}
 
-      /* Release the malloc lock before we do the system call.  */
-      __malloc_unlock ();
+      /* Release the heap lock before we do the system call.  */
+      __heap_unlock (heap);
 
       if (unmap_end > unmap_start)
 	/* Finally, actually unmap the memory.  */
 	munmap ((void *)unmap_start, unmap_end - unmap_start);
 
+# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
 #endif /* MALLOC_USE_SBRK */
     }
 }
+
+void
+free (void *mem)
+{
+  free_to_heap (mem, &__malloc_heap);
+}
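
For readers unfamiliar with the kernel limitation that drives the mmb-list
walk above, the constraint is roughly as follows. This is an illustrative,
hypothetical fragment, not code from this commit; on a uClinux kernel with
the broken munmap, only a region exactly matching an earlier mmap can be
unmapped:

    #include <sys/mman.h>
    #include <unistd.h>

    static void
    broken_munmap_example (void)
    {
      size_t pg = getpagesize ();
      void *blk = mmap (0, 4 * pg, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_ANONYMOUS, -1, 0);

      munmap ((char *)blk + pg, pg);  /* partial unmap: rejected on such kernels */
      munmap (blk, 4 * pg);           /* exact original region: accepted */
    }

This is why free_to_heap keeps whole mmapped blocks intact and returns any
unreleasable head or tail pieces to the heap instead of trimming them with a
partial munmap.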

libc/stdlib/malloc/heap.h (+45 -1)

@@ -14,6 +14,13 @@
 #include <features.h>
 
 
+/* On multi-threaded systems, the heap includes a lock.  */
+#ifdef __UCLIBC_HAS_THREADS__
+# include <pthread.h>
+# define HEAP_USE_LOCKING
+#endif
+
+
 /* The heap allocates in multiples of, and aligned to, HEAP_GRANULARITY.
    HEAP_GRANULARITY must be a power of 2.  Malloc depends on this being the
    same as MALLOC_ALIGNMENT.  */
@@ -26,9 +33,26 @@ struct heap
 {
   /* A list of memory in the heap available for allocation.  */
   struct heap_free_area *free_areas;
+
+#ifdef HEAP_USE_LOCKING
+  /* A lock that can be used by callers to control access to the heap.
+     The heap code _does not_ use this lock, it's merely here for the
+     convenience of users!  */
+  pthread_mutex_t lock;
+#endif
 };
-#define HEAP_INIT 	{ 0 }
 
+/* The HEAP_INIT macro can be used as a static initializer for a heap
+   variable.  The HEAP_INIT_WITH_FA variant is used to initialize a heap
+   with an initial static free-area; its argument FA should be declared
+   using HEAP_DECLARE_STATIC_FREE_AREA.  */
+#ifdef HEAP_USE_LOCKING
+# define HEAP_INIT 		{ 0, PTHREAD_MUTEX_INITIALIZER }
+# define HEAP_INIT_WITH_FA(fa)	{ &fa._fa, PTHREAD_MUTEX_INITIALIZER }
+#else
+# define HEAP_INIT 		{ 0 }
+# define HEAP_INIT_WITH_FA(fa) 	{ &fa._fa }
+#endif
 
 /* A free-list area `header'.  These are actually stored at the _ends_ of
    free areas (to make allocating from the beginning of the area simpler),
@@ -47,6 +71,16 @@ struct heap_free_area
 /* Return the size of the free area FA.  */
 #define HEAP_FREE_AREA_SIZE(fa) ((fa)->size)
 
+/* This rather clumsy macro allows one to declare a static free-area for
+   passing to the HEAP_INIT_WITH_FA initializer macro; that is the only
+   use to which NAME should be put.  */
+#define HEAP_DECLARE_STATIC_FREE_AREA(name, size)			\
+  static struct								\
+  {									\
+    char space[(size) - sizeof (struct heap_free_area)];		\
+    struct heap_free_area _fa;						\
+  } name = { "", { (size), 0, 0 } }
+
 
 /* Rounds SZ up to be a multiple of HEAP_GRANULARITY.  */
 #define HEAP_ADJUST_SIZE(sz)  \
@@ -97,6 +131,16 @@ extern void __heap_dump (struct heap *heap, const char *str);
 extern void __heap_check (struct heap *heap, const char *str);
 
 
+#ifdef HEAP_USE_LOCKING
+# define __heap_lock(heap)	pthread_mutex_lock (&(heap)->lock)
+# define __heap_unlock(heap)	pthread_mutex_unlock (&(heap)->lock)
+#else /* !HEAP_USE_LOCKING */
+/* Without threads, the lock operations are no-ops.  */
+# define __heap_lock(heap)	(void)0
+# define __heap_unlock(heap)	(void)0
+#endif /* HEAP_USE_LOCKING */
+
+
 /* Delete the free-area FA from HEAP.  */
 extern inline void
 __heap_delete (struct heap *heap, struct heap_free_area *fa)
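
Because the comment above stresses that the heap primitives never take the
new in-structure lock themselves, the caller-side convention may be worth a
sketch. This is illustrative only; __heap_alloc's pointer-to-size signature
matches its use in the malloc.c hunk below, while alloc_locked is a
hypothetical wrapper:

    /* The caller brackets the unlocked heap primitive with the heap's own
       lock; with no threads the two macros expand to nothing.  */
    static void *
    alloc_locked (struct heap *heap, size_t size)
    {
      void *mem;

      __heap_lock (heap);
      mem = __heap_alloc (heap, &size);  /* may return 0 if nothing fits */
      __heap_unlock (heap);

      return mem;
    }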

libc/stdlib/malloc/malloc.c (+68 -18)

@@ -19,35 +19,49 @@
 #include "heap.h"
 
 
-/* The malloc heap.  */
-struct heap __malloc_heap = HEAP_INIT;
+/* The malloc heap.  We provide a bit of initial static space so that
+   programs can do a little mallocing without having to mmap more space.  */
+HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
+struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
 
-#ifdef MALLOC_USE_LOCKING
-/* A lock protecting the malloc heap.  */
-malloc_mutex_t __malloc_lock;
-# ifdef MALLOC_USE_SBRK
+#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
 /* A lock protecting our use of sbrk.  */
 malloc_mutex_t __malloc_sbrk_lock;
-# endif /* MALLOC_USE_SBRK */
-#endif /* MALLOC_USE_LOCKING */
+#endif /* MALLOC_USE_LOCKING && MALLOC_USE_SBRK */
 
 #ifdef MALLOC_DEBUGGING
 int __malloc_debug = 0;
 #endif
 
 
-void *
-malloc (size_t size)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+/* A list of all malloc_mmb structures describing blocks that
+   malloc has mmapped, ordered by the block address.  */
+struct malloc_mmb *__malloc_mmapped_blocks = 0;
+
+/* A heap used for allocating malloc_mmb structures.  We could allocate
+   them from the main heap, but that tends to cause heap fragmentation in
+   annoying ways.  */
+HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
+struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+
+# ifdef MALLOC_MMB_DEBUGGING
+int __malloc_mmb_debug = 0;
+# endif
+#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
+
+static void *
+malloc_from_heap (size_t size, struct heap *heap)
 {
   void *mem;
-  struct heap *heap = &__malloc_heap;
 
   MALLOC_DEBUG ("malloc: %d bytes\n", size);
 
   /* Include extra space to record the size of the allocated block.  */
   size += MALLOC_HEADER_SIZE;
 
-  __malloc_lock ();
+  __heap_lock (heap);
 
   /* First try to get memory that's already in our heap.  */
   mem = __heap_alloc (heap, &size);
@@ -65,14 +79,14 @@ malloc (size_t size)
 	   : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
 
 #ifdef MALLOC_USE_SBRK
-      /* Get the sbrk lock while we've still got the main lock.  */
+      /* Get the sbrk lock while we've still got the heap lock.  */
       __malloc_lock_sbrk ();
 #endif
 
-      /* Don't hold the main lock during the syscall, so that small
+      /* Don't hold the heap lock during the syscall, so that small
 	 allocations in a different thread may succeed while we're
 	 blocked.  */
-      __malloc_unlock ();
+      __heap_unlock (heap);
 
       /* Allocate the new heap block.  */
 #ifdef MALLOC_USE_SBRK
@@ -106,23 +120,53 @@ malloc (size_t size)
 
 #endif /* MALLOC_USE_SBRK */
 
-      /* Get back the main lock.  */
-      __malloc_lock ();
+      /* Get back the heap lock.  */
+      __heap_lock (heap);
 
       if (likely (block != (void *)-1))
 	{
+#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
+	  struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
+#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
 	  MALLOC_DEBUG ("  adding memory: 0x%lx - 0x%lx (%d bytes)\n",
 			(long)block, (long)block + block_size, block_size);
 
 	  /* Put BLOCK into the heap.  */
 	  __heap_free (heap, block, block_size);
 
+#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
+	  /* Insert a record of this allocation in sorted order into the
+	     __malloc_mmapped_blocks list.  */
+
+	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
+	       mmb;
+	       prev_mmb = mmb, mmb = mmb->next)
+	    if (block < mmb->mem)
+	      break;
+
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+	  new_mmb->next = mmb;
+	  new_mmb->mem = block;
+	  new_mmb->size = block_size;
+
+	  MALLOC_MMB_DEBUG ("  new mmb at 0x%x: 0x%x[%d]\n",
+			    (unsigned)new_mmb,
+			    (unsigned)new_mmb->mem, block_size);
+
+	  if (prev_mmb)
+	    prev_mmb->next = new_mmb;
+	  else
+	    __malloc_mmapped_blocks = new_mmb;
+
+#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 	}
     }
 
-  __malloc_unlock ();
+  __heap_unlock (heap);
 
   if (likely (mem))
     /* Record the size of the block and get the user address.  */
@@ -135,3 +179,9 @@ malloc (size_t size)
 
   return mem;
 }
+
+void *
+malloc (size_t size)
+{
+  return malloc_from_heap (size, &__malloc_heap);
+}
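
The insertion loop added above keeps __malloc_mmapped_blocks ordered by block
address so that free_to_heap can walk it front to back. Reduced to its
essentials it is an ordinary sorted insert into a singly linked list (a
stand-alone sketch; record_mmapped_block is a hypothetical helper, not part
of this commit):

    #include <stddef.h>

    /* Mirrors struct malloc_mmb from malloc.h below.  */
    struct malloc_mmb { void *mem; size_t size; struct malloc_mmb *next; };

    /* Insert NEW_MMB into the address-ordered list rooted at *HEAD.  */
    static void
    record_mmapped_block (struct malloc_mmb **head, struct malloc_mmb *new_mmb)
    {
      struct malloc_mmb **p = head;

      while (*p && (*p)->mem < new_mmb->mem)
        p = &(*p)->next;

      new_mmb->next = *p;
      *p = new_mmb;
    }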

libc/stdlib/malloc/malloc.h (+41 -0)

@@ -48,6 +48,47 @@
 #endif
 
 
+/* The current implementation of munmap in uClinux doesn't work correctly:
+   it requires that every call to munmap exactly match a corresponding call
+   to mmap (that is, it doesn't allow you to unmap only part of a
+   previously allocated block, or to unmap two contiguous blocks with a
+   single call to munmap).  This behavior is broken, and uClinux should be
+   fixed; however, until it is, we add code to work around the problem in
+   malloc.  */
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+
+/* A structure recording a block of memory mmapped by malloc.  */
+struct malloc_mmb
+{
+  void *mem;			/* the mmapped block */
+  size_t size;			/* its size */
+  struct malloc_mmb *next;
+};
+
+/* A list of all malloc_mmb structures describing blocks that malloc has
+   mmapped, ordered by the block address.  */
+extern struct malloc_mmb *__malloc_mmapped_blocks;
+
+/* A heap used for allocating malloc_mmb structures.  We could allocate
+   them from the main heap, but that tends to cause heap fragmentation in
+   annoying ways.  */
+extern struct heap __malloc_mmb_heap;
+
+/* Define MALLOC_MMB_DEBUGGING to cause malloc to emit debugging info about
+   mmap block allocation/freeing by the `uclinux broken munmap' code to
+   stderr, when the variable __malloc_mmb_debug is set to true. */
+#ifdef MALLOC_MMB_DEBUGGING
+#include <stdio.h>
+extern int __malloc_mmb_debug;
+#define MALLOC_MMB_DEBUG(fmt, args...) \
+  (__malloc_mmb_debug ? fprintf (stderr, fmt , ##args) : 0)
+#else /* !MALLOC_MMB_DEBUGGING */
+#define MALLOC_MMB_DEBUG(fmt, args...) (void)0
+#endif /* MALLOC_MMB_DEBUGGING */
+
+#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
+
 /* The size of a malloc allocation is stored in a size_t word
    MALLOC_ALIGNMENT bytes prior to the start address of the allocation: