
Rework, reduce the size, add proper locking
-Erik

Eric Andersen 23 years ago
parent
commit
f1952f0996
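
The heart of the rework shows up in the malloc.c hunk below: each public entry point (malloc, free, realloc) now takes a single pthread mutex and delegates to a static *_unlocked helper, so internal callers that already hold the lock can reuse the logic. A minimal standalone sketch of that pattern, assuming __UCLIBC_HAS_THREADS__ is enabled; my_malloc is a hypothetical name used here to avoid shadowing the real libc symbol:

    #include <pthread.h>
    #include <stddef.h>

    /* One process-wide lock guarding all allocator state,
     * mirroring the diff's `malloclock`. */
    static pthread_mutex_t malloclock = PTHREAD_MUTEX_INITIALIZER;
    #define LOCK    pthread_mutex_lock(&malloclock)
    #define UNLOCK  pthread_mutex_unlock(&malloclock)

    /* Internal worker: assumes the caller already holds the lock,
     * so other internal code can call it directly. */
    static void *malloc_unlocked(size_t size)
    {
        (void) size;
        return NULL;   /* the real allocator logic is elided in this sketch */
    }

    /* Public entry point: lock, do the work, unlock. */
    void *my_malloc(size_t size)
    {
        void *ptr;
        LOCK;
        ptr = malloc_unlocked(size);
        UNLOCK;
        return ptr;
    }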

+ 2 - 2
libc/stdlib/malloc-930716/Makefile

@@ -24,11 +24,11 @@
 TOPDIR=../../../
 include $(TOPDIR)Rules.mak
 
-CSRC=calloc.c free.c malloc.c memalign.c morecore.c realloc.c valloc.c
+CSRC=calloc.c malloc.c
 COBJS=$(patsubst %.c,%.o, $(CSRC))
 
 MSRC=../malloc/alloc.c
-MOBJ=malloc_dbg.o free_dbg.o calloc_dbg.o
+MOBJ=#malloc_dbg.o free_dbg.o calloc_dbg.o
 OBJS=$(COBJS) $(MOBJ)
 
 all: $(OBJS) $(LIBC)

+ 2 - 3
libc/stdlib/malloc-930716/calloc.c

@@ -8,13 +8,12 @@
    WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
    SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
 
+#include <stdlib.h>
 #include <string.h>
-#include "malloc.h"
 
 /* Allocate space for the given number of elements of the given
    size, initializing the whole region to binary zeroes. */
-void *
-calloc(size_t nelem, size_t size)
+void * calloc(size_t nelem, size_t size)
 {
     void *result;
 
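
The body of calloc (unchanged by this hunk) allocates nelem * size bytes and zeroes them. A minimal standalone sketch of that behaviour under a hypothetical name; note that allocators of this vintage typically do not guard against nelem * size overflowing:

    #include <stdlib.h>
    #include <string.h>

    /* calloc-style allocation: nelem elements of `size` bytes, zero-filled. */
    static void *calloc_sketch(size_t nelem, size_t size)
    {
        void *result = malloc(nelem * size);   /* NB: no overflow check */
        if (result != NULL)
            memset(result, 0, nelem * size);
        return result;
    }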

+ 0 - 156
libc/stdlib/malloc-930716/free.c

@@ -1,156 +0,0 @@
-/* free.c - C standard library routine.
-   Copyright (c) 1989, 1993  Michael J. Haertel
-   You may redistribute this library under the terms of the
-   GNU Library General Public License (version 2 or any later
-   version) as published by the Free Software Foundation.
-   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
-   WARRANTY.  IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
-   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
-   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include "malloc.h"
-
-/* Return memory to the heap. */
-void
-_free_internal (void *ptr)
-{
-    int block, blocks, i, type;
-    struct list *prev, *next;
-
-    if (!ptr)
-	return;
-
-    block = BLOCK(ptr);
-
-    switch (type = _heapinfo[block].busy.type) {
-    case 0:
-	/* Find the free cluster previous to this one in the free list.
-	   Start searching at the last block referenced; this may benefit
-	   programs with locality of allocation. */
-	i = _heapindex;
-	if (i > block)
-	    while (i > block)
-		i = _heapinfo[i].free.prev;
-	else {
-	    do
-		i = _heapinfo[i].free.next;
-	    while (i > 0 && i < block);
-	    i = _heapinfo[i].free.prev;
-	}
-
-	/* Determine how to link this block into the free list. */
-	if (block == i + _heapinfo[i].free.size) {
-	    /* Coalesce this block with its predecessor. */
-	    _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
-	    block = i;
-	} else {
-	    /* Really link this block back into the free list. */
-	    _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
-	    _heapinfo[block].free.next = _heapinfo[i].free.next;
-	    _heapinfo[block].free.prev = i;
-	    _heapinfo[i].free.next = block;
-	    _heapinfo[_heapinfo[block].free.next].free.prev = block;
-	}
-
-	/* Now that the block is linked in, see if we can coalesce it
-	   with its successor (by deleting its successor from the list
-	   and adding in its size). */
-	if (block + _heapinfo[block].free.size == _heapinfo[block].free.next) {
-	    _heapinfo[block].free.size
-		+= _heapinfo[_heapinfo[block].free.next].free.size;
-	    _heapinfo[block].free.next
-		= _heapinfo[_heapinfo[block].free.next].free.next;
-	    _heapinfo[_heapinfo[block].free.next].free.prev = block;
-	}
-
-	/* Now see if we can return stuff to the system. */
-	blocks = _heapinfo[block].free.size;
-	if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
-	    && (*__morecore)(0) == ADDRESS(block + blocks)) {
-	    _heaplimit -= blocks;
-	    (*__morecore)(-blocks * BLOCKSIZE);
-	    _heapinfo[_heapinfo[block].free.prev].free.next
-		= _heapinfo[block].free.next;
-	    _heapinfo[_heapinfo[block].free.next].free.prev
-		= _heapinfo[block].free.prev;
-	    block = _heapinfo[block].free.prev;
-	}
-
-	/* Set the next search to begin at this block. */
-	_heapindex = block;
-	break;
-
-    default:
-	/* Get the address of the first free fragment in this block. */
-	prev = (struct list *) ((char *) ADDRESS(block)
-				+ (_heapinfo[block].busy.info.frag.first
-				   << type));
-
-	if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1
-	&& _fragblocks[type] > 1) {
-	    /* If all fragments of this block are free, remove them
-	       from the fragment list and free the whole block. */
-	    --_fragblocks[type];
-	    for (next = prev, i = 1; i < BLOCKSIZE >> type; ++i)
-		next = next->next;
-	    prev->prev->next = next;
-	    if (next)
-		next->prev = prev->prev;
-	    _heapinfo[block].busy.type = 0;
-	    _heapinfo[block].busy.info.size = 1;
-	    free(ADDRESS(block));
-	} else if (_heapinfo[block].busy.info.frag.nfree) {
-	    /* If some fragments of this block are free, link this fragment
-	       into the fragment list after the first free fragment of
-	       this block. */
-	    next = ptr;
-	    next->next = prev->next;
-	    next->prev = prev;
-	    prev->next = next;
-	    if (next->next)
-		next->next->prev = next;
-	    ++_heapinfo[block].busy.info.frag.nfree;
-	} else {
-	    /* No fragments of this block are free, so link this fragment
-	       into the fragment list and announce that it is the first
-	       free fragment of this block. */
-	    prev = (struct list *) ptr;
-	    _heapinfo[block].busy.info.frag.nfree = 1;
-	    _heapinfo[block].busy.info.frag.first
-		= (unsigned int) ((char *) ptr - (char *) NULL) % BLOCKSIZE
-		  >> type;
-	    prev->next = _fraghead[type].next;
-	    prev->prev = &_fraghead[type];
-	    prev->prev->next = prev;
-	    if (prev->next)
-		prev->next->prev = prev;
-	}
-	break;
-    }
-}
-
-struct alignlist *_aligned_blocks = NULL;
-
-void
-free (void *ptr)
-{
-  register struct alignlist *l;
-  
-  if (ptr == NULL)
-    return;
-	 
-  for (l = _aligned_blocks; l != NULL; l = l->next)
-  {
-    if (l->aligned == ptr)
-    {
-      l->aligned = NULL;	/* Mark the slot in the list as free.  */
-      ptr = l->exact;
-      break;
-    }
-  }
-
-  _free_internal (ptr);
-}

+ 329 - 32
libc/stdlib/malloc-930716/malloc.c

@@ -8,42 +8,69 @@
    WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
    SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
 
+#define _GNU_SOURCE
+#include <features.h>
 #include <limits.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <string.h>
+#include <unistd.h>
 #include "malloc.h"
 
+#define MIN(x,y) ({ \
+	const typeof(x) _x = (x);       \
+	const typeof(y) _y = (y);       \
+	(void) (&_x == &_y);            \
+	_x < _y ? _x : _y; })
+
+
+#ifdef __UCLIBC_HAS_THREADS__
+#include <pthread.h>
+static pthread_mutex_t malloclock = PTHREAD_MUTEX_INITIALIZER;
+# define LOCK	pthread_mutex_lock(&malloclock)
+# define UNLOCK	pthread_mutex_unlock(&malloclock);
+#else
+# define LOCK
+# define UNLOCK
+#endif
+    
+static void * malloc_unlocked (size_t size);
+static void free_unlocked(void *ptr);
+static void * __default_morecore_init(long size);
+
 /* How to really get more memory. */
-void *(*__morecore)(long) = __default_morecore_init;
+static void *(*__morecore)(long) = __default_morecore_init;
 
 /* Pointer to the base of the first block. */
-char *_heapbase;
+static char *_heapbase;
 
 /* Block information table. */
-union info *_heapinfo;
+static union info *_heapinfo;
 
 /* Number of info entries. */
-static int heapsize;
+static size_t heapsize;
 
 /* Search index in the info table. */
-int _heapindex;
+static size_t _heapindex;
 
 /* Limit of valid info table indices. */
-int _heaplimit;
+static size_t _heaplimit;
 
 /* Count of large blocks allocated for each fragment size. */
-int _fragblocks[BLOCKLOG];
+static size_t _fragblocks[BLOCKLOG];
 
 /* Free lists for each fragment size. */
-struct list _fraghead[BLOCKLOG];
+static struct list _fraghead[BLOCKLOG];
 
 /* Are we experienced? */
 static int initialized;
 
-/* Aligned allocation. */
-static void *
-align(size_t size)
+
+
+/* Aligned allocation.
+ * Called within the lock in initialize() and morecore(), 
+ * so no explicit locking needed... */
+static void * align(size_t size)
 {
     void *result;
     unsigned int adj;
@@ -57,14 +84,16 @@ align(size_t size)
     return result;
 }
 
-/* Set everything up and remember that we have. */
-static int
-initialize(void)
+/* Set everything up and remember that we have. 
+ * Called within the lock in malloc(), so no
+ * explicit locking needed... */
+static int initialize(void)
 {
     heapsize = HEAP / BLOCKSIZE;
     _heapinfo = align(heapsize * sizeof (union info));
-    if (!_heapinfo)
+    if (!_heapinfo) {
 	return 0;
+    }
     memset(_heapinfo, 0, heapsize * sizeof (union info));
     _heapinfo[0].free.size = 0;
     _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
@@ -74,14 +103,15 @@ initialize(void)
     return 1;
 }
 
-/* Get neatly aligned memory, initializing or growing the
-   heap info table as necessary. */
-static void *
-morecore(size_t size)
+/* Get neatly aligned memory, initializing or growing the 
+ * heap info table as necessary. 
+ * Called within a lock in malloc() and free(), 
+ * so no explicit locking needed... */
+static void * morecore(size_t size)
 {
     void *result;
     union info *newinfo, *oldinfo;
-    int newsize;
+    size_t newsize;
 
     result = align(size);
     if (!result)
@@ -104,11 +134,7 @@ morecore(size_t size)
 	newinfo[BLOCK(oldinfo)].busy.info.size
 	    = BLOCKIFY(heapsize * sizeof (union info));
 	_heapinfo = newinfo;
-#if 0
-	free(oldinfo);
-#else
-	_free_internal (oldinfo);
-#endif
+	free_unlocked(oldinfo);
 	heapsize = newsize;
     }
 
@@ -116,16 +142,42 @@ morecore(size_t size)
     return result;
 }
 
+/* Note that morecore has to take a signed argument so
+   that negative values can return memory to the system. */
+static void * __default_morecore_init(long size)
+{
+    void *result;
+
+    result = sbrk(size);
+    if (result == (void *) -1)
+	return NULL;
+    return result;
+}
+
 /* Allocate memory from the heap. */
-void *
-malloc (size_t size)
+void * malloc (size_t size)
+{
+    void * ptr;
+    LOCK;
+    ptr = malloc_unlocked(size);
+    UNLOCK;
+    return(ptr);
+}
+
+static void * malloc_unlocked (size_t size)
 {
     void *result;
-    int log, block, blocks, i, lastblocks, start;
+    size_t log, block, blocks, i, lastblocks, start;
     struct list *next;
 
-    if (!initialized && !initialize())
+#if 1
+    /* Some programs will call malloc (0).  Lets be strict and return NULL */
+    if (size == 0)
 	return NULL;
+#endif
+
+    if (size < sizeof (struct list))
+	size = sizeof (struct list);
 
 #if 1
     /* Some programs will call malloc (0).  Lets be strict and return NULL */
@@ -136,6 +188,10 @@ malloc (size_t size)
     if (size < sizeof (struct list))
 	size = sizeof (struct list);
 
+    if (!initialized && !initialize()) {
+	return NULL;
+    }
+
     /* Determine the allocation policy based on the request size. */
     if (size <= BLOCKSIZE / 2) {
 	/* Small allocation to receive a fragment of a block. Determine
@@ -162,9 +218,10 @@ malloc (size_t size)
 	} else {
 	    /* No free fragments of the desired size, so get a new block
 	       and break it into fragments, returning the first. */
-	    result = malloc(BLOCKSIZE);
-	    if (!result)
+	    result = malloc_unlocked(BLOCKSIZE);
+	    if (!result) {
 		return NULL;
+	    }
 	    ++_fragblocks[log];
 
 	    /* Link all fragments but the first into the free list. */
@@ -213,8 +270,9 @@ malloc (size_t size)
 		    continue;
 		}
 		result = morecore(blocks * BLOCKSIZE);
-		if (!result)
+		if (!result) {
 		    return NULL;
+		}
 		block = BLOCK(result);
 		_heapinfo[block].busy.type = 0;
 		_heapinfo[block].busy.info.size = blocks;
@@ -252,3 +310,242 @@ malloc (size_t size)
 
     return result;
 }
+
+
+
+/* Return memory to the heap. */
+void free(void *ptr)
+{
+    LOCK;
+    free_unlocked(ptr);
+    UNLOCK;
+}
+
+static void free_unlocked(void *ptr)
+{
+    int block, blocks, i, type;
+    struct list *prev, *next;
+
+    if (ptr == NULL)
+	return;
+
+    block = BLOCK(ptr);
+
+    switch (type = _heapinfo[block].busy.type) {
+	case 0:
+	    /* Find the free cluster previous to this one in the free list.
+	       Start searching at the last block referenced; this may benefit
+	       programs with locality of allocation. */
+	    i = _heapindex;
+	    if (i > block)
+		while (i > block)
+		    i = _heapinfo[i].free.prev;
+	    else {
+		do
+		    i = _heapinfo[i].free.next;
+		while (i > 0 && i < block);
+		i = _heapinfo[i].free.prev;
+	    }
+
+	    /* Determine how to link this block into the free list. */
+	    if (block == i + _heapinfo[i].free.size) {
+		/* Coalesce this block with its predecessor. */
+		_heapinfo[i].free.size += _heapinfo[block].busy.info.size;
+		block = i;
+	    } else {
+		/* Really link this block back into the free list. */
+		_heapinfo[block].free.size = _heapinfo[block].busy.info.size;
+		_heapinfo[block].free.next = _heapinfo[i].free.next;
+		_heapinfo[block].free.prev = i;
+		_heapinfo[i].free.next = block;
+		_heapinfo[_heapinfo[block].free.next].free.prev = block;
+	    }
+
+	    /* Now that the block is linked in, see if we can coalesce it
+	       with its successor (by deleting its successor from the list
+	       and adding in its size). */
+	    if (block + _heapinfo[block].free.size == _heapinfo[block].free.next) {
+		_heapinfo[block].free.size
+		    += _heapinfo[_heapinfo[block].free.next].free.size;
+		_heapinfo[block].free.next
+		    = _heapinfo[_heapinfo[block].free.next].free.next;
+		_heapinfo[_heapinfo[block].free.next].free.prev = block;
+	    }
+
+	    /* Now see if we can return stuff to the system. */
+	    blocks = _heapinfo[block].free.size;
+	    if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
+		    && (*__morecore)(0) == ADDRESS(block + blocks)) {
+		_heaplimit -= blocks;
+		(*__morecore)(-blocks * BLOCKSIZE);
+		_heapinfo[_heapinfo[block].free.prev].free.next
+		    = _heapinfo[block].free.next;
+		_heapinfo[_heapinfo[block].free.next].free.prev
+		    = _heapinfo[block].free.prev;
+		block = _heapinfo[block].free.prev;
+	    }
+
+	    /* Set the next search to begin at this block. */
+	    _heapindex = block;
+	    break;
+
+	default:
+	    /* Get the address of the first free fragment in this block. */
+	    prev = (struct list *) ((char *) ADDRESS(block)
+		    + (_heapinfo[block].busy.info.frag.first
+			<< type));
+
+	    if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1
+		    && _fragblocks[type] > 1) {
+		/* If all fragments of this block are free, remove them
+		   from the fragment list and free the whole block. */
+		--_fragblocks[type];
+		for (next = prev, i = 1; i < BLOCKSIZE >> type; ++i)
+		    next = next->next;
+		prev->prev->next = next;
+		if (next)
+		    next->prev = prev->prev;
+		_heapinfo[block].busy.type = 0;
+		_heapinfo[block].busy.info.size = 1;
+		free_unlocked(ADDRESS(block));
+	    } else if (_heapinfo[block].busy.info.frag.nfree) {
+		/* If some fragments of this block are free, link this fragment
+		   into the fragment list after the first free fragment of
+		   this block. */
+		next = ptr;
+		next->next = prev->next;
+		next->prev = prev;
+		prev->next = next;
+		if (next->next)
+		    next->next->prev = next;
+		++_heapinfo[block].busy.info.frag.nfree;
+	    } else {
+		/* No fragments of this block are free, so link this fragment
+		   into the fragment list and announce that it is the first
+		   free fragment of this block. */
+		prev = (struct list *) ptr;
+		_heapinfo[block].busy.info.frag.nfree = 1;
+		_heapinfo[block].busy.info.frag.first
+		    = (unsigned int) ((char *) ptr - (char *) NULL) % BLOCKSIZE
+		    >> type;
+		prev->next = _fraghead[type].next;
+		prev->prev = &_fraghead[type];
+		prev->prev->next = prev;
+		if (prev->next)
+		    prev->next->prev = prev;
+	    }
+	    break;
+    }
+}
+
+/* Resize the given region to the new size, returning a pointer
+   to the (possibly moved) region.  This is optimized for speed;
+   some benchmarks seem to indicate that greater compactness is
+   achieved by unconditionally allocating and copying to a
+   new region. */
+void * realloc (void *ptr, size_t size)
+{
+    void *result, *previous;
+    size_t block, blocks, type;
+    size_t oldlimit;
+
+    if (!ptr)
+	return malloc(size);
+    if (!size) {
+	LOCK;
+	free_unlocked(ptr);
+	result = malloc_unlocked(0);
+	UNLOCK;
+	return(result);
+    }
+
+    LOCK;
+    block = BLOCK(ptr);
+
+    switch (type = _heapinfo[block].busy.type) {
+	case 0:
+	    /* Maybe reallocate a large block to a small fragment. */
+	    if (size <= BLOCKSIZE / 2) {
+		if ((result = malloc_unlocked(size)) != NULL) {
+		    memcpy(result, ptr, size);
+		    free(ptr);
+		}
+		UNLOCK;
+		return result;
+	    }
+
+	    /* The new size is a large allocation as well; see if
+	       we can hold it in place. */
+	    blocks = BLOCKIFY(size);
+	    if (blocks < _heapinfo[block].busy.info.size) {
+		/* The new size is smaller; return excess memory
+		   to the free list. */
+		_heapinfo[block + blocks].busy.type = 0;
+		_heapinfo[block + blocks].busy.info.size
+		    = _heapinfo[block].busy.info.size - blocks;
+		_heapinfo[block].busy.info.size = blocks;
+		free(ADDRESS(block + blocks));
+		UNLOCK;
+		return ptr;
+	    } else if (blocks == _heapinfo[block].busy.info.size) {
+		/* No size change necessary. */
+		UNLOCK;
+		return ptr;
+	    } else {
+		/* Won't fit, so allocate a new region that will.  Free
+		   the old region first in case there is sufficient adjacent
+		   free space to grow without moving. */
+		blocks = _heapinfo[block].busy.info.size;
+		/* Prevent free from actually returning memory to the system. */
+		oldlimit = _heaplimit;
+		_heaplimit = 0;
+		free(ptr);
+		_heaplimit = oldlimit;
+		result = malloc_unlocked(size);
+		if (!result) {
+		    /* Now we're really in trouble.  We have to unfree
+		       the thing we just freed.  Unfortunately it might
+		       have been coalesced with its neighbors. */
+		    if (_heapindex == block)
+			malloc_unlocked(blocks * BLOCKSIZE);
+		    else {
+			previous = malloc_unlocked((block - _heapindex) * BLOCKSIZE);
+			malloc_unlocked(blocks * BLOCKSIZE);
+			free(previous);
+		    }	    
+		    UNLOCK;
+		    return NULL;
+		}
+		if (ptr != result)
+		    memmove(result, ptr, blocks * BLOCKSIZE);
+		UNLOCK;
+		return result;
+	    }
+	    break;
+
+	default:
+	    /* Old size is a fragment; type is logarithm to base two of
+	       the fragment size. */
+	    if ((size > 1 << (type - 1)) && (size <= 1 << type)) {
+		/* New size is the same kind of fragment. */
+		UNLOCK;
+		return ptr;
+	    }
+	    else {
+		/* New size is different; allocate a new space, and copy
+		   the lesser of the new size and the old. */
+		result = malloc_unlocked(size);
+		if (!result) {
+		    UNLOCK;
+		    return NULL;
+		}
+		memcpy(result, ptr, MIN(size, (size_t)(1 << type)));
+		free(ptr);
+		UNLOCK;
+		return result;
+	    }
+	    break;
+    }
+    UNLOCK;
+}
+
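
One small change worth calling out in the hunk above: the MIN() used by the new in-file realloc is a GNU statement-expression macro that evaluates each argument exactly once, replacing the naive #define MIN(A, B) ((A) < (B) ? (A) : (B)) from the realloc.c deleted further down. A short standalone illustration of the difference (GCC/clang typeof and statement expressions assumed):

    #include <stdio.h>

    /* Naive form: the chosen argument is evaluated twice. */
    #define MIN_NAIVE(A, B) ((A) < (B) ? (A) : (B))

    /* Single-evaluation form, as added at the top of malloc.c above. */
    #define MIN(x, y) ({                  \
            const typeof(x) _x = (x);     \
            const typeof(y) _y = (y);     \
            (void) (&_x == &_y);          \
            _x < _y ? _x : _y; })

    int main(void)
    {
        int a = 3, b = 3;
        int naive = MIN_NAIVE(a++, 10);   /* a++ runs twice: a ends up 5, result 4 */
        int safe  = MIN(b++, 10);         /* b++ runs once:  b ends up 4, result 3 */
        printf("%d %d  (a=%d, b=%d)\n", naive, safe, a, b);
        return 0;
    }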

+ 9 - 54
libc/stdlib/malloc-930716/malloc.h

@@ -10,23 +10,13 @@
 
 #include <sys/cdefs.h>
 
-/* Underlying allocation function; successive calls should return
-   contiguous pieces of memory. */
-extern void *(*__morecore)(long);
-
-/* Default value of previous. */
-extern void *__default_morecore_init(long);
-extern void *__default_morecore(long);
-
 /* The allocator divides the heap into blocks of fixed size; large
    requests receive one or more whole blocks, and small requests
    receive a fragment of a block.  Fragment sizes are powers of two,
    and all fragments of a block are the same size.  When all the
    fragments in a block have been freed, the block itself is freed.
-   WARNING: BLOCKSIZE must be set greater than or equal to the
-   machine's page size for valloc() to work correctly.  The default
-   definition here is 4096 bytes. */
-#define INT_BIT (CHAR_BIT * sizeof (int))
+   */
+#define INT_BIT (CHAR_BIT * sizeof (size_t))
 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
 #define BLOCKSIZE (1 << BLOCKLOG)
 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
@@ -42,66 +32,31 @@ extern void *__default_morecore(long);
 /* Data structure giving per-block information. */
 union info {
     struct {
-	int type;		/* Zero for a large block, or positive
+	size_t type;		/* Zero for a large block, or positive
 				   giving the logarithm to the base two
 				   of the fragment size. */
 	union {
 	    struct {
-		int nfree;	/* Free fragments in a fragmented block. */
-		int first;	/* First free fragment of the block. */
+		size_t nfree;	/* Free fragments in a fragmented block. */
+		size_t first;	/* First free fragment of the block. */
 	    } frag;
-	    int size;		/* Size (in blocks) of a large cluster. */
+	    size_t size;	/* Size (in blocks) of a large cluster. */
 	} info;
     } busy;
     struct {
-	int size;		/* Size (in blocks) of a free cluster. */
-	int next;		/* Index of next free cluster. */
-	int prev;		/* Index of previous free cluster. */
+	size_t size;		/* Size (in blocks) of a free cluster. */
+	size_t next;		/* Index of next free cluster. */
+	size_t prev;		/* Index of previous free cluster. */
     } free;
 };
 
-/* Pointer to first block of the heap. */
-extern char *_heapbase;
-
-/* Table indexed by block number giving per-block information. */
-extern union info *_heapinfo;
-
 /* Address to block number and vice versa. */
 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
 
-/* Current search index for the heap table. */
-extern int _heapindex;
-
-/* Limit of valid info table indices. */
-extern int _heaplimit;
-
 /* Doubly linked lists of free fragments. */
 struct list {
     struct list *next;
     struct list *prev;
 };
 
-/* Count of blocks for each fragment size. */
-extern int _fragblocks[];
-
-/* Free list headers for each fragment size. */
-extern struct list _fraghead[];
-
-/* List of blocks allocated with `memalign' (or `valloc').  */
-struct alignlist
-{ 
-  struct alignlist *next;
-  __ptr_t aligned;	/* The address that memaligned returned.  */
-  __ptr_t exact;	/* The address that malloc returned.  */
-};
-extern struct alignlist *_aligned_blocks;
-
-extern void _free_internal __P ((__ptr_t __ptr));
-
-extern void free (void *);
-extern void * malloc (size_t);
-extern void * calloc (size_t, size_t);
-extern void * valloc (size_t);
-extern void * memalign (size_t, size_t);
-extern void * realloc (void *, size_t);
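
The BLOCK()/ADDRESS() macros kept above are the allocator's core bookkeeping: a block number is a 1-based index into the _heapinfo table, derived from an address's offset within the heap. A small standalone sketch of the arithmetic (BLOCKLOG is fixed at 12 and the heap is faked with a static array, purely for illustration):

    #include <stdio.h>

    #define BLOCKLOG  12
    #define BLOCKSIZE (1 << BLOCKLOG)              /* 4096-byte blocks */

    static char *_heapbase;                        /* base of the managed heap */

    /* Same mapping as malloc.h above: block numbers start at 1, so
     * index 0 of _heapinfo can serve as the free-list anchor. */
    #define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
    #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))

    int main(void)
    {
        static char fake_heap[8 * BLOCKSIZE];      /* stand-in for sbrk'd memory */
        _heapbase = fake_heap;

        void *p = fake_heap + 3 * BLOCKSIZE + 100; /* 100 bytes into block 4 */
        long b = BLOCK(p);
        printf("block %ld starts at offset %ld\n",
               b, (long) ((char *) ADDRESS(b) - _heapbase));   /* 4, 12288 */
        return 0;
    }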

+ 0 - 61
libc/stdlib/malloc-930716/memalign.c

@@ -1,61 +0,0 @@
-/* Copyright (C) 1991, 1992 Free Software Foundation, Inc.
-
-This library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Library General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-This library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-Library General Public License for more details.
-
-You should have received a copy of the GNU Library General Public
-License along with this library; see the file COPYING.LIB.  If
-not, write to the Free Software Foundation, Inc., 675 Mass Ave,
-Cambridge, MA 02139, USA.  */
-
-#include <stdlib.h>
-#include "malloc.h"
-
-__ptr_t
-memalign (alignment, size)
-     size_t alignment;
-     size_t size;
-{
-  __ptr_t result;
-  unsigned long int adj;
-
-  result = malloc (size + alignment - 1);
-  if (result == NULL)
-    return NULL;
-  adj = (unsigned long int) ((unsigned long int) ((char *) result -
-						(char *) NULL)) % alignment;
-  if (adj != 0)
-    {
-      struct alignlist *l;
-      for (l = _aligned_blocks; l != NULL; l = l->next)
-	if (l->aligned == NULL)
-	  /* This slot is free.  Use it.  */
-	  break;
-      if (l == NULL)
-	{
-	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
-	  if (l == NULL)
-	    {
-#if 1
-	      free (result);
-#else
-	      _free_internal (result);
-#endif
-	      return NULL;
-	    }
-	  l->next = _aligned_blocks;
-	  _aligned_blocks = l;
-	}
-      l->exact = result;
-      result = l->aligned = (char *) result + alignment - adj;
-    }
-
-  return result;
-}
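
The deleted memalign relied on a standard trick: over-allocate by alignment - 1 bytes, round the returned pointer up to the next aligned address, and record the (aligned, exact) pair in _aligned_blocks so that free() could later recover the pointer malloc really returned. A compact sketch of just the rounding step (align_up is a hypothetical helper; the bookkeeping list is omitted):

    #include <stddef.h>
    #include <stdint.h>

    /* Round a malloc'd pointer up to the next multiple of `alignment`.
     * The caller must remember the original pointer and pass *that* to
     * free(), which is what the deleted _aligned_blocks list was for. */
    static void *align_up(void *raw, size_t alignment)
    {
        uintptr_t adj = (uintptr_t) raw % alignment;
        return adj ? (char *) raw + (alignment - adj) : raw;
    }

    /* Usage, mirroring the deleted code:
     *   void *raw     = malloc(size + alignment - 1);
     *   void *aligned = raw ? align_up(raw, alignment) : NULL;
     *   ... later: free(raw), never free(aligned).
     */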

+ 0 - 28
libc/stdlib/malloc-930716/morecore.c

@@ -1,28 +0,0 @@
-/* morecore.c - C library support routine for UNIX.
-   Copyright (c) 1989, 1993  Michael J. Haertel
-   You may redistribute this library under the terms of the
-   GNU Library General Public License (version 2 or any later
-   version) as published by the Free Software Foundation.
-   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
-   WARRANTY.  IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
-   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
-   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
-
-#include <limits.h>
-#include <stddef.h>
-#include "malloc.h"
-
-extern void *sbrk(int);
-
-/* Note that morecore has to take a signed argument so
-   that negative values can return memory to the system. */
-void *
-__default_morecore_init(long size)
-{
-    void *result;
-
-    result = sbrk(size);
-    if (result == (void *) -1)
-	return NULL;
-    return result;
-}

+ 0 - 131
libc/stdlib/malloc-930716/realloc.c

@@ -1,131 +0,0 @@
-/* realloc.c - C standard library routine.
-   Copyright (c) 1989, 1993  Michael J. Haertel
-   You may redistribute this library under the terms of the
-   GNU Library General Public License (version 2 or any later
-   version) as published by the Free Software Foundation.
-   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
-   WARRANTY.  IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
-   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
-   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-#include "malloc.h"
-
-#define MIN(A, B) ((A) < (B) ? (A) : (B))
-
-/* Resize the given region to the new size, returning a pointer
-   to the (possibly moved) region.  This is optimized for speed;
-   some benchmarks seem to indicate that greater compactness is
-   achieved by unconditionally allocating and copying to a
-   new region. */
-void *
-realloc (void *ptr, size_t size)
-{
-    void *result, *previous;
-    int block, blocks, type;
-    int oldlimit;
-
-    if (!ptr)
-	return malloc(size);
-    if (!size) {
-	free(ptr);
-	return malloc(0);
-    }
-
-    block = BLOCK(ptr);
-
-    switch (type = _heapinfo[block].busy.type) {
-    case 0:
-	/* Maybe reallocate a large block to a small fragment. */
-	if (size <= BLOCKSIZE / 2) {
-	    if ((result = malloc(size)) != NULL) {
-	    	memcpy(result, ptr, size);
-#if 1
-	    	free(ptr);
-#else
-	    	_free_internal(ptr);
-#endif
-
-	    }
-	    return result;
-	}
-
-	/* The new size is a large allocation as well; see if
-	   we can hold it in place. */
-	blocks = BLOCKIFY(size);
-	if (blocks < _heapinfo[block].busy.info.size) {
-	    /* The new size is smaller; return excess memory
-	       to the free list. */
-	    _heapinfo[block + blocks].busy.type = 0;
-	    _heapinfo[block + blocks].busy.info.size
-		= _heapinfo[block].busy.info.size - blocks;
-	    _heapinfo[block].busy.info.size = blocks;
-#if 1
-	    free(ADDRESS(block + blocks));
-#else
-	    _free_internal(ADDRESS(block + blocks));
-#endif
-	    return ptr;
-	} else if (blocks == _heapinfo[block].busy.info.size)
-	    /* No size change necessary. */
-	    return ptr;
-	else {
-	    /* Won't fit, so allocate a new region that will.  Free
-	       the old region first in case there is sufficient adjacent
-	       free space to grow without moving. */
-	    blocks = _heapinfo[block].busy.info.size;
-	    /* Prevent free from actually returning memory to the system. */
-	    oldlimit = _heaplimit;
-	    _heaplimit = 0;
-#if 1
-	    free(ptr);
-#else
-	    _free_internal(ptr);
-#endif
-	    _heaplimit = oldlimit;
-	    result = malloc(size);
-	    if (!result) {
-		/* Now we're really in trouble.  We have to unfree
-		   the thing we just freed.  Unfortunately it might
-		   have been coalesced with its neighbors. */
-		if (_heapindex == block)
-		    malloc(blocks * BLOCKSIZE);
-		else {
-		    previous = malloc((block - _heapindex) * BLOCKSIZE);
-		    malloc(blocks * BLOCKSIZE);
-#if 1
-		    free(previous);
-#else
-		    _free_internal(previous);
-#endif
-		}	    
-		return NULL;
-	    }
-	    if (ptr != result)
-		memmove(result, ptr, blocks * BLOCKSIZE);
-	    return result;
-	}
-	break;
-
-    default:
-	/* Old size is a fragment; type is logarithm to base two of
-	   the fragment size. */
-	if ((size > 1 << (type - 1)) && (size <= 1 << type))
-	    /* New size is the same kind of fragment. */
-	    return ptr;
-	else {
-	    /* New size is different; allocate a new space, and copy
-	       the lesser of the new size and the old. */
-	    result = malloc(size);
-	    if (!result)
-		return NULL;
-	    memcpy(result, ptr, MIN(size, 1 << type));
-	    free(ptr);
-	    return result;
-	}
-	break;
-    }
-}

+ 0 - 62
libc/stdlib/malloc-930716/valloc.c

@@ -1,62 +0,0 @@
-/* Allocate memory on a page boundary.
-   Copyright (C) 1991, 1992 Free Software Foundation, Inc.
-
-This library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Library General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-This library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-Library General Public License for more details.
-
-You should have received a copy of the GNU Library General Public
-License along with this library; see the file COPYING.LIB.  If
-not, write to the Free Software Foundation, Inc., 675 Mass Ave,
-Cambridge, MA 02139, USA.
-
-   The author may be reached (Email) at the address mike@ai.mit.edu,
-   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
-
-#include <stdlib.h>
-#include "malloc.h"
-
-#ifdef	 emacs
-#include "config.h"
-#endif
-
-#ifdef	__GNU_LIBRARY__
-extern size_t __getpagesize __P ((void));
-#else
-#ifndef	USG
-extern size_t getpagesize __P ((void));
-#define	__getpagesize()	getpagesize()
-#else
-#include <sys/param.h>
-#ifdef	EXEC_PAGESIZE
-#define	__getpagesize()	EXEC_PAGESIZE
-#else /* No EXEC_PAGESIZE.  */
-#ifdef	NBPG
-#ifndef	CLSIZE
-#define	CLSIZE	1
-#endif /* No CLSIZE.  */
-#define	__getpagesize()	(NBPG * CLSIZE)
-#else /* No NBPG.  */
-#define	__getpagesize()	NBPC
-#endif /* NBPG.  */
-#endif /* EXEC_PAGESIZE.  */
-#endif /* USG.  */
-#endif
-
-static size_t pagesize;
-
-__ptr_t
-valloc (size)
-     size_t size;
-{
-  if (pagesize == 0)
-    pagesize = __getpagesize ();
-
-  return memalign (pagesize, size);
-}