@@ -8,42 +8,69 @@
    WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
    SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
 
+#define _GNU_SOURCE
+#include <features.h>
 #include <limits.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <string.h>
+#include <unistd.h>
 #include "malloc.h"
 
+#define MIN(x,y) ({ \
+    const typeof(x) _x = (x); \
+    const typeof(y) _y = (y); \
+    (void) (&_x == &_y); \
+    _x < _y ? _x : _y; })
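
Note: MIN above is the usual GNU statement-expression idiom rather than the
naive conditional macro: each argument is evaluated exactly once, and the
otherwise dead comparison (void) (&_x == &_y) makes the compiler warn when
the two arguments have incompatible types. A minimal illustration
(hypothetical values):

    size_t a = 100, b = 7;
    size_t m = MIN(a++, b++);   /* m == 7; a and b are incremented once each.
                                   With ((x) < (y) ? (x) : (y)) the smaller
                                   operand would be evaluated twice. */
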
+
+
+#ifdef __UCLIBC_HAS_THREADS__
+#include <pthread.h>
+static pthread_mutex_t malloclock = PTHREAD_MUTEX_INITIALIZER;
+# define LOCK   pthread_mutex_lock(&malloclock)
+# define UNLOCK pthread_mutex_unlock(&malloclock)
+#else
+# define LOCK
+# define UNLOCK
+#endif
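
Note: the locking is deliberately two-layered. The public entry points take
malloclock exactly once and delegate to the *_unlocked variants, and all
internal calls (morecore() freeing the old info table, realloc() shuffling
regions) stay on the unlocked layer: the mutex is a plain non-recursive
PTHREAD_MUTEX_INITIALIZER, so re-entering malloc() or free() with the lock
held would deadlock. Both macros expand to bare expressions, so every call
site supplies its own trailing semicolon. A hypothetical further entry point
would follow the same shape (calloc is not part of this patch):

    void *calloc(size_t nmemb, size_t size)
    {
        void *ptr;
        LOCK;
        ptr = malloc_unlocked(nmemb * size);  /* overflow check omitted */
        UNLOCK;
        if (ptr)
            memset(ptr, 0, nmemb * size);
        return ptr;
    }
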
+
+static void * malloc_unlocked (size_t size);
+static void free_unlocked(void *ptr);
+static void * __default_morecore_init(long size);
+
 /* How to really get more memory. */
-void *(*__morecore)(long) = __default_morecore_init;
+static void *(*__morecore)(long) = __default_morecore_init;
 
 /* Pointer to the base of the first block. */
-char *_heapbase;
+static char *_heapbase;
 
 /* Block information table. */
-union info *_heapinfo;
+static union info *_heapinfo;
 
 /* Number of info entries. */
-static int heapsize;
+static size_t heapsize;
 
 /* Search index in the info table. */
-int _heapindex;
+static size_t _heapindex;
 
 /* Limit of valid info table indices. */
-int _heaplimit;
+static size_t _heaplimit;
 
 /* Count of large blocks allocated for each fragment size. */
-int _fragblocks[BLOCKLOG];
+static size_t _fragblocks[BLOCKLOG];
 
 /* Free lists for fragments of each size. */
-struct list _fraghead[BLOCKLOG];
+static struct list _fraghead[BLOCKLOG];
 
 /* Are you experienced? */
 static int initialized;
 
-/* Aligned allocation. */
-static void *
-align(size_t size)
+
+
+/* Aligned allocation.
+ * Called within the lock in initialize() and morecore(),
+ * so no explicit locking needed... */
+static void * align(size_t size)
 {
     void *result;
     unsigned int adj;
@@ -57,14 +84,16 @@ align(size_t size)
     return result;
 }
 
-/* Set everything up and remember that we have. */
-static int
-initialize(void)
+/* Set everything up and remember that we have.
+ * Called within the lock in malloc(), so no
+ * explicit locking needed... */
+static int initialize(void)
 {
     heapsize = HEAP / BLOCKSIZE;
     _heapinfo = align(heapsize * sizeof (union info));
-    if (!_heapinfo)
+    if (!_heapinfo) {
         return 0;
+    }
     memset(_heapinfo, 0, heapsize * sizeof (union info));
     _heapinfo[0].free.size = 0;
     _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
@@ -74,14 +103,15 @@ initialize(void)
     return 1;
 }
 
-/* Get neatly aligned memory, initializing or growing the
-   heap info table as necessary. */
-static void *
-morecore(size_t size)
+/* Get neatly aligned memory, initializing or growing the
+ * heap info table as necessary.
+ * Called within a lock in malloc() and free(),
+ * so no explicit locking needed... */
+static void * morecore(size_t size)
 {
     void *result;
     union info *newinfo, *oldinfo;
-    int newsize;
+    size_t newsize;
 
     result = align(size);
     if (!result)
@@ -104,11 +134,7 @@ morecore(size_t size)
         newinfo[BLOCK(oldinfo)].busy.info.size
             = BLOCKIFY(heapsize * sizeof (union info));
         _heapinfo = newinfo;
-#if 0
-        free(oldinfo);
-#else
-        _free_internal (oldinfo);
-#endif
+        free_unlocked(oldinfo);
         heapsize = newsize;
     }
 
@@ -116,16 +142,42 @@ morecore(size_t size)
     return result;
 }
 
+/* Note that morecore has to take a signed argument so
+   that negative values can return memory to the system. */
+static void * __default_morecore_init(long size)
+{
+    void *result;
+
+    result = sbrk(size);
+    if (result == (void *) -1)
+        return NULL;
+    return result;
+}
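
Note: __default_morecore_init() just wraps sbrk(), so __morecore inherits
sbrk's three-way contract, which free_unlocked() below relies on: a positive
argument grows the heap and returns the base of the new space, zero queries
the current break without moving it, and a negative argument hands memory
back. A sketch of the contract, assuming the sbrk-backed default:

    void *base = (*__morecore)(4096);  /* grow: returns start of the new space */
    void *cur  = (*__morecore)(0);     /* query: now base + 4096 */
    (*__morecore)(-4096L);             /* shrink: return those bytes */
    cur = (*__morecore)(0);            /* back to base */
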
+
 
-void *
-malloc (size_t size)
+void * malloc (size_t size)
+{
+    void * ptr;
+    LOCK;
+    ptr = malloc_unlocked(size);
+    UNLOCK;
+    return(ptr);
+}
+
+static void * malloc_unlocked (size_t size)
 {
     void *result;
-    int log, block, blocks, i, lastblocks, start;
+    size_t log, block, blocks, i, lastblocks, start;
     struct list *next;
 
-    if (!initialized && !initialize())
+#if 1
+    /* Some programs will call malloc (0).  Lets be strict and return NULL */
+    if (size == 0)
         return NULL;
+#endif
+
+    if (size < sizeof (struct list))
+        size = sizeof (struct list);
 
 #if 1
     /* Some programs will call malloc (0).  Lets be strict and return NULL */
@@ -136,6 +188,10 @@ malloc (size_t size)
     if (size < sizeof (struct list))
         size = sizeof (struct list);
 
+    if (!initialized && !initialize()) {
+        return NULL;
+    }
+
     /* Determine the allocation policy based on the request size. */
     if (size <= BLOCKSIZE / 2) {
         /* Small allocation to receive a fragment of a block.
@@ -162,9 +218,10 @@ malloc (size_t size)
         } else {
             /* No free fragments of the desired size, so get a new block
                and break it into fragments, returning the first. */
-            result = malloc(BLOCKSIZE);
-            if (!result)
+            result = malloc_unlocked(BLOCKSIZE);
+            if (!result) {
                 return NULL;
+            }
             ++_fragblocks[log];
 
             /* Link all fragments but the first into the free list. */
@@ -213,8 +270,9 @@ malloc (size_t size)
             continue;
         }
         result = morecore(blocks * BLOCKSIZE);
-        if (!result)
+        if (!result) {
             return NULL;
+        }
         block = BLOCK(result);
         _heapinfo[block].busy.type = 0;
         _heapinfo[block].busy.info.size = blocks;
@@ -252,3 +310,242 @@ malloc (size_t size)
 
     return result;
 }
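
Note: for requests of at most BLOCKSIZE / 2 the allocator hands out a
power-of-two fragment carved from a single block; larger requests are
rounded up to whole blocks with BLOCKIFY. Worked example, assuming the
stock BLOCKSIZE of 4096: a 100-byte request yields a 128-byte fragment
(log == 7), so one block serves BLOCKSIZE >> 7 == 32 such fragments,
chained through _fraghead[7]. The (elided) logarithm computation reduces
to roughly:

    size_t log = 1, size = 100;
    --size;                     /* 99 */
    while ((size /= 2) != 0)    /* 49, 24, 12, 6, 3, 1 */
        ++log;                  /* ends with log == 7, 1 << 7 == 128 >= 100 */
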
+
+
+
+/* Return memory to the heap. */
+void free(void *ptr)
+{
+    LOCK;
+    free_unlocked(ptr);
+    UNLOCK;
+}
+
+static void free_unlocked(void *ptr)
+{
+    int block, blocks, i, type;
+    struct list *prev, *next;
+
+    if (ptr == NULL)
+        return;
+
+    block = BLOCK(ptr);
+
+    switch (type = _heapinfo[block].busy.type) {
+    case 0:
+        /* Find the free cluster previous to this one in the free list.
+           Start searching at the last block referenced; this may benefit
+           programs with locality of allocation. */
+        i = _heapindex;
+        if (i > block)
+            while (i > block)
+                i = _heapinfo[i].free.prev;
+        else {
+            do
+                i = _heapinfo[i].free.next;
+            while (i > 0 && i < block);
+            i = _heapinfo[i].free.prev;
+        }
+
+        /* Determine how to link this block into the free list. */
+        if (block == i + _heapinfo[i].free.size) {
+            /* Coalesce this block with its predecessor. */
+            _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
+            block = i;
+        } else {
+            /* Really link this block back into the free list. */
+            _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
+            _heapinfo[block].free.next = _heapinfo[i].free.next;
+            _heapinfo[block].free.prev = i;
+            _heapinfo[i].free.next = block;
+            _heapinfo[_heapinfo[block].free.next].free.prev = block;
+        }
+
+        /* Now that the block is linked in, see if we can coalesce it
+           with its successor (by deleting its successor from the list
+           and adding in its size). */
+        if (block + _heapinfo[block].free.size == _heapinfo[block].free.next) {
+            _heapinfo[block].free.size
+                += _heapinfo[_heapinfo[block].free.next].free.size;
+            _heapinfo[block].free.next
+                = _heapinfo[_heapinfo[block].free.next].free.next;
+            _heapinfo[_heapinfo[block].free.next].free.prev = block;
+        }
+
+        /* Now see if we can return stuff to the system. */
+        blocks = _heapinfo[block].free.size;
+        if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
+            && (*__morecore)(0) == ADDRESS(block + blocks)) {
+            _heaplimit -= blocks;
+            (*__morecore)(-blocks * BLOCKSIZE);
+            _heapinfo[_heapinfo[block].free.prev].free.next
+                = _heapinfo[block].free.next;
+            _heapinfo[_heapinfo[block].free.next].free.prev
+                = _heapinfo[block].free.prev;
+            block = _heapinfo[block].free.prev;
+        }
+
+        /* Set the next search to begin at this block. */
+        _heapindex = block;
+        break;
+
+    default:
+        /* Get the address of the first free fragment in this block. */
+        prev = (struct list *) ((char *) ADDRESS(block)
+                                + (_heapinfo[block].busy.info.frag.first
+                                   << type));
+
+        if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1
+            && _fragblocks[type] > 1) {
+            /* If all fragments of this block are free, remove them
+               from the fragment list and free the whole block. */
+            --_fragblocks[type];
+            for (next = prev, i = 1; i < BLOCKSIZE >> type; ++i)
+                next = next->next;
+            prev->prev->next = next;
+            if (next)
+                next->prev = prev->prev;
+            _heapinfo[block].busy.type = 0;
+            _heapinfo[block].busy.info.size = 1;
+            free_unlocked(ADDRESS(block));
+        } else if (_heapinfo[block].busy.info.frag.nfree) {
+            /* If some fragments of this block are free, link this fragment
+               into the fragment list after the first free fragment of
+               this block. */
+            next = ptr;
+            next->next = prev->next;
+            next->prev = prev;
+            prev->next = next;
+            if (next->next)
+                next->next->prev = next;
+            ++_heapinfo[block].busy.info.frag.nfree;
+        } else {
+            /* No fragments of this block are free, so link this fragment
+               into the fragment list and announce that it is the first
+               free fragment of this block. */
+            prev = (struct list *) ptr;
+            _heapinfo[block].busy.info.frag.nfree = 1;
+            _heapinfo[block].busy.info.frag.first
+                = (unsigned int) ((char *) ptr - (char *) NULL) % BLOCKSIZE
+                  >> type;
+            prev->next = _fraghead[type].next;
+            prev->prev = &_fraghead[type];
+            prev->prev->next = prev;
+            if (prev->next)
+                prev->next->prev = prev;
+        }
+        break;
+    }
+}
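
Note: whole free blocks live on a single list threaded through the _heapinfo
index entries in block-number order (entry 0 is the anchor), which is what
lets free_unlocked() coalesce in both directions: backward when
block == i + size(i), forward when block + size(block) == next. An
illustrative sequence against the public API (sizes assume BLOCKSIZE 4096):

    void *a = malloc(3 * 4096);   /* blocks [x .. x+2] */
    void *b = malloc(3 * 4096);   /* blocks [x+3 .. x+5] */
    void *c = malloc(3 * 4096);   /* blocks [x+6 .. x+8] */
    free(b);                      /* one 3-block run on the free list */
    free(a);                      /* a abuts b: forward merge, one 6-block run */
    free(c);                      /* backward merge into a 9-block run; if it
                                     ends the heap and is >= FINAL_FREE_BLOCKS,
                                     the tail is returned via (*__morecore)() */
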
+
+/* Resize the given region to the new size, returning a pointer
+   to the (possibly moved) region. This is optimized for speed;
+   some benchmarks seem to indicate that greater compactness is
+   achieved by unconditionally allocating and copying to a
+   new region. */
+void * realloc (void *ptr, size_t size)
+{
+    void *result, *previous;
+    size_t block, blocks, type;
+    size_t oldlimit;
+
+    if (!ptr)
+        return malloc(size);
+    if (!size) {
+        LOCK;
+        free_unlocked(ptr);
+        result = malloc_unlocked(0);
+        UNLOCK;
+        return(result);
+    }
+
+    LOCK;
+    block = BLOCK(ptr);
+
+    switch (type = _heapinfo[block].busy.type) {
+    case 0:
+        /* Maybe reallocate a large block to a small fragment. */
+        if (size <= BLOCKSIZE / 2) {
+            if ((result = malloc_unlocked(size)) != NULL) {
+                memcpy(result, ptr, size);
+                free_unlocked(ptr);
+            }
+            UNLOCK;
+            return result;
+        }
+
+        /* The new size is a large allocation as well; see if
+           we can hold it in place. */
+        blocks = BLOCKIFY(size);
+        if (blocks < _heapinfo[block].busy.info.size) {
+            /* The new size is smaller; return excess memory
+               to the free list. */
+            _heapinfo[block + blocks].busy.type = 0;
+            _heapinfo[block + blocks].busy.info.size
+                = _heapinfo[block].busy.info.size - blocks;
+            _heapinfo[block].busy.info.size = blocks;
+            free_unlocked(ADDRESS(block + blocks));
+            UNLOCK;
+            return ptr;
+        } else if (blocks == _heapinfo[block].busy.info.size) {
+            /* No size change necessary. */
+            UNLOCK;
+            return ptr;
+        } else {
+            /* Won't fit, so allocate a new region that will.  Free
+               the old region first in case there is sufficient adjacent
+               free space to grow without moving. */
+            blocks = _heapinfo[block].busy.info.size;
+            /* Prevent free from actually returning memory to the system. */
+            oldlimit = _heaplimit;
+            _heaplimit = 0;
+            free_unlocked(ptr);
+            _heaplimit = oldlimit;
+            result = malloc_unlocked(size);
+            if (!result) {
+                /* Now we're really in trouble.  We have to unfree
+                   the thing we just freed.  Unfortunately it might
+                   have been coalesced with its neighbors. */
+                if (_heapindex == block)
+                    malloc_unlocked(blocks * BLOCKSIZE);
+                else {
+                    previous = malloc_unlocked((block - _heapindex) * BLOCKSIZE);
+                    malloc_unlocked(blocks * BLOCKSIZE);
+                    free_unlocked(previous);
+                }
+                UNLOCK;
+                return NULL;
+            }
+            if (ptr != result)
+                memmove(result, ptr, blocks * BLOCKSIZE);
+            UNLOCK;
+            return result;
+        }
+        break;
+
+    default:
+        /* Old size is a fragment; type is logarithm to base two of
+           the fragment size. */
+        if ((size > 1 << (type - 1)) && (size <= 1 << type)) {
+            /* New size is the same kind of fragment. */
+            UNLOCK;
+            return ptr;
+        }
+        else {
+            /* New size is different; allocate a new space, and copy
+               the lesser of the new size and the old. */
+            result = malloc_unlocked(size);
+            if (!result) {
+                UNLOCK;
+                return NULL;
+            }
+            memcpy(result, ptr, MIN(size, (size_t)(1 << type)));
+            free_unlocked(ptr);
+            UNLOCK;
+            return result;
+        }
+        break;
+    }
+    UNLOCK;
+}
+
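
Note: the resulting realloc() obeys the usual contract plus this allocator's
strict malloc(0) policy: realloc(NULL, n) degenerates to malloc(n), and
realloc(p, 0) frees p and returns NULL because malloc_unlocked(0) returns
NULL. Shrinking a large block and resizing within the same fragment class
both return the original pointer. A short usage sketch (again assuming
BLOCKSIZE 4096):

    char *p = malloc(100);      /* 128-byte fragment */
    p = realloc(p, 120);        /* same fragment class: p returned unchanged */
    p = realloc(p, 3 * 4096);   /* moves to a 3-block region, 128 bytes copied */
    p = realloc(p, 4096);       /* shrinks in place, excess blocks freed */
    p = realloc(p, 0);          /* frees the region, returns NULL */
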