@@ -17,62 +17,45 @@
#include <unistd.h>
#include "malloc.h"
-#define MIN(x,y) ({ \
- const typeof(x) _x = (x); \
- const typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x < _y ? _x : _y; })
-
-
#ifdef __UCLIBC_HAS_THREADS__
#include <pthread.h>
-static pthread_mutex_t malloclock = PTHREAD_MUTEX_INITIALIZER;
-# define LOCK pthread_mutex_lock(&malloclock)
-# define UNLOCK pthread_mutex_unlock(&malloclock);
+pthread_mutex_t __malloclock = PTHREAD_MUTEX_INITIALIZER;
+# define LOCK pthread_mutex_lock(&__malloclock)
+# define UNLOCK pthread_mutex_unlock(&__malloclock);
#else
# define LOCK
# define UNLOCK
#endif
-
-static void * malloc_unlocked (size_t size);
-static void free_unlocked(void *ptr);
-
-static void * __morecore(long size);
-
-static char *_heapbase;
+
+
+char *_heapbase;
-static union info *_heapinfo;
-
-
-static size_t heapsize;
-
+union info *_heapinfo;
-static size_t _heapindex;
-
+size_t _heapindex;
-static size_t _heaplimit;
+size_t _heaplimit;
+
+struct alignlist *_aligned_blocks;
+
+
+
+
+
+static void * __morecore(long size);
+
+static size_t heapsize;
static size_t _fragblocks[BLOCKLOG];
-
static struct list _fraghead[BLOCKLOG];
-
static int initialized;
-
-struct alignlist
-{
- struct alignlist *next;
- __ptr_t aligned;
- __ptr_t exact;
-};
-static struct alignlist *_aligned_blocks;
-
/* Aligned allocation.
* Called within the lock in initialize() and morecore(),
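The hunk above is the heart of the patch: __malloclock and the heap bookkeeping globals (_heapbase, _heapinfo, _heapindex, _heaplimit, _aligned_blocks) lose their static linkage, evidently so that realloc and memalign can move into separate object files (their removal from this file follows below) while still sharing allocator state, and the internal entry points become the exported workers __malloc_unlocked and __free_unlocked. The wrapper/worker split matters because LOCK expands to a plain, non-recursive pthread_mutex_lock: any path that re-enters the allocator must come in through the _unlocked worker so the mutex is never taken twice by one thread. A minimal sketch of the pattern, using hypothetical demo_* names rather than the real routines:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Worker: the caller must already hold demo_lock.  Internal and
       recursive calls come straight here, so the non-recursive mutex
       is never acquired twice by the same thread. */
    static void *demo_alloc_unlocked(size_t size)
    {
        return malloc(size);   /* stand-in for the real allocator body */
    }

    /* Public wrapper: the only place the lock is taken. */
    void *demo_alloc(size_t size)
    {
        void *p;
        pthread_mutex_lock(&demo_lock);
        p = demo_alloc_unlocked(size);
        pthread_mutex_unlock(&demo_lock);
        return p;
    }

Note in passing that the UNLOCK macro keeps its stray trailing semicolon in the new text, so UNLOCK; expands to a double semicolon: harmless in the statement positions used here, but it would break an unbraced if/else.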
@@ -141,7 +124,7 @@ static void * morecore(size_t size)
newinfo[BLOCK(oldinfo)].busy.info.size
= BLOCKIFY(heapsize * sizeof (union info));
_heapinfo = newinfo;
- free_unlocked(oldinfo);
+ __free_unlocked(oldinfo);
heapsize = newsize;
}
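Here morecore has just grown the block-information table and copied it into newinfo; the old table is released through the renamed __free_unlocked because morecore itself already runs under __malloclock. The patch only declares static void * __morecore(long size); as orientation, here is a sketch of the conventional shape of such a routine (illustrative, assuming sbrk semantics, not a quote of uClibc's actual body):

    #include <unistd.h>

    static void *demo_morecore(long size)
    {
        void *result = sbrk(size);   /* move the program break by size bytes */
        if (result == (void *) -1)   /* sbrk failed: no more memory */
            return 0;
        return result;               /* old break = start of the new region */
    }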
@@ -166,12 +149,12 @@ void * malloc (size_t size)
{
void * ptr;
LOCK;
- ptr = malloc_unlocked(size);
+ ptr = __malloc_unlocked(size);
UNLOCK;
return(ptr);
}

-static void * malloc_unlocked (size_t size)
+void * __malloc_unlocked (size_t size)
{
void *result;
size_t log, block, blocks, i, lastblocks, start;
@@ -216,7 +199,7 @@ static void * malloc_unlocked (size_t size)
} else {
/* No free fragments of the desired size, so get a new block
and break it into fragments, returning the first. */
- result = malloc_unlocked(BLOCKSIZE);
+ result = __malloc_unlocked(BLOCKSIZE);
if (!result) {
return NULL;
}
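This hunk sits in the small-request path of __malloc_unlocked: requests of at most BLOCKSIZE/2 are rounded up to a power of two, and when no free fragment of that size exists, one whole block is taken via __malloc_unlocked (no re-locking, per the pattern above) and split into equal fragments. A worked example of the sizing, assuming the customary BLOCKSIZE of 4096 (the real constant lives in malloc.h):

    #include <stdio.h>

    #define DEMO_BLOCKSIZE 4096   /* assumed; see BLOCKSIZE in malloc.h */

    int main(void)
    {
        unsigned size = 100;      /* requested bytes */
        unsigned frag = 2, log = 1;

        /* Smallest power of two holding the request; equivalent to the
           log-base-two the allocator stores as the fragment type. */
        while (frag < size) {
            frag <<= 1;
            ++log;
        }
        printf("%u bytes -> %u-byte fragment (log %u), %u per block\n",
               size, frag, log, DEMO_BLOCKSIZE / frag);
        return 0;
    }

Run, this prints "100 bytes -> 128-byte fragment (log 7), 32 per block".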
@@ -327,11 +310,11 @@ void free(void *ptr)
}
}

- free_unlocked(ptr);
+ __free_unlocked(ptr);
UNLOCK;
}

-static void free_unlocked(void *ptr)
+void __free_unlocked(void *ptr)
{
int block, blocks, i, type;
struct list *prev, *next;
@@ -418,7 +401,7 @@ static void free_unlocked(void *ptr)
next->prev = prev->prev;
_heapinfo[block].busy.type = 0;
_heapinfo[block].busy.info.size = 1;
- free_unlocked(ADDRESS(block));
+ __free_unlocked(ADDRESS(block));
} else if (_heapinfo[block].busy.info.frag.nfree) {
/* If some fragments of this block are free, link this fragment
into the fragment list after the first free fragment of
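The surviving context shows the two fragment cases in __free_unlocked: when every fragment of a block is free again, the fragments are unlinked from their size class's doubly linked list and the whole block is recursively freed; otherwise the freed fragment is linked in after the first free fragment of its block. struct list itself is defined in malloc.h; a self-contained sketch of the insertion step, with a hypothetical demo_list standing in for it:

    /* Hedged sketch of the doubly linked free-list insert used for
       fragments (demo type; the real struct list lives in malloc.h). */
    struct demo_list {
        struct demo_list *next;
        struct demo_list *prev;
    };

    /* Insert node right after pos, the way a freed fragment is linked
       in after the first free fragment of its block. */
    static void demo_list_insert(struct demo_list *pos, struct demo_list *node)
    {
        node->next = pos->next;
        node->prev = pos;
        pos->next = node;
        if (node->next)
            node->next->prev = node;
    }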
@@ -449,151 +432,3 @@ static void free_unlocked(void *ptr)
}
}

-/* Resize the given region to the new size, returning a pointer
- to the (possibly moved) region. This is optimized for speed;
- some benchmarks seem to indicate that greater compactness is
- achieved by unconditionally allocating and copying to a
- new region. */
-void * realloc (void *ptr, size_t size)
-{
- void *result, *previous;
- size_t block, blocks, type;
- size_t oldlimit;
-
- if (!ptr)
- return malloc(size);
- if (!size) {
- LOCK;
- free_unlocked(ptr);
- result = malloc_unlocked(0);
- UNLOCK;
- return(result);
- }
-
- LOCK;
- block = BLOCK(ptr);
-
- switch (type = _heapinfo[block].busy.type) {
- case 0:
-
- if (size <= BLOCKSIZE / 2) {
- if ((result = malloc_unlocked(size)) != NULL) {
- memcpy(result, ptr, size);
- free_unlocked(ptr);
- }
- UNLOCK;
- return result;
- }
-
- /* The new size is a large allocation as well; see if
- we can hold it in place. */
- blocks = BLOCKIFY(size);
- if (blocks < _heapinfo[block].busy.info.size) {
- /* The new size is smaller; return excess memory
- to the free list. */
- _heapinfo[block + blocks].busy.type = 0;
- _heapinfo[block + blocks].busy.info.size
- = _heapinfo[block].busy.info.size - blocks;
- _heapinfo[block].busy.info.size = blocks;
- free_unlocked(ADDRESS(block + blocks));
- UNLOCK;
- return ptr;
- } else if (blocks == _heapinfo[block].busy.info.size) {
-
- UNLOCK;
- return ptr;
- } else {
- /* Won't fit, so allocate a new region that will. Free
- the old region first in case there is sufficient adjacent
- free space to grow without moving. */
- blocks = _heapinfo[block].busy.info.size;
-
- oldlimit = _heaplimit;
- _heaplimit = 0;
- free_unlocked(ptr);
- _heaplimit = oldlimit;
- result = malloc_unlocked(size);
- if (!result) {
- /* Now we're really in trouble. We have to unfree
- the thing we just freed. Unfortunately it might
- have been coalesced with its neighbors. */
- if (_heapindex == block)
- malloc_unlocked(blocks * BLOCKSIZE);
- else {
- previous = malloc_unlocked((block - _heapindex) * BLOCKSIZE);
- malloc_unlocked(blocks * BLOCKSIZE);
- free_unlocked(previous);
- }
- UNLOCK;
- return NULL;
- }
- if (ptr != result)
- memmove(result, ptr, blocks * BLOCKSIZE);
- UNLOCK;
- return result;
- }
- break;
-
- default:
- /* Old size is a fragment; type is logarithm to base two of
- the fragment size. */
- if ((size > 1 << (type - 1)) && (size <= 1 << type)) {
-
- UNLOCK;
- return ptr;
- }
- else {
- /* New size is different; allocate a new space, and copy
- the lesser of the new size and the old. */
- result = malloc_unlocked(size);
- if (!result) {
- UNLOCK;
- return NULL;
- }
- memcpy(result, ptr, MIN(size, (size_t)(1 << type)));
- free_unlocked(ptr);
- UNLOCK;
- return result;
- }
- break;
- }
- UNLOCK;
-}
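The realloc removed above presumably reappears in its own file built on the exported workers; its behavior is worth a gloss. The two edge cases come first: realloc(NULL, n) degenerates to malloc(n), and realloc(p, 0) frees p and returns malloc(0). The grow path then zeroes _heaplimit around the intermediate free_unlocked(ptr) so the freed space cannot be returned to the system, and on allocation failure it tries to "unfree" the old block by re-allocating it. A usage sketch of the edge-case semantics as implemented here:

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *p = realloc(NULL, 16);   /* null pointer: acts as malloc(16) */
        char *q;

        if (!p)
            return 1;
        strcpy(p, "grow me");

        q = realloc(p, 4096);          /* may move; contents are copied */
        if (!q) {                      /* on failure the old block remains */
            free(p);
            return 1;
        }
        /* In the implementation above, realloc(q, 0) frees q and returns
           malloc(0); portable code should not rely on that behavior. */
        free(q);
        return 0;
    }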
-
-__ptr_t memalign (size_t alignment, size_t size)
-{
- __ptr_t result;
- unsigned long int adj;
-
- result = malloc (size + alignment - 1);
- if (result == NULL)
- return NULL;
- adj = (unsigned long int) ((unsigned long int) ((char *) result -
- (char *) NULL)) % alignment;
- if (adj != 0)
- {
- struct alignlist *l;
- LOCK;
- for (l = _aligned_blocks; l != NULL; l = l->next)
- if (l->aligned == NULL)
-
- break;
- if (l == NULL)
- {
- l = (struct alignlist *) malloc (sizeof (struct alignlist));
- if (l == NULL) {
- free_unlocked (result);
- UNLOCK;
- return NULL;
- }
- l->next = _aligned_blocks;
- _aligned_blocks = l;
- }
- l->exact = result;
- result = l->aligned = (char *) result + alignment - adj;
- UNLOCK;
- }
-
- return result;
-}
-
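The removed memalign over-allocates by alignment - 1 bytes, advances the result to the next aligned address, and records the (exact, aligned) pair in _aligned_blocks so that free() can translate the aligned pointer back to the address malloc really returned; that is why _aligned_blocks became a shared global in the first hunk. Note also that this removed text calls malloc() for the alignlist node while __malloclock is already held, which would self-deadlock on the non-recursive mutex when threads are enabled; presumably one motivation for the rework. A usage sketch (memalign declared by hand here for self-containment):

    #include <stdio.h>
    #include <stdlib.h>

    extern void *memalign(size_t alignment, size_t size);

    int main(void)
    {
        void *p = memalign(64, 1000);  /* 1000 bytes on a 64-byte boundary */
        if (p) {
            printf("p = %p, p mod 64 = %lu\n",
                   p, (unsigned long) p % 64);   /* prints 0 for the mod */
            free(p);  /* free() consults _aligned_blocks for the real base */
        }
        return 0;
    }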