
New malloc routines.

Eric Andersen, 23 years ago
commit 9efafb8bbc

+ 33 - 0
libc/stdlib/malloc/.indent.pro

@@ -0,0 +1,33 @@
+--blank-lines-after-declarations
+--blank-lines-after-procedures
+--break-before-boolean-operator
+--no-blank-lines-after-commas
+--braces-on-if-line
+--braces-on-struct-decl-line
+--comment-indentation25
+--declaration-comment-column25
+--no-comment-delimiters-on-blank-lines
+--cuddle-else
+--continuation-indentation4
+--case-indentation0
+--else-endif-column33
+--space-after-cast
+--line-comments-indentation0
+--declaration-indentation1
+--dont-format-first-column-comments
+--dont-format-comments
+--honour-newlines
+--indent-level4
+/* changed from 0 to 4 */
+--parameter-indentation4
+--line-length78 /* changed from 75 */
+--continue-at-parentheses
+--no-space-after-function-call-names
+--dont-break-procedure-type
+--dont-star-comments
+--leave-optional-blank-lines
+--dont-space-special-semicolon
+--tab-size4
+/* additions by Mark */
+--case-brace-indentation0
+--leave-preprocessor-space
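
GNU indent reads a `.indent.pro` profile from the directory it is run in, so invoking `indent` inside libc/stdlib/malloc picks up the options above. As a rough illustration (an assumed fragment, not code from this commit), a function formatted under this profile comes out with 4-space indents, braces on the `if` line, a cuddled `else`, and a space after casts:

static int round_up(int len)
{
    if (len <= 0) {
        return 0;
    } else {
        /* --space-after-cast and --indent-level4 in effect */
        return (int) ((len + 7) & ~0x07);
    }
}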

+ 7 - 8
libc/stdlib/malloc/Makefile

@@ -24,17 +24,16 @@ TOPDIR=../
 include $(TOPDIR)Rules.make
 LIBC=$(TOPDIR)libc.a
 
-MSRC=alloc.c
-MOBJ=malloc.o realloc.o free.o calloc.o malloc_dbg.o free_dbg.o calloc_dbg.o
+CSRC=malloc.c
+COBJS=$(patsubst %.c,%.o, $(CSRC))
 
-all: $(MOBJ) $(LIBC)
 
-$(LIBC): $(MOBJ)
-	$(AR) $(ARFLAGS) $(LIBC) $(MOBJ)
+all: $(COBJS) $(LIBC)
 
-$(MOBJ): $(MSRC)
-	$(CC) $(CFLAGS) -DL_$* $< -c -o $*.o
+$(LIBC): $(COBJS)
+	$(AR) $(ARFLAGS) $(LIBC) $(COBJS)
+
+$(COBJS): Makefile
 
 clean:
 	rm -f *.[oa] *~ core
-

+ 0 - 208
libc/stdlib/malloc/alloc.c

@@ -1,208 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-struct chunkControl {
-	size_t nodeCount;
-	size_t chunkSize;
-};
-
-struct nodeControl {
-	struct chunkControl *chunk;
-	size_t nodeSize;
-};
-
-#define ROUND_UP_LENGTH(len) ((len+7) & ~0x07)
-
-extern struct nodeControl *mallocNextNode;
-
-#ifdef L_malloc
-/* This variable is a pointer to the next place to allocate from.
- * Note: This variable makes the code NOT thread save. */
-struct nodeControl *mallocNextNode = 0;
-static size_t PageSize = 0;
-
-#endif
-
-#ifdef L_calloc_dbg
-
-void *
-calloc_dbg(size_t num, size_t size, char * function, char * file, int line)
-{
-	void * ptr;
-	fprintf(stderr, "calloc of %d bytes at %s @%s:%d = ", num*size, function, file, line);
-	ptr = calloc(num,size);
-	fprintf(stderr, "%p\n", ptr);
-	return ptr;
-}
-
-#endif
-
-#ifdef L_malloc_dbg
-
-void *
-malloc_dbg(size_t len, char * function, char * file, int line)
-{
-	void * result;
-	fprintf(stderr, "malloc of %d bytes at %s @%s:%d = ", len, function, file, line);
-	result = malloc(len);
-	fprintf(stderr, "%p\n", result);    
-	return result;
-}
-
-#endif
-
-#ifdef L_free_dbg
-
-void
-free_dbg(void * ptr, char * function, char * file, int line)
-{
-	fprintf(stderr, "free of %p at %s @%s:%d\n", ptr, function, file, line);
-  	free(ptr);
-}
-
-#endif
-
-
-#ifdef L_calloc
-
-void *
-calloc(size_t num, size_t size)
-{
-	void * ptr = malloc(num*size);
-	if (ptr)
-		memset(ptr, 0, num*size);
-	return ptr;
-}
-
-#endif
-
-#ifdef L_malloc
-
-void *
-malloc(size_t len)
-{
-	void *result;
-	struct chunkControl *chunk;
-	struct nodeControl  *next;
-	size_t size;
-
-	/* round len up to keep things on even boundaries */
-	len = ROUND_UP_LENGTH(len);
-
-	if (len == 0)
-		return 0;
-
-TryAgain:
-	if (mallocNextNode != 0) {
-		/* first see if this request will fit on this chunk */
-		next  = mallocNextNode;
-		chunk = next->chunk;
-		if (((char *)next + sizeof(struct nodeControl)*2 + len) < 
-		    ((char *)chunk + chunk->chunkSize))
-		{
-			/* this request will fit, so simply move the next
-			 * pointer ahead and update chunk node count */
-			next->nodeSize = len;
-			result = (char *)next + sizeof(struct nodeControl);
-			chunk->nodeCount++;
-			next = (struct nodeControl *)
-			          ((char *)next + (sizeof(struct nodeControl) + len));
-			next->chunk = chunk;
-			next->nodeSize = 0;
-			mallocNextNode = next;
-
-			return result; /* normal return path */
-		}
-		
-	}
-	
-	/* the request will not fit on this chunk, so get another chunk */
-	if (PageSize == 0) {
-		PageSize = getpagesize();
-	}
-	size = len + (sizeof(struct chunkControl) + (sizeof(struct nodeControl) * 2));
-	if (size < PageSize * 2) {
-		size = PageSize * 2;
-	}
-	size = (size + (PageSize-1)) & ~(PageSize-1);
-
-	chunk = mmap((void *)0, size, PROT_READ | PROT_WRITE,
-	              MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
-	if (chunk == (void*)-1)
-		return 0;
-
-	chunk->chunkSize = size;
-	chunk->nodeCount = 0;
-	next             = (struct nodeControl *)
-		                 ((char *)chunk + sizeof(struct chunkControl));
-	next->chunk      = chunk;
-	mallocNextNode   = next;
-
-	goto TryAgain;
-}
-
-#endif
-
-#ifdef L_free
-
-void
-free(void * ptr)
-{
-	struct chunkControl *chunk;
-	struct nodeControl  *node;
-	
-	if (ptr == 0) {
-		return;
-	}
-	/* get a pointer to the control information for this memory node
-	 * and the chunk it belongs to */
-	node  = (struct nodeControl *)ptr - 1;
-	chunk = node->chunk;
-	/* decrement the node count and if it is zero free the chunk */
-	chunk->nodeCount--;
-	if (chunk->nodeCount == 0) {
-		if ((void *)mallocNextNode >= (void *)chunk && 
-		     ((void *)mallocNextNode < (void *)((char *)chunk + chunk->chunkSize)))
-		{
-			mallocNextNode = 0;
-		}
-		munmap(chunk, chunk->chunkSize);
-	}
-}
-
-#endif
-
-#ifdef L_realloc
-
-void *
-realloc(void *ptr, size_t len)
-{
-	struct nodeControl *node;
-	size_t oldSize;
-	void *new;
-	
-	
-	if (ptr == 0) {
-		return malloc(len);
-	}
-	if (len == 0) {
-		free(ptr);
-		return 0;
-	}
-	node    = (struct nodeControl *)ptr - 1;
-	oldSize = node->nodeSize;
-	if (oldSize >= len) {
-		return ptr;
-	}
-	
-	new = malloc(len);
-	memcpy(new, ptr, len);
-	free(ptr);
-	return new;
-}
-
-#endif
-

+ 214 - 0
libc/stdlib/malloc/avlmacro.h

@@ -0,0 +1,214 @@
+/* MACRO-CODED FAST FIXED AVL TREES IMPLEMENTATION IN C              */
+/* COPYRIGHT (C) 1998 VALERY SHCHEDRIN                               */
+/* IT IS DISTRIBUTED UNDER GLPL (GNU GENERAL LIBRARY PUBLIC LICENSE) */
+
+#define balance(objname, pr, root, ht_changed) \
+  { \
+    objname *p; \
+    p = *root; \
+    if (p->bal_##pr < -1) \
+    { \
+      if (p->l_##pr->bal_##pr == 1) \
+      { \
+	objname *pp; \
+	pp=p->l_##pr; *root=p->l_##pr->r_##pr; p->l_##pr = (*root)->r_##pr; \
+	(*root)->r_##pr = p; pp->r_##pr = (*root)->l_##pr; \
+	p = *root; p->l_##pr = pp; \
+	if (p->bal_##pr > 0) p->l_##pr ->bal_##pr = -p->bal_##pr ; \
+	else p->l_##pr ->bal_##pr = 0; \
+	if (p->bal_##pr < 0) p->r_##pr ->bal_##pr = -p->bal_##pr ; \
+	else p->r_##pr ->bal_##pr = 0; \
+	p->bal_##pr = 0; \
+	ht_changed = 1; \
+      } \
+      else \
+      { \
+	ht_changed = (p->l_##pr ->bal_##pr)?1:0; \
+	*root = p->l_##pr ; \
+	p->l_##pr = (*root)->r_##pr ; (*root)->r_##pr = p; \
+	p->bal_##pr = - (++((*root)->bal_##pr )); \
+      } \
+    } \
+    else if (p->bal_##pr > 1) \
+    { \
+      if (p->r_##pr->bal_##pr == -1) \
+      { \
+	objname *pp; \
+	pp=p->r_##pr ; *root=p->r_##pr ->l_##pr ; p->r_##pr =(*root)->l_##pr ; \
+	(*root)->l_##pr = p; pp->l_##pr = (*root)->r_##pr ; \
+	p = *root; p->r_##pr = pp; \
+	if (p->bal_##pr > 0) p->l_##pr ->bal_##pr = -p->bal_##pr ; \
+	else p->l_##pr ->bal_##pr = 0; \
+	if (p->bal_##pr < 0) p->r_##pr ->bal_##pr = -p->bal_##pr ; \
+	else p->r_##pr ->bal_##pr = 0; \
+	p->bal_##pr = 0; \
+	ht_changed = 1; \
+      } \
+      else \
+      { \
+	ht_changed = (p->r_##pr ->bal_##pr)?1:0; \
+	*root = p->r_##pr ; \
+	p->r_##pr = (*root)->l_##pr ; (*root)->l_##pr = p; \
+	p->bal_##pr = - (--((*root)->bal_##pr )); \
+      } \
+    } else ht_changed = 0; \
+  }
+
+#define Avl_r_insert_proto(objname, pr, COMPARE) \
+  static int Avl_##objname##pr##_r_insert(objname **root) \
+  { \
+    int i; /* height increase */ \
+    if (!*root) \
+    { \
+      *root = Avl_##objname##pr##_new_node; \
+      Avl_##objname##pr##_new_node = NULL; \
+      return 1; \
+    } \
+    COMPARE(i, Avl_##objname##pr##_new_node, *root); \
+    \
+    if (i < 0) \
+    { /* insert into the left subtree */ \
+      i = -Avl_##objname##pr##_r_insert(&((*root)->l_##pr)); \
+      if (Avl_##objname##pr##_new_node != NULL) return 0; /* already there */ \
+    } \
+    else if (i > 0) \
+    { /* insert into the right subtree */ \
+      i = Avl_##objname##pr##_r_insert(&((*root)->r_##pr)); \
+      if (Avl_##objname##pr##_new_node != NULL) return 0; /* already there */ \
+    } \
+    else \
+    { /* found */ \
+      Avl_##objname##pr##_new_node = *root; \
+      return 0; \
+    } \
+    if (!i) return 0; \
+    (*root)->bal_##pr += i; /* update balance factor */ \
+    if ((*root)->bal_##pr) \
+    { \
+      balance(objname,pr,root,i); \
+      return 1-i; \
+    } \
+    else return 0; \
+  }
+
+#define Avl_r_delete_proto(objname,pr,COMPARE) \
+  static int Avl_##objname##pr##_r_delete(objname **root) \
+  { \
+    int i; /* height decrease */ \
+    \
+    if (!*root) return 0; /* not found */ \
+    \
+    COMPARE(i, Avl_##objname##pr##_new_node, *root); \
+    \
+    if (i < 0) \
+      i = -Avl_##objname##pr##_r_delete(&((*root)->l_##pr)); \
+    else if (i > 0) \
+      i =  Avl_##objname##pr##_r_delete(&((*root)->r_##pr)); \
+    else \
+    { \
+      if (!(*root)->l_##pr) \
+      { \
+	*root = (*root)->r_##pr; \
+	return 1; \
+      } \
+      else if (!(*root)->r_##pr) \
+      { \
+	*root = (*root)->l_##pr; \
+	return 1; \
+      } \
+      else \
+      { \
+	i = Avl_##objname##pr##_r_delfix(&((*root)->r_##pr)); \
+	Avl_##objname##pr##_new_node->l_##pr = (*root)->l_##pr; \
+	Avl_##objname##pr##_new_node->r_##pr = (*root)->r_##pr; \
+	Avl_##objname##pr##_new_node->bal_##pr = (*root)->bal_##pr; \
+	*root = Avl_##objname##pr##_new_node; \
+      } \
+    } \
+    if (!i) return 0; \
+    (*root)->bal_##pr -= i; \
+    if ((*root)->bal_##pr) \
+    { \
+      balance(objname,pr,root,i); \
+      return i; \
+    } \
+    return 1; \
+  }
+
+#define Avl_r_delfix_proto(objname,pr) \
+  static int Avl_##objname##pr##_r_delfix(objname **root) \
+  { \
+    int i; /* height decrease */ \
+    \
+    if (!(*root)->l_##pr) \
+    { \
+      Avl_##objname##pr##_new_node = *root; \
+      *root = (*root)->r_##pr; \
+      return 1; \
+    } \
+    i = -Avl_##objname##pr##_r_delfix(&((*root)->l_##pr)); \
+    if (!i) return 0; \
+    (*root)->bal_##pr -= i; \
+    if ((*root)->bal_##pr) \
+    { \
+      balance(objname,pr,root,i); \
+      return i; \
+    } \
+    return 1; \
+  }
+
+#define Avl_ins_proto(alias,objname,pr) \
+  static objname *alias##_ins(objname *data) \
+  { \
+    Avl_##objname##pr##_new_node = data; \
+    (data)->l_##pr = NULL; \
+    (data)->r_##pr = NULL; \
+    (data)->bal_##pr = 0; \
+    Avl_##objname##pr##_r_insert(&Avl_##objname##pr##_tree); \
+    if (Avl_##objname##pr##_new_node) \
+      return Avl_##objname##pr##_new_node; \
+    return NULL; \
+  }
+
+#define Avl_del_proto(alias,objname,pr) \
+  static void alias##_del(objname *data) \
+  { \
+    Avl_##objname##pr##_new_node = data; \
+    Avl_##objname##pr##_r_delete(&Avl_##objname##pr##_tree); \
+  }
+
+#define Avl_replace_proto(alias,objname,pr,COMPARE) \
+  static void alias##_replace(objname *data) \
+  { \
+    objname **p = &Avl_##objname##pr##_tree; \
+    int cmp; \
+    while (*p) \
+    { \
+      COMPARE(cmp, data, *p); \
+      if (cmp < 0) \
+	p = &((*p)->l_##pr); \
+      else if (cmp > 0) \
+	p = &((*p)->r_##pr); \
+      else \
+      { \
+	(data)->l_##pr = (*p)->l_##pr; \
+	(data)->r_##pr = (*p)->r_##pr; \
+	(data)->bal_##pr = (*p)->bal_##pr; \
+	*p = data; \
+	return; \
+      } \
+    } \
+  }
+
+#define Avl_Root(objname,pr) Avl_##objname##pr##_tree
+
+#define Avl_Tree(alias,objname,pr,COMPARE) \
+static objname *Avl_##objname##pr##_tree = NULL; \
+static objname *Avl_##objname##pr##_new_node; \
+Avl_r_insert_proto(objname,pr,COMPARE) \
+Avl_r_delfix_proto(objname,pr) \
+Avl_r_delete_proto(objname,pr,COMPARE) \
+Avl_ins_proto(alias,objname,pr) \
+Avl_del_proto(alias,objname,pr) \
+Avl_replace_proto(alias,objname,pr,COMPARE)
+
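
The macros above act as C "templates": Avl_Tree(alias, objname, pr, COMPARE) emits a complete set of static AVL helpers for one tree, keyed by the field prefix `pr`. malloc.c below instantiates it twice for Block_t (prefixes `free_mem` and `ptrs`). A minimal sketch of the contract a client type has to meet (Node_t, NODE_COMPARE and the `idx` prefix are hypothetical, not part of this commit):

typedef struct Node_s Node_t;

struct Node_s {
    int key;                /* payload consulted by the comparator      */
    Node_t *l_idx, *r_idx;  /* left/right subtrees for the `idx` tree   */
    char bal_idx;           /* AVL balance factor for the `idx` tree    */
};

/* COMPARE(i, a, b) must set i to <0, 0 or >0, like strcmp */
#define NODE_COMPARE(i, a, b) { (i) = (a)->key - (b)->key; }

/* Avl_Tree(node, Node_t, idx, NODE_COMPARE) then defines the static helpers
 * node_ins(), node_del() and node_replace(), with the tree root available
 * as Avl_Root(Node_t, idx). */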

+ 765 - 0
libc/stdlib/malloc/malloc.c

@@ -0,0 +1,765 @@
+/*
+  mmalloc - heap manager based on heavy use of virtual memory management.
+  Copyright (C) 1998   Valery Shchedrin
+
+  This library is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Library General Public
+  License as published by the Free Software Foundation; either
+  version 2 of the License, or (at your option) any later version.
+
+  This library is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  Library General Public License for more details.
+
+  You should have received a copy of the GNU Library General Public
+  License along with this library; if not, write to the Free
+  Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+  MA 02111-1307, USA
+  
+  Public Functions:
+  
+  void *mmalloc(size_t size);
+  
+    Allocates `size` bytes;
+    returns NULL if no free memory is available.
+  
+  void *mcalloc(size_t unit, size_t quantity);
+  
+    Allocates `quantity*unit` zeroed bytes via an internal malloc call.
+  
+  void *mrealloc(void *ptr, size_t size);
+  
+    Reallocates the already allocated block `ptr`; if `ptr` is not a valid
+    block it behaves like malloc. Returns NULL if no free memory is available.
+  
+  void *mrealloc_no_move(void *ptr, size_t size);
+  
+    Reallocates the already allocated block `ptr`; returns NULL if `ptr` is
+    not a valid block or if the reallocation cannot be done by shrinking or
+    expanding the already allocated block in place.
+  
+  void mfree(void *ptr);
+  
+    Frees an already allocated block; if `ptr` is not a valid block,
+    nothing happens.
+*/
+
+#define _POSIX_SOURCE
+#define _XOPEN_SOURCE
+#include <sys/types.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <asm/page.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include "malloc.h"
+
+
+#define M_DOTRIMMING 1
+#define M_MULTITHREADED 0
+
+#define VALLOC_MSTART  ((void*)0x1c000000)
+#define LARGE_MSTART   ((void*)0x19000000)
+#define HUNK_MSTART    ((void*)0x18000000)
+#define HUNK_MSIZE     M_PAGESIZE
+#define HUNK_ID        0x99171713
+
+/* alignment of allocations > HUNK_THRESHOLD */
+#define MALLOC_ALIGN    4
+
+/* allocations < HUNK_THRESHOLD will not be aligned */
+#define HUNK_THRESHOLD  4
+
+/*up to HUNK_MAXSIZE blocks will be joined together to decrease memory waste*/
+#define HUNK_MAXSIZE 128
+
+/* returns value not less than size, aligned to MALLOC_ALIGN */
+#define ALIGN(size) (((size)+(MALLOC_ALIGN)-1)&(~((MALLOC_ALIGN)-1)))
+
+/* aligns s or p to page boundaries */
+#define PAGE_ALIGN(s) (((s)+M_PAGESIZE-1)&(~(M_PAGESIZE-1)))
+#define PAGE_ALIGNP(p) ((char*)PAGE_ALIGN((unsigned)(p)))
+#define PAGE_DOWNALIGNP(p) ((char*)(((unsigned)(p))&(~(M_PAGESIZE-1))))
+
+/* returns v * 2 for your machine (speed-up) */
+#define MUL2(v)  ((v)*2)
+
+/* does v *= 8 for your machine (speed-up) */
+#define EMUL8(v) v*=8
+
+/* does v/8 for your machine (speed-up) */
+#define DIV8(v)  ((v)/8)
+
+#if M_MULTITHREADED
+#error This version does not support threads
+#else
+typedef int mutex_t;
+#define mutex_lock(x)
+#define mutex_unlock(x)
+#define mutex_init(x)
+#define MUTEX_INITIALIZER 0
+#endif
+
+static int mmalloc_initialized = -1;
+ /* -1 == uninitialized, 0 == initializing, 1 == initialized */
+
+static mutex_t malloc_lock = MUTEX_INITIALIZER;
+
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void*)-1)
+#endif
+
+#if defined(MAP_ANONYMOUS) && !defined(MAP_ANON)
+#define MAP_ANON MAP_ANONYMOUS
+#endif
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+/* guess pagesize */
+#ifndef M_PAGESIZE
+ #ifdef _SC_PAGESIZE
+  #ifndef _SC_PAGE_SIZE
+   #define _SC_PAGE_SIZE _SC_PAGESIZE
+  #endif
+ #endif
+ #ifdef _SC_PAGE_SIZE
+  #define M_PAGESIZE sysconf(_SC_PAGE_SIZE)
+ #else /* !_SC_PAGESIZE */
+  #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+   extern size_t getpagesize();
+   #define M_PAGESIZE getpagesize()
+  #else /* !HAVE_GETPAGESIZE */
+   #include <sys/param.h>
+   #ifdef EXEC_PAGESIZE
+    #define M_PAGESIZE EXEC_PAGESIZE
+   #else /* !EXEC_PAGESIZE */
+    #ifdef NBPG
+     #ifndef CLSIZE
+      #define M_PAGESIZE NBPG
+     #else /* !CLSIZE */
+      #define M_PAGESIZE (NBPG*CLSIZE)
+     #endif /* CLSIZE */
+    #else
+     #ifdef NBPC
+      #define M_PAGESIZE NBPC
+     #else /* !NBPC */
+      #ifdef PAGESIZE
+       #define M_PAGESIZE PAGESIZE
+      #else /* !PAGESIZE */
+       #define M_PAGESIZE 4096
+      #endif /* PAGESIZE */
+     #endif /* NBPC */
+    #endif /* NBPG */
+   #endif /* EXEC_PAGESIZE */
+  #endif /* HAVE_GETPAGESIZE */
+ #endif /* _SC_PAGE_SIZE */
+#endif /* defined(M_PAGESIZE) */
+
+/* HUNK MANAGER */
+
+typedef struct Hunk_s  Hunk_t;
+
+struct Hunk_s { /* Hunked block - 8 byte overhead */
+  int id; /* unique id */
+  unsigned int total:12, used:12, size : 8;
+  Hunk_t *next; /* next free in free_h */
+};
+
+static Hunk_t *free_h[HUNK_MAXSIZE+1]; /* free hash */
+int total_h[HUNK_MAXSIZE+1]; /* Hunk_t's `total` member */
+
+#define usagemap(h) (((unsigned char *)(h))+sizeof(Hunk_t))
+#define hunk_ptr(h) (((char*)(h))+sizeof(Hunk_t)+ALIGN(DIV8(h->total+7)))
+#define hunk(h)  ((Hunk_t*)(h))
+
+/* hunk_alloc allocates <= HUNK_MAXSIZE blocks */
+static void *hunk_alloc(int size)
+{
+  Hunk_t *p;
+  unsigned long *cpl;
+  int i, c;
+  
+  if (size >= HUNK_THRESHOLD) size = ALIGN(size);
+  
+  /* Look for already allocated hunkblocks */
+  if ((p = free_h[size]) == NULL)
+  {
+    if ((p = (Hunk_t*)mmap(HUNK_MSTART,HUNK_MSIZE,PROT_READ|PROT_WRITE,
+	MAP_PRIVATE|MAP_ANON,0,0)) == (Hunk_t*)MAP_FAILED)
+      return NULL;
+    memset(p,0,HUNK_MSIZE);
+    p->id = HUNK_ID;
+    p->total = total_h[size];
+    /* p->used = 0; */
+    p->size = size;
+    /* p->next = (Hunk_t*)NULL; */
+    /* memset(usagemap(p), 0, bound); */
+    free_h[size] = p;
+  }
+
+  /* Locate free point in usagemap */
+  for (cpl=(unsigned long*)usagemap(p);*cpl==0xFFFFFFFF;cpl++);
+  i = ((unsigned char *)cpl) - usagemap(p);
+  if (*(unsigned short*)cpl != 0xFFFF) {
+    if (*(unsigned char*)cpl == 0xFF) {
+      c = *(int*)(((unsigned char *)cpl)+1); i++;
+      } else  c = *(int*)(unsigned char *)cpl;
+  } else {
+    i+=2; c = *(((unsigned char *)cpl)+2);
+    if (c == 0xFF) { c = *(int*)(((unsigned char *)cpl)+3); i++; }
+  }
+  EMUL8(i);
+  if ((c & 0xF) == 0xF) { c >>= 4; i+=4; }
+  if ((c & 0x3) == 0x3) { c >>= 2; i+=2; }
+  if (c & 1) i++;
+  
+  usagemap(p)[DIV8(i)] |= (1 << (i & 7)); /* set bit */
+  
+  /* Increment counter and update hashes */
+  if (++p->used == p->total)
+  {
+    free_h[p->size] = p->next;
+    p->next = NULL;
+  }
+  return hunk_ptr(p)+i*p->size;
+}
+
+/* hunk_free frees blocks allocated by hunk_alloc */
+static void hunk_free(char *ptr)
+{
+  unsigned char *up;
+  int i, v;
+  Hunk_t *h;
+  
+  if (!ptr) return;
+  
+  h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
+  
+  /* Validate `ptr` */
+  if (h->id != HUNK_ID) return;
+  v = ptr - hunk_ptr(h);
+  i = v / h->size;
+  if (v % h->size != 0 || i < 0 || i >= h->total) return;
+  
+  /* Update `usagemap` */
+  up = &(usagemap(h)[DIV8(i)]);
+  i = 1 << (i&7);
+  if (!(*up & i)) return;
+  *up ^= i;
+  
+  /* Update hunk counters */
+  if (h->used == h->total)
+  {
+    if (--h->used)
+    { /* insert into free_h */
+      h->next = free_h[h->size];
+      free_h[h->size] = h;
+    } /* else - it will be unmapped */
+  }
+  else
+  {
+    if (!--h->used)
+    { /* delete from free_h - will be bl_freed*/
+      Hunk_t *p, *pp;
+      for (p=free_h[h->size],pp=NULL;p!=h;pp=p,p=p->next);
+      if (!pp)
+	free_h[h->size] = p->next;
+      else
+	pp->next = p->next;
+    }
+  }
+  
+  /* Unmap empty Hunk_t */
+  if (!h->used) munmap((void*)h,HUNK_MSIZE);
+}
+
+/* BLOCK MANAGER */
+
+typedef struct Block_s Block_t;
+
+struct Block_s /* 32-bytes long control structure (if 4-byte aligned) */
+{
+  char *ptr;                        /* pointer to related data */
+  Block_t *next;                    /* next in free_mem list */
+  Block_t *l_free_mem, *r_free_mem; /* left & right subtrees of <free_mem> */
+  Block_t *l_ptrs, *r_ptrs;         /* left & right subtrees of <ptrs> */
+  size_t size;                      /* size - divided by align */
+
+  /* packed 4-byte attributes */
+/* { */
+  char bal_free_mem : 8;   /* balance of <free_mem> subtree */
+  char bal_ptrs : 8;       /* balance of <ptrs> subtree */
+  unsigned int used : 1;   /* used/free state of the block */
+  unsigned int broken : 1; /* 1 if previous block can't be merged with it */
+/* } */
+};
+
+static Block_t *bl_last; /* last mmapped block */
+
+#define bl_get() hunk_alloc(sizeof(Block_t))
+#define bl_rel(p) hunk_free((char*)p)
+
+/* like C++ templates ;-) */
+
+#include "avlmacro.h"
+
+#define FREE_MEM_COMPARE(i,a,b) { i = (a)->size - (b)->size; }
+#define PTRS_COMPARE(i,a,b) { i = (a)->ptr - (b)->ptr; }
+
+Avl_Tree(free_mem,Block_t,free_mem,FREE_MEM_COMPARE)
+Avl_Tree(ptrs,Block_t,ptrs,PTRS_COMPARE)
+
+#define free_mem_root Avl_Root(Block_t, free_mem)
+#define ptrs_root Avl_Root(Block_t, ptrs)
+
+/* pp is freed block */
+#define FREE_MEM_DEL_BLOCK(pp) \
+{ \
+  for (p = free_mem_root;;) \
+    if (p->size > pp->size) p = p->l_free_mem; \
+    else if (p->size < pp->size) p = p->r_free_mem; \
+    else break; \
+  if (p == pp) \
+  { \
+    if (pp->next) free_mem_replace(pp->next); \
+    else free_mem_del(pp); \
+  } \
+  else \
+  { \
+    for (;p->next != pp; p = p->next); \
+    p->next = pp->next; \
+  } \
+}
+
+#define FREE_MEM_INS_BLOCK(pp) \
+{ \
+  if ((p = free_mem_ins(pp)) != NULL)\
+  {\
+    pp->next = p->next;\
+    p->next = pp;\
+  }\
+  else pp->next = NULL; \
+}
+
+/* `b` is current block, `pp` is next block */
+#define COMBINE_BLOCKS(b,pp) \
+{\
+  ptrs_del(pp); \
+  b->size += pp->size; \
+  if (pp == bl_last) bl_last = b; \
+  bl_rel(pp); \
+}
+
+/* initializes new block b */
+#define INIT_BLOCK(b, pppp, sz) \
+{ \
+  memset(b, 0, sizeof(Block_t)); \
+  b->ptr  = pppp; \
+  b->size = sz; \
+  ptrs_ins(b); \
+  FREE_MEM_INS_BLOCK(b); \
+}
+
+/* `b` is current block, `sz` its new size */
+/* block `b` will be splitted to one busy & one free block */
+#define SPLIT_BLOCK(b,sz) \
+{\
+  Block_t *bt; \
+  bt = bl_get(); \
+  INIT_BLOCK(bt, b->ptr + sz, b->size - sz); \
+  b->size = sz; \
+  if (bl_last == b) bl_last = bt; \
+  bl_uncommit(bt);\
+}
+
+/* `b` is current block, `pp` is next free block, `sz` is needed size */
+#define SHRINK_BLOCK(b,pp,sz) \
+{\
+  FREE_MEM_DEL_BLOCK(pp); \
+  pp->ptr = b->ptr + sz; \
+  pp->size += b->size - sz; \
+  b->size = sz; \
+  FREE_MEM_INS_BLOCK(pp); \
+  bl_uncommit(pp); \
+}
+
+static Block_t *bl_mapnew(size_t size)
+{
+  size_t map_size;
+  Block_t *pp, *p;
+  void *pt;
+  
+  map_size = PAGE_ALIGN(size);
+  pt = mmap(LARGE_MSTART,map_size,PROT_READ|PROT_WRITE|PROT_EXEC,
+            MAP_PRIVATE|MAP_ANON,0,0);
+  if (pt == MAP_FAILED) return (Block_t*)NULL;
+  
+  bl_last = pp = bl_get();
+  INIT_BLOCK(pp, (char*)pt, map_size);
+  pp->broken = 1;
+  
+  return pp;
+}
+
+static void bl_uncommit(Block_t *b)
+{
+  char *u_start, *u_end;
+  
+  u_start = PAGE_ALIGNP(b->ptr);
+  u_end   = PAGE_DOWNALIGNP(b->ptr+b->size);
+  if (u_end <= u_start) return;
+
+#if M_DOTRIMMING
+  mmap(u_start,u_end-u_start,PROT_READ|PROT_WRITE|PROT_EXEC,
+    MAP_PRIVATE|MAP_ANON|MAP_FIXED,0,0);
+#endif
+}
+
+/* requested size must be aligned to ALIGNMENT */
+static Block_t *bl_alloc(size_t size)
+{
+  Block_t *p, *pp;
+  
+  /* try to find needed space in existing memory */
+  for (p = free_mem_root, pp = NULL;p;)
+  {
+    if (p->size > size) { pp = p; p = p->l_free_mem; }
+    else if (p->size < size) p = p->r_free_mem;
+    else { pp = p; break; }
+  }
+
+  if (!pp)
+  { /* map some memory */
+    if (!bl_last)
+    { /* just do initial mmap */
+      pp = bl_mapnew(size);
+      if (!pp) return NULL;
+    }
+    else if (!bl_last->used)
+    { /* try growing last unused */
+      if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
+ PAGE_ALIGNP(bl_last->ptr+bl_last->size) - PAGE_DOWNALIGNP(bl_last->ptr),
+ PAGE_ALIGNP(bl_last->ptr+size)-PAGE_DOWNALIGNP(bl_last->ptr),
+        0) == MAP_FAILED)
+      { /* unable to grow -- initiate new block */
+	pp = bl_mapnew(size);
+	if (!pp) return NULL;
+      }
+      else
+      {
+	pp = bl_last;
+	FREE_MEM_DEL_BLOCK(pp);
+	pp->size = PAGE_ALIGNP(pp->ptr+size) - pp->ptr;
+	FREE_MEM_INS_BLOCK(pp);
+      }
+    }
+    else
+    { /* bl_last is used block */
+      if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
+PAGE_ALIGNP(bl_last->ptr+bl_last->size)-PAGE_DOWNALIGNP(bl_last->ptr),
+PAGE_ALIGNP(bl_last->ptr+bl_last->size+size) - PAGE_DOWNALIGNP(bl_last->ptr),
+	0) == MAP_FAILED)
+      {
+	pp = bl_mapnew(size);
+	if (!pp) return NULL;
+      }
+      else
+      {
+	pp = bl_get();
+	INIT_BLOCK(pp,bl_last->ptr+bl_last->size,
+ PAGE_ALIGNP(bl_last->ptr+bl_last->size+size)-bl_last->ptr-bl_last->size);
+	bl_last = pp;
+      }
+    }
+  }
+  
+  /* just delete this node from free_mem tree */
+  if (pp->next) free_mem_replace(pp->next); else free_mem_del(pp);
+  pp->used = 1;
+  
+  if (pp->size - size > MALLOC_ALIGN)
+  { /* this block can be split (it is unused, not broken) */
+    SPLIT_BLOCK(pp,size);
+  }
+  
+  return pp;
+}
+
+static void bl_free(Block_t *b)
+{
+  Block_t *p, *bl_next, *bl_prev;
+  
+  /* Look for blocks before & after `b` */
+  for (p = ptrs_root, bl_next = NULL, bl_prev = NULL; p;)
+  {
+    if      (p->ptr > b->ptr) { bl_next = p; p = p->l_ptrs; }
+    else if (p->ptr < b->ptr) { bl_prev = p; p = p->r_ptrs; }
+    else break;
+  }
+  if (b->l_ptrs)
+    for (bl_prev = b->l_ptrs; bl_prev->r_ptrs; bl_prev = bl_prev->r_ptrs);
+  if (b->r_ptrs)
+    for (bl_next = b->r_ptrs; bl_next->l_ptrs; bl_next = bl_next->l_ptrs);
+  
+  if (bl_next && !bl_next->broken && !bl_next->used)
+  {
+    FREE_MEM_DEL_BLOCK(bl_next)
+    COMBINE_BLOCKS(b,bl_next)
+  }
+
+  if (bl_prev && !b->broken && !bl_prev->used)
+  {
+    FREE_MEM_DEL_BLOCK(bl_prev)
+    COMBINE_BLOCKS(bl_prev,b)
+    b = bl_prev;
+  }
+  
+  b->used = 0;
+  FREE_MEM_INS_BLOCK(b)
+  bl_uncommit(b);
+}
+
+static void malloc_init(void)
+{
+  int i, mapsize, x, old_x, gcount;
+  
+  mapsize = M_PAGESIZE;
+  
+  mmalloc_initialized = 0;
+  bl_last = NULL;
+  free_mem_root = NULL;
+  ptrs_root = NULL;
+  mapsize -= sizeof(Hunk_t);
+  for (i = 1; i <= HUNK_MAXSIZE; i++)
+  {
+    free_h[i] = (Hunk_t*)NULL;
+    for (x = mapsize/i, gcount = 0, old_x = 0; old_x != x;)
+    {
+      old_x = x;
+      x = (mapsize - ALIGN(DIV8(old_x+7)))/i;
+      if (gcount > 1 && x*i + ALIGN(DIV8(x+7)) <= mapsize) break;
+      if (x*i + ALIGN(DIV8(x+7)) > mapsize) gcount++;
+    }
+    total_h[i] = x;
+  }
+  mutex_init(&malloc_lock);
+  mmalloc_initialized = 1;
+}
+
+static void *mmalloc(size_t size)
+{
+  void *p;
+  
+  if (size == 0) return NULL;
+ 
+  if (mmalloc_initialized < 0) malloc_init();
+  if (mmalloc_initialized) mutex_lock(&malloc_lock);
+  
+  if (size <= HUNK_MAXSIZE)
+    p = hunk_alloc(size);
+  else
+  {
+    if ((p = bl_alloc(ALIGN(size))) != NULL)
+      p = ((Block_t*)p)->ptr;
+  }
+  
+  if (mmalloc_initialized) mutex_unlock(&malloc_lock);
+  
+  return p;
+}
+
+static void mfree(void *ptr)
+{
+  Block_t *p, *best;
+ 
+  if (mmalloc_initialized < 0) return;
+  if (mmalloc_initialized) mutex_lock(&malloc_lock);
+  
+  for (p = ptrs_root, best = NULL;p;)
+  {
+    if (p->ptr > (char*)ptr) p = p->l_ptrs;
+    else { best = p; p = p->r_ptrs; }
+  }
+  
+  if (!best || !best->used || best->ptr != (char*)ptr)
+  {
+    hunk_free(ptr);
+    if (mmalloc_initialized) mutex_unlock(&malloc_lock);
+    return;
+  }
+  
+  bl_free(best);
+
+  if (mmalloc_initialized) mutex_unlock(&malloc_lock);
+}
+
+static void *mrealloc_no_move(void *ptr, size_t size)
+{
+  Block_t *p, *best, *next;
+  
+  if (size <= HUNK_MAXSIZE) return NULL;
+  
+  if (mmalloc_initialized <= 0) return mmalloc(size);
+  
+  mutex_lock(&malloc_lock);
+  
+  /* Locate block */
+  for (p = ptrs_root, best = NULL;p;)
+  {
+    if (p->ptr > (char*)ptr) p = p->l_ptrs;
+    else { best = p; p = p->r_ptrs; }
+  }
+  
+  if (!best || !best->used || best->ptr != (char*)ptr)
+  {
+    mutex_unlock(&malloc_lock);
+    return NULL;
+  }
+  
+  size = ALIGN(size);
+  
+  if (size == best->size)
+  {
+    mutex_unlock(&malloc_lock);
+    return ptr;
+  }
+  
+  if (best->r_ptrs) /* get block just after */
+    for (next = best->r_ptrs; next->l_ptrs; next = next->l_ptrs);
+  else
+    for (p = ptrs_root, next = NULL;p;)
+    {
+      if (p->ptr > best->ptr) { next = p; p = p->l_ptrs; }
+      else if (p->ptr < best->ptr) p = p->r_ptrs;
+      else break;
+    }
+  
+  if (size < best->size)
+  { /* shrink block */
+    if (!next || next->used || next->broken)
+    {
+      if (best->size - size > MALLOC_ALIGN)
+      { /* do split */
+	SPLIT_BLOCK(best,size);
+      }
+    }
+    else
+    { /* just move border of next block */
+      SHRINK_BLOCK(best,next,size);
+    }
+  }
+  else if (next && !next->broken && !next->used)
+  { /* can expand */
+    if (best->size + next->size > size + HUNK_MAXSIZE)
+    { /* shrink next free block */
+      SHRINK_BLOCK(best,next,size);
+    }
+    else if (best->size + next->size >= size)
+    { /* combine blocks (eat next one) */
+      FREE_MEM_DEL_BLOCK(next);
+      COMBINE_BLOCKS(best,next);
+    }
+    else
+    { /* not enough memory in next block */
+      mutex_unlock(&malloc_lock);
+      return NULL;
+    }
+  }
+  else
+  { /* no next block */
+    mutex_unlock(&malloc_lock);
+    return NULL;
+  }
+  mutex_unlock(&malloc_lock);
+  return best->ptr;
+}
+
+static void *mrealloc(void *ptr, size_t size)
+{
+  void *tmp;
+  
+  tmp = mrealloc_no_move(ptr, size);
+  
+  if (!tmp)
+  {
+    Block_t *p, *best;
+    
+    mutex_lock(&malloc_lock);
+
+    for (p = ptrs_root, best = NULL;p;)
+    {
+      if (p->ptr > (char*)ptr) p = p->l_ptrs;
+      else { best = p; p = p->r_ptrs; }
+    }
+    
+    if (!best || !best->used || best->ptr != (char*)ptr)
+    {
+      if (ptr)
+      {
+	Hunk_t *h;
+        h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
+        if (h->id == HUNK_ID)
+        {
+	  mutex_unlock(&malloc_lock);
+	  if ((size >= HUNK_THRESHOLD && ALIGN(size) == h->size) ||
+	      size == h->size) return ptr;
+	  if ((tmp = mmalloc(size)) == NULL) return NULL;
+	  mutex_lock(&malloc_lock);
+	  memcpy(tmp,ptr,((size<h->size)?size:h->size));
+	  hunk_free(ptr);
+	  mutex_unlock(&malloc_lock);
+	  return tmp;
+	}
+      }
+      mutex_unlock(&malloc_lock);
+      return mmalloc(size);
+    }
+    
+    mutex_unlock(&malloc_lock);
+    
+    /* copy whole block */
+    if ((tmp = mmalloc(size)) == NULL) return NULL;
+    memcpy(tmp,ptr,((size<best->size)?size:best->size));
+    
+    mutex_lock(&malloc_lock);
+    bl_free(best);
+    mutex_unlock(&malloc_lock);
+  }
+  return tmp;
+}
+
+static void *mcalloc(size_t unit, size_t quantity)
+{
+  void *p;
+  
+  unit *= quantity;
+ 
+  if ((p = mmalloc(unit)) == NULL) return NULL;
+  memset(p,0,unit);
+  return p;
+}
+
+/* PUBLIC functions */
+
+void *malloc(size_t size) {
+  return mmalloc(size);
+}
+
+void *calloc(size_t unit, size_t quantity) {
+  return mcalloc(unit,quantity);
+}
+
+void *realloc(void *ptr, size_t size) {
+  return mrealloc(ptr,size);
+}
+
+void free(void *ptr) {
+  return mfree(ptr);
+}
+
+
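
The public wrappers at the end of malloc.c make this a drop-in replacement for the standard allocator: requests of up to HUNK_MAXSIZE (128) bytes go through the hunk layer, larger ones through mmap()ed Block_t regions, and mrealloc() tries mrealloc_no_move() before falling back to allocate-copy-free. A small standalone exercise of the interface (illustrative only, not part of the commit):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *small = malloc(32);         /* <= HUNK_MAXSIZE: hunk_alloc() path */
    char *large = malloc(64 * 1024);  /* >  HUNK_MAXSIZE: bl_alloc()/mmap() */
    char *tmp;

    if (small == NULL || large == NULL)
        return 1;

    strcpy(small, "hunk");

    /* mrealloc() tries to resize in place, otherwise allocates and copies */
    tmp = realloc(large, 128 * 1024);
    if (tmp == NULL) {
        free(small);
        free(large);
        return 1;
    }
    large = tmp;

    printf("%s %p\n", small, (void *) large);

    free(small);
    free(large);
    return 0;
}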