/* malloc.c */
  1. /*
  2. mmalloc - heap manager based on heavy use of virtual memory management.
  3. Copyright (C) 1998 Valery Shchedrin
  4. This library is free software; you can redistribute it and/or
  5. modify it under the terms of the GNU Library General Public
  6. License as published by the Free Software Foundation; either
  7. version 2 of the License, or (at your option) any later version.
  8. This library is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. Library General Public License for more details.
  12. You should have received a copy of the GNU Library General Public
  13. License along with this library; if not, write to the Free
  14. Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  15. MA 02111-1307, USA
  16. Public Functions:
  17. void *mmalloc(size_t size);
  18. Allocates `size` bytes
  19. returns NULL if no free memory available
  20. void *mcalloc(size_t unit, size_t quantity);
  21. Allocates `quantity*unit` zeroed bytes via internal malloc call
  22. void *mrealloc(void *ptr, size_t size);
  23. Reallocates already allocated block `ptr`, if `ptr` is not valid block
  24. then it works as malloc. NULL is returned if no free memory available
  25. void *mrealloc_no_move(void *ptr, size_t size);
  26. Reallocates already allocated block `ptr`, if `ptr` is not valid block
  27. or if reallocation can't be done with shrinking/expanding already
  28. allocated block NULL is returned
  29. void mfree(void *ptr);
  30. Frees already allocated block, if `ptr` is incorrect one nothing will
  31. happen.
  32. */
  33. #define _POSIX_SOURCE
  34. #define _XOPEN_SOURCE
  35. #include <sys/types.h>
  36. #include <unistd.h>
  37. #include <limits.h>
  38. #include <sys/time.h>
  39. #include <asm/page.h>
  40. #include <unistd.h>
  41. #include <sys/mman.h>
  42. #include <string.h>
  43. #include "malloc.h"
  44. #define M_DOTRIMMING 1
  45. #define M_MULTITHREADED 0
  46. #define VALLOC_MSTART ((void*)0x1c000000)
  47. #define LARGE_MSTART ((void*)0x19000000)
  48. #define HUNK_MSTART ((void*)0x18000000)
  49. #define HUNK_MSIZE M_PAGESIZE
  50. #define HUNK_ID 0x99171713
  51. /* alignment of allocations > HUNK_THRESHOLD */
  52. #define MALLOC_ALIGN 4
  53. /* allocations < HUNK_THRESHOLD will not be aligned */
  54. #define HUNK_THRESHOLD 4
  55. /*up to HUNK_MAXSIZE blocks will be joined together to decrease memory waste*/
  56. #define HUNK_MAXSIZE 128
  57. /* returns value not less than size, aligned to MALLOC_ALIGN */
  58. #define ALIGN(size) (((size)+(MALLOC_ALIGN)-1)&(~((MALLOC_ALIGN)-1)))
  59. /* aligns s or p to page boundaries */
  60. #define PAGE_ALIGN(s) (((s)+M_PAGESIZE-1)&(~(M_PAGESIZE-1)))
  61. #define PAGE_ALIGNP(p) ((char*)PAGE_ALIGN((unsigned)(p)))
  62. #define PAGE_DOWNALIGNP(p) ((char*)(((unsigned)(p))&(~(M_PAGESIZE-1))))
  63. /* returns v * 2 for your machine (speed-up) */
  64. #define MUL2(v) ((v)*2)
  65. /* does v *= 8 for your machine (speed-up) */
  66. #define EMUL8(v) v*=8
  67. /* does v/8 for your machind (speed-up) */
  68. #define DIV8(v) ((v)/8)
  69. #if M_MULTITHREADED
  70. #error This version does not support threads
  71. #else
  72. typedef int mutex_t;
  73. #define mutex_lock(x)
  74. #define mutex_unlock(x)
  75. #define mutex_init(x)
  76. #define MUTEX_INITIALIZER 0
  77. #endif
  78. static int mmalloc_initialized = -1;
  79. /* -1 == uninitialized, 0 == initializing, 1 == initialized */
  80. static mutex_t malloc_lock = MUTEX_INITIALIZER;
  81. #ifndef MAP_FAILED
  82. #define MAP_FAILED ((void*)-1)
  83. #endif
  84. #if defined(MAP_ANONYMOUS) && !defined(MAP_ANON)
  85. #define MAP_ANON MAP_ANONYMOUS
  86. #endif
  87. #ifndef NULL
  88. #define NULL ((void*)0)
  89. #endif
  90. /* guess pagesize */
  91. #ifndef M_PAGESIZE
  92. #ifdef _SC_PAGESIZE
  93. #ifndef _SC_PAGE_SIZE
  94. #define _SC_PAGE_SIZE _SC_PAGESIZE
  95. #endif
  96. #endif
  97. #ifdef _SC_PAGE_SIZE
  98. #define M_PAGESIZE sysconf(_SC_PAGE_SIZE)
  99. #else /* !_SC_PAGESIZE */
  100. #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  101. extern size_t getpagesize();
  102. #define M_PAGESIZE getpagesize()
  103. #else /* !HAVE_GETPAGESIZE */
  104. #include <sys/param.h>
  105. #ifdef EXEC_PAGESIZE
  106. #define M_PAGESIZE EXEC_PAGESIZE
  107. #else /* !EXEC_PAGESIZE */
  108. #ifdef NBPG
  109. #ifndef CLSIZE
  110. #define M_PAGESIZE NBPG
  111. #else /* !CLSIZE */
  112. #define M_PAGESIZE (NBPG*CLSIZE)
  113. #endif /* CLSIZE */
  114. #else
  115. #ifdef NBPC
  116. #define M_PAGESIZE NBPC
  117. #else /* !NBPC */
  118. #ifdef PAGESIZE
  119. #define M_PAGESIZE PAGESIZE
  120. #else /* !PAGESIZE */
  121. #define M_PAGESIZE 4096
  122. #endif /* PAGESIZE */
  123. #endif /* NBPC */
  124. #endif /* NBPG */
  125. #endif /* EXEC_PAGESIZE */
  126. #endif /* HAVE_GETPAGESIZE */
  127. #endif /* _SC_PAGE_SIZE */
  128. #endif /* defined(M_PAGESIZE) */
/* HUNK MANAGER */
/* A Hunk_t heads one mmapped page that is carved into `total` equal
 * slots of `size` bytes each; a bitmap (usagemap) stored right after
 * the header records which slots are in use. */
typedef struct Hunk_s Hunk_t;
struct Hunk_s { /* Hunked block - 8 byte overhead */
	int id; /* unique id */
	unsigned int total:12, used:12, size : 8;
	Hunk_t *next; /* next free in free_h */
};
static Hunk_t *free_h[HUNK_MAXSIZE+1]; /* free hash */
int total_h[HUNK_MAXSIZE+1]; /* Hunk_t's `total` member */
/* NOTE(review): total_h lacks `static` while free_h has it -- presumably
 * both are internal; confirm nothing links against total_h. */
/* per-slot usage bitmap, located immediately after the header */
#define usagemap(h) (((unsigned char *)(h))+sizeof(Hunk_t))
/* address of slot 0: header + MALLOC_ALIGN-aligned bitmap (1 bit/slot) */
#define hunk_ptr(h) (((char*)(h))+sizeof(Hunk_t)+ALIGN(DIV8(h->total+7)))
#define hunk(h) ((Hunk_t*)(h))
/* hunk_alloc allocates <= HUNK_MAXSIZE blocks */
/* Returns a pointer to a free slot in a hunk of slot size `size`,
 * mapping a fresh page when no partially-filled hunk exists.
 * Returns NULL only when mmap fails. */
static void *hunk_alloc(int size)
{
	Hunk_t *p;
	unsigned long *cpl;
	int i, c;

	/* requests >= HUNK_THRESHOLD are rounded up to MALLOC_ALIGN so their
	 * slots come out aligned; smaller requests keep their exact size */
	if (size >= HUNK_THRESHOLD) size = ALIGN(size);
	/* Look for already allocated hunkblocks */
	if ((p = free_h[size]) == NULL)
	{
		/* no partially-filled hunk of this size: map a fresh page.
		 * HUNK_MSTART is only a placement hint (no MAP_FIXED). */
		if ((p = (Hunk_t*)mmap(HUNK_MSTART,HUNK_MSIZE,PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANON,0,0)) == (Hunk_t*)MAP_FAILED)
			return NULL;
		memset(p,0,HUNK_MSIZE);
		p->id = HUNK_ID;
		p->total = total_h[size]; /* slot count precomputed by malloc_init */
		/* p->used = 0; */
		p->size = size;
		/* p->next = (Hunk_t*)NULL; */
		/* memset(usagemap(p), 0, bound); */
		free_h[size] = p;
	}
	/* Locate free point in usagemap */
	/* NOTE(review): this scan assumes 32-bit `unsigned long`,
	 * little-endian byte order and tolerated unaligned int reads --
	 * not portable beyond ia32-like targets; confirm target set. */
	for (cpl=(unsigned long*)usagemap(p);*cpl==0xFFFFFFFF;cpl++);
	i = ((unsigned char *)cpl) - usagemap(p); /* byte offset of the word holding a 0 bit */
	if (*(unsigned short*)cpl != 0xFFFF) {
		/* free bit is in the low half-word; narrow to the byte holding it */
		if (*(unsigned char*)cpl == 0xFF) {
			c = *(int*)(((unsigned char *)cpl)+1); i++;
		} else c = *(int*)(unsigned char *)cpl;
	} else {
		/* free bit is in the high half-word */
		i+=2; c = *(((unsigned char *)cpl)+2);
		if (c == 0xFF) { c = *(int*)(((unsigned char *)cpl)+3); i++; }
	}
	EMUL8(i); /* byte offset -> bit index */
	/* binary-search the low bits of c for the first clear bit */
	if ((c & 0xF) == 0xF) { c >>= 4; i+=4; }
	if ((c & 0x3) == 0x3) { c >>= 2; i+=2; }
	if (c & 1) i++;
	usagemap(p)[DIV8(i)] |= (1 << (i & 7)); /* set bit */
	/* Increment counter and update hashes */
	if (++p->used == p->total)
	{
		/* hunk is now full: unlink it from the free hash */
		free_h[p->size] = p->next;
		p->next = NULL;
	}
	return hunk_ptr(p)+i*p->size; /* address of slot i */
}
/* hunk_free frees blocks allocated by hunk_alloc */
/* Silently ignores NULL, pointers not inside a hunk page, misaligned
 * pointers and double frees.  Unmaps the page once its last slot is
 * released. */
static void hunk_free(char *ptr)
{
	unsigned char *up;
	int i, v;
	Hunk_t *h;

	if (!ptr) return;
	/* the hunk header sits at the start of the enclosing page */
	h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
	/* Validate `ptr` */
	if (h->id != HUNK_ID) return;
	v = ptr - hunk_ptr(h);
	i = v / h->size; /* slot index */
	/* reject misaligned or out-of-range pointers */
	if (v % h->size != 0 || i < 0 || i >= h->total) return;
	/* Update `usagemap` */
	up = &(usagemap(h)[DIV8(i)]);
	i = 1 << (i&7);
	if (!(*up & i)) return; /* bit already clear: double free, ignore */
	*up ^= i; /* clear the usage bit */
	/* Update hunk counters */
	if (h->used == h->total)
	{
		/* hunk was full, hence currently NOT on the free_h list */
		if (--h->used)
		{ /* insert into free_h */
			h->next = free_h[h->size];
			free_h[h->size] = h;
		} /* else - it will be unmapped */
	}
	else
	{
		if (!--h->used)
		{ /* delete from free_h - will be bl_freed*/
			Hunk_t *p, *pp;
			/* linear unlink from the singly-linked free list */
			for (p=free_h[h->size],pp=NULL;p!=h;pp=p,p=p->next);
			if (!pp)
				free_h[h->size] = p->next;
			else
				pp->next = p->next;
		}
	}
	/* Unmap empty Hunk_t */
	if (!h->used) munmap((void*)h,HUNK_MSIZE);
}
/* BLOCK MANAGER */
/* Large allocations are described by Block_t headers, which live in
 * hunk-managed memory.  Every block is simultaneously a node of two
 * AVL trees: <free_mem> (keyed by size; free blocks only) and <ptrs>
 * (keyed by data address). */
typedef struct Block_s Block_t;
struct Block_s /* 32-bytes long control structure (if 4-byte aligned) */
{
	char *ptr; /* pointer to related data */
	Block_t *next; /* next in free_mem list */
	Block_t *l_free_mem, *r_free_mem; /* left & right subtrees of <free_mem> */
	Block_t *l_ptrs, *r_ptrs; /* left & right subtrees of <ptrs> */
	size_t size; /* size - divided by align */
	/* packed 4-byte attributes */
	/* { */
	char bal_free_mem : 8; /* balance of <free_mem> subtree */
	char bal_ptrs : 8; /* balance of <ptrs> subtree */
	unsigned int used : 1; /* used/free state of the block */
	unsigned int broken : 1; /* 1 if previous block can't be merged with it */
	/* } */
};
static Block_t *bl_last; /* last mmapped block */
/* Block_t headers are allocated/freed through the hunk manager */
#define bl_get() hunk_alloc(sizeof(Block_t))
#define bl_rel(p) hunk_free((char*)p)
/* like C++ templates ;-) */
#include "avlmacro.h"
/* tree orderings: <free_mem> sorts blocks by size, <ptrs> by address */
/* NOTE(review): both comparisons store a size_t/ptrdiff_t difference
 * in an int -- could misorder for differences beyond INT_MAX; presumably
 * acceptable on the intended 32-bit targets, confirm. */
#define FREE_MEM_COMPARE(i,a,b) { i = (a)->size - (b)->size; }
#define PTRS_COMPARE(i,a,b) { i = (a)->ptr - (b)->ptr; }
Avl_Tree(free_mem,Block_t,free_mem,FREE_MEM_COMPARE)
Avl_Tree(ptrs,Block_t,ptrs,PTRS_COMPARE)
#define free_mem_root Avl_Root(Block_t, free_mem)
#define ptrs_root Avl_Root(Block_t, ptrs)
/* NOTE: the macros below rely on a local `Block_t *p` declared in the
 * enclosing function.  Equal-sized free blocks share one <free_mem>
 * node and are chained through their `next` member. */
/* pp is freed block */
#define FREE_MEM_DEL_BLOCK(pp) \
{ \
	for (p = free_mem_root;;) \
		if (p->size > pp->size) p = p->l_free_mem; \
		else if (p->size < pp->size) p = p->r_free_mem; \
		else break; \
	if (p == pp) \
	{ \
		if (pp->next) free_mem_replace(pp->next); \
		else free_mem_del(pp); \
	} \
	else \
	{ \
		for (;p->next != pp; p = p->next); \
		p->next = pp->next; \
	} \
}
/* insert pp into <free_mem>, chaining onto an equal-size node if any */
#define FREE_MEM_INS_BLOCK(pp) \
{ \
	if ((p = free_mem_ins(pp)) != NULL)\
	{\
		pp->next = p->next;\
		p->next = pp;\
	}\
	else pp->next = NULL; \
}
/* `b` is current block, `pp` is next block */
#define COMBINE_BLOCKS(b,pp) \
{\
	ptrs_del(pp); \
	b->size += pp->size; \
	if (pp == bl_last) bl_last = b; \
	bl_rel(pp); \
}
/* initializes new block b */
#define INIT_BLOCK(b, pppp, sz) \
{ \
	memset(b, 0, sizeof(Block_t)); \
	b->ptr = pppp; \
	b->size = sz; \
	ptrs_ins(b); \
	FREE_MEM_INS_BLOCK(b); \
}
/* `b` is current block, `sz` its new size */
/* block `b` will be splitted to one busy & one free block */
#define SPLIT_BLOCK(b,sz) \
{\
	Block_t *bt; \
	bt = bl_get(); \
	INIT_BLOCK(bt, b->ptr + sz, b->size - sz); \
	b->size = sz; \
	if (bl_last == b) bl_last = bt; \
	bl_uncommit(bt);\
}
/* `b` is current block, `pp` is next free block, `sz` is needed size */
#define SHRINK_BLOCK(b,pp,sz) \
{\
	FREE_MEM_DEL_BLOCK(pp); \
	pp->ptr = b->ptr + sz; \
	pp->size += b->size - sz; \
	b->size = sz; \
	FREE_MEM_INS_BLOCK(pp); \
	bl_uncommit(pp); \
}
  322. static Block_t *bl_mapnew(size_t size)
  323. {
  324. size_t map_size;
  325. Block_t *pp, *p;
  326. void *pt;
  327. map_size = PAGE_ALIGN(size);
  328. pt = mmap(LARGE_MSTART,map_size,PROT_READ|PROT_WRITE|PROT_EXEC,
  329. MAP_PRIVATE|MAP_ANON,0,0);
  330. if (pt == MAP_FAILED) return (Block_t*)NULL;
  331. bl_last = pp = bl_get();
  332. INIT_BLOCK(pp, (char*)pt, map_size);
  333. pp->broken = 1;
  334. return pp;
  335. }
  336. static void bl_uncommit(Block_t *b)
  337. {
  338. char *u_start, *u_end;
  339. u_start = PAGE_ALIGNP(b->ptr);
  340. u_end = PAGE_DOWNALIGNP(b->ptr+b->size);
  341. if (u_end <= u_start) return;
  342. #if M_DOTRIMMING
  343. mmap(u_start,u_end-u_start,PROT_READ|PROT_WRITE|PROT_EXEC,
  344. MAP_PRIVATE|MAP_ANON|MAP_FIXED,0,0);
  345. #endif
  346. }
/* requested size must be aligned to ALIGNMENT */
/* bl_alloc: best-fit allocation from the <free_mem> tree; when nothing
 * fits, grows the last mapping with mremap or maps a new region.
 * Returns the (used) Block_t, or NULL when out of memory. */
static Block_t *bl_alloc(size_t size)
{
	Block_t *p, *pp; /* `p` is also used implicitly by the FREE_MEM_* macros */
	/* try to find needed space in existing memory */
	/* best fit: smallest free block whose size >= requested */
	for (p = free_mem_root, pp = NULL;p;)
	{
		if (p->size > size) { pp = p; p = p->l_free_mem; }
		else if (p->size < size) p = p->r_free_mem;
		else { pp = p; break; }
	}
	if (!pp)
	{ /* map some memory */
		if (!bl_last)
		{ /* just do initial mmap */
			pp = bl_mapnew(size);
			if (!pp) return NULL;
		}
		else if (!bl_last->used)
		{ /* try growing last unused */
			/* NOTE(review): mremap is Linux-specific and normally needs
			 * _GNU_SOURCE for its prototype -- confirm for this target */
			if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
					PAGE_ALIGNP(bl_last->ptr+bl_last->size) - PAGE_DOWNALIGNP(bl_last->ptr),
					PAGE_ALIGNP(bl_last->ptr+size)-PAGE_DOWNALIGNP(bl_last->ptr),
					0) == MAP_FAILED)
			{ /* unable to grow -- initiate new block */
				pp = bl_mapnew(size);
				if (!pp) return NULL;
			}
			else
			{
				/* grew in place: enlarge the trailing free block and
				 * re-key it in the <free_mem> tree */
				pp = bl_last;
				FREE_MEM_DEL_BLOCK(pp);
				pp->size = PAGE_ALIGNP(pp->ptr+size) - pp->ptr;
				FREE_MEM_INS_BLOCK(pp);
			}
		}
		else
		{ /* bl_last is used block */
			if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
					PAGE_ALIGNP(bl_last->ptr+bl_last->size)-PAGE_DOWNALIGNP(bl_last->ptr),
					PAGE_ALIGNP(bl_last->ptr+bl_last->size+size) - PAGE_DOWNALIGNP(bl_last->ptr),
					0) == MAP_FAILED)
			{
				pp = bl_mapnew(size);
				if (!pp) return NULL;
			}
			else
			{
				/* mapping grew: create a new free block after bl_last.
				 * NOTE(review): bl_get() result is not NULL-checked here
				 * (same hazard as the one fixed in bl_mapnew). */
				pp = bl_get();
				INIT_BLOCK(pp,bl_last->ptr+bl_last->size,
						PAGE_ALIGNP(bl_last->ptr+bl_last->size+size)-bl_last->ptr-bl_last->size);
				bl_last = pp;
			}
		}
	}
	/* just delete this node from free_mem tree */
	if (pp->next) free_mem_replace(pp->next); else free_mem_del(pp);
	pp->used = 1;
	if (pp->size - size > MALLOC_ALIGN)
	{ /* this block can be splitted (it is unused,not_broken) */
		SPLIT_BLOCK(pp,size);
	}
	return pp;
}
/* bl_free: mark block `b` free, merge it with adjacent free blocks,
 * and uncommit the whole pages it now covers. */
static void bl_free(Block_t *b)
{
	Block_t *p, *bl_next, *bl_prev; /* `p` is also used by the FREE_MEM_* macros */
	/* Look for blocks before & after `b` */
	/* first approximation: nearest neighbours found on the path from
	 * the <ptrs> root down to `b` */
	for (p = ptrs_root, bl_next = NULL, bl_prev = NULL; p;)
	{
		if (p->ptr > b->ptr) { bl_next = p; p = p->l_ptrs; }
		else if (p->ptr < b->ptr) { bl_prev = p; p = p->r_ptrs; }
		else break;
	}
	/* if `b` has subtrees, the true neighbours are its in-order
	 * predecessor/successor inside them */
	if (b->l_ptrs)
		for (bl_prev = b->l_ptrs; bl_prev->r_ptrs; bl_prev = bl_prev->r_ptrs);
	if (b->r_ptrs)
		for (bl_next = b->r_ptrs; bl_next->l_ptrs; bl_next = bl_next->l_ptrs);
	/* absorb a free successor (a `broken` block starts a new mapping
	 * and cannot be merged across the gap) */
	if (bl_next && !bl_next->broken && !bl_next->used)
	{
		FREE_MEM_DEL_BLOCK(bl_next)
		COMBINE_BLOCKS(b,bl_next)
	}
	/* let a free predecessor absorb `b` */
	if (bl_prev && !b->broken && !bl_prev->used)
	{
		FREE_MEM_DEL_BLOCK(bl_prev)
		COMBINE_BLOCKS(bl_prev,b)
		b = bl_prev;
	}
	b->used = 0;
	FREE_MEM_INS_BLOCK(b)
	bl_uncommit(b);
}
/* malloc_init: one-time setup.  Resets both managers and precomputes
 * total_h[i] -- how many i-byte slots fit in one page alongside the
 * Hunk_t header and the usage bitmap (1 bit per slot). */
static void malloc_init(void)
{
	int i, mapsize, x, old_x, gcount;

	mapsize = M_PAGESIZE;
	mmalloc_initialized = 0;
	bl_last = NULL;
	free_mem_root = NULL;
	ptrs_root = NULL;
	mapsize -= sizeof(Hunk_t); /* space left for bitmap + slots */
	for (i = 1; i <= HUNK_MAXSIZE; i++)
	{
		free_h[i] = (Hunk_t*)NULL;
		/* fixed-point iteration: the bitmap size depends on the slot
		 * count and vice versa; iterate until x stabilizes */
		for (x = mapsize/i, gcount = 0, old_x = 0; old_x != x;)
		{
			old_x = x;
			x = (mapsize - ALIGN(DIV8(old_x+7)))/i;
			/* escape hatch: if the iteration oscillates, accept the
			 * first feasible x after a couple of overshoots */
			if (gcount > 1 && x*i + ALIGN(DIV8(x+7)) <= mapsize) break;
			if (x*i + ALIGN(DIV8(x+7)) > mapsize) gcount++;
		}
		total_h[i] = x;
	}
	mutex_init(&malloc_lock);
	mmalloc_initialized = 1;
}
  464. static void *mmalloc(size_t size)
  465. {
  466. void *p;
  467. if (size == 0) return NULL;
  468. if (mmalloc_initialized < 0) malloc_init();
  469. if (mmalloc_initialized) mutex_lock(&malloc_lock);
  470. if (size <= HUNK_MAXSIZE)
  471. p = hunk_alloc(size);
  472. else
  473. {
  474. if ((p = bl_alloc(ALIGN(size))) != NULL)
  475. p = ((Block_t*)p)->ptr;
  476. }
  477. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  478. return p;
  479. }
  480. static void mfree(void *ptr)
  481. {
  482. Block_t *p, *best;
  483. if (mmalloc_initialized < 0) return;
  484. if (mmalloc_initialized) mutex_lock(&malloc_lock);
  485. for (p = ptrs_root, best = NULL;p;)
  486. {
  487. if (p->ptr > (char*)ptr) p = p->l_ptrs;
  488. else { best = p; p = p->r_ptrs; }
  489. }
  490. if (!best || !best->used || best->ptr != (char*)ptr)
  491. {
  492. hunk_free(ptr);
  493. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  494. return;
  495. }
  496. bl_free(best);
  497. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  498. }
/* mrealloc_no_move: resize a large (block-managed) allocation in place.
 * Returns the unchanged data pointer on success, NULL when `ptr` is not
 * a live large block or resizing would require moving the data. */
static void *mrealloc_no_move(void *ptr, size_t size)
{
	Block_t *p, *best, *next; /* `p` is also used by the FREE_MEM_ and SHRINK macros */

	/* target sizes in hunk range would always need a move */
	if (size <= HUNK_MAXSIZE) return NULL;
	if (mmalloc_initialized <= 0) return mmalloc(size);
	mutex_lock(&malloc_lock);
	/* Locate block */
	for (p = ptrs_root, best = NULL;p;)
	{
		if (p->ptr > (char*)ptr) p = p->l_ptrs;
		else { best = p; p = p->r_ptrs; }
	}
	if (!best || !best->used || best->ptr != (char*)ptr)
	{ /* not a live large block */
		mutex_unlock(&malloc_lock);
		return NULL;
	}
	size = ALIGN(size);
	if (size == best->size)
	{ /* already the right size */
		mutex_unlock(&malloc_lock);
		return ptr;
	}
	if (best->r_ptrs) /* get block just after */
		for (next = best->r_ptrs; next->l_ptrs; next = next->l_ptrs);
	else
		/* in-order successor lies on the path from the root */
		for (p = ptrs_root, next = NULL;p;)
		{
			if (p->ptr > best->ptr) { next = p; p = p->l_ptrs; }
			else if (p->ptr < best->ptr) p = p->r_ptrs;
			else break;
		}
	if (size < best->size)
	{ /* shrink block */
		if (!next || next->used || next->broken)
		{
			if (best->size - size > MALLOC_ALIGN)
			{ /* do split */
				SPLIT_BLOCK(best,size);
			}
		}
		else
		{ /* just move border of next block */
			SHRINK_BLOCK(best,next,size);
		}
	}
	else if (next && !next->broken && !next->used)
	{ /* can expand */
		if (best->size + next->size > size + HUNK_MAXSIZE)
		{ /* shrink next free block */
			SHRINK_BLOCK(best,next,size);
		}
		else if (best->size + next->size >= size)
		{ /* combine blocks (eat next one) */
			FREE_MEM_DEL_BLOCK(next);
			COMBINE_BLOCKS(best,next);
		}
		else
		{ /* not enough memory in next block */
			mutex_unlock(&malloc_lock);
			return NULL;
		}
	}
	else
	{ /* no next block */
		mutex_unlock(&malloc_lock);
		return NULL;
	}
	mutex_unlock(&malloc_lock);
	return best->ptr;
}
/* mrealloc: resize `ptr` to `size` bytes.  Tries an in-place resize
 * first, then falls back to allocate-copy-free.  Unknown pointers are
 * treated as a plain malloc. */
static void *mrealloc(void *ptr, size_t size)
{
	void *tmp;

	tmp = mrealloc_no_move(ptr, size);
	if (!tmp)
	{ /* in-place resize not possible: the data must move */
		Block_t *p, *best;

		mutex_lock(&malloc_lock);
		/* find the large block owning `ptr`, if any */
		for (p = ptrs_root, best = NULL;p;)
		{
			if (p->ptr > (char*)ptr) p = p->l_ptrs;
			else { best = p; p = p->r_ptrs; }
		}
		if (!best || !best->used || best->ptr != (char*)ptr)
		{ /* not a large block: maybe a hunk slot */
			if (ptr)
			{
				Hunk_t *h;
				h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
				if (h->id == HUNK_ID)
				{
					/* the lock is dropped before calling mmalloc to
					 * avoid self-deadlock; single-threaded build only */
					mutex_unlock(&malloc_lock);
					/* same rounded slot size: keep the slot as-is */
					if ((size >= HUNK_THRESHOLD && ALIGN(size) == h->size) ||
							size == h->size) return ptr;
					if ((tmp = mmalloc(size)) == NULL) return NULL;
					mutex_lock(&malloc_lock);
					memcpy(tmp,ptr,((size<h->size)?size:h->size));
					hunk_free(ptr);
					mutex_unlock(&malloc_lock);
					return tmp;
				}
			}
			/* unknown pointer (or NULL): behave like malloc */
			mutex_unlock(&malloc_lock);
			return mmalloc(size);
		}
		mutex_unlock(&malloc_lock);
		/* copy whole block */
		if ((tmp = mmalloc(size)) == NULL) return NULL;
		memcpy(tmp,ptr,((size<best->size)?size:best->size));
		mutex_lock(&malloc_lock);
		bl_free(best);
		mutex_unlock(&malloc_lock);
	}
	return tmp;
}
  615. static void *mcalloc(size_t unit, size_t quantity)
  616. {
  617. void *p;
  618. unit *= quantity;
  619. if ((p = mmalloc(unit)) == NULL) return NULL;
  620. memset(p,0,unit);
  621. return p;
  622. }
  623. /* PUBLIC functions */
  624. void *malloc(size_t size) {
  625. return mmalloc(size);
  626. }
  627. void *calloc(size_t unit, size_t quantity) {
  628. return mcalloc(unit,quantity);
  629. }
  630. void *realloc(void *ptr, size_t size) {
  631. return mrealloc(ptr,size);
  632. }
  633. void free(void *ptr) {
  634. return mfree(ptr);
  635. }