malloc.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769
  1. /*
  2. mmalloc - heap manager based on heavy use of virtual memory management.
  3. Copyright (C) 1998 Valery Shchedrin
  4. This library is free software; you can redistribute it and/or
  5. modify it under the terms of the GNU Library General Public
  6. License as published by the Free Software Foundation; either
  7. version 2 of the License, or (at your option) any later version.
  8. This library is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. Library General Public License for more details.
  12. You should have received a copy of the GNU Library General Public
  13. License along with this library; if not, write to the Free
  14. Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  15. MA 02111-1307, USA
  16. Public Functions:
  17. void *mmalloc(size_t size);
  18. Allocates `size` bytes
  19. returns NULL if no free memory available
  20. void *mcalloc(size_t unit, size_t quantity);
  21. Allocates `quantity*unit` zeroed bytes via internal malloc call
  22. void *mrealloc(void *ptr, size_t size);
  23. Reallocates already allocated block `ptr`, if `ptr` is not valid block
  24. then it works as malloc. NULL is returned if no free memory available
  25. void *mrealloc_no_move(void *ptr, size_t size);
  26. Reallocates already allocated block `ptr`, if `ptr` is not valid block
  27. or if reallocation can't be done with shrinking/expanding already
  28. allocated block NULL is returned
  29. void mfree(void *ptr);
  30. Frees already allocated block, if `ptr` is incorrect one nothing will
  31. happen.
  32. */
  33. #define _POSIX_SOURCE
  34. #define _XOPEN_SOURCE
  35. #include <sys/types.h>
  36. #include <unistd.h>
  37. #include <limits.h>
  38. #include <sys/time.h>
  39. #include <asm/page.h>
  40. #include <unistd.h>
  41. #include <sys/mman.h>
  42. #include <string.h>
  43. #include "malloc.h"
  44. #define M_DOTRIMMING 1
  45. #define M_MULTITHREADED 0
  46. #define VALLOC_MSTART ((void*)0x1c000000)
  47. #define LARGE_MSTART ((void*)0x19000000)
  48. #define HUNK_MSTART ((void*)0x18000000)
  49. #define HUNK_MSIZE M_PAGESIZE
  50. #define HUNK_ID 0x99171713
  51. /* alignment of allocations > HUNK_THRESHOLD */
  52. #define MALLOC_ALIGN 4
  53. /* allocations < HUNK_THRESHOLD will not be aligned */
  54. #define HUNK_THRESHOLD 4
  55. /*up to HUNK_MAXSIZE blocks will be joined together to decrease memory waste*/
  56. #define HUNK_MAXSIZE 128
  57. /* returns value not less than size, aligned to MALLOC_ALIGN */
  58. #define ALIGN(size) (((size)+(MALLOC_ALIGN)-1)&(~((MALLOC_ALIGN)-1)))
  59. /* aligns s or p to page boundaries */
  60. #define PAGE_ALIGN(s) (((s)+M_PAGESIZE-1)&(~(M_PAGESIZE-1)))
  61. #define PAGE_ALIGNP(p) ((char*)PAGE_ALIGN((unsigned)(p)))
  62. #define PAGE_DOWNALIGNP(p) ((char*)(((unsigned)(p))&(~(M_PAGESIZE-1))))
  63. /* returns v * 2 for your machine (speed-up) */
  64. #define MUL2(v) ((v)*2)
  65. /* does v *= 8 for your machine (speed-up) */
  66. #define EMUL8(v) v*=8
  67. /* does v/8 for your machind (speed-up) */
  68. #define DIV8(v) ((v)/8)
  69. #if M_MULTITHREADED
  70. #error This version does not support threads
  71. #else
  72. typedef int mutex_t;
  73. #define mutex_lock(x)
  74. #define mutex_unlock(x)
  75. #define mutex_init(x)
  76. #define MUTEX_INITIALIZER 0
  77. //static mutex_t malloc_lock = MUTEX_INITIALIZER;
  78. #endif
  79. static int mmalloc_initialized = -1;
  80. /* -1 == uninitialized, 0 == initializing, 1 == initialized */
  81. #ifndef MAP_FAILED
  82. #define MAP_FAILED ((void*)-1)
  83. #endif
  84. #if defined(MAP_ANONYMOUS) && !defined(MAP_ANON)
  85. #define MAP_ANON MAP_ANONYMOUS
  86. #endif
  87. #ifndef NULL
  88. #define NULL ((void*)0)
  89. #endif
  90. /* guess pagesize */
  91. #ifndef M_PAGESIZE
  92. #ifdef _SC_PAGESIZE
  93. #ifndef _SC_PAGE_SIZE
  94. #define _SC_PAGE_SIZE _SC_PAGESIZE
  95. #endif
  96. #endif
  97. #ifdef _SC_PAGE_SIZE
  98. #define M_PAGESIZE sysconf(_SC_PAGE_SIZE)
  99. #else /* !_SC_PAGESIZE */
  100. #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  101. extern size_t getpagesize();
  102. #define M_PAGESIZE getpagesize()
  103. #else /* !HAVE_GETPAGESIZE */
  104. #include <sys/param.h>
  105. #ifdef EXEC_PAGESIZE
  106. #define M_PAGESIZE EXEC_PAGESIZE
  107. #else /* !EXEC_PAGESIZE */
  108. #ifdef NBPG
  109. #ifndef CLSIZE
  110. #define M_PAGESIZE NBPG
  111. #else /* !CLSIZE */
  112. #define M_PAGESIZE (NBPG*CLSIZE)
  113. #endif /* CLSIZE */
  114. #else
  115. #ifdef NBPC
  116. #define M_PAGESIZE NBPC
  117. #else /* !NBPC */
  118. #ifdef PAGESIZE
  119. #define M_PAGESIZE PAGESIZE
  120. #else /* !PAGESIZE */
  121. #define M_PAGESIZE 4096
  122. #endif /* PAGESIZE */
  123. #endif /* NBPC */
  124. #endif /* NBPG */
  125. #endif /* EXEC_PAGESIZE */
  126. #endif /* HAVE_GETPAGESIZE */
  127. #endif /* _SC_PAGE_SIZE */
  128. #endif /* defined(M_PAGESIZE) */
  129. /* HUNK MANAGER */
  130. typedef struct Hunk_s Hunk_t;
  131. struct Hunk_s { /* Hunked block - 8 byte overhead */
  132. int id; /* unique id */
  133. unsigned int total:12, used:12, size : 8;
  134. Hunk_t *next; /* next free in free_h */
  135. };
  136. static Hunk_t *free_h[HUNK_MAXSIZE+1]; /* free hash */
  137. int total_h[HUNK_MAXSIZE+1]; /* Hunk_t's `total` member */
  138. #define usagemap(h) (((unsigned char *)(h))+sizeof(Hunk_t))
  139. #define hunk_ptr(h) (((char*)(h))+sizeof(Hunk_t)+ALIGN(DIV8(h->total+7)))
  140. #define hunk(h) ((Hunk_t*)(h))
/* hunk_alloc allocates <= HUNK_MAXSIZE blocks.
   Returns a pointer to a free slot of a (possibly new) hunk of the given
   size, or NULL if a fresh hunk cannot be mapped. */
static void *hunk_alloc(int size)
{
    Hunk_t *p;
    unsigned long *cpl;
    int i, c;
    /* sizes below HUNK_THRESHOLD keep their exact value (one hunk pool per
       distinct size); larger ones are rounded up to MALLOC_ALIGN */
    if (size >= HUNK_THRESHOLD) size = ALIGN(size);
    /* Look for already allocated hunkblocks */
    if ((p = free_h[size]) == NULL)
    {
        /* none with a free slot -- map a fresh page-sized hunk */
        if ((p = (Hunk_t*)mmap(HUNK_MSTART,HUNK_MSIZE,PROT_READ|PROT_WRITE,
#ifdef __HAS_NO_MMU__
            MAP_SHARED|MAP_ANONYMOUS
#else
            MAP_PRIVATE|MAP_ANONYMOUS
#endif
            ,0,0)) == (Hunk_t*)MAP_FAILED)
            return NULL;
        memset(p,0,HUNK_MSIZE);     /* zeroes header and usage bitmap too */
        p->id = HUNK_ID;
        p->total = total_h[size];
        /* p->used = 0; */
        p->size = size;
        /* p->next = (Hunk_t*)NULL; */
        /* memset(usagemap(p), 0, bound); */
        free_h[size] = p;
    }
    /* Locate free point in usagemap */
    /* NOTE(review): the word scan assumes a 32-bit unsigned long and that
       the bitmap always contains a zero bit (guaranteed because full hunks
       are unlinked from free_h below) -- confirm before porting to LP64. */
    for (cpl=(unsigned long*)usagemap(p);*cpl==0xFFFFFFFF;cpl++);
    i = ((unsigned char *)cpl) - usagemap(p);   /* byte offset of that word */
    /* narrow to the first byte containing a zero bit; `i` counts bytes */
    if (*(unsigned short*)cpl != 0xFFFF) {
        if (*(unsigned char*)cpl == 0xFF) {
            c = *(int*)(((unsigned char *)cpl)+1); i++;
        } else c = *(int*)(unsigned char *)cpl;
    } else {
        i+=2; c = *(((unsigned char *)cpl)+2);
        if (c == 0xFF) { c = *(int*)(((unsigned char *)cpl)+3); i++; }
    }
    EMUL8(i);                      /* byte offset -> bit offset */
    /* binary-search the byte for its lowest zero bit */
    if ((c & 0xF) == 0xF) { c >>= 4; i+=4; }
    if ((c & 0x3) == 0x3) { c >>= 2; i+=2; }
    if (c & 1) i++;
    usagemap(p)[DIV8(i)] |= (1 << (i & 7)); /* set bit */
    /* Increment counter and update hashes */
    if (++p->used == p->total)
    {   /* hunk is now full -- unlink it from the free hash */
        free_h[p->size] = p->next;
        p->next = NULL;
    }
    return hunk_ptr(p)+i*p->size;
}
/* hunk_free frees blocks allocated by hunk_alloc.
   Invalid pointers (wrong magic, misaligned, out of range, already free)
   are silently ignored; a hunk whose last slot is freed is unmapped. */
static void hunk_free(char *ptr)
{
    unsigned char *up;
    int i, v;
    Hunk_t *h;
    if (!ptr) return;
    /* the hunk header sits at the start of the page containing `ptr` */
    h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
    /* Validate `ptr` */
    if (h->id != HUNK_ID) return;
    v = ptr - hunk_ptr(h);
    i = v / h->size;               /* slot index */
    /* reject misaligned pointers and out-of-range slots */
    if (v % h->size != 0 || i < 0 || i >= h->total) return;
    /* Update `usagemap` */
    up = &(usagemap(h)[DIV8(i)]);
    i = 1 << (i&7);
    if (!(*up & i)) return;        /* slot already free (double free) */
    *up ^= i;                      /* clear the used bit */
    /* Update hunk counters */
    if (h->used == h->total)
    {   /* hunk was full, hence not currently in free_h */
        if (--h->used)
        {   /* insert into free_h */
            h->next = free_h[h->size];
            free_h[h->size] = h;
        }   /* else - it will be unmapped */
    }
    else
    {
        if (!--h->used)
        {   /* delete from free_h - will be bl_freed*/
            Hunk_t *p, *pp;
            for (p=free_h[h->size],pp=NULL;p!=h;pp=p,p=p->next);
            if (!pp)
                free_h[h->size] = p->next;
            else
                pp->next = p->next;
        }
    }
    /* Unmap empty Hunk_t */
    if (!h->used) munmap((void*)h,HUNK_MSIZE);
}
/* BLOCK MANAGER
 *
 * Large allocations (> HUNK_MAXSIZE) are tracked by Block_t control
 * structures kept in two AVL trees: <free_mem> (free blocks keyed by size)
 * and <ptrs> (all blocks keyed by address).
 */
typedef struct Block_s Block_t;

struct Block_s /* 32-bytes long control structure (if 4-byte aligned) */
{
    char *ptr;                 /* pointer to related data */
    Block_t *next;             /* next in free_mem list (same-size chain) */
    Block_t *l_free_mem, *r_free_mem; /* left & right subtrees of <free_mem> */
    Block_t *l_ptrs, *r_ptrs;  /* left & right subtrees of <ptrs> */
    size_t size;               /* size - divided by align */
    /* packed 4-byte attributes */
    /* { */
    char bal_free_mem : 8;     /* balance of <free_mem> subtree */
    char bal_ptrs : 8;         /* balance of <ptrs> subtree */
    unsigned int used : 1;     /* used/free state of the block */
    unsigned int broken : 1;   /* 1 if previous block can't be merged with it
                                  (i.e. this block starts its own mapping) */
    /* } */
};

static Block_t *bl_last;       /* last mmapped block */
/* block control structures are themselves allocated via the hunk manager */
#define bl_get() hunk_alloc(sizeof(Block_t))
#define bl_rel(p) hunk_free((char*)p)
/* like C++ templates ;-) */
#include "avlmacro.h"

/* ordering predicates for the two AVL trees */
#define FREE_MEM_COMPARE(i,a,b) { i = (a)->size - (b)->size; }
#define PTRS_COMPARE(i,a,b) { i = (a)->ptr - (b)->ptr; }

Avl_Tree(free_mem,Block_t,free_mem,FREE_MEM_COMPARE)
Avl_Tree(ptrs,Block_t,ptrs,PTRS_COMPARE)

#define free_mem_root Avl_Root(Block_t, free_mem)
#define ptrs_root Avl_Root(Block_t, ptrs)

/* NOTE(review): the macros below are bare brace blocks, not do{}while(0) --
   some call sites invoke them without a trailing semicolon, so they must
   stay this way.  FREE_MEM_DEL_BLOCK and FREE_MEM_INS_BLOCK also clobber a
   `Block_t *p` that must be declared in the calling scope.  Equal-sized
   free blocks hang off one tree node via the `next` chain, which is why
   deletion must distinguish the chain head from interior entries. */
/* pp is freed block */
#define FREE_MEM_DEL_BLOCK(pp) \
{ \
    for (p = free_mem_root;;) \
        if (p->size > pp->size) p = p->l_free_mem; \
        else if (p->size < pp->size) p = p->r_free_mem; \
        else break; \
    if (p == pp) \
    { \
        if (pp->next) free_mem_replace(pp->next); \
        else free_mem_del(pp); \
    } \
    else \
    { \
        for (;p->next != pp; p = p->next); \
        p->next = pp->next; \
    } \
}

#define FREE_MEM_INS_BLOCK(pp) \
{ \
    if ((p = free_mem_ins(pp)) != NULL)\
    {\
        pp->next = p->next;\
        p->next = pp;\
    }\
    else pp->next = NULL; \
}

/* `b` is current block, `pp` is next block: `b` absorbs `pp` */
#define COMBINE_BLOCKS(b,pp) \
{\
    ptrs_del(pp); \
    b->size += pp->size; \
    if (pp == bl_last) bl_last = b; \
    bl_rel(pp); \
}

/* initializes new block b */
#define INIT_BLOCK(b, pppp, sz) \
{ \
    memset(b, 0, sizeof(Block_t)); \
    b->ptr = pppp; \
    b->size = sz; \
    ptrs_ins(b); \
    FREE_MEM_INS_BLOCK(b); \
}

/* `b` is current block, `sz` its new size */
/* block `b` will be splitted to one busy & one free block */
#define SPLIT_BLOCK(b,sz) \
{\
    Block_t *bt; \
    bt = bl_get(); \
    INIT_BLOCK(bt, b->ptr + sz, b->size - sz); \
    b->size = sz; \
    if (bl_last == b) bl_last = bt; \
    bl_uncommit(bt);\
}

/* `b` is current block, `pp` is next free block, `sz` is needed size:
   moves the boundary between them instead of creating a new block */
#define SHRINK_BLOCK(b,pp,sz) \
{\
    FREE_MEM_DEL_BLOCK(pp); \
    pp->ptr = b->ptr + sz; \
    pp->size += b->size - sz; \
    b->size = sz; \
    FREE_MEM_INS_BLOCK(pp); \
    bl_uncommit(pp); \
}
  327. static Block_t *bl_mapnew(size_t size)
  328. {
  329. size_t map_size;
  330. Block_t *pp, *p;
  331. void *pt;
  332. map_size = PAGE_ALIGN(size);
  333. pt = mmap(LARGE_MSTART,map_size,PROT_READ|PROT_WRITE|PROT_EXEC,
  334. MAP_PRIVATE|MAP_ANON,0,0);
  335. if (pt == MAP_FAILED) return (Block_t*)NULL;
  336. bl_last = pp = bl_get();
  337. INIT_BLOCK(pp, (char*)pt, map_size);
  338. pp->broken = 1;
  339. return pp;
  340. }
  341. static void bl_uncommit(Block_t *b)
  342. {
  343. char *u_start, *u_end;
  344. u_start = PAGE_ALIGNP(b->ptr);
  345. u_end = PAGE_DOWNALIGNP(b->ptr+b->size);
  346. if (u_end <= u_start) return;
  347. #if M_DOTRIMMING
  348. mmap(u_start,u_end-u_start,PROT_READ|PROT_WRITE|PROT_EXEC,
  349. MAP_PRIVATE|MAP_ANON|MAP_FIXED,0,0);
  350. #endif
  351. }
/* requested size must be aligned to ALIGNMENT.
   Finds (best-fit) or creates a free block of at least `size` bytes,
   marks it used, splits off any large remainder, and returns it.
   Returns NULL when no memory can be obtained. */
static Block_t *bl_alloc(size_t size)
{
    Block_t *p, *pp;
    /* try to find needed space in existing memory */
    /* best-fit search: smallest free block with size >= requested */
    for (p = free_mem_root, pp = NULL;p;)
    {
        if (p->size > size) { pp = p; p = p->l_free_mem; }
        else if (p->size < size) p = p->r_free_mem;
        else { pp = p; break; }
    }
    if (!pp)
    {   /* map some memory */
        if (!bl_last)
        {   /* just do initial mmap */
            pp = bl_mapnew(size);
            if (!pp) return NULL;
        }
        else if (!bl_last->used)
        {   /* try growing last unused */
            if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
                PAGE_ALIGNP(bl_last->ptr+bl_last->size) - PAGE_DOWNALIGNP(bl_last->ptr),
                PAGE_ALIGNP(bl_last->ptr+size)-PAGE_DOWNALIGNP(bl_last->ptr),
                0) == MAP_FAILED)
            {   /* unable to grow -- initiate new block */
                pp = bl_mapnew(size);
                if (!pp) return NULL;
            }
            else
            {   /* grown in place: stretch the free block over the new pages */
                pp = bl_last;
                FREE_MEM_DEL_BLOCK(pp);
                pp->size = PAGE_ALIGNP(pp->ptr+size) - pp->ptr;
                FREE_MEM_INS_BLOCK(pp);
            }
        }
        else
        {   /* bl_last is used block */
            if (mremap(PAGE_DOWNALIGNP(bl_last->ptr),
                PAGE_ALIGNP(bl_last->ptr+bl_last->size)-PAGE_DOWNALIGNP(bl_last->ptr),
                PAGE_ALIGNP(bl_last->ptr+bl_last->size+size) - PAGE_DOWNALIGNP(bl_last->ptr),
                0) == MAP_FAILED)
            {
                pp = bl_mapnew(size);
                if (!pp) return NULL;
            }
            else
            {   /* append a new free block covering the freshly grown pages.
                   NOTE(review): bl_get() is unchecked here -- INIT_BLOCK
                   would memset NULL if the hunk manager is exhausted. */
                pp = bl_get();
                INIT_BLOCK(pp,bl_last->ptr+bl_last->size,
                    PAGE_ALIGNP(bl_last->ptr+bl_last->size+size)-bl_last->ptr-bl_last->size);
                bl_last = pp;
            }
        }
    }
    /* just delete this node from free_mem tree */
    if (pp->next) free_mem_replace(pp->next); else free_mem_del(pp);
    pp->used = 1;
    if (pp->size - size > MALLOC_ALIGN)
    {   /* this block can be splitted (it is unused,not_broken) */
        SPLIT_BLOCK(pp,size);
    }
    return pp;
}
/* Returns block `b` to the free pool: merges it with adjacent free blocks
   (unless a mapping boundary marked by `broken` intervenes), reinserts it
   into <free_mem>, and uncommits the pages it spans. */
static void bl_free(Block_t *b)
{
    Block_t *p, *bl_next, *bl_prev;
    /* Look for blocks before & after `b` */
    for (p = ptrs_root, bl_next = NULL, bl_prev = NULL; p;)
    {
        if (p->ptr > b->ptr) { bl_next = p; p = p->l_ptrs; }
        else if (p->ptr < b->ptr) { bl_prev = p; p = p->r_ptrs; }
        else break;
    }
    /* when `b` has subtrees its true address neighbours are their
       extreme descendants, overriding the path-derived candidates */
    if (b->l_ptrs)
        for (bl_prev = b->l_ptrs; bl_prev->r_ptrs; bl_prev = bl_prev->r_ptrs);
    if (b->r_ptrs)
        for (bl_next = b->r_ptrs; bl_next->l_ptrs; bl_next = bl_next->l_ptrs);
    /* absorb the following free block (its `broken` flag guards the seam) */
    if (bl_next && !bl_next->broken && !bl_next->used)
    {
        FREE_MEM_DEL_BLOCK(bl_next)
        COMBINE_BLOCKS(b,bl_next)
    }
    /* let the preceding free block absorb `b` (b->broken marks a mapping
       start, so merging backwards across it is forbidden) */
    if (bl_prev && !b->broken && !bl_prev->used)
    {
        FREE_MEM_DEL_BLOCK(bl_prev)
        COMBINE_BLOCKS(bl_prev,b)
        b = bl_prev;
    }
    b->used = 0;
    FREE_MEM_INS_BLOCK(b)
    bl_uncommit(b);
}
/* One-time allocator setup: resets all state and computes, for every hunk
   size, how many slots (plus their usage bitmap) fit in one page. */
static void malloc_init(void)
{
    int i, mapsize, x, old_x, gcount;
    mapsize = M_PAGESIZE;
    mmalloc_initialized = 0;
    bl_last = NULL;
    free_mem_root = NULL;
    ptrs_root = NULL;
    mapsize -= sizeof(Hunk_t);     /* space left after the hunk header */
    for (i = 1; i <= HUNK_MAXSIZE; i++)
    {
        free_h[i] = (Hunk_t*)NULL;
        /* fixed-point iteration: the slot count and the bitmap size depend
           on each other; `gcount` breaks a possible oscillation between
           two candidate solutions once a fitting one is found */
        for (x = mapsize/i, gcount = 0, old_x = 0; old_x != x;)
        {
            old_x = x;
            x = (mapsize - ALIGN(DIV8(old_x+7)))/i;
            if (gcount > 1 && x*i + ALIGN(DIV8(x+7)) <= mapsize) break;
            if (x*i + ALIGN(DIV8(x+7)) > mapsize) gcount++;
        }
        total_h[i] = x;
    }
    /* no-op in the single-threaded build: mutex_init expands to nothing
       (the malloc_lock declaration itself is commented out above) */
    mutex_init(&malloc_lock);
    mmalloc_initialized = 1;
}
  469. static void *mmalloc(size_t size)
  470. {
  471. void *p;
  472. if (size == 0) return NULL;
  473. if (mmalloc_initialized < 0) malloc_init();
  474. if (mmalloc_initialized) mutex_lock(&malloc_lock);
  475. if (size <= HUNK_MAXSIZE)
  476. p = hunk_alloc(size);
  477. else
  478. {
  479. if ((p = bl_alloc(ALIGN(size))) != NULL)
  480. p = ((Block_t*)p)->ptr;
  481. }
  482. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  483. return p;
  484. }
  485. static void mfree(void *ptr)
  486. {
  487. Block_t *p, *best;
  488. if (mmalloc_initialized < 0) return;
  489. if (mmalloc_initialized) mutex_lock(&malloc_lock);
  490. for (p = ptrs_root, best = NULL;p;)
  491. {
  492. if (p->ptr > (char*)ptr) p = p->l_ptrs;
  493. else { best = p; p = p->r_ptrs; }
  494. }
  495. if (!best || !best->used || best->ptr != (char*)ptr)
  496. {
  497. hunk_free(ptr);
  498. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  499. return;
  500. }
  501. bl_free(best);
  502. if (mmalloc_initialized) mutex_unlock(&malloc_lock);
  503. }
/* Resizes an allocation strictly in place (the data never moves).
   Returns the original pointer on success; returns NULL when `ptr` is not
   a live large-block allocation or when the resize cannot be satisfied by
   shrinking/extending into the adjacent block. */
static void *mrealloc_no_move(void *ptr, size_t size)
{
    Block_t *p, *best, *next;
    /* small targets are never resized in place -- caller must copy */
    if (size <= HUNK_MAXSIZE) return NULL;
    if (mmalloc_initialized <= 0) return mmalloc(size);
    mutex_lock(&malloc_lock);
    /* Locate block: rightmost block whose data pointer is <= ptr */
    for (p = ptrs_root, best = NULL;p;)
    {
        if (p->ptr > (char*)ptr) p = p->l_ptrs;
        else { best = p; p = p->r_ptrs; }
    }
    if (!best || !best->used || best->ptr != (char*)ptr)
    {   /* not a live large-block pointer */
        mutex_unlock(&malloc_lock);
        return NULL;
    }
    size = ALIGN(size);
    if (size == best->size)
    {   /* already exactly the right size */
        mutex_unlock(&malloc_lock);
        return ptr;
    }
    if (best->r_ptrs) /* get block just after */
        for (next = best->r_ptrs; next->l_ptrs; next = next->l_ptrs);
    else
        /* no right subtree: the successor is on the root path */
        for (p = ptrs_root, next = NULL;p;)
        {
            if (p->ptr > best->ptr) { next = p; p = p->l_ptrs; }
            else if (p->ptr < best->ptr) p = p->r_ptrs;
            else break;
        }
    if (size < best->size)
    {   /* shrink block */
        if (!next || next->used || next->broken)
        {
            if (best->size - size > MALLOC_ALIGN)
            {   /* do split */
                SPLIT_BLOCK(best,size);
            }
            /* else: remainder too small to track -- keep it */
        }
        else
        {   /* just move border of next block */
            SHRINK_BLOCK(best,next,size);
        }
    }
    else if (next && !next->broken && !next->used)
    {   /* can expand */
        if (best->size + next->size > size + HUNK_MAXSIZE)
        {   /* shrink next free block */
            SHRINK_BLOCK(best,next,size);
        }
        else if (best->size + next->size >= size)
        {   /* combine blocks (eat next one) */
            FREE_MEM_DEL_BLOCK(next);
            COMBINE_BLOCKS(best,next);
        }
        else
        {   /* not enough memory in next block */
            mutex_unlock(&malloc_lock);
            return NULL;
        }
    }
    else
    {   /* no next block */
        mutex_unlock(&malloc_lock);
        return NULL;
    }
    mutex_unlock(&malloc_lock);
    return best->ptr;
}
/* Full realloc: first tries an in-place resize; otherwise allocates a new
   region, copies the payload, and frees the old one.  A NULL / unknown
   `ptr` degrades to plain mmalloc.  Returns NULL if no memory is
   available (the original block is then left intact). */
static void *mrealloc(void *ptr, size_t size)
{
    void *tmp;
    tmp = mrealloc_no_move(ptr, size);
    if (!tmp)
    {   /* in-place resize failed -- fall back to allocate+copy+free */
        Block_t *p, *best;
        mutex_lock(&malloc_lock);
        /* rightmost block whose data pointer is <= ptr */
        for (p = ptrs_root, best = NULL;p;)
        {
            if (p->ptr > (char*)ptr) p = p->l_ptrs;
            else { best = p; p = p->r_ptrs; }
        }
        if (!best || !best->used || best->ptr != (char*)ptr)
        {   /* not a large block -- maybe a hunk allocation */
            if (ptr)
            {
                Hunk_t *h;
                h = (Hunk_t*)PAGE_DOWNALIGNP(ptr);
                if (h->id == HUNK_ID)
                {   /* hunk slot: reuse if the stored size already fits */
                    mutex_unlock(&malloc_lock);
                    if ((size >= HUNK_THRESHOLD && ALIGN(size) == h->size) ||
                        size == h->size) return ptr;
                    /* NOTE(review): lock is dropped around mmalloc and
                       retaken for the copy -- safe only because this build
                       is single-threaded (mutexes are no-ops) */
                    if ((tmp = mmalloc(size)) == NULL) return NULL;
                    mutex_lock(&malloc_lock);
                    memcpy(tmp,ptr,((size<h->size)?size:h->size));
                    hunk_free(ptr);
                    mutex_unlock(&malloc_lock);
                    return tmp;
                }
            }
            /* unknown pointer (or NULL): behave like malloc */
            mutex_unlock(&malloc_lock);
            return mmalloc(size);
        }
        mutex_unlock(&malloc_lock);
        /* copy whole block */
        if ((tmp = mmalloc(size)) == NULL) return NULL;
        memcpy(tmp,ptr,((size<best->size)?size:best->size));
        mutex_lock(&malloc_lock);
        bl_free(best);
        mutex_unlock(&malloc_lock);
    }
    return tmp;
}
  620. static void *mcalloc(size_t unit, size_t quantity)
  621. {
  622. void *p;
  623. unit *= quantity;
  624. if ((p = mmalloc(unit)) == NULL) return NULL;
  625. memset(p,0,unit);
  626. return p;
  627. }
/* PUBLIC functions */

/* Standard malloc entry point -- delegates to the internal allocator. */
void *malloc(size_t size) {
    return mmalloc(size);
}
/* Standard calloc entry point -- delegates to the internal allocator. */
void *calloc(size_t unit, size_t quantity) {
    return mcalloc(unit,quantity);
}
/* Standard realloc entry point -- delegates to the internal allocator. */
void *realloc(void *ptr, size_t size) {
    return mrealloc(ptr,size);
}
  638. void free(void *ptr) {
  639. return mfree(ptr);
  640. }