/*
 * malloc - heap manager based on heavy use of virtual memory management.
 * Copyright (C) 1998 Valery Shchedrin
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA
 *
 * Public Functions:
 *
 * void *malloc(size_t size);
 *     Allocates `size` bytes; returns NULL if no free memory is available.
 *
 * void *calloc(size_t unit, size_t quantity);
 *     Allocates `quantity*unit` zeroed bytes via an internal malloc call.
 *
 * void *realloc(void *ptr, size_t size);
 *     Reallocates the already allocated block `ptr`; if `ptr` is not a
 *     valid block, it behaves like malloc. NULL is returned if no free
 *     memory is available.
 *
 * void *_realloc_no_move(void *ptr, size_t size);
 *     Reallocates the already allocated block `ptr`; if `ptr` is not a
 *     valid block, or if the reallocation cannot be done by shrinking or
 *     expanding the already allocated block, NULL is returned.
 *
 * void free(void *ptr);
 *     Frees an already allocated block; if `ptr` is not a valid block,
 *     nothing happens.
 */
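/*
 * Usage sketch (illustrative only, not part of the original file; sizes are
 * chosen above HUNK_MAXSIZE so that _realloc_no_move operates on a
 * tree-managed block rather than a hunk slot, for which it returns NULL):
 *
 *     void *p = malloc(1000);              // NULL if no memory available
 *     p = realloc(p, 2000);                // may move the block
 *     void *q = _realloc_no_move(p, 1500); // NULL if in-place resize fails
 *     free(q ? q : p);                     // invalid pointers are ignored
 */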
/*
 * Manuel Novoa III      Jan 2001
 *
 * Modified to decrease object sizes.
 *   Broke into independent object files.
 *   Converted INIT_BLOCK() and FREE_MEM_DEL_BLOCK() from macros to functions.
 */
#define _POSIX_SOURCE
#define _XOPEN_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <limits.h>
#include <sys/time.h>
#include <asm/page.h>
#include <sys/mman.h>
#include <string.h>
#include "malloc.h"
#define M_DOTRIMMING 1
#define M_MULTITHREADED 0

#define VALLOC_MSTART  ((void*)0x1c000000)
#define LARGE_MSTART   ((void*)0x19000000)
#define HUNK_MSTART    ((void*)0x18000000)
#define HUNK_MSIZE     M_PAGESIZE
#define HUNK_ID        0x99171713

/* alignment of allocations > HUNK_THRESHOLD */
#define MALLOC_ALIGN   4

/* allocations < HUNK_THRESHOLD will not be aligned */
#define HUNK_THRESHOLD 4

/* allocations of up to HUNK_MAXSIZE bytes are packed together into shared
   pages ("hunks") to decrease memory waste */
#define HUNK_MAXSIZE 128

/* returns a value not less than size, aligned to MALLOC_ALIGN */
#define ALIGN(size) (((size)+(MALLOC_ALIGN)-1)&(~((MALLOC_ALIGN)-1)))

/* align s up, or p up/down, to page boundaries */
#define PAGE_ALIGN(s)      (((s)+M_PAGESIZE-1)&(~(M_PAGESIZE-1)))
#define PAGE_ALIGNP(p)     ((char*)PAGE_ALIGN((unsigned)(p)))
#define PAGE_DOWNALIGNP(p) ((char*)(((unsigned)(p))&(~(M_PAGESIZE-1))))

/* returns v * 2 for your machine (speed-up) */
#define MUL2(v)  ((v)*2)

/* does v *= 8 for your machine (speed-up) */
#define EMUL8(v) v*=8

/* does v/8 for your machine (speed-up) */
#define DIV8(v)  ((v)/8)
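/*
 * Worked example of the rounding macros (with MALLOC_ALIGN == 4 and an
 * assumed 4096-byte page): ALIGN(5) == 8 and ALIGN(8) == 8;
 * PAGE_ALIGN(4097) == 8192; PAGE_DOWNALIGNP(0x18000fff) == (char*)0x18000000.
 * Adding (alignment - 1) before masking rounds up; masking alone rounds down.
 */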
#if M_MULTITHREADED
#error This version does not support threads
#else
typedef int mutex_t;

#define mutex_lock(x)
#define mutex_unlock(x)
#define mutex_init(x)
#define MUTEX_INITIALIZER 0
//static mutex_t malloc_lock = MUTEX_INITIALIZER;
#endif

extern int __malloc_initialized;

#ifdef L__malloc_init
int __malloc_initialized = -1;
/* -1 == uninitialized, 0 == initializing, 1 == initialized */
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void*)-1)
#endif

#if defined(MAP_ANONYMOUS) && !defined(MAP_ANON)
#define MAP_ANON MAP_ANONYMOUS
#endif

#ifndef NULL
#define NULL ((void*)0)
#endif
/* guess pagesize */
#ifndef M_PAGESIZE
#ifdef _SC_PAGESIZE
#ifndef _SC_PAGE_SIZE
#define _SC_PAGE_SIZE _SC_PAGESIZE
#endif
#endif
#ifdef _SC_PAGE_SIZE
#define M_PAGESIZE sysconf(_SC_PAGE_SIZE)
#else /* !_SC_PAGESIZE */
#if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
#define M_PAGESIZE getpagesize()
#else /* !HAVE_GETPAGESIZE */
#include <sys/param.h>
#ifdef EXEC_PAGESIZE
#define M_PAGESIZE EXEC_PAGESIZE
#else /* !EXEC_PAGESIZE */
#ifdef NBPG
#ifndef CLSIZE
#define M_PAGESIZE NBPG
#else /* !CLSIZE */
#define M_PAGESIZE (NBPG*CLSIZE)
#endif /* CLSIZE */
#else
#ifdef NBPC
#define M_PAGESIZE NBPC
#else /* !NBPC */
#ifdef PAGESIZE
#define M_PAGESIZE PAGESIZE
#else /* !PAGESIZE */
#define M_PAGESIZE 4096
#endif /* PAGESIZE */
#endif /* NBPC */
#endif /* NBPG */
#endif /* EXEC_PAGESIZE */
#endif /* HAVE_GETPAGESIZE */
#endif /* _SC_PAGE_SIZE */
#endif /* !M_PAGESIZE */
/* HUNK MANAGER */

typedef struct Hunk_s Hunk_t;

struct Hunk_s {                 /* per-page hunk header: id + bitfields + next
                                   (12 bytes on a typical 32-bit target) */
    int id;                     /* unique id */
    unsigned int total:12, used:12, size:8;
    Hunk_t *next;               /* next free in __free_h */
};

#define usagemap(h) (((unsigned char *)(h))+sizeof(Hunk_t))
#define hunk_ptr(h) (((char*)(h))+sizeof(Hunk_t)+ALIGN(DIV8(h->total+7)))
#define hunk(h)     ((Hunk_t*)(h))

extern Hunk_t *__free_h[HUNK_MAXSIZE + 1];
extern int __total_h[HUNK_MAXSIZE + 1];

#ifdef L__malloc_init
Hunk_t *__free_h[HUNK_MAXSIZE + 1];     /* free hash: pages with free slots, by slot size */
int __total_h[HUNK_MAXSIZE + 1];        /* slots per page (Hunk_t's `total`), by slot size */
#endif
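/*
 * Layout of a hunk page: the Hunk_t header is followed by a usage bitmap of
 * one bit per slot (rounded up to a whole ALIGNed byte count), then by
 * `total` slots of `size` bytes each:
 *
 *   | Hunk_t | usagemap: ALIGN(DIV8(total+7)) bytes | slot 0 | ... | slot total-1 |
 *
 * hunk_ptr(h) points at slot 0, so slot i lives at hunk_ptr(h) + i * h->size.
 */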
extern void *__hunk_alloc(int size);

#ifdef L_malloc
/* __hunk_alloc allocates blocks of <= HUNK_MAXSIZE bytes */
void *__hunk_alloc(int size)
{
    Hunk_t *p;
    unsigned long *cpl;
    int i, c;

    if (size >= HUNK_THRESHOLD)
        size = ALIGN(size);

    /* Look for an already mapped hunk page with a free slot */
    if ((p = __free_h[size]) == NULL) {
        if (
               (p =
                (Hunk_t *) mmap(HUNK_MSTART, HUNK_MSIZE,
                                PROT_READ | PROT_WRITE,
#ifdef __HAS_NO_MMU__
                                MAP_SHARED | MAP_ANONYMOUS
#else
                                MAP_PRIVATE | MAP_ANONYMOUS
#endif
                                , 0, 0)) == (Hunk_t *) MAP_FAILED)
            return NULL;
        memset(p, 0, HUNK_MSIZE);
        p->id = HUNK_ID;
        p->total = __total_h[size];
        /* p->used = 0; */
        p->size = size;
        /* p->next = (Hunk_t*)NULL; */
        /* memset(usagemap(p), 0, bound); */
        __free_h[size] = p;
    }

    /* Locate a free bit in the usagemap */
    for (cpl = (unsigned long *) usagemap(p); *cpl == 0xFFFFFFFF; cpl++);
    i = ((unsigned char *) cpl) - usagemap(p);
    if (*(unsigned short *) cpl != 0xFFFF) {
        if (*(unsigned char *) cpl == 0xFF) {
            c = *(int *) (((unsigned char *) cpl) + 1);
            i++;
        } else
            c = *(int *) (unsigned char *) cpl;
    } else {
        i += 2;
        c = *(((unsigned char *) cpl) + 2);
        if (c == 0xFF) {
            c = *(int *) (((unsigned char *) cpl) + 3);
            i++;
        }
    }
    EMUL8(i);                   /* byte index -> bit index */
    if ((c & 0xF) == 0xF) {
        c >>= 4;
        i += 4;
    }
    if ((c & 0x3) == 0x3) {
        c >>= 2;
        i += 2;
    }
    if (c & 1)
        i++;

    usagemap(p)[DIV8(i)] |= (1 << (i & 7));     /* set bit */

    /* Increment counter and update hashes */
    if (++p->used == p->total) {
        __free_h[p->size] = p->next;
        p->next = NULL;
    }
    return hunk_ptr(p) + i * p->size;
}
#endif /* L_malloc */
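/*
 * Worked example of the free-bit scan above (added commentary; a
 * little-endian target is assumed, as the 0xFFFF/0xFF shortcuts require):
 * if the first usagemap word holds the bytes FF FF FF 0F, the word-wide test
 * stops there; the 16-bit test sees 0xFFFF and byte 2 is also 0xFF, so the
 * scan lands on byte 3 (i = 3); EMUL8 turns that into bit index 24, and the
 * nibble test (c & 0xF) == 0xF advances to i = 28 -- exactly the fifth bit
 * of byte 3 (0x0F), the first zero bit.  The tail is a branchy binary search
 * over one 32-bit word instead of a loop over 32 individual bits.
 */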
extern void __hunk_free(char *ptr);

#ifdef L__free_support
/* __hunk_free frees blocks allocated by __hunk_alloc */
void __hunk_free(char *ptr)
{
    unsigned char *up;
    int i, v;
    Hunk_t *h;

    if (!ptr)
        return;

    h = (Hunk_t *) PAGE_DOWNALIGNP(ptr);

    /* Validate `ptr` */
    if (h->id != HUNK_ID)
        return;
    v = ptr - hunk_ptr(h);
    i = v / h->size;
    if (v % h->size != 0 || i < 0 || i >= h->total)
        return;

    /* Clear the slot's bit in `usagemap` */
    up = &(usagemap(h)[DIV8(i)]);
    i = 1 << (i & 7);
    if (!(*up & i))
        return;
    *up ^= i;

    /* Update hunk counters */
    if (h->used == h->total) {
        if (--h->used) {        /* insert into __free_h */
            h->next = __free_h[h->size];
            __free_h[h->size] = h;
        }                       /* else - it will be unmapped */
    } else {
        if (!--h->used) {       /* delete from __free_h - will be unmapped */
            Hunk_t *p, *pp;

            for (p = __free_h[h->size], pp = NULL; p != h; pp = p, p = p->next);
            if (!pp)
                __free_h[h->size] = p->next;
            else
                pp->next = p->next;
        }
    }

    /* Unmap an empty hunk page */
    if (!h->used)
        munmap((void *) h, HUNK_MSIZE);
}
#endif /* L__free_support */
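/*
 * Hunk page lifecycle: a page sits in __free_h[size] only while it has free
 * slots.  Freeing a slot in a completely full page links the page back into
 * the list; freeing the last used slot unlinks it and munmap()s the whole
 * page back to the kernel.
 */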
/* BLOCK MANAGER */

typedef struct Block_s Block_t;

struct Block_s {                /* 32-byte control structure (if 4-byte aligned) */
    char *ptr;                  /* pointer to the related data */
    Block_t *next;              /* next in the free_mem chain of equal-sized blocks */
    Block_t *l_free_mem, *r_free_mem;   /* left & right subtrees of <free_mem> */
    Block_t *l_ptrs, *r_ptrs;   /* left & right subtrees of <ptrs> */
    size_t size;                /* block size in bytes */

    /* packed 4-byte attributes */
/* { */
    signed char bal_free_mem:8; /* balance of <free_mem> subtree */
    signed char bal_ptrs:8;     /* balance of <ptrs> subtree */
    unsigned int used:1;        /* used/free state of the block */
    unsigned int broken:1;      /* 1 if the previous block can't be merged with it */
/* } */
};
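/*
 * Every large block is indexed in two AVL trees at once.  <free_mem> is
 * keyed by size and holds only free blocks, so bl_alloc() can do a best-fit
 * search; blocks of equal size hang off the tree node through `next`.
 * <ptrs> is keyed by address and holds all blocks, so free() can map a user
 * pointer back to its Block_t and find the address-adjacent neighbours for
 * coalescing.
 */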
extern Block_t *__bl_last;      /* last mmapped block */

#ifdef L__malloc_init
Block_t *__bl_last;             /* last mmapped block */
#endif

#define bl_get()  __hunk_alloc(sizeof(Block_t))
#define bl_rel(p) __hunk_free((char*)p)

extern Block_t *__Avl_Block_tfree_mem_tree;
extern Block_t *__free_mem_ins(Block_t * data);
extern void __free_mem_del(Block_t * data);
extern void __free_mem_replace(Block_t * data);
extern Block_t *__Avl_Block_tptrs_tree;
extern Block_t *__ptrs_ins(Block_t * data);
extern void __ptrs_del(Block_t * data);
extern void __bl_uncommit(Block_t * b);
extern void __bl_free(Block_t * b);
/* like C++ templates ;-) */
#include "avlmacro.h"

#define FREE_MEM_COMPARE(i,a,b) \
{ \
    if ( (a)->size < (b)->size ) { \
        i = -1; \
    } else if ( (a)->size > (b)->size ) { \
        i = 1; \
    } else { \
        i = 0; \
    } \
}

#define PTRS_COMPARE(i,a,b) \
{ \
    if ( (a)->ptr < (b)->ptr ) { \
        i = -1; \
    } else if ( (a)->ptr > (b)->ptr ) { \
        i = 1; \
    } else { \
        i = 0; \
    } \
}

#ifdef L__avl_support
Avl_Tree(free_mem, Block_t, free_mem, FREE_MEM_COMPARE)
Avl_Tree_no_replace(ptrs, Block_t, ptrs, PTRS_COMPARE)
#endif

#define free_mem_root Avl_Root(Block_t, free_mem)
#define ptrs_root     Avl_Root(Block_t, ptrs)
/* pp is the freed block */
#define FREE_MEM_DEL_BLOCK(pp,p) {p = __free_mem_del_block(pp,p);}

extern Block_t *__free_mem_del_block(Block_t * pp, Block_t * p);

#ifdef L_malloc
Block_t *__free_mem_del_block(Block_t * pp, Block_t * p)
{
    /* find the tree node holding pp's size class */
    for (p = free_mem_root;;)
        if (p->size > pp->size)
            p = p->l_free_mem;
        else if (p->size < pp->size)
            p = p->r_free_mem;
        else
            break;
    if (p == pp) {              /* pp is the tree node itself */
        if (pp->next)
            __free_mem_replace(pp->next);
        else
            __free_mem_del(pp);
    } else {                    /* pp hangs off the node's `next` chain */
        for (; p->next != pp; p = p->next);
        p->next = pp->next;
    }
    return p;
}
#endif /* L_malloc */
#define FREE_MEM_INS_BLOCK(pp) \
{ \
    if ((p = __free_mem_ins(pp)) != NULL) \
    { \
        pp->next = p->next; \
        p->next = pp; \
    } \
    else pp->next = NULL; \
}

/* `b` is the current block, `pp` is the next block */
#define COMBINE_BLOCKS(b,pp) \
{ \
    __ptrs_del(pp); \
    b->size += pp->size; \
    if (pp == __bl_last) __bl_last = b; \
    bl_rel(pp); \
}

/* initializes new block b */
#define INIT_BLOCK(b, pppp, sz) { p = __init_block(b, pppp, sz); }

extern Block_t *__init_block(Block_t * b, char *pppp, size_t sz);

#ifdef L_malloc
Block_t *__init_block(Block_t * b, char *pppp, size_t sz)
{
    Block_t *p;

    memset(b, 0, sizeof(Block_t));
    b->ptr = pppp;
    b->size = sz;
    __ptrs_ins(b);
    FREE_MEM_INS_BLOCK(b);
    return p;
}
#endif /* L_malloc */

/* `b` is the current block, `sz` its new size;
   block `b` will be split into one busy and one free block */
#define SPLIT_BLOCK(b,sz) \
{ \
    Block_t *bt; \
    bt = bl_get(); \
    INIT_BLOCK(bt, b->ptr + sz, b->size - sz); \
    b->size = sz; \
    if (__bl_last == b) __bl_last = bt; \
    __bl_uncommit(bt); \
}

/* `b` is the current block, `pp` is the next free block, `sz` is the needed size */
#define SHRINK_BLOCK(b,pp,sz) \
{ \
    FREE_MEM_DEL_BLOCK(pp,p); \
    pp->ptr = b->ptr + sz; \
    pp->size += b->size - sz; \
    b->size = sz; \
    FREE_MEM_INS_BLOCK(pp); \
    __bl_uncommit(pp); \
}
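/*
 * SPLIT_BLOCK carves a busy block in two, giving the tail to a brand-new
 * free Block_t; SHRINK_BLOCK instead moves the boundary between a busy
 * block and the free block that already follows it, so no new Block_t is
 * needed.  Both end by __bl_uncommit()ing the freed span so the kernel can
 * reclaim its pages.
 */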
#ifdef L_malloc
static Block_t *bl_mapnew(size_t size)
{
    size_t map_size;
    Block_t *pp, *p;
    void *pt;

    map_size = PAGE_ALIGN(size);
    pt = mmap(LARGE_MSTART, map_size, PROT_READ | PROT_WRITE | PROT_EXEC,
#ifdef __HAS_NO_MMU__
              MAP_SHARED | MAP_ANONYMOUS
#else
              MAP_PRIVATE | MAP_ANONYMOUS
#endif
              , 0, 0);
    if (pt == MAP_FAILED)
        return (Block_t *) NULL;

    __bl_last = pp = bl_get();
    INIT_BLOCK(pp, (char *) pt, map_size);
    pp->broken = 1;

    return pp;
}

void __bl_uncommit(Block_t * b)
{
    char *u_start, *u_end;

    u_start = PAGE_ALIGNP(b->ptr);
    u_end = PAGE_DOWNALIGNP(b->ptr + b->size);
    if (u_end <= u_start)
        return;

#if M_DOTRIMMING
    mmap(u_start, u_end - u_start, PROT_READ | PROT_WRITE | PROT_EXEC,
#ifdef __HAS_NO_MMU__
         MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED
#else
         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED
#endif
         , 0, 0);
#endif
}
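/*
 * "Uncommitting" works by mmap'ing fresh anonymous pages with MAP_FIXED over
 * the interior page-aligned span of a free block.  The kernel discards the
 * old pages and the range becomes zero-fill-on-demand again, so a large
 * free() returns physical memory without unmapping an address range the
 * block manager still tracks.
 */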
/* requested size must already be aligned to MALLOC_ALIGN */
static Block_t *bl_alloc(size_t size)
{
    Block_t *p, *pp;

    /* try to find the needed space in existing memory (best fit) */
    for (p = free_mem_root, pp = NULL; p;) {
        if (p->size > size) {
            pp = p;
            p = p->l_free_mem;
        } else if (p->size < size)
            p = p->r_free_mem;
        else {
            pp = p;
            break;
        }
    }

    if (!pp) {                  /* map some memory */
        if (!__bl_last) {       /* just do the initial mmap */
            pp = bl_mapnew(size);
            if (!pp)
                return NULL;
        } else if (!__bl_last->used) {  /* try growing the last unused block */
            if (mremap(PAGE_DOWNALIGNP(__bl_last->ptr),
                       PAGE_ALIGNP(__bl_last->ptr + __bl_last->size) -
                       PAGE_DOWNALIGNP(__bl_last->ptr),
                       PAGE_ALIGNP(__bl_last->ptr + size) -
                       PAGE_DOWNALIGNP(__bl_last->ptr), 0) == MAP_FAILED) {
                /* unable to grow -- map a new block */
                pp = bl_mapnew(size);
                if (!pp)
                    return NULL;
            } else {
                pp = __bl_last;
                FREE_MEM_DEL_BLOCK(pp, p);
                pp->size = PAGE_ALIGNP(pp->ptr + size) - pp->ptr;
                FREE_MEM_INS_BLOCK(pp);
            }
        } else {                /* __bl_last is a used block */
            if (mremap(PAGE_DOWNALIGNP(__bl_last->ptr),
                       PAGE_ALIGNP(__bl_last->ptr + __bl_last->size) -
                       PAGE_DOWNALIGNP(__bl_last->ptr),
                       PAGE_ALIGNP(__bl_last->ptr + __bl_last->size +
                                   size) - PAGE_DOWNALIGNP(__bl_last->ptr),
                       0) == MAP_FAILED) {
                pp = bl_mapnew(size);
                if (!pp)
                    return NULL;
            } else {
                pp = bl_get();
                INIT_BLOCK(pp, __bl_last->ptr + __bl_last->size,
                           PAGE_ALIGNP(__bl_last->ptr + __bl_last->size +
                                       size) - __bl_last->ptr -
                           __bl_last->size);
                __bl_last = pp;
            }
        }
    }

    /* just delete this node from the free_mem tree */
    if (pp->next)
        __free_mem_replace(pp->next);
    else
        __free_mem_del(pp);
    pp->used = 1;

    if (pp->size - size > MALLOC_ALIGN) {       /* this block can be split (it is unused, not broken) */
        SPLIT_BLOCK(pp, size);
    }

    return pp;
}
#endif /* L_malloc */
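/*
 * Allocation strategy, in short: bl_alloc() first best-fit searches
 * <free_mem>; on a miss it grows the heap, preferring an mremap() of the
 * last mapped block (cheap, and keeps the new space mergeable with its
 * neighbour) and falling back to a fresh bl_mapnew() mapping, whose first
 * block is marked `broken`.  Any tail larger than MALLOC_ALIGN is then
 * split off as a new free block.
 */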
#ifdef L__free_support
void __bl_free(Block_t * b)
{
    Block_t *p, *bl_next, *bl_prev;

    /* Look for the blocks just before & after `b` in address order */
    for (p = ptrs_root, bl_next = NULL, bl_prev = NULL; p;) {
        if (p->ptr > b->ptr) {
            bl_next = p;
            p = p->l_ptrs;
        } else if (p->ptr < b->ptr) {
            bl_prev = p;
            p = p->r_ptrs;
        } else
            break;
    }
    if (b->l_ptrs)
        for (bl_prev = b->l_ptrs; bl_prev->r_ptrs; bl_prev = bl_prev->r_ptrs);
    if (b->r_ptrs)
        for (bl_next = b->r_ptrs; bl_next->l_ptrs; bl_next = bl_next->l_ptrs);

    /* merge with a free, mergeable successor */
    if (bl_next && !bl_next->broken && !bl_next->used) {
        FREE_MEM_DEL_BLOCK(bl_next, p)
        COMBINE_BLOCKS(b, bl_next)
    }
    /* merge with a free, mergeable predecessor */
    if (bl_prev && !b->broken && !bl_prev->used) {
        FREE_MEM_DEL_BLOCK(bl_prev, p)
        COMBINE_BLOCKS(bl_prev, b)
        b = bl_prev;
    }

    b->used = 0;
    FREE_MEM_INS_BLOCK(b)
    __bl_uncommit(b);
}
#endif /* L__free_support */
extern void __malloc_init(void);

#ifdef L__malloc_init
void __malloc_init(void)
{
    int i, mapsize, x, old_x, gcount;

    mapsize = M_PAGESIZE;

    __malloc_initialized = 0;
    __bl_last = NULL;
    free_mem_root = NULL;
    ptrs_root = NULL;

    mapsize -= sizeof(Hunk_t);

    for (i = 1; i <= HUNK_MAXSIZE; i++) {
        __free_h[i] = (Hunk_t *) NULL;
        for (x = mapsize / i, gcount = 0, old_x = 0; old_x != x;) {
            old_x = x;
            x = (mapsize - ALIGN(DIV8(old_x + 7))) / i;
            if (gcount > 1 && x * i + ALIGN(DIV8(x + 7)) <= mapsize)
                break;
            if (x * i + ALIGN(DIV8(x + 7)) > mapsize)
                gcount++;
        }
        __total_h[i] = x;
    }

    mutex_init(&malloc_lock);
    __malloc_initialized = 1;
}
#endif /* L__malloc_init */
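/*
 * The inner loop above solves for the largest slot count x such that the
 * slots and their usage bitmap both fit in what is left of the page, i.e.
 *
 *     x*i + ALIGN(DIV8(x+7)) <= mapsize
 *
 * by iterating x -> (mapsize - bitmap_bytes(x)) / i until it reaches a
 * fixed point; gcount appears to guard against the iteration oscillating
 * between two candidate values.
 */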
#ifdef L_malloc
void *malloc(size_t size)
{
    void *p;

    if (size == 0)
        return NULL;

    if (__malloc_initialized < 0)
        __malloc_init();
    if (__malloc_initialized)
        mutex_lock(&malloc_lock);

    if (size <= HUNK_MAXSIZE)
        p = __hunk_alloc(size);
    else {
        if ((p = bl_alloc(ALIGN(size))) != NULL)
            p = ((Block_t *) p)->ptr;
    }

    if (__malloc_initialized)
        mutex_unlock(&malloc_lock);
    return p;
}
#endif /* L_malloc */
#ifdef L_free
void free(void *ptr)
{
    Block_t *p, *best;

    if (__malloc_initialized < 0)
        return;
    if (__malloc_initialized)
        mutex_lock(&malloc_lock);

    /* find the tracked block with the greatest ptr <= `ptr` */
    for (p = ptrs_root, best = NULL; p;) {
        if (p->ptr > (char *) ptr)
            p = p->l_ptrs;
        else {
            best = p;
            p = p->r_ptrs;
        }
    }

    /* not a live large block -- try to free it as a hunk slot */
    if (!best || !best->used || best->ptr != (char *) ptr) {
        __hunk_free(ptr);
        if (__malloc_initialized)
            mutex_unlock(&malloc_lock);
        return;
    }

    __bl_free(best);

    if (__malloc_initialized)
        mutex_unlock(&malloc_lock);
}
#endif /* L_free */
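/*
 * Note on robustness: an unrecognized pointer falls through to
 * __hunk_free(), which validates the page header's HUNK_ID and the slot
 * alignment before touching anything, so free() on a garbage pointer is
 * normally a silent no-op rather than a crash (assuming the enclosing page
 * is at least mapped and readable).
 */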
extern void *_realloc_no_move(void *ptr, size_t size);

#ifdef L__realloc_no_move
void *_realloc_no_move(void *ptr, size_t size)
{
    Block_t *p, *best, *next;

    if (size <= HUNK_MAXSIZE)
        return NULL;

    if (__malloc_initialized <= 0)
        return malloc(size);

    mutex_lock(&malloc_lock);

    /* Locate the block */
    for (p = ptrs_root, best = NULL; p;) {
        if (p->ptr > (char *) ptr)
            p = p->l_ptrs;
        else {
            best = p;
            p = p->r_ptrs;
        }
    }
    if (!best || !best->used || best->ptr != (char *) ptr) {
        mutex_unlock(&malloc_lock);
        return NULL;
    }

    size = ALIGN(size);
    if (size == best->size) {
        mutex_unlock(&malloc_lock);
        return ptr;
    }

    if (best->r_ptrs)           /* get the block just after */
        for (next = best->r_ptrs; next->l_ptrs; next = next->l_ptrs);
    else
        for (p = ptrs_root, next = NULL; p;) {
            if (p->ptr > best->ptr) {
                next = p;
                p = p->l_ptrs;
            } else if (p->ptr < best->ptr)
                p = p->r_ptrs;
            else
                break;
        }

    if (size < best->size) {    /* shrink the block */
        if (!next || next->used || next->broken) {
            if (best->size - size > MALLOC_ALIGN) {     /* do a split */
                SPLIT_BLOCK(best, size);
            }
        } else {                /* just move the border of the next block */
            SHRINK_BLOCK(best, next, size);
        }
    } else if (next && !next->broken && !next->used) {  /* can expand */
        if (best->size + next->size > size + HUNK_MAXSIZE) {    /* shrink the next free block */
            SHRINK_BLOCK(best, next, size);
        } else if (best->size + next->size >= size) {   /* combine blocks (eat the next one) */
            FREE_MEM_DEL_BLOCK(next, p);
            COMBINE_BLOCKS(best, next);
        } else {                /* not enough memory in the next block */
            mutex_unlock(&malloc_lock);
            return NULL;
        }
    } else {                    /* no next block */
        mutex_unlock(&malloc_lock);
        return NULL;
    }

    mutex_unlock(&malloc_lock);
    return best->ptr;
}
#endif /* L__realloc_no_move */
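/*
 * _realloc_no_move() is the in-place-only resize used by realloc() below.
 * A NULL return means "could not resize without moving" (which includes
 * hunk-sized requests and unknown pointers), not necessarily out-of-memory,
 * so callers must be prepared to fall back to malloc + memcpy + free, as
 * realloc() does.
 */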
#ifdef L_realloc
void *realloc(void *ptr, size_t size)
{
    void *tmp;

    tmp = _realloc_no_move(ptr, size);

    if (!tmp) {                 /* in-place resize failed -- move the data */
        Block_t *p, *best;

        mutex_lock(&malloc_lock);

        for (p = ptrs_root, best = NULL; p;) {
            if (p->ptr > (char *) ptr)
                p = p->l_ptrs;
            else {
                best = p;
                p = p->r_ptrs;
            }
        }

        if (!best || !best->used || best->ptr != (char *) ptr) {
            if (ptr) {
                Hunk_t *h;

                h = (Hunk_t *) PAGE_DOWNALIGNP(ptr);
                if (h->id == HUNK_ID) { /* hunk-allocated block */
                    mutex_unlock(&malloc_lock);
                    if ((size >= HUNK_THRESHOLD && ALIGN(size) == h->size)
                        || size == h->size)
                        return ptr;     /* same slot size -- nothing to do */
                    if ((tmp = malloc(size)) == NULL)
                        return NULL;
                    mutex_lock(&malloc_lock);
                    memcpy(tmp, ptr, ((size < h->size) ? size : h->size));
                    __hunk_free(ptr);
                    mutex_unlock(&malloc_lock);
                    return tmp;
                }
            }
            mutex_unlock(&malloc_lock);
            return malloc(size);        /* unknown pointer -- behave like malloc */
        }

        mutex_unlock(&malloc_lock);

        /* copy the whole block */
        if ((tmp = malloc(size)) == NULL)
            return NULL;
        memcpy(tmp, ptr, ((size < best->size) ? size : best->size));

        mutex_lock(&malloc_lock);
        __bl_free(best);
        mutex_unlock(&malloc_lock);
    }
    return tmp;
}
#endif /* L_realloc */
#ifdef L_calloc
void *calloc(size_t unit, size_t quantity)
{
    void *p;

    /* reject requests whose byte count unit*quantity would overflow size_t
       (the original multiplied unchecked) */
    if (quantity && unit > ((size_t) -1) / quantity)
        return NULL;
    unit *= quantity;
    if ((p = malloc(unit)) == NULL)
        return NULL;
    memset(p, 0, unit);
    return p;
}
#endif /* L_calloc */