free.c

/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
        ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include "malloc.h"

/* ------------------------- __malloc_trim -------------------------

   __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
   back to the system (via negative arguments to sbrk) if there is unused
   memory at the `high' end of the malloc pool.  It is called automatically by
   free() when top space exceeds the trim threshold.  It is also called by the
   public malloc_trim routine.  It returns 1 if it actually released any
   memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long  top_size;        /* Amount of top-most memory */
    long  extra;           /* Amount to release */
    long  released;        /* Amount actually released */
    char* current_brk;     /* address returned by pre-check sbrk call */
    char* new_brk;         /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);

    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
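    /* Worked example (illustrative, not from the original source): with a
       4096-byte page, pad == 0, top_size == 20480 (five pages) and MINSIZE
       well under a page, the rounded-up page count is 5; keeping one page
       leaves extra == 4 pages == 16384 bytes available to hand back. */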
    if (extra > 0) {

        /*
          Only proceed if end of memory is where we last set it.
          This avoids problems if there were foreign sbrk calls.
        */
        current_brk = (char*)(MORECORE(0));
        if (current_brk == (char*)(av->top) + top_size) {

            /*
              Attempt to release memory.  We ignore MORECORE return value,
              and instead call again to find out where new end of memory is.
              This avoids problems if first call releases less than we asked,
              or if failure somehow altered brk value.  (We could still
              encounter problems if it altered brk in some very bad way,
              but the only thing we can do is adjust anyway, which will cause
              some downstream failure.)
            */
            MORECORE(-extra);
            new_brk = (char*)(MORECORE(0));

            if (new_brk != (char*)MORECORE_FAILURE) {
                released = (long)(current_brk - new_brk);

                if (released != 0) {
                    /* Success. Adjust top. */
                    av->sbrked_mem -= released;
                    set_head(av->top, (top_size - released) | PREV_INUSE);
                    check_malloc_state();
                    return 1;
                }
            }
        }
    }
    return 0;
}
/* ------------------------- malloc_trim -------------------------

   malloc_trim(size_t pad);

   If possible, gives memory back to the system (via negative
   arguments to sbrk) if there is unused memory at the `high' end of
   the malloc pool.  You can call this after freeing large blocks of
   memory to potentially reduce the system-level memory requirements
   of a program.  However, it cannot guarantee to reduce memory.  Under
   some allocation patterns, some large free blocks of memory will be
   locked between two used chunks, so they cannot be given back to
   the system.

   The `pad' argument to malloc_trim represents the amount of free
   trailing space to leave untrimmed.  If this argument is zero,
   only the minimum amount of memory to maintain internal data
   structures will be left (one page or less).  Non-zero arguments
   can be supplied to maintain enough trailing space to service
   future expected allocations without having to re-obtain memory
   from the system.

   malloc_trim returns 1 if it actually released any memory, else 0.
   On systems that do not support "negative sbrks", it will always
   return 0.
*/
int malloc_trim(size_t pad)
{
    int r;
    __MALLOC_LOCK;
    mstate av = get_malloc_state();
    __malloc_consolidate(av);
    r = __malloc_trim(pad, av);
    __MALLOC_UNLOCK;
    return r;
}
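
/*
  Usage sketch (illustrative only, not part of the library source): a
  program that has just freed a large block can ask the allocator to hand
  unused top-of-heap memory back to the system.  This assumes malloc_trim()
  is exposed to applications via <malloc.h>, as it is on glibc and uClibc:

      char *buf = malloc(8 * 1024 * 1024);
      ... use buf ...
      free(buf);
      if (malloc_trim(0))
          puts("released unused heap pages back to the kernel");
*/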
/*
  Initialize a malloc_state struct.

  This is called only from within __malloc_consolidate, which needs
  to be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all.  (Inlining it in __malloc_consolidate is fine though.)
*/
static void malloc_init_state(mstate av)
{
    int     i;
    mbinptr bin;

    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
        bin = bin_at(av,i);
        bin->fd = bin->bk = bin;
    }

    av->top_pad        = DEFAULT_TOP_PAD;
    av->n_mmaps_max    = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif

    set_max_fast(av, DEFAULT_MXFAST);

    av->top      = initial_top(av);
    av->pagesize = malloc_getpagesize;
}
/* ----------------------------------------------------------------------
 *
 * PUBLIC STUFF
 *
 * ----------------------------------------------------------------------*/


/* ------------------------- __malloc_consolidate -------------------------

   __malloc_consolidate is a specialized version of free() that tears
   down chunks held in fastbins.  Free itself cannot be used for this
   purpose since, among other things, it might place chunks back onto
   fastbins.  So, instead, we need to use a minor variant of the same
   code.

   Also, because this routine needs to be called the first time through
   malloc anyway, it turns out to be the perfect place to trigger
   initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;             /* current fastbin being consolidated */
    mfastbinptr*    maxfb;          /* last fastbin (for loop control) */
    mchunkptr       p;              /* current chunk being consolidated */
    mchunkptr       nextp;          /* next chunk to consolidate */
    mchunkptr       unsorted_bin;   /* bin header */
    mchunkptr       first_unsorted; /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t          size;
    size_t          nextsize;
    size_t          prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
      If max_fast is 0, we know that av hasn't
      yet been initialized, in which case do so below
    */
    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
          Remove each chunk from fast bin and consolidate it, placing it
          then in unsorted bin.  Among other reasons for doing this,
          placing in unsorted bin avoids needing to calculate actual bins
          until malloc is sure that chunks aren't immediately going to be
          reused anyway.
        */
        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;

                do {
                    CHECK_PTR(p);
                    check_inuse_chunk(p);
                    nextp = REVEAL_PTR(&p->fd, p->fd);

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    if (nextchunk != av->top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }
                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        av->top = p;
                    }

                } while ( (p = nextp) != 0);
            }
        } while (fb++ != maxfb);
    }
    else {
        malloc_init_state(av);
        check_malloc_state();
    }
}
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t          size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t          nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t          prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
        return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
      If eligible, place chunk on a fastbin so it can be found
      and used quickly in malloc.
    */
    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
        /* If TRIM_FASTBINS set, don't place chunks
           bordering top into fastbins */
        && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

        set_fastchunks(av);
        fb = &(av->fastbins[fastbin_index(size)]);
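        /* Note (descriptive comment, not in the original): the fastbin
           forward pointer is stored mangled with PROTECT_PTR and recovered
           with REVEAL_PTR (see __malloc_consolidate above); this
           safe-linking scheme hardens the singly linked fastbin list
           against corrupted link pointers. */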
        p->fd = PROTECT_PTR(&p->fd, *fb);
        *fb = p;
    }
    /*
      Consolidate other non-mmapped chunks as they arrive.
    */
    else if (!chunk_is_mmapped(p)) {
        set_anychunks(av);

        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        /* consolidate backward */
        if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
        }

        if (nextchunk != av->top) {
            /* get and clear inuse bit */
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            /* consolidate forward */
            if (!nextinuse) {
                unlink(nextchunk, bck, fwd);
                size += nextsize;
            }

            /*
              Place the chunk in unsorted chunk list. Chunks are
              not placed into regular bins until after they have
              been given one chance to be used in malloc.
            */
            bck = unsorted_chunks(av);
            fwd = bck->fd;
            p->bk = bck;
            p->fd = fwd;
            bck->fd = p;
            fwd->bk = p;

            set_head(p, size | PREV_INUSE);
            set_foot(p, size);

            check_free_chunk(p);
        }

        /*
          If the chunk borders the current high end of memory,
          consolidate into top
        */
        else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
            check_chunk(p);
        }

        /*
          If freeing a large space, consolidate possibly-surrounding
          chunks.  Then, if the total unused topmost memory exceeds trim
          threshold, ask malloc_trim to reduce top.

          Unless max_fast is 0, we don't know if there are fastbins
          bordering top, so we cannot tell for sure whether threshold
          has been reached unless fastbins are consolidated.  But we
          don't want to consolidate on each free.  As a compromise,
          consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
          is reached.
        */
        if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
            if (have_fastchunks(av))
                __malloc_consolidate(av);

            if ((unsigned long)(chunksize(av->top)) >=
                    (unsigned long)(av->trim_threshold))
                __malloc_trim(av->top_pad, av);
        }
    }
    /*
      If the chunk was allocated via mmap, release via munmap()
      Note that if HAVE_MMAP is false but chunk_is_mmapped is
      true, then user must have overwritten memory.  There's nothing
      we can do to catch this error unless DEBUG is set, in which case
      check_inuse_chunk (above) will have triggered error.
    */
    else {
        size_t offset = p->prev_size;
        av->n_mmaps--;
        av->mmapped_mem -= (size + offset);
        munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}
/* glibc compatibility */
weak_alias(free, __libc_free)