/*
 * libc/stdlib/malloc/malloc.c -- malloc function
 *
 * Copyright (C) 2002,03 NEC Electronics Corporation
 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License.  See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "malloc.h"
#include "heap.h"

/* The malloc heap.  We provide a bit of initial static space so that
   programs can do a little mallocing without mmaping in more space.  */
HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
#ifdef HEAP_USE_LOCKING
__UCLIBC_MUTEX_INIT(__malloc_heap_lock,PTHREAD_MUTEX_INITIALIZER);
#endif

#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
/* A lock protecting our use of sbrk.  */
__UCLIBC_MUTEX(__malloc_sbrk_lock);
#endif /* MALLOC_USE_LOCKING && MALLOC_USE_SBRK */

#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
/* A list of all malloc_mmb structures describing blocks that
   malloc has mmapped, ordered by the block address.  */
struct malloc_mmb *__malloc_mmapped_blocks = 0;

/* A heap used for allocating malloc_mmb structures.  We could allocate
   them from the main heap, but that tends to cause heap fragmentation in
   annoying ways.  */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
#ifdef HEAP_USE_LOCKING
__UCLIBC_MUTEX_INIT(__malloc_mmb_heap_lock,PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
#endif
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

#ifdef HEAP_USE_LOCKING
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap, lck)
#else
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap)
#endif
static void *
__malloc_from_heap (size_t size, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING
                    , __UCLIBC_MUTEX_TYPE *heap_lock
#endif
                   )
{
  void *mem;

  MALLOC_DEBUG (1, "malloc: %d bytes", size);

  /* Include extra space to record the size of the allocated block.  */
  size += MALLOC_HEADER_SIZE;

  __heap_lock (heap_lock);

  /* First try to get memory that's already in our heap.  */
  mem = __heap_alloc (heap, &size);

  __heap_unlock (heap_lock);

  if (unlikely (! mem))
    /* We couldn't allocate from the heap, so grab some more
       from the system, add it to the heap, and try again.  */
    {
      /* If we're trying to allocate a block bigger than the default
         MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it.  */
      void *block;
      size_t block_size
        = (size < MALLOC_HEAP_EXTEND_SIZE
           ? MALLOC_HEAP_EXTEND_SIZE
           : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
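
      /* For instance, assuming MALLOC_HEAP_EXTEND_SIZE is one 4 KB page
         (the real values come from malloc.h), a 100-byte request still grows
         the heap by a full page, while a 20 KB request is instead rounded up
         to the next whole number of pages.  */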

      /* Allocate the new heap block.  */
#ifdef MALLOC_USE_SBRK
      __malloc_lock_sbrk ();

      /* Use sbrk if we can, as it's faster than mmap, and guarantees
         contiguous allocation.  */
      block = sbrk (block_size);
      if (likely (block != (void *)-1))
        {
          /* Because sbrk can return results of arbitrary
             alignment, align the result to a MALLOC_ALIGNMENT boundary.  */
          long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
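          /* Illustration (alignment value assumed, not taken from this
             file): with MALLOC_ALIGNMENT == 8, an sbrk result of 0x10005
             gives aligned_block == 0x10008; the extra sbrk below then
             advances the break by those 3 bytes so later extensions start
             out aligned.  */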
          if (block != (void *)aligned_block)
            /* Have to adjust.  We should only have to actually do this
               the first time (after which we will have aligned the brk
               correctly).  */
            {
              /* Move the brk to reflect the alignment; our next allocation
                 should start on exactly the right alignment.  */
              sbrk (aligned_block - (long)block);
              block = (void *)aligned_block;
            }
        }
      __malloc_unlock_sbrk ();
#else /* !MALLOC_USE_SBRK */
      /* Otherwise, use mmap.  */
#ifdef __ARCH_USE_MMU__
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#else
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0);
#endif
#endif /* MALLOC_USE_SBRK */

      if (likely (block != (void *)-1))
        {
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
#endif

          MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
                        (long)block, (long)block + block_size, block_size);

          /* Get back the heap lock.  */
          __heap_lock (heap_lock);

          /* Put BLOCK into the heap.  */
          __heap_free (heap, block, block_size);

          MALLOC_DEBUG_INDENT (-1);

          /* Try again to allocate.  */
          mem = __heap_alloc (heap, &size);

#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          /* Insert a record of BLOCK in sorted order into the
             __malloc_mmapped_blocks list.  */
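          /* free() later walks this list to find the exact block to munmap,
             since partially unmapping a region is not reliable on targets
             with a broken munmap.  */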
          new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);

          for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
               mmb;
               prev_mmb = mmb, mmb = mmb->next)
            if (block < mmb->mem)
              break;

          new_mmb->next = mmb;
          new_mmb->mem = block;
          new_mmb->size = block_size;

          if (prev_mmb)
            prev_mmb->next = new_mmb;
          else
            __malloc_mmapped_blocks = new_mmb;

          MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
                            (unsigned)new_mmb,
                            (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

          __heap_unlock (heap_lock);
        }
    }

  if (likely (mem))
    /* Record the size of the block and get the user address.  */
    {
      mem = MALLOC_SETUP (mem, size);

      MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
                    (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
    }
  else
    MALLOC_DEBUG (-1, "malloc: returning 0");

  return mem;
}

void *
malloc (size_t size)
{
  void *mem;
#ifdef MALLOC_DEBUGGING
  static smallint debugging_initialized;
  if (! debugging_initialized)
    {
      debugging_initialized = 1;
      __malloc_debug_init ();
    }
  if (__malloc_check)
    __heap_check (__malloc_heap, "malloc");
#endif

#ifdef __MALLOC_GLIBC_COMPAT__
  if (unlikely (size == 0))
    size++;
#else
  /* Some programs will call malloc (0).  Let's be strict and return NULL.  */
  if (unlikely (size == 0))
    goto oom;
#endif

  /* Check if they are doing something dumb like malloc(-1).  */
  if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
    goto oom;
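
  /* (The cap above also leaves headroom for the MALLOC_HEADER_SIZE that
     __malloc_from_heap adds to the request, so that addition cannot wrap
     around size_t.)  */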

  mem = malloc_from_heap (size, &__malloc_heap, &__malloc_heap_lock);
  if (unlikely (!mem))
    {
    oom:
      __set_errno (ENOMEM);
      return 0;
    }

  return mem;
}
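
#if 0
/* Usage sketch, not compiled into the library.  The exact results depend on
   how uClibc was configured (__MALLOC_GLIBC_COMPAT__ in particular), so the
   comments below describe assumptions, not guarantees.  */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

int main (void)
{
  void *p = malloc (100);           /* small request: may be satisfied from
                                       the static initial_fa area before any
                                       sbrk/mmap happens */
  void *z = malloc (0);             /* NULL with errno == ENOMEM here, or a
                                       minimal block if built with
                                       __MALLOC_GLIBC_COMPAT__ */
  void *big = malloc ((size_t) -1); /* caught by the malloc(-1) check:
                                       NULL with errno == ENOMEM */

  printf ("p=%p z=%p big=%p errno=%d\n", p, z, big, errno);
  free (p);
  return 0;
}
#endif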