/* alloc.c
 *
 * Written by Erik Andersen <andersee@debian.org>
 * LGPLv2
 *
 * Parts of the memalign code were stolen from malloc-930716.
 */
  8. #define _GNU_SOURCE
  9. #include <features.h>
  10. #include <unistd.h>
  11. #include <stdio.h>
  12. #include <stdlib.h>
  13. #include <string.h>
  14. #include <unistd.h>
  15. #include <errno.h>
  16. #include <sys/mman.h>
  17. #ifdef L_malloc
  18. void *malloc(size_t size)
  19. {
  20. void *result;
  21. if (unlikely(size == 0)) {
  22. #if defined(__MALLOC_GLIBC_COMPAT__)
  23. size++;
  24. #else
  25. /* Some programs will call malloc (0). Lets be strict and return NULL */
  26. return 0;
  27. #endif
  28. }
  29. #ifdef __ARCH_HAS_MMU__
  30. result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE,
  31. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  32. if (result == MAP_FAILED)
  33. return 0;
  34. * (size_t *) result = size;
  35. return(result + sizeof(size_t));
  36. #else
  37. result = mmap((void *) 0, size, PROT_READ | PROT_WRITE,
  38. MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  39. if (result == MAP_FAILED)
  40. return 0;
  41. return(result);
  42. #endif
  43. }
  44. #endif
  45. #ifdef L_calloc
  46. void * calloc(size_t nmemb, size_t lsize)
  47. {
  48. void *result;
  49. size_t size=lsize * nmemb;
  50. /* guard vs integer overflow, but allow nmemb
  51. * to fall through and call malloc(0) */
  52. if (nmemb && lsize != (size / nmemb)) {
  53. __set_errno(ENOMEM);
  54. return NULL;
  55. }
  56. result=malloc(size);
  57. #if 0
  58. /* Standard unix mmap using /dev/zero clears memory so calloc
  59. * doesn't need to actually zero anything....
  60. */
  61. if (result != NULL) {
  62. memset(result, 0, size);
  63. }
  64. #endif
  65. return result;
  66. }
  67. #endif
  68. #ifdef L_realloc
  69. void *realloc(void *ptr, size_t size)
  70. {
  71. void *newptr = NULL;
  72. if (!ptr)
  73. return malloc(size);
  74. if (!size) {
  75. free(ptr);
  76. return malloc(0);
  77. }
  78. newptr = malloc(size);
  79. if (newptr) {
  80. memcpy(newptr, ptr,
  81. #ifdef __ARCH_HAS_MMU__
  82. *((size_t *) (ptr - sizeof(size_t)))
  83. #else
  84. size
  85. #endif
  86. );
  87. free(ptr);
  88. }
  89. return newptr;
  90. }
  91. #endif
  92. #ifdef L_free
  93. extern int weak_function __libc_free_aligned(void *ptr);
  94. void free(void *ptr)
  95. {
  96. if (ptr == NULL)
  97. return;
  98. if (unlikely(__libc_free_aligned!=NULL)) {
  99. if (__libc_free_aligned(ptr)) {
  100. return;
  101. }
  102. }
  103. #ifdef __ARCH_HAS_MMU__
  104. ptr -= sizeof(size_t);
  105. munmap(ptr, * (size_t *) ptr + sizeof(size_t));
  106. #else
  107. munmap(ptr, 0);
  108. #endif
  109. }
  110. #endif
  111. #ifdef L_memalign
  112. #ifdef __UCLIBC_HAS_THREADS__
  113. #include <pthread.h>
  114. pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  115. # define LOCK __pthread_mutex_lock(&__malloc_lock)
  116. # define UNLOCK __pthread_mutex_unlock(&__malloc_lock);
  117. #else
  118. # define LOCK
  119. # define UNLOCK
  120. #endif
/* List of blocks allocated with memalign or valloc.  Each entry maps the
 * aligned pointer handed to the caller back to the raw pointer malloc()
 * produced, so free() can release the correct region.  Entries with
 * aligned == NULL are retired and may be reused by memalign(). */
struct alignlist
{
	struct alignlist *next;
	__ptr_t aligned;	/* The address that memaligned returned. */
	__ptr_t exact;		/* The address that malloc returned. */
};
/* Head of the list; accessed under LOCK/UNLOCK when threads are enabled. */
struct alignlist *_aligned_blocks;
  129. /* Return memory to the heap. */
  130. int __libc_free_aligned(void *ptr)
  131. {
  132. struct alignlist *l;
  133. if (ptr == NULL)
  134. return 0;
  135. LOCK;
  136. for (l = _aligned_blocks; l != NULL; l = l->next) {
  137. if (l->aligned == ptr) {
  138. /* Mark the block as free */
  139. l->aligned = NULL;
  140. ptr = l->exact;
  141. #ifdef __ARCH_HAS_MMU__
  142. ptr -= sizeof(size_t);
  143. munmap(ptr, * (size_t *) ptr + sizeof(size_t));
  144. #else
  145. munmap(ptr, 0);
  146. #endif
  147. return 1;
  148. }
  149. }
  150. UNLOCK;
  151. return 0;
  152. }
  153. void * memalign (size_t alignment, size_t size)
  154. {
  155. void * result;
  156. unsigned long int adj;
  157. result = malloc (size + alignment - 1);
  158. if (result == NULL)
  159. return NULL;
  160. adj = (unsigned long int) ((unsigned long int) ((char *) result -
  161. (char *) NULL)) % alignment;
  162. if (adj != 0)
  163. {
  164. struct alignlist *l;
  165. LOCK;
  166. for (l = _aligned_blocks; l != NULL; l = l->next)
  167. if (l->aligned == NULL)
  168. /* This slot is free. Use it. */
  169. break;
  170. if (l == NULL)
  171. {
  172. l = (struct alignlist *) malloc (sizeof (struct alignlist));
  173. if (l == NULL) {
  174. free(result);
  175. UNLOCK;
  176. return NULL;
  177. }
  178. l->next = _aligned_blocks;
  179. _aligned_blocks = l;
  180. }
  181. l->exact = result;
  182. result = l->aligned = (char *) result + alignment - adj;
  183. UNLOCK;
  184. }
  185. return result;
  186. }
  187. #endif