/* alloc.c
 *
 * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>
 *
 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 */
/*
 * Parts of the memalign code were stolen from malloc-930716.
 */
#include <features.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <malloc.h>

/* Experimentally off - libc_hidden_proto(memcpy) */
/*libc_hidden_proto(memset)*/
libc_hidden_proto(mmap)
libc_hidden_proto(munmap)
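
/*
 * Descriptive note: every allocation in this file is its own anonymous
 * mmap() region.  malloc() maps size + sizeof(size_t) bytes, records the
 * requested size in the leading size_t word, and returns a pointer just
 * past that header; free() steps back over the header to recover the
 * length and munmap()s the whole region.
 */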
#ifdef L_malloc
void *malloc(size_t size)
{
	void *result;

	if (unlikely(size == 0)) {
#if defined(__MALLOC_GLIBC_COMPAT__)
		size++;
#else
		/* Some programs will call malloc(0).  Let's be strict and return NULL. */
		__set_errno(ENOMEM);
		return NULL;
#endif
	}

#ifdef __ARCH_USE_MMU__
# define MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#else
# define MMAP_FLAGS MAP_SHARED | MAP_ANONYMOUS
#endif
	result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE,
			MMAP_FLAGS, 0, 0);
	if (result == MAP_FAILED)
		return 0;
	* (size_t *) result = size;
	return(result + sizeof(size_t));
}
#endif
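
/*
 * Descriptive note: calloc() only needs to guard against nmemb * lsize
 * overflowing.  It does not zero the result itself because the anonymous
 * mmap() pages handed out by malloc() above are already zero-filled,
 * which is why the memset() below is compiled out.
 */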
#ifdef L_calloc
void * calloc(size_t nmemb, size_t lsize)
{
	void *result;
	size_t size = lsize * nmemb;

	/* guard vs integer overflow, but allow nmemb
	 * to fall through and call malloc(0) */
	if (nmemb && lsize != (size / nmemb)) {
		__set_errno(ENOMEM);
		return NULL;
	}
	result = malloc(size);
#if 0
	/* Standard unix mmap using /dev/zero clears memory so calloc
	 * doesn't need to actually zero anything....
	 */
	if (result != NULL) {
		memset(result, 0, size);
	}
#endif
	return result;
}
#endif
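
/*
 * Descriptive note: realloc() does not grow a mapping in place.  It always
 * allocates a fresh block, copies min(old size, new size) bytes using the
 * length stored in the header word, and then frees the old block.
 */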
#ifdef L_realloc
void *realloc(void *ptr, size_t size)
{
	void *newptr = NULL;

	if (!ptr)
		return malloc(size);
	if (!size) {
		free(ptr);
		return malloc(0);
	}

	newptr = malloc(size);
	if (newptr) {
		size_t old_size = *((size_t *) (ptr - sizeof(size_t)));
		memcpy(newptr, ptr, (old_size < size ? old_size : size));
		free(ptr);
	}
	return newptr;
}
#endif
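
/*
 * Descriptive note: __libc_free_aligned is declared as a weak reference, so
 * the pointer test below only succeeds when the memalign code further down
 * is actually linked in; otherwise free() simply unmaps the header + data
 * region directly.
 */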
#ifdef L_free
extern int weak_function __libc_free_aligned(void *ptr);

void free(void *ptr)
{
	if (unlikely(ptr == NULL))
		return;
	if (unlikely(__libc_free_aligned != NULL)) {
		if (__libc_free_aligned(ptr))
			return;
	}
	ptr -= sizeof(size_t);
	munmap(ptr, * (size_t *) ptr + sizeof(size_t));
}
#endif
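
/*
 * Descriptive note: memalign() may hand out a pointer that is offset into a
 * larger malloc()ed block, so the alignlist below records the mapping from
 * the aligned pointer the caller sees back to the exact pointer malloc()
 * returned.  __libc_free_aligned() walks this list on behalf of free().
 */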
#ifdef L_memalign
#include <bits/uClibc_mutex.h>
__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
#define __MALLOC_LOCK	__UCLIBC_MUTEX_LOCK(__malloc_lock)
#define __MALLOC_UNLOCK	__UCLIBC_MUTEX_UNLOCK(__malloc_lock)

/* List of blocks allocated with memalign or valloc */
struct alignlist
{
	struct alignlist *next;
	__ptr_t aligned;	/* The address that memalign returned.  */
	__ptr_t exact;		/* The address that malloc returned.  */
};
struct alignlist *_aligned_blocks;

/* Return memory to the heap. */
int __libc_free_aligned(void *ptr)
{
	struct alignlist *l;

	if (ptr == NULL)
		return 0;

	__MALLOC_LOCK;
	for (l = _aligned_blocks; l != NULL; l = l->next) {
		if (l->aligned == ptr) {
			/* Mark the block as free */
			l->aligned = NULL;
			ptr = l->exact;
			ptr -= sizeof(size_t);
			munmap(ptr, * (size_t *) ptr + sizeof(size_t));
			/* Drop the lock before returning success. */
			__MALLOC_UNLOCK;
			return 1;
		}
	}
	__MALLOC_UNLOCK;
	return 0;
}
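
/*
 * Descriptive note: memalign() over-allocates by alignment - 1 bytes, then
 * rounds the returned pointer up to the next multiple of the alignment.  If
 * the pointer had to move, the (exact, aligned) pair is remembered in a free
 * alignlist slot so free() can locate the real block later.
 */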
void * memalign (size_t alignment, size_t size)
{
	void * result;
	unsigned long int adj;

	result = malloc (size + alignment - 1);
	if (result == NULL)
		return NULL;

	adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment;
	if (adj != 0) {
		struct alignlist *l;
		__MALLOC_LOCK;
		for (l = _aligned_blocks; l != NULL; l = l->next)
			if (l->aligned == NULL)
				/* This slot is free.  Use it.  */
				break;
		if (l == NULL) {
			l = (struct alignlist *) malloc (sizeof (struct alignlist));
			if (l == NULL) {
				free(result);
				result = NULL;
				goto DONE;
			}
			l->next = _aligned_blocks;
			_aligned_blocks = l;
		}
		l->exact = result;
		result = l->aligned = (char *) result + alignment - adj;
DONE:
		__MALLOC_UNLOCK;
	}
	return result;
}
#endif
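
/*
 * Illustrative usage sketch (not part of the original file): a caller might do
 *
 *	void *buf = memalign(64, 256);	// 256 bytes, 64-byte aligned
 *	...
 *	free(buf);			// routed through __libc_free_aligned()
 *
 * which exercises the alignlist bookkeeping above.
 */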