allocatestack.c

/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <tls.h>
#include <lowlevellock.h>
#include <link.h>
#include <bits/kernel-features.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
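
/* A sketch of how these macros fit together at the call site (the real
   caller lives in pthread_create.c; this is for illustration only):

       STACK_VARIABLES;
       struct pthread *pd;
       int err = ALLOCATE_STACK (attr, &pd);
       if (err == 0)
         err = create_thread (pd, attr, STACK_VARIABLES_ARGS);

   The single-stack and separate-register-stack variants thus share one
   call site; only the macro definitions above differ.  */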
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif

/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if defined(TLS_TCB_AT_TP)
# define TLS_TPADJ(pd) (pd)
#elif defined(TLS_DTV_AT_TP)
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
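
/* Illustration (not part of the original source): the two TLS models
   differ in where the thread pointer (TP) lands relative to the
   struct pthread:

     TLS_TCB_AT_TP:  [ stack ... | static TLS | TCB == struct pthread ]
                                               ^ TP = pd
     TLS_DTV_AT_TP:  [ struct pthread | TCB | static TLS | ... ]
                                      ^ TP = pd + TLS_PRE_TCB_SIZE

   TLS_TPADJ therefore converts a struct pthread pointer into whatever
   the TLS support code expects as the thread pointer.  */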
/* Cache handling for not-yet freed stacks.  */

/* Maximum size of the cache, in bytes.  The GNU libc default is 40 MiB;
   embedded systems don't have enough RAM for big dirty stack caches,
   so reduce it to 16 MiB.  4 MiB does not work: e.g. tst-kill4
   segfaults.  */
static size_t stack_cache_maxsize = 16 * 1024 * 1024;
static size_t stack_cache_actsize;

/* Lock protecting the cache size variables and the stack lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can repair the half-done work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif
/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)
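
/* Note: TID is cleared to zero by the kernel (via the
   CLONE_CHILD_CLEARTID mechanism) when the thread exits, so a
   non-positive TID reliably marks a stack whose owner is gone.  */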
static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
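
/* The low bit of IN_FLIGHT_STACK encodes the operation in progress:
   set for an add, clear for a delete.  If fork() interrupts one of
   the two functions above, __reclaim_stacks below uses this encoding
   to either complete the insertion or replay the removal in the
   child.  */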
/* We create a doubly-linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */

/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
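
/* Why the DTV is wiped before reuse: between the time this stack was
   cached and now, dlopen()/dlclose() may have changed the set of TLS
   modules, so cached DTV entries could be stale.  Clearing the vector
   and calling _dl_allocate_tls_init rebuilds the static TLS image for
   the new owner.  */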
/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}

/* Add a stack frame which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    __free_stacks (stack_cache_maxsize);
}
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif defined _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif defined _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
#ifdef __ARCH_USE_MMU__
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;
#endif

  return 0;
}
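
/* For the common downward-growing case the region re-protected above
   is everything except the guard page(s) at the low end of the block:

     stackblock                                  stackblock + size
     | guard (PROT_NONE) | usable stack, now made RWX ...        |

   The guard is deliberately left out so it keeps faulting on stack
   overflow even after the stacks are made executable.  */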
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  void *stacktop;

  assert (attr != NULL);
  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  size = attr->stacksize ?: __default_stacksize;

  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if defined(TLS_TCB_AT_TP)
      adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif defined(TLS_DTV_AT_TP)
      adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if defined(TLS_TCB_AT_TP)
      pd = (struct pthread *) ((uintptr_t) attr->stackaddr
                               - TLS_TCB_SIZE - adj);
#elif defined(TLS_DTV_AT_TP)
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return EAGAIN;
        }

      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem = 0;
      const int prot = (PROT_READ | PROT_WRITE);

#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         possibly the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if defined MULTI_PAGE_ALIASING && MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            {
              if (errno == ENOMEM)
                __set_errno (EAGAIN);

              return errno;
            }

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif

          /* Place the thread descriptor at the end of the stack.  */
#if defined(TLS_TCB_AT_TP)
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif defined(TLS_DTV_AT_TP)
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return EAGAIN;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);

          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif defined _STACK_GROWS_DOWN
          char *guard = mem;
#elif defined _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
#ifdef __ARCH_USE_MMU__
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
              int err;
            mprot_error:
              err = errno;

              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better not use it
                 anymore.  We also ignore possible errors here; there
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return err;
            }
#endif

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

#ifdef __ARCH_USE_MMU__
          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif defined _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif defined _STACK_GROWS_UP
          if (mprotect ((char *) (((uintptr_t) pd - pd->guardsize)
                                  & ~pagesize_m1),
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if defined(TLS_TCB_AT_TP)
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
#elif defined(TLS_DTV_AT_TP)
  stacktop = (char *) (pd - 1);
#endif

#ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
#elif defined _STACK_GROWS_DOWN
  *stack = stacktop;
#elif defined _STACK_GROWS_UP
  *stack = pd->stackblock;
  assert (*stack > 0);
#endif

  return 0;
}
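
/* A worked example (illustrative numbers, not from the source): on a
   TLS_TCB_AT_TP target with 4 KiB pages, a default 8 MiB stack and a
   one-page guard, the mmap'ed block ends up laid out as

     mem                                             mem + size
     | guard page | usable stack ... | static TLS | struct pthread |

   PD sits at the top of the block, STACKTOP lies just below the
   static TLS area, and the new thread starts with its stack pointer
   near STACKTOP, growing down toward the guard page.  */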
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = EPERM;
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The calling thread is the only one running.
     But we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *)(uintptr_t)(in_flight_stack & ~UINTMAX_C (1));

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this
             case we only need to check the beginning of these lists.  */
          int check_list (list_t *l)
          {
            if (l->next->prev != l)
              {
                assert (l->next->prev == elem);

                elem->next = l->next;
                elem->prev = l;
                l->next = elem;

                return 1;
              }

            return 0;
          }

          if (check_list (&stack_used) == 0)
            (void) check_list (&stack_cache);
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              size_t cnt;
              for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists it
     goes on is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}
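
/* Note that the loop above only adds the dead stacks' sizes to
   stack_cache_actsize; nothing is unmapped here.  If the cache is now
   over stack_cache_maxsize, the excess is trimmed the next time
   queue_stack calls __free_stacks.  */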
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}

static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
}
static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL (tgkill, err, 3, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must have not started yet or else it
     has already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }
  else
    return 0;
}
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permission to send SIGSETXID to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
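
/* Background on the dance above: on Linux the set*id system calls
   change credentials only for the calling thread, while POSIX
   requires the change to apply to the whole process.  NPTL therefore
   sends SIGSETXID to every other thread; each one reruns the same
   syscall from its signal handler and decrements cmdp->cntr, which
   this function waits on via the futex.  The SETXID mark keeps a
   signalled thread from exiting before its handler has run.  */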
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
# if defined(TLS_TCB_AT_TP)
  void *dest = (char *) curp - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
  memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}