/* Definition for thread-local data handling.  nptl/x86_64 version.
   Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _TLS_H
#define _TLS_H 1

#ifndef __ASSEMBLER__
# include <asm/prctl.h>	/* For ARCH_SET_FS.  */
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
# include <stdlib.h>
# include <sysdep.h>
# include <bits/kernel-features.h>
# include <bits/wordsize.h>
# include <xmmintrin.h>


/* Type for the dtv.  */
typedef union dtv
{
  size_t counter;
  struct
  {
    void *val;
    bool is_static;
  } pointer;
} dtv_t;
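
/* Illustrative note, not taken from the original header: the dtv is an
   array of dtv_t.  By the usual NPTL convention dtv[-1].counter holds the
   number of entries and dtv[0].counter the generation count, while dtv[1]
   and up point at the per-module TLS blocks.  A hypothetical access sketch
   (tls_modid is a placeholder name):  */
#if 0
  dtv_t *dtv = GET_DTV (THREAD_SELF);           /* see GET_DTV below */
  void *block = dtv[tls_modid].pointer.val;     /* TLS block of that module */
#endif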

typedef struct
{
  void *tcb;		/* Pointer to the TCB.  Not necessarily the
			   thread descriptor used by libpthread.  */
  dtv_t *dtv;
  void *self;		/* Pointer to the thread descriptor.  */
  int multiple_threads;
  int gscope_flag;
  uintptr_t sysinfo;
  uintptr_t stack_guard;
  uintptr_t pointer_guard;
  unsigned long int vgetcpu_cache[2];
# ifndef __ASSUME_PRIVATE_FUTEX
  int private_futex;
# else
  int __unused1;
# endif
# if __WORDSIZE == 64
  int rtld_must_xmm_save;
# endif
  /* Reservation of some values for the TM ABI.  */
  void *__private_tm[5];
# if __WORDSIZE == 64
  long int __unused2;
  /* Have space for the post-AVX register size.  */
  __m128 rtld_savespace_sse[8][4];

  void *__padding[8];
# endif
} tcbhead_t;

#else /* __ASSEMBLER__ */
# include <tcb-offsets.h>
#endif


/* We require TLS support in the tools.  */
#define HAVE_TLS_SUPPORT 1
#define HAVE___THREAD 1
#define HAVE_TLS_MODEL_ATTRIBUTE 1

/* Signal that TLS support is available.  */
#define USE_TLS 1

/* Alignment requirement for the stack.  */
#define STACK_ALIGN 16

#ifndef __ASSEMBLER__
/* Get system call information.  */
# include <sysdep.h>

/* Get the thread descriptor definition.  */
# include <descr.h>

#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX	/* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

/* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
   because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
   struct pthread even when not linked with -lpthread.  */
# define TLS_INIT_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the initial TCB.  */
# define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)

/* This is the size of the TCB.  */
# define TLS_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the TCB.  */
//# define TLS_TCB_ALIGN __alignof__ (struct pthread)
// Normally the above would be correct, but we have to store post-AVX
// vector registers in the TCB and we want that storage to be aligned.
// Unfortunately there isn't yet a type for these values and hence no
// 32-byte alignment requirement.  Make the alignment explicit, for now.
# define TLS_TCB_ALIGN 32

/* The TCB can have any size and the memory following the address the
   thread pointer points to is unspecified.  Allocate the TCB there.  */
# define TLS_TCB_AT_TP 1

/* Install the dtv pointer.  The pointer passed is to the element with
   index -1 which contains the length.  */
# define INSTALL_DTV(descr, dtvp) \
  ((tcbhead_t *) (descr))->dtv = (dtvp) + 1

/* Install new dtv for current thread.  */
# define INSTALL_NEW_DTV(dtvp) \
  ({ struct pthread *__pd; \
     THREAD_SETMEM (__pd, header.dtv, (dtvp)); })

/* Return dtv of given thread descriptor.  */
# define GET_DTV(descr) \
  (((tcbhead_t *) (descr))->dtv)

/* Macros to load from and store into segment registers.  */
# define TLS_GET_FS() \
  ({ int __seg; __asm__ ("movl %%fs, %0" : "=q" (__seg)); __seg; })
# define TLS_SET_FS(val) \
  __asm__ ("movl %0, %%fs" :: "q" (val))

/* Code to initially set up the thread pointer.  This might need
   special attention since 'errno' is not yet available and if the
   operation can cause a failure 'errno' must not be touched.

   We have to make the syscall for both uses of the macro since the
   address might be (and probably is) different.  */
# define TLS_INIT_TP(thrdescr, secondcall) \
  ({ void *_thrdescr = (thrdescr); \
     tcbhead_t *_head = _thrdescr; \
     int _result; \
\
     _head->tcb = _thrdescr; \
     /* For now the thread descriptor is at the same address.  */ \
     _head->self = _thrdescr; \
\
     /* It is a simple syscall to set the %fs value for the thread.  */ \
     __asm__ __volatile__ ("syscall" \
			   : "=a" (_result) \
			   : "0" ((unsigned long int) __NR_arch_prctl), \
			     "D" ((unsigned long int) ARCH_SET_FS), \
			     "S" (_thrdescr) \
			   : "memory", "cc", "r11", "cx"); \
\
    _result ? "cannot set %fs base address for thread-local storage" : 0; \
  })
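
/* Illustrative usage sketch, an assumption rather than code from this file:
   early startup code typically calls TLS_INIT_TP once the TCB memory is in
   place and treats a non-null return as a fatal error message, roughly:  */
#if 0
  const char *lossage = TLS_INIT_TP (tcbp, 0);
  if (lossage != NULL)
    /* Startup cannot continue without a working thread pointer.  */
    abort ();
#endif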

/* Return the address of the dtv for the current thread.  */
# define THREAD_DTV() \
  ({ struct pthread *__pd; \
     THREAD_GETMEM (__pd, header.dtv); })

/* Return the thread descriptor for the current thread.

   The contained asm must *not* be marked __volatile__ since otherwise
   assignments like
	pthread_descr self = thread_self();
   do not get optimized away.  */
# define THREAD_SELF \
  ({ struct pthread *__self; \
     __asm__ ("movq %%fs:%c1,%q0" : "=r" (__self) \
	      : "i" (offsetof (struct pthread, header.self))); \
     __self;})
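
/* Illustrative usage sketch (assumption): THREAD_SELF yields the current
   thread's descriptor with a single %fs-relative load, so callers can
   simply write:  */
#if 0
  struct pthread *self = THREAD_SELF;
  /* self->header.self == self, per TLS_INIT_TP above.  */
#endif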

/* Magic for libthread_db to know how to do THREAD_SELF.  */
# define DB_THREAD_SELF_INCLUDE <sys/reg.h> /* For the FS constant.  */
# define DB_THREAD_SELF CONST_THREAD_AREA (64, FS)

/* Read member of the thread descriptor directly.  */
# define THREAD_GETMEM(descr, member) \
  ({ __typeof (descr->member) __value; \
     if (sizeof (__value) == 1) \
       __asm__ __volatile__ ("movb %%fs:%P2,%b0" \
			     : "=q" (__value) \
			     : "0" (0), "i" (offsetof (struct pthread, member))); \
     else if (sizeof (__value) == 4) \
       __asm__ __volatile__ ("movl %%fs:%P1,%0" \
			     : "=r" (__value) \
			     : "i" (offsetof (struct pthread, member))); \
     else \
       { \
	 if (sizeof (__value) != 8) \
	   /* There should not be any value with a size other than 1, \
	      4 or 8.  */ \
	   abort (); \
\
	 __asm__ __volatile__ ("movq %%fs:%P1,%q0" \
			       : "=r" (__value) \
			       : "i" (offsetof (struct pthread, member))); \
       } \
     __value; })

/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
# define THREAD_GETMEM_NC(descr, member, idx) \
  ({ __typeof (descr->member[0]) __value; \
     if (sizeof (__value) == 1) \
       __asm__ __volatile__ ("movb %%fs:%P2(%q3),%b0" \
			     : "=q" (__value) \
			     : "0" (0), "i" (offsetof (struct pthread, member[0])), \
			       "r" (idx)); \
     else if (sizeof (__value) == 4) \
       __asm__ __volatile__ ("movl %%fs:%P1(,%q2,4),%0" \
			     : "=r" (__value) \
			     : "i" (offsetof (struct pthread, member[0])), "r" (idx)); \
     else \
       { \
	 if (sizeof (__value) != 8) \
	   /* There should not be any value with a size other than 1, \
	      4 or 8.  */ \
	   abort (); \
\
	 __asm__ __volatile__ ("movq %%fs:%P1(,%q2,8),%q0" \
			       : "=r" (__value) \
			       : "i" (offsetof (struct pthread, member[0])), \
				 "r" (idx)); \
       } \
     __value; })
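
/* Illustrative usage sketch (assumption): the descr argument is used only
   for its type; the access itself always goes through %fs.  Reading a
   scalar member of the current thread's descriptor therefore looks like
   (the 'tid' member is assumed here):  */
#if 0
  pid_t my_tid = THREAD_GETMEM (THREAD_SELF, tid);
#endif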

/* Loading addresses of objects on x86-64 needs special treatment
   when generating PIC code.  */
#ifdef __pic__
# define IMM_MODE "nr"
#else
# define IMM_MODE "ir"
#endif

/* Set member of the thread descriptor directly.  */
# define THREAD_SETMEM(descr, member, value) \
  ({ if (sizeof (descr->member) == 1) \
       __asm__ __volatile__ ("movb %b0,%%fs:%P1" : \
			     : "iq" (value), \
			       "i" (offsetof (struct pthread, member))); \
     else if (sizeof (descr->member) == 4) \
       __asm__ __volatile__ ("movl %0,%%fs:%P1" : \
			     : IMM_MODE (value), \
			       "i" (offsetof (struct pthread, member))); \
     else \
       { \
	 if (sizeof (descr->member) != 8) \
	   /* There should not be any value with a size other than 1, \
	      4 or 8.  */ \
	   abort (); \
\
	 __asm__ __volatile__ ("movq %q0,%%fs:%P1" : \
			       : IMM_MODE ((unsigned long int) value), \
				 "i" (offsetof (struct pthread, member))); \
       }})

/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ if (sizeof (descr->member[0]) == 1) \
       __asm__ __volatile__ ("movb %b0,%%fs:%P1(%q2)" : \
			     : "iq" (value), \
			       "i" (offsetof (struct pthread, member[0])), \
			       "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
       __asm__ __volatile__ ("movl %0,%%fs:%P1(,%q2,4)" : \
			     : IMM_MODE (value), \
			       "i" (offsetof (struct pthread, member[0])), \
			       "r" (idx)); \
     else \
       { \
	 if (sizeof (descr->member[0]) != 8) \
	   /* There should not be any value with a size other than 1, \
	      4 or 8.  */ \
	   abort (); \
\
	 __asm__ __volatile__ ("movq %q0,%%fs:%P1(,%q2,8)" : \
			       : IMM_MODE ((unsigned long int) value), \
				 "i" (offsetof (struct pthread, member[0])), \
				 "r" (idx)); \
       }})
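
/* Illustrative usage sketch (assumption): THREAD_SET_STACK_GUARD below is a
   typical caller; the equivalent direct store into the current thread's TCB
   would be:  */
#if 0
  THREAD_SETMEM (THREAD_SELF, header.stack_guard, guard_value);
#endif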

/* Atomic compare and exchange on TLS, returning old value.  */
# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
  ({ __typeof (descr->member) __ret; \
     __typeof (oldval) __old = (oldval); \
     if (sizeof (descr->member) == 4) \
       __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
			     : "=a" (__ret) \
			     : "0" (__old), "r" (newval), \
			       "i" (offsetof (struct pthread, member))); \
     else \
       /* Not necessary for other sizes at the moment.  */ \
       abort (); \
     __ret; })

/* Atomic logical and.  */
# define THREAD_ATOMIC_AND(descr, member, val) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
	      __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%fs:%P0" \
				    :: "i" (offsetof (struct pthread, member)), \
				       "ir" (val)); \
	    else \
	      /* Not necessary for other sizes at the moment.  */ \
	      abort (); })

/* Atomic set bit.  */
# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
	      __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%fs:%P0" \
				    :: "i" (offsetof (struct pthread, member)), \
				       "ir" (1 << (bit))); \
	    else \
	      /* Not necessary for other sizes at the moment.  */ \
	      abort (); })

/* Set the stack guard field in TCB head.  */
# define THREAD_SET_STACK_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
# define THREAD_COPY_STACK_GUARD(descr) \
  ((descr)->header.stack_guard \
   = THREAD_GETMEM (THREAD_SELF, header.stack_guard))

/* Set the pointer guard field in the TCB head.  */
# define THREAD_SET_POINTER_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
# define THREAD_COPY_POINTER_GUARD(descr) \
  ((descr)->header.pointer_guard \
   = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))

/* Get and set the global scope generation counter in the TCB head.  */
# define THREAD_GSCOPE_FLAG_UNUSED 0
# define THREAD_GSCOPE_FLAG_USED   1
# define THREAD_GSCOPE_FLAG_WAIT   2
# define THREAD_GSCOPE_RESET_FLAG() \
  do \
    { int __res; \
      __asm__ __volatile__ ("xchgl %0, %%fs:%P1" \
			    : "=r" (__res) \
			    : "i" (offsetof (struct pthread, header.gscope_flag)), \
			      "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
      if (__res == THREAD_GSCOPE_FLAG_WAIT) \
	lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
    } \
  while (0)
# define THREAD_GSCOPE_SET_FLAG() \
  THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
# define THREAD_GSCOPE_WAIT() \
  GL(dl_wait_lookup_done) ()
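
/* Illustrative protocol sketch (assumption): a reader of the global search
   scope brackets its lookup with the flag, and code that wants to modify
   the scope waits for all readers via THREAD_GSCOPE_WAIT, roughly:  */
#if 0
  THREAD_GSCOPE_SET_FLAG ();
  /* ... look up symbols in the global scope ... */
  THREAD_GSCOPE_RESET_FLAG ();
#endif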

# ifdef SHARED
/* Defined in dl-trampoline.S.  */
extern void _dl_x86_64_save_sse (void);
extern void _dl_x86_64_restore_sse (void);

# define RTLD_CHECK_FOREIGN_CALL \
  (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) != 0)

/* NB: Don't use the xchg operation because that would imply a lock
   prefix which is expensive and unnecessary.  The cache line is also
   not contested at all.  */
# define RTLD_ENABLE_FOREIGN_CALL \
  int old_rtld_must_xmm_save = THREAD_GETMEM (THREAD_SELF, \
					      header.rtld_must_xmm_save); \
  THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 1)

# define RTLD_PREPARE_FOREIGN_CALL \
  do if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save)) \
    { \
      _dl_x86_64_save_sse (); \
      THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 0); \
    } \
  while (0)

# define RTLD_FINALIZE_FOREIGN_CALL \
  do { \
    if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) == 0) \
      _dl_x86_64_restore_sse (); \
    THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, \
		   old_rtld_must_xmm_save); \
  } while (0)
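
/* Illustrative usage sketch (assumption about the call sites, not taken from
   this file): ld.so code that might call back into application code brackets
   the region with these macros so the saved SSE state is handled, roughly:  */
#if 0
  RTLD_ENABLE_FOREIGN_CALL;
  /* ... */
  RTLD_PREPARE_FOREIGN_CALL;
  user_callback ();			/* hypothetical foreign call */
  /* ... */
  RTLD_FINALIZE_FOREIGN_CALL;
#endif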
# endif

#endif /* __ASSEMBLER__ */

#endif /* tls.h */