/* Definition for thread-local data handling.  nptl/x86_64 version.
   Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef _TLS_H
#define _TLS_H 1

#ifndef __ASSEMBLER__
# include <asm/prctl.h>   /* For ARCH_SET_FS.  */
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
# include <stdlib.h>
# include <sysdep.h>
# include <bits/kernel-features.h>
# include <bits/wordsize.h>
# include <xmmintrin.h>

/* Type for the dtv.  */
typedef union dtv
{
  size_t counter;
  struct
  {
    void *val;
    bool is_static;
  } pointer;
} dtv_t;

typedef struct
{
  void *tcb;            /* Pointer to the TCB.  Not necessarily the
                           thread descriptor used by libpthread.  */
  dtv_t *dtv;
  void *self;           /* Pointer to the thread descriptor.  */
  int multiple_threads;
  int gscope_flag;
  uintptr_t sysinfo;
  uintptr_t stack_guard;
  uintptr_t pointer_guard;
  unsigned long int vgetcpu_cache[2];
# ifndef __ASSUME_PRIVATE_FUTEX
  int private_futex;
# else
  int __unused1;
# endif
# if __WORDSIZE == 64
  int rtld_must_xmm_save;
# endif
  /* Reservation of some values for the TM ABI.  */
  void *__private_tm[5];
# if __WORDSIZE == 64
  long int __unused2;
  /* Have space for the post-AVX register size.  */
  __m128 rtld_savespace_sse[8][4];
  void *__padding[8];
# endif
} tcbhead_t;
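
/* Illustrative note on the layout above: on x86-64 the thread pointer
   is the %fs segment base, and it points at a tcbhead_t whose first
   words refer back to the descriptor itself.  After TLS_INIT_TP
   (defined further down) has run, one would therefore expect roughly:

     tcbhead_t *head = ...;    // the %fs base
     assert (head->tcb == head);
     assert (head->self == head);

   since the TCB, tcbhead_t and struct pthread all start at the same
   address in this implementation.  */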

#else /* __ASSEMBLER__ */
# include <tcb-offsets.h>
#endif

/* We require TLS support in the tools.  */
#define HAVE_TLS_SUPPORT 1
#define HAVE___THREAD 1
#define HAVE_TLS_MODEL_ATTRIBUTE 1

/* Signal that TLS support is available.  */
#define USE_TLS 1

/* Alignment requirement for the stack.  */
#define STACK_ALIGN 16

#ifndef __ASSEMBLER__
/* Get system call information.  */
# include <sysdep.h>

/* Get the thread descriptor definition.  */
# include <descr.h>

#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

/* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
   because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
   struct pthread even when not linked with -lpthread.  */
# define TLS_INIT_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the initial TCB.  */
# define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)

/* This is the size of the TCB.  */
# define TLS_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the TCB.  */
//# define TLS_TCB_ALIGN __alignof__ (struct pthread)
// Normally the above would be correct.  But we have to store post-AVX
// vector registers in the TCB and we want the storage to be aligned.
// Unfortunately there isn't yet a type for these values and hence no
// 32-byte alignment requirement.  Make this explicit, for now.
# define TLS_TCB_ALIGN 32

/* The TCB can have any size and the memory following the address the
   thread pointer points to is unspecified.  Allocate the TCB there.  */
# define TLS_TCB_AT_TP 1
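
/* Illustrative sketch of what TLS_TCB_AT_TP implies for allocation.
   The real work is done by the dynamic linker; the helper below is
   hypothetical.  The static TLS blocks live below the thread pointer
   and the TCB sits at the top of the block, e.g.:

     // block has room for the static TLS data plus the TCB and is
     // aligned to TLS_TCB_ALIGN
     static void *place_tcb (char *block, size_t block_size)
     {
       return block + block_size - TLS_TCB_SIZE;   // TCB at the top
     }

   The returned address is what later gets handed to TLS_INIT_TP.  */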

/* Install the dtv pointer.  The pointer passed is to the element with
   index -1 which contains the length.  */
# define INSTALL_DTV(descr, dtvp) \
  ((tcbhead_t *) (descr))->dtv = (dtvp) + 1

/* Install new dtv for current thread.  */
# define INSTALL_NEW_DTV(dtvp) \
  ({ struct pthread *__pd; \
     THREAD_SETMEM (__pd, header.dtv, (dtvp)); })

/* Return dtv of given thread descriptor.  */
# define GET_DTV(descr) \
  (((tcbhead_t *) (descr))->dtv)
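
/* Illustrative sketch of the dtv convention these macros rely on; the
   exact index assignments come from the generic NPTL/TLS scheme and
   are an assumption here, not something this file defines:

     dtv_t dtv[2 + nmodules];
     dtv[0].counter = nmodules;          // length, seen as index -1
     dtv[1].counter = generation;        // generation counter, index 0
     dtv[2].pointer.val = module1_tls;   // per-module blocks from index 1
     INSTALL_DTV (pd, &dtv[0]);          // stores &dtv[1] in the TCB

   so that THREAD_DTV ()[-1].counter later recovers the length.  */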

/* Macros to load from and store into segment registers.  */
# define TLS_GET_FS() \
  ({ int __seg; __asm__ ("movl %%fs, %0" : "=q" (__seg)); __seg; })
# define TLS_SET_FS(val) \
  __asm__ ("movl %0, %%fs" :: "q" (val))

/* Code to initially initialize the thread pointer.  This might need
   special attention since 'errno' is not yet available and if the
   operation can cause a failure 'errno' must not be touched.

   We have to make the syscall for both uses of the macro since the
   address might be (and probably is) different.  */
# define TLS_INIT_TP(thrdescr, secondcall) \
  ({ void *_thrdescr = (thrdescr); \
     tcbhead_t *_head = _thrdescr; \
     int _result; \
 \
     _head->tcb = _thrdescr; \
     /* For now the thread descriptor is at the same address.  */ \
     _head->self = _thrdescr; \
 \
     /* It is a simple syscall to set the %fs value for the thread.  */ \
     __asm__ volatile ("syscall" \
                       : "=a" (_result) \
                       : "0" ((unsigned long int) __NR_arch_prctl), \
                         "D" ((unsigned long int) ARCH_SET_FS), \
                         "S" (_thrdescr) \
                       : "memory", "cc", "r11", "cx"); \
 \
     _result ? "cannot set %fs base address for thread-local storage" : 0; \
  })
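
/* Illustrative use of TLS_INIT_TP; the real call sites are in the
   ld.so/libc startup code, and `pd' stands for a freshly allocated
   thread descriptor (made-up name):

     const char *lossage = TLS_INIT_TP (pd, 0);
     if (lossage != NULL)
       _exit (1);      // errno must not be used at this point

   The expansion performs arch_prctl (ARCH_SET_FS, pd) via a raw
   syscall instruction, clobbering only what the kernel clobbers
   (%rcx, %r11), and yields either 0 or an error string.  */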

/* Return the address of the dtv for the current thread.  */
# define THREAD_DTV() \
  ({ struct pthread *__pd; \
     THREAD_GETMEM (__pd, header.dtv); })

/* Return the thread descriptor for the current thread.

   The contained asm must *not* be marked volatile since otherwise
   assignments like
       pthread_descr self = thread_self();
   do not get optimized away.  */
# define THREAD_SELF \
  ({ struct pthread *__self; \
     __asm__ ("movq %%fs:%c1,%q0" : "=r" (__self) \
              : "i" (offsetof (struct pthread, header.self))); \
     __self; })
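
/* Because the asm in THREAD_SELF is not volatile, the compiler may
   treat it as a pure computation and fold repeated uses, e.g.
   (illustrative):

     struct pthread *a = THREAD_SELF;
     struct pthread *b = THREAD_SELF;   // may reuse the first %fs load

   which is the optimization the comment above is after; a volatile
   asm would force a second load every time.  */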

/* Magic for libthread_db to know how to do THREAD_SELF.  */
# define DB_THREAD_SELF_INCLUDE <sys/reg.h> /* For the FS constant.  */
# define DB_THREAD_SELF CONST_THREAD_AREA (64, FS)

/* Read member of the thread descriptor directly.  */
# define THREAD_GETMEM(descr, member) \
  ({ __typeof (descr->member) __value; \
     if (sizeof (__value) == 1) \
       __asm__ volatile ("movb %%fs:%P2,%b0" \
                         : "=q" (__value) \
                         : "0" (0), "i" (offsetof (struct pthread, member))); \
     else if (sizeof (__value) == 4) \
       __asm__ volatile ("movl %%fs:%P1,%0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member))); \
     else \
       { \
         if (sizeof (__value) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
 \
         __asm__ volatile ("movq %%fs:%P1,%q0" \
                           : "=r" (__value) \
                           : "i" (offsetof (struct pthread, member))); \
       } \
     __value; })
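
/* Illustrative use of THREAD_GETMEM, assuming the usual NPTL `tid'
   member of struct pthread:

     pid_t my_tid = THREAD_GETMEM (THREAD_SELF, tid);

   Note that the descr argument is only used for its type; the access
   always goes through the calling thread's %fs segment, so passing
   another thread's descriptor still reads the caller's own copy.  */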

/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
# define THREAD_GETMEM_NC(descr, member, idx) \
  ({ __typeof (descr->member[0]) __value; \
     if (sizeof (__value) == 1) \
       __asm__ volatile ("movb %%fs:%P2(%q3),%b0" \
                         : "=q" (__value) \
                         : "0" (0), "i" (offsetof (struct pthread, member[0])), \
                           "r" (idx)); \
     else if (sizeof (__value) == 4) \
       __asm__ volatile ("movl %%fs:%P1(,%q2,4),%0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member[0])), "r" (idx)); \
     else \
       { \
         if (sizeof (__value) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
 \
         __asm__ volatile ("movq %%fs:%P1(,%q2,8),%q0" \
                           : "=r" (__value) \
                           : "i" (offsetof (struct pthread, member[0])), \
                             "r" (idx)); \
       } \
     __value; })

/* Loading addresses of objects on x86-64 needs special treatment
   when generating PIC code.  */
#ifdef __pic__
# define IMM_MODE "nr"
#else
# define IMM_MODE "ir"
#endif

/* Set member of the thread descriptor directly.  */
# define THREAD_SETMEM(descr, member, value) \
  ({ if (sizeof (descr->member) == 1) \
       __asm__ volatile ("movb %b0,%%fs:%P1" : \
                         : "iq" (value), \
                           "i" (offsetof (struct pthread, member))); \
     else if (sizeof (descr->member) == 4) \
       __asm__ volatile ("movl %0,%%fs:%P1" : \
                         : IMM_MODE (value), \
                           "i" (offsetof (struct pthread, member))); \
     else \
       { \
         if (sizeof (descr->member) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
 \
         __asm__ volatile ("movq %q0,%%fs:%P1" : \
                           : IMM_MODE ((unsigned long int) value), \
                             "i" (offsetof (struct pthread, member))); \
       }})
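
/* Illustrative use of THREAD_SETMEM; the field is taken from tcbhead_t
   above, and whether this exact store appears elsewhere in NPTL is an
   assumption:

     THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

   This compiles to a single mov against %fs-relative memory, without
   ever materializing a descriptor pointer in a register.  */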

/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ if (sizeof (descr->member[0]) == 1) \
       __asm__ volatile ("movb %b0,%%fs:%P1(%q2)" : \
                         : "iq" (value), \
                           "i" (offsetof (struct pthread, member[0])), \
                           "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
       __asm__ volatile ("movl %0,%%fs:%P1(,%q2,4)" : \
                         : IMM_MODE (value), \
                           "i" (offsetof (struct pthread, member[0])), \
                           "r" (idx)); \
     else \
       { \
         if (sizeof (descr->member[0]) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
 \
         __asm__ volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
                           : IMM_MODE ((unsigned long int) value), \
                             "i" (offsetof (struct pthread, member[0])), \
                             "r" (idx)); \
       }})

/* Atomic compare and exchange on TLS, returning old value.  */
# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
  ({ __typeof (descr->member) __ret; \
     __typeof (oldval) __old = (oldval); \
     if (sizeof (descr->member) == 4) \
       __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
                         : "=a" (__ret) \
                         : "0" (__old), "r" (newval), \
                           "i" (offsetof (struct pthread, member))); \
     else \
       /* Not necessary for other sizes at the moment.  */ \
       abort (); \
     __ret; })

/* Atomic logical and.  */
# define THREAD_ATOMIC_AND(descr, member, val) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
              __asm__ volatile (LOCK_PREFIX "andl %1, %%fs:%P0" \
                                :: "i" (offsetof (struct pthread, member)), \
                                   "ir" (val)); \
            else \
              /* Not necessary for other sizes at the moment.  */ \
              abort (); })

/* Atomic set bit.  */
# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
              __asm__ volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
                                :: "i" (offsetof (struct pthread, member)), \
                                   "ir" (1 << (bit))); \
            else \
              /* Not necessary for other sizes at the moment.  */ \
              abort (); })
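
/* Illustrative use of the atomic helpers; `cancelhandling' and
   CANCELSTATE_BIT are the usual NPTL names and treating them as
   available here is an assumption:

     struct pthread *self = THREAD_SELF;
     THREAD_ATOMIC_BIT_SET (self, cancelhandling, CANCELSTATE_BIT);

   This expands to one `lock orl' on %fs-relative memory, i.e. an
   atomic read-modify-write without first loading the word into a
   register.  */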

# define CALL_THREAD_FCT(descr) \
  ({ void *__res; \
     __asm__ volatile ("movq %%fs:%P2, %%rdi\n\t" \
                       "callq *%%fs:%P1" \
                       : "=a" (__res) \
                       : "i" (offsetof (struct pthread, start_routine)), \
                         "i" (offsetof (struct pthread, arg)) \
                       : "di", "si", "cx", "dx", "r8", "r9", "r10", "r11", \
                         "memory", "cc"); \
     __res; })
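
/* CALL_THREAD_FCT is roughly the C call below, where pd stands for the
   new thread's descriptor, except that both the function pointer and
   its argument are fetched straight from the descriptor through %fs
   and the result comes back in %rax (illustrative equivalent only):

     void *result = pd->start_routine (pd->arg);

   The clobber list names the remaining x86-64 call-clobbered
   registers, since an arbitrary user function is being invoked.  */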

/* Set the stack guard field in TCB head.  */
# define THREAD_SET_STACK_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
# define THREAD_COPY_STACK_GUARD(descr) \
  ((descr)->header.stack_guard \
   = THREAD_GETMEM (THREAD_SELF, header.stack_guard))

/* Set the pointer guard field in the TCB head.  */
# define THREAD_SET_POINTER_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
# define THREAD_COPY_POINTER_GUARD(descr) \
  ((descr)->header.pointer_guard \
   = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))

/* Get and set the global scope generation counter in the TCB head.  */
# define THREAD_GSCOPE_FLAG_UNUSED 0
# define THREAD_GSCOPE_FLAG_USED   1
# define THREAD_GSCOPE_FLAG_WAIT   2
# define THREAD_GSCOPE_RESET_FLAG() \
  do \
    { int __res; \
      __asm__ volatile ("xchgl %0, %%fs:%P1" \
                        : "=r" (__res) \
                        : "i" (offsetof (struct pthread, header.gscope_flag)), \
                          "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
      if (__res == THREAD_GSCOPE_FLAG_WAIT) \
        lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
    } \
  while (0)
# define THREAD_GSCOPE_SET_FLAG() \
  THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
# define THREAD_GSCOPE_WAIT() \
  GL(dl_wait_lookup_done) ()
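
/* Illustrative pattern for the global-scope flag; the actual users are
   the dynamic linker's symbol-lookup paths:

     THREAD_GSCOPE_SET_FLAG ();
     // ... look up a symbol in the global scope ...
     THREAD_GSCOPE_RESET_FLAG ();

   RESET uses xchgl so that if a concurrent dlclose has switched the
   flag to THREAD_GSCOPE_FLAG_WAIT, the waiter gets woken through the
   futex.  */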

# ifdef SHARED
/* Defined in dl-trampoline.S.  */
extern void _dl_x86_64_save_sse (void);
extern void _dl_x86_64_restore_sse (void);

# define RTLD_CHECK_FOREIGN_CALL \
  (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) != 0)

/* NB: Don't use the xchg operation because that would imply a lock
   prefix which is expensive and unnecessary.  The cache line is also
   not contested at all.  */
# define RTLD_ENABLE_FOREIGN_CALL \
  int old_rtld_must_xmm_save = THREAD_GETMEM (THREAD_SELF, \
                                              header.rtld_must_xmm_save); \
  THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 1)

# define RTLD_PREPARE_FOREIGN_CALL \
  do if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save)) \
    { \
      _dl_x86_64_save_sse (); \
      THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 0); \
    } \
  while (0)

# define RTLD_FINALIZE_FOREIGN_CALL \
  do { \
    if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) == 0) \
      _dl_x86_64_restore_sse (); \
    THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, \
                   old_rtld_must_xmm_save); \
  } while (0)
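
/* Illustrative pattern for the macros above inside ld.so; the callback
   name is hypothetical.  Code that may call back into user code while
   the xmm registers still hold live argument state brackets the call
   like this:

     RTLD_ENABLE_FOREIGN_CALL;
     // ...
     RTLD_PREPARE_FOREIGN_CALL;     // saves the xmm regs once, lazily
     some_user_callback ();
     // ...
     RTLD_FINALIZE_FOREIGN_CALL;    // restores them if a save happened

   ENABLE also remembers the previous flag value so that FINALIZE can
   restore it, which lets such regions nest.  */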

# endif

#endif /* __ASSEMBLER__ */

#endif /* tls.h */