useldt.h

/* Special definitions for ix86 machine using segment register based
   thread descriptor.
   Copyright (C) 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   see <http://www.gnu.org/licenses/>.  */
#ifndef __ASSEMBLER__
#include <stddef.h>	/* For offsetof.  */
#include <stdlib.h>	/* For abort().  */

/* We don't want to include the kernel header.  So duplicate the
   information.  */

/* Structure passed on `modify_ldt' call.  */
struct modify_ldt_ldt_s
{
  unsigned int entry_number;
  unsigned long int base_addr;
  unsigned int limit;
  unsigned int seg_32bit:1;
  unsigned int contents:2;
  unsigned int read_exec_only:1;
  unsigned int limit_in_pages:1;
  unsigned int seg_not_present:1;
  unsigned int useable:1;
  unsigned int empty:25;
};

/* System call to set LDT entry.  */
extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
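
/* Note on the brace initializers used below: they are positional, filling
   entry_number, base_addr and limit first, then the bit-fields in the
   declaration order above (seg_32bit, contents, read_exec_only,
   limit_in_pages, seg_not_present, useable, empty).  A limit of 0xfffff
   with limit_in_pages set describes the full 4 GB address space.  */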
/* Return the thread descriptor for the current thread.

   The contained asm must *not* be marked volatile since otherwise
   assignments like
	pthread_descr self = thread_self();
   do not get optimized away.  */
#define THREAD_SELF \
({ \
  register pthread_descr __self; \
  __asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
	   : "i" (offsetof (struct _pthread_descr_struct, \
			    p_header.data.self))); \
  __self; \
})
/* Initialize the thread-unique value.  Two possible ways to do it.  */
#define DO_MODIFY_LDT(descr, nr) \
({ \
  struct modify_ldt_ldt_s ldt_entry = \
    { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
      1, 0, 0, 1, 0, 1, 0 }; \
  if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) \
    abort (); \
  __asm__ ("movw %w0, %%gs" : : "q" (nr * 8 + 7)); \
})
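
/* NR * 8 + 7 encodes an x86 segment selector: index NR in bits 3 and up,
   the TI bit (0x4) set so the selector refers to the LDT, and requestor
   privilege level 3.  */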
#ifdef __PIC__
# define USETLS_EBX_ARG "r"
# define USETLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
#else
# define USETLS_EBX_ARG "b"
# define USETLS_LOAD_EBX
#endif
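
/* Under PIC on i386, %ebx holds the GOT pointer and cannot be named as an
   asm operand register, so the syscall argument is passed in an arbitrary
   register and xchgl swaps it into %ebx around the int $0x80 trap
   (USETLS_LOAD_EBX is emitted both before and after the trap to restore
   %ebx).  */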
/* When using the new set_thread_area call, we don't need to change %gs
   because we inherited the value set up in the main thread by TLS setup.
   We need to extract that value and set up the same segment in this
   thread.  */
#if USE_TLS
# define DO_SET_THREAD_AREA_REUSE(nr)	1
#else
/* Without TLS, we do the initialization of the main thread, where NR == 0.  */
# define DO_SET_THREAD_AREA_REUSE(nr)	(!__builtin_constant_p (nr) || (nr))
#endif
#define DO_SET_THREAD_AREA(descr, nr) \
({ \
  int __gs; \
  if (DO_SET_THREAD_AREA_REUSE (nr)) \
    { \
      __asm__ ("movw %%gs, %w0" : "=q" (__gs)); \
      struct modify_ldt_ldt_s ldt_entry = \
	{ (__gs & 0xffff) >> 3, \
	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
	  1, 0, 0, 1, 0, 1, 0 }; \
 \
      int __result; \
      __asm__ (USETLS_LOAD_EBX \
	       "movl %2, %%eax\n\t" \
	       "int $0x80\n\t" \
	       USETLS_LOAD_EBX \
	       : "=&a" (__result) \
	       : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
      if (__result == 0) \
	__asm__ ("movw %w0, %%gs" : : "q" (__gs)); \
      else \
	__gs = -1; \
    } \
  else \
    { \
      struct modify_ldt_ldt_s ldt_entry = \
	{ -1, \
	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
	  1, 0, 0, 1, 0, 1, 0 }; \
      int __result; \
      __asm__ (USETLS_LOAD_EBX \
	       "movl %2, %%eax\n\t" \
	       "int $0x80\n\t" \
	       USETLS_LOAD_EBX \
	       : "=&a" (__result) \
	       : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
      if (__result == 0) \
	{ \
	  __gs = (ldt_entry.entry_number << 3) + 3; \
	  __asm__ ("movw %w0, %%gs" : : "q" (__gs)); \
	} \
      else \
	__gs = -1; \
    } \
  __gs; \
})
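
/* In the reuse case the current %gs selector is read back and its index
   (selector >> 3) is passed so the kernel updates the TLS slot already set
   up for this thread.  Otherwise entry_number is -1, which asks
   set_thread_area to pick a free GDT slot and write the chosen index back
   into the structure; the resulting selector (entry_number << 3) + 3 is a
   GDT selector (TI clear) with RPL 3.  */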
#if defined __ASSUME_SET_THREAD_AREA_SYSCALL
# define INIT_THREAD_SELF(descr, nr)	DO_SET_THREAD_AREA (descr, nr)
#elif defined __NR_set_thread_area
# define INIT_THREAD_SELF(descr, nr) \
  ({ \
    if (__builtin_expect (__have_no_set_thread_area, 0) \
	|| (DO_SET_THREAD_AREA (descr, DO_SET_THREAD_AREA_REUSE (nr)) == -1 \
	    && (__have_no_set_thread_area = 1))) \
      DO_MODIFY_LDT (descr, nr); \
  })
/* Defined in pspinlock.c.  */
extern int __have_no_set_thread_area;
#else
# define INIT_THREAD_SELF(descr, nr)	DO_MODIFY_LDT (descr, nr)
#endif
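
/* Purely illustrative, not part of this header (the names below are
   placeholders): a thread-creation path installs the segment before running
   user code and can then use THREAD_SELF from anywhere in that thread:

     INIT_THREAD_SELF (new_thread_descr, new_thread_nr);
     ...
     pthread_descr self = THREAD_SELF;
*/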
/* Free resources associated with thread descriptor.  */
#ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
#define FREE_THREAD(descr, nr) do { } while (0)
#elif defined __NR_set_thread_area
#define FREE_THREAD(descr, nr) \
{ \
  int __gs; \
  __asm__ __volatile__ ("movw %%gs, %w0" : "=q" (__gs)); \
  if (__builtin_expect (__gs & 4, 0)) \
    { \
      struct modify_ldt_ldt_s ldt_entry = \
	{ nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
      __modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
    } \
}
#else
#define FREE_THREAD(descr, nr) \
{ \
  struct modify_ldt_ldt_s ldt_entry = \
    { nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
  __modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
}
#endif
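
/* (__gs & 4) tests the TI bit of the selector: it is set only when %gs
   refers to an LDT entry installed by DO_MODIFY_LDT, in which case the
   entry is marked not-present again (seg_not_present = 1 in the
   initializer).  A GDT segment set up via set_thread_area needs no cleanup
   here.  */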
/* Read member of the thread descriptor directly.  */
#define THREAD_GETMEM(descr, member) \
({ \
  __typeof__ (descr->member) __value; \
  if (sizeof (__value) == 1) \
    __asm__ __volatile__ ("movb %%gs:%P2,%b0" \
			  : "=q" (__value) \
			  : "0" (0), \
			    "i" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else if (sizeof (__value) == 4) \
    __asm__ __volatile__ ("movl %%gs:%P1,%0" \
			  : "=r" (__value) \
			  : "i" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else \
    { \
      if (sizeof (__value) != 8) \
	/* There should not be any value with a size other than 1, 4 or 8.  */ \
	abort (); \
 \
      __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t" \
			    "movl %%gs:%P2,%%edx" \
			    : "=A" (__value) \
			    : "i" (offsetof (struct _pthread_descr_struct, \
					     member)), \
			      "i" (offsetof (struct _pthread_descr_struct, \
					     member) + 4)); \
    } \
  __value; \
})
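
/* The sizeof dispatch above is resolved at compile time, so only one of the
   loads survives in the generated code; the 8-byte case uses the "A"
   constraint, i.e. the value is produced in the %edx:%eax register pair.  */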
/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
#define THREAD_GETMEM_NC(descr, member) \
({ \
  __typeof__ (descr->member) __value; \
  if (sizeof (__value) == 1) \
    __asm__ __volatile__ ("movb %%gs:(%2),%b0" \
			  : "=q" (__value) \
			  : "0" (0), \
			    "r" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else if (sizeof (__value) == 4) \
    __asm__ __volatile__ ("movl %%gs:(%1),%0" \
			  : "=r" (__value) \
			  : "r" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else \
    { \
      if (sizeof (__value) != 8) \
	/* There should not be any value with a size other than 1, 4 or 8.  */ \
	abort (); \
 \
      __asm__ __volatile__ ("movl %%gs:(%1),%%eax\n\t" \
			    "movl %%gs:4(%1),%%edx" \
			    : "=&A" (__value) \
			    : "r" (offsetof (struct _pthread_descr_struct, \
					     member))); \
    } \
  __value; \
})
/* Set member of the thread descriptor directly.  */
#define THREAD_SETMEM(descr, member, value) \
({ \
  __typeof__ (descr->member) __value = (value); \
  if (sizeof (__value) == 1) \
    __asm__ __volatile__ ("movb %0,%%gs:%P1" : \
			  : "q" (__value), \
			    "i" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else if (sizeof (__value) == 4) \
    __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
			  : "r" (__value), \
			    "i" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else \
    { \
      if (sizeof (__value) != 8) \
	/* There should not be any value with a size other than 1, 4 or 8.  */ \
	abort (); \
 \
      __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\t" \
			    "movl %%edx,%%gs:%P2" : \
			    : "A" (__value), \
			      "i" (offsetof (struct _pthread_descr_struct, \
					     member)), \
			      "i" (offsetof (struct _pthread_descr_struct, \
					     member) + 4)); \
    } \
})
/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
#define THREAD_SETMEM_NC(descr, member, value) \
({ \
  __typeof__ (descr->member) __value = (value); \
  if (sizeof (__value) == 1) \
    __asm__ __volatile__ ("movb %0,%%gs:(%1)" : \
			  : "q" (__value), \
			    "r" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else if (sizeof (__value) == 4) \
    __asm__ __volatile__ ("movl %0,%%gs:(%1)" : \
			  : "r" (__value), \
			    "r" (offsetof (struct _pthread_descr_struct, \
					   member))); \
  else \
    { \
      if (sizeof (__value) != 8) \
	/* There should not be any value with a size other than 1, 4 or 8.  */ \
	abort (); \
 \
      __asm__ __volatile__ ("movl %%eax,%%gs:(%1)\n\t" \
			    "movl %%edx,%%gs:4(%1)" : \
			    : "A" (__value), \
			      "r" (offsetof (struct _pthread_descr_struct, \
					     member))); \
    } \
})
#endif

/* We want the OS to assign stack addresses.  */
#define FLOATING_STACKS 1

/* Maximum size of the stack if the rlimit is unlimited.  */
#define ARCH_STACK_MAX_SIZE	8*1024*1024