/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <bits/kernel-features.h>

#define FUTEX_WAIT              0
#define FUTEX_WAKE              1
#define FUTEX_REQUEUE           3
#define FUTEX_CMP_REQUEUE       4
#define FUTEX_WAKE_OP           5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
#define FUTEX_LOCK_PI           6
#define FUTEX_UNLOCK_PI         7
#define FUTEX_TRYLOCK_PI        8
#define FUTEX_WAIT_BITSET       9
#define FUTEX_WAKE_BITSET       10
#define FUTEX_PRIVATE_FLAG      128
#define FUTEX_CLOCK_REALTIME    256

#define FUTEX_BITSET_MATCH_ANY  0xffffffff
/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE     0
#define LLL_SHARED      FUTEX_PRIVATE_FLAG


#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private)                                            \
   ? ((private) == 0                                                         \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
      : (fl))                                                                \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
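
/* Illustrative note (added for clarity, not part of the original
   header): in the non-libc, __ASSUME_PRIVATE_FUTEX case above, the
   "backwards" encoding of LLL_PRIVATE/LLL_SHARED works out to

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0
       == FUTEX_WAIT | FUTEX_PRIVATE_FLAG      (process-private wait)

     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG
       == FUTEX_WAIT                           (process-shared wait)

   i.e. LLL_SHARED carries FUTEX_PRIVATE_FLAG precisely so that a
   single XOR can strip the flag again before the system call.  */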
#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({                                                                          \
    INTERNAL_SYSCALL_DECL (__err);                                            \
    long int __ret;                                                           \
                                                                              \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                      \
                              __lll_private_flag (FUTEX_WAIT, private),       \
                              (val), (timespec));                             \
    __ret;                                                                    \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({                                                                          \
    INTERNAL_SYSCALL_DECL (__err);                                            \
    long int __ret;                                                           \
                                                                              \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                      \
                              __lll_private_flag (FUTEX_WAKE, private),       \
                              (nr), 0);                                       \
    __ret;                                                                    \
  })
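
/* Usage sketch (illustrative only, not part of the original header):
   a minimal "wait for a flag" pattern built on lll_futex_wait and
   lll_futex_wake.  The names below are hypothetical.  */
#if 0
static int __example_flag;      /* 0 = not ready, 1 = ready.  */

static void
__example_wait_for_flag (void)
{
  /* FUTEX_WAIT only sleeps while *futexp still equals the expected
     value, so re-check the flag after every wakeup.  */
  while (__example_flag == 0)
    lll_futex_wait (&__example_flag, 0, LLL_PRIVATE);
}

static void
__example_set_flag (void)
{
  __example_flag = 1;
  lll_futex_wake (&__example_flag, 1, LLL_PRIVATE);
}
#endif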
/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({                                                                          \
    INTERNAL_SYSCALL_DECL (__err);                                            \
    long int __ret;                                                           \
                                                                              \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                      \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
                              (nr_wake), (nr_move), (mutex), (val));          \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                  \
  })
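
/* Note (added for clarity): FUTEX_CMP_REQUEUE wakes up to nr_wake
   waiters on futexp and moves up to nr_move further waiters onto the
   mutex futex, provided *futexp still equals val; this is how
   pthread_cond_broadcast avoids waking every waiter only to have them
   all contend for the mutex at once.  */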
#define lll_robust_dead(futexv, private) \
  do                                                                          \
    {                                                                         \
      int *__futexp = &(futexv);                                              \
      atomic_or (__futexp, FUTEX_OWNER_DIED);                                 \
      lll_futex_wake (__futexp, 1, private);                                  \
    }                                                                         \
  while (0)
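
/* Note (added for clarity): FUTEX_OWNER_DIED marks a robust lock whose
   owner exited without unlocking; setting it and waking one waiter lets
   the next locker observe the bit and recover the mutex (EOWNERDEAD)
   instead of blocking forever.  */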
/* Returns non-zero if error happened, zero if success.  */
#ifdef __sparc32_atomic_do_lock
/* Avoid FUTEX_WAKE_OP if supporting pre-v9 CPUs.  */
# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) 1
#else
# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({                                                                          \
    INTERNAL_SYSCALL_DECL (__err);                                            \
    long int __ret;                                                           \
                                                                              \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                      \
                              __lll_private_flag (FUTEX_WAKE_OP, private),    \
                              (nr_wake), (nr_wake2), (futexp2),               \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                  \
  })
#endif
static inline int
__attribute__ ((always_inline))
__lll_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
}
#define lll_trylock(futex) __lll_trylock (&(futex))

static inline int
__attribute__ ((always_inline))
__lll_cond_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))

static inline int
__attribute__ ((always_inline))
__lll_robust_trylock (int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_trylock(futex, id) \
  __lll_robust_trylock (&(futex), id)
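
/* Note (added for clarity): all three trylock variants attempt a single
   compare-and-swap from 0 (unlocked) and never block; they return zero
   when the lock was acquired and non-zero when it was already held.  */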
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

static inline void
__attribute__ ((always_inline))
__lll_lock (int *futex, int private)
{
  int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);

  if (__builtin_expect (val != 0, 0))
    {
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
        __lll_lock_wait_private (futex);
      else
        __lll_lock_wait (futex, private);
    }
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)
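
/* Note (added for clarity): the non-robust lock word follows the usual
   three-state futex protocol -- 0 means unlocked, 1 means locked with
   no waiters, and 2 means locked with possible waiters.  The fast path
   above only handles the 0 -> 1 transition; the out-of-line
   __lll_lock_wait* routines set the word to 2 before sleeping, which is
   why lll_unlock below only issues a wakeup when the old value was
   greater than 1.  */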
static inline int
__attribute__ ((always_inline))
__lll_robust_lock (int *futex, int id, int private)
{
  int result = 0;

  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_lock_wait (futex, private);
  return result;
}
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)

static inline void
__attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);

  if (__builtin_expect (val != 0, 0))
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)

#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
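
/* Note (added for clarity): for the robust variants the lock word holds
   the owning thread's kernel TID rather than 1/2; FUTEX_WAITERS is set
   in the word while other threads are blocked on it, and
   lll_robust_cond_lock asks for that bit up front so that a later
   lll_robust_unlock will issue a wakeup.  */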
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;

static inline int
__attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
  int result = 0;

  if (__builtin_expect (val != 0, 0))
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)
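
/* Usage sketch (illustrative only, not part of the original header):
   lll_timedlock takes an absolute CLOCK_REALTIME deadline and returns
   zero on success or an error code (e.g. ETIMEDOUT) otherwise.  The
   function and lock below are hypothetical.  */
#if 0
static int
__example_lock_with_timeout (int *lockp, unsigned int seconds)
{
  struct timespec abstime;

  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += seconds;
  /* 0 when the lock was acquired, ETIMEDOUT once the deadline has
     passed.  */
  return lll_timedlock (*lockp, &abstime, LLL_PRIVATE);
}
#endif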
static inline int
__attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;

  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)

#define lll_unlock(lock, private) \
  ((void) ({                                                                  \
    int *__futex = &(lock);                                                   \
    int __val = atomic_exchange_24_rel (__futex, 0);                          \
                                                                              \
    if (__builtin_expect (__val > 1, 0))                                      \
      lll_futex_wake (__futex, 1, private);                                   \
  }))
#define lll_robust_unlock(lock, private) \
  ((void) ({                                                                  \
    int *__futex = &(lock);                                                   \
    int __val = atomic_exchange_rel (__futex, 0);                             \
                                                                              \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0))                          \
      lll_futex_wake (__futex, 1, private);                                   \
  }))

#define lll_islocked(futex) \
  (futex != 0)

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER		(0)
#define LLL_LOCK_INITIALIZER_LOCKED	(1)
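
/* Usage sketch (illustrative only, not part of the original header):
   the plain lock/unlock pair protecting a critical section.  The
   variable and function names are hypothetical.  */
#if 0
static int __example_lock = LLL_LOCK_INITIALIZER;
static int __example_counter;

static void
__example_increment (void)
{
  lll_lock (__example_lock, LLL_PRIVATE);
  /* Critical section: only one thread at a time gets here.  */
  ++__example_counter;
  lll_unlock (__example_lock, LLL_PRIVATE);
}
#endif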
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do                                                                          \
    {                                                                         \
      __typeof (tid) __tid;                                                   \
      while ((__tid = (tid)) != 0)                                            \
        lll_futex_wait (&(tid), __tid, LLL_SHARED);                           \
    }                                                                         \
  while (0)
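
/* Note (added for clarity): the loop re-reads the TID after every
   return from FUTEX_WAIT because the wait can end spuriously or because
   the value changed; waiting is only over once the kernel has actually
   stored zero into the TID field.  */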
extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({                                                                          \
    int __res = 0;                                                            \
    if ((tid) != 0)                                                           \
      __res = __lll_timedwait_tid (&(tid), (abstime));                        \
    __res;                                                                    \
  })

#endif	/* lowlevellock.h */