lowlevellock.h

/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <bits/kernel-features.h>
#include <sysdep.h>

#ifndef __NR_futex
# define __NR_futex 221
#endif

#define FUTEX_WAIT              0
#define FUTEX_WAKE              1
#define FUTEX_REQUEUE           3
#define FUTEX_CMP_REQUEUE       4
#define FUTEX_WAKE_OP           5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
#define FUTEX_LOCK_PI           6
#define FUTEX_UNLOCK_PI         7
#define FUTEX_TRYLOCK_PI        8
#define FUTEX_WAIT_BITSET       9
#define FUTEX_WAKE_BITSET       10
#define FUTEX_PRIVATE_FLAG      128
#define FUTEX_CLOCK_REALTIME    256

#define FUTEX_BITSET_MATCH_ANY  0xffffffff

/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
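
/* Worked example of the "backwards" encoding (illustrative note, not in
   the original header), taking the __ASSUME_PRIVATE_FUTEX case outside
   libc.so/ld.so:

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0
       == FUTEX_WAIT | FUTEX_PRIVATE_FLAG      -- private futex op

     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG
       == FUTEX_WAIT                           -- shared futex op

   so LLL_PRIVATE == 0 and LLL_SHARED == FUTEX_PRIVATE_FLAG really are
   reversed before reaching the kernel.  */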

#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })
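
/* Typical pairing of the two primitives above (a sketch with
   hypothetical variables, not part of the original header): a waiter
   blocks only while the futex word still holds the value it last saw,
   and a waker changes the word before issuing the wake, so no wakeup
   can be lost between the check and the sleep.

     int futex_word = 0;

     // waiter:
     while (futex_word == 0)
       lll_futex_wait (&futex_word, 0, LLL_SHARED); // no-op if word != 0

     // waker:
     futex_word = 1;
     lll_futex_wake (&futex_word, 1, LLL_SHARED);   // wake one waiter
   */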

#define lll_robust_dead(futexv, private) \
  do \
    { \
      INTERNAL_SYSCALL_DECL (__err); \
      int *__futexp = &(futexv); \
      \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
                        __lll_private_flag (FUTEX_WAKE, private), 1, 0); \
    } \
  while (0)

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })
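
/* FUTEX_CMP_REQUEUE, used by lll_futex_requeue above, wakes up to
   nr_wake waiters on futexp and requeues up to nr_move of the rest onto
   mutex, but only if *futexp still equals val.  This lets a condition
   variable broadcast move sleepers onto the mutex instead of waking
   them all to contend at once.  Illustrative call (hypothetical
   variables):

     // Wake one waiter; requeue everyone else onto the mutex, provided
     // the condvar futex has not changed under us.
     lll_futex_requeue (&cond_futex, 1, INT_MAX, &mutex_futex,
                        cond_val, LLL_SHARED);
   */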

#ifdef UP
# define __lll_acq_instr ""
# define __lll_rel_instr ""
#else
# define __lll_acq_instr "isync"
# ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync).
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
#  define __lll_rel_instr "lwsync"
# else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
#  define __lll_rel_instr "sync"
# endif
#endif
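
/* Note (not in the original header): _ARCH_PWR4 is a compiler-provided
   macro; GCC defines it when building with -mcpu=power4 or a newer CPU,
   so the lwsync-vs-sync choice above follows directly from the -mcpu
   values listed in the comments.  */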

/* Set *futex to ID if it is 0, atomically.  Returns the old value.  */
#define __lll_robust_trylock(futex, id) \
  ({ int __val; \
     __asm__ __volatile__ ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
                           "   cmpwi 0,%0,0\n" \
                           "   bne 2f\n" \
                           "   stwcx. %3,0,%2\n" \
                           "   bne- 1b\n" \
                           "2: " __lll_acq_instr \
                           : "=&r" (__val), "=m" (*futex) \
                           : "r" (futex), "r" (id), "m" (*futex) \
                           : "cr0", "memory"); \
     __val; \
  })
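
/* How the sequence above works (explanatory note): lwarx loads *futex
   and establishes a reservation; if the value is non-zero the lock is
   already taken and the branch to 2f returns that old value.  Otherwise
   stwcx. stores the ID, and bne- retries from 1b if the reservation was
   lost to a concurrent store.  The __lll_acq_instr (isync) at label 2
   prevents later accesses from being performed before the lock is seen
   held, giving acquire semantics.  */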

#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)

/* Set *futex to 1 if it is 0, atomically.  Returns the old value.  */
#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
#define lll_trylock(lock) __lll_trylock (&(lock))

/* Set *futex to 2 if it is 0, atomically.  Returns the old value.  */
#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

#define lll_lock(lock, private) \
  (void) ({ \
    int *__futex = &(lock); \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0), \
                          0) != 0) \
      { \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex); \
        else \
          __lll_lock_wait (__futex, private); \
      } \
  })
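
/* Illustrative fast-path usage (a sketch with a hypothetical lock
   variable, not part of the original header).  The CAS takes the lock
   when it is 0; only under contention does control enter the kernel via
   __lll_lock_wait*, which switches the value to 2 so that the eventual
   unlock knows a FUTEX_WAKE is needed.

     int my_lock = LLL_LOCK_INITIALIZER;
     lll_lock (my_lock, LLL_PRIVATE);   // 0 -> 1, no syscall if free
     // ... critical section ...
     lll_unlock (my_lock, LLL_PRIVATE); // -> 0, wakes a waiter if 2
   */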

#define lll_robust_lock(lock, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })

#define lll_cond_lock(lock, private) \
  (void) ({ \
    int *__futex = &(lock); \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0), \
                          0) != 0) \
      __lll_lock_wait (__futex, private); \
  })

#define lll_robust_cond_lock(lock, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    int __id = id | FUTEX_WAITERS; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })
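
/* In the robust variants the futex word holds the owner's thread ID
   rather than the 0/1/2 counting states.  FUTEX_WAITERS (a high bit
   defined elsewhere in NPTL) is set, as in lll_robust_cond_lock above,
   while other threads may sleep on the lock; FUTEX_OWNER_DIED is what
   lll_robust_dead earlier in this file sets to tell waiters that the
   owner exited without unlocking.  */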

extern int __lll_timedlock_wait
  (int *futex, const struct timespec *, int private) attribute_hidden;
extern int __lll_robust_timedlock_wait
  (int *futex, const struct timespec *, int private) attribute_hidden;

#define lll_timedlock(lock, abstime, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0), \
                          0) != 0) \
      __val = __lll_timedlock_wait (__futex, abstime, private); \
    __val; \
  })

#define lll_robust_timedlock(lock, abstime, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
    __val; \
  })

#define lll_unlock(lock, private) \
  ((void) ({ \
    int *__futex = &(lock); \
    int __val = atomic_exchange_rel (__futex, 0); \
    if (__builtin_expect (__val > 1, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))

#define lll_robust_unlock(lock, private) \
  ((void) ({ \
    int *__futex = &(lock); \
    int __val = atomic_exchange_rel (__futex, 0); \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
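
/* The two unlock fast paths differ only in how they detect waiters:
   plain locks use the counting states (an old value greater than 1
   means someone is queued in the kernel), while robust locks carry the
   owner TID and must test the FUTEX_WAITERS bit instead.  In both cases
   atomic_exchange_rel releases the lock before the single
   FUTEX_WAKE.  */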

#define lll_islocked(futex) \
  (futex != 0)

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER        (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)

/* The states of a lock are:
    0  -  untaken
    1  -  taken by one user
   >1  -  taken by more users */
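
/* A contended sequence under this encoding (illustrative sketch; the
   1 -> 2 transition happens inside __lll_lock_wait, which is defined
   elsewhere):

     T1: lll_lock     0 -> 1  (fast path, no syscall)
     T2: lll_lock     CAS fails; sets 1 -> 2 and sleeps in FUTEX_WAIT
     T1: lll_unlock   exchange 2 -> 0; old value > 1, so FUTEX_WAKE
     T2: wakes, re-acquires the lock (as 2, since it cannot know it is
         the last waiter)
   */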

/* The kernel notifies a process which uses CLONE_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)
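
/* This is the mechanism behind joining a thread: the TID location is
   registered with the kernel via clone's child-TID-clearing feature, so
   the kernel zeroes it and issues a futex wake when the thread exits;
   the loop above re-reads the value so a spurious wakeup simply waits
   again.  */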

extern int __lll_timedwait_tid (int *, const struct timespec *)
  attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })

#endif /* lowlevellock.h */