pthread_mutex_trylock.c

/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

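  /* Dispatch on the mutex type; the __builtin_expect below hints that a
     plain PTHREAD_MUTEX_TIMED_NP mutex is the common case.  */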
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      return 0;
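
      /* Robust mutexes: before touching the lock word we record the mutex
         in list_op_pending so that, if this thread dies mid-operation, the
         kernel's robust-list handling can still find the mutex and set
         FUTEX_OWNER_DIED for the next locker.  */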
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;
                  return 0;
                }
            }
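
          /* Not held by us and the previous owner is alive: try to
             atomically install our TID.  lll_robust_trylock returns zero
             on success and the observed lock value otherwise; if that
             value has FUTEX_OWNER_DIED set we loop and reclaim above.  */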
          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
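
      /* Priority-inheritance mutexes.  An uncontended acquire is a plain
         CAS from 0 to our TID; contended and owner-died paths are
         delegated to the kernel's PI-futex operations.  */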
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;
                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);
        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
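
      /* Priority-protection (ceiling) mutexes.  The ceiling value lives in
         the PTHREAD_MUTEX_PRIO_CEILING_MASK bits of the lock word; bit 0
         is the locked flag.  */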
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;
                return 0;
              }
          }
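
        /* Raise this thread's priority to the mutex's ceiling, then try a
           CAS from "unlocked at that ceiling" (ceilval) to "locked"
           (ceilval | 1), looping if another thread changed the ceiling
           in the meantime.  */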
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);

        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
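/* Export the internal symbol under the public POSIX name as well.  */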
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
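
For reference, a minimal caller-side sketch of how the return values above are typically handled (not part of this file; shared_lock, its initialization, and repair_shared_state are hypothetical, and the EOWNERDEAD branch assumes the mutex was created with the robust attribute):

#include <errno.h>
#include <pthread.h>

extern pthread_mutex_t shared_lock;      /* hypothetical; initialized elsewhere */
extern void repair_shared_state (void);  /* hypothetical recovery step */

int
try_enter (void)
{
  int err = pthread_mutex_trylock (&shared_lock);
  switch (err)
    {
    case 0:
      return 0;         /* Lock acquired; caller must unlock later.  */

    case EBUSY:
      return -1;        /* Another thread holds it; caller can retry.  */

    case EOWNERDEAD:
      /* Robust mutex only: the previous owner died while holding the
         lock.  We now own it, but the protected state may be torn, so
         repair it and mark the mutex consistent again (the name in
         glibc of this era is pthread_mutex_consistent_np).  */
      repair_shared_state ();
      pthread_mutex_consistent_np (&shared_lock);
      return 0;

    default:
      return -1;        /* EAGAIN, EDEADLK, EINVAL, ENOTRECOVERABLE.  */
    }
}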