pthread_mutex_timedlock.c

/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
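
/* One switch arm per mutex family below: plain
   (timed/recursive/errorcheck/adaptive), robust, priority-inheritance
   (PI) and priority-protection (PP) mutexes.  On success the common
   exit path at the bottom records the new owner and bumps __nusers.  */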
/* We need to build this function with optimization to avoid
   lll_timedlock erroring out with
   error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’.  */
int
attribute_optimize ("Os")
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
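
      /* Note: lll_timedlock is expected to validate ABSTIME on the
         blocking slow path, returning EINVAL for a malformed timespec
         and ETIMEDOUT once the deadline has passed (an assumption
         about the lowlevellock implementation, not checked here).  */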
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
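
      /* The __spins update above is an exponentially weighted moving
         average: __spins moves one eighth of the way toward the spin
         count just observed, and it bounds the spinning of the next
         attempt through max_cnt.  */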
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
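
          /* A caller that receives EOWNERDEAD owns the mutex but must
             mark it consistent (pthread_mutex_consistent, historically
             pthread_mutex_consistent_np) before unlocking; otherwise it
             becomes PTHREAD_MUTEX_NOTRECOVERABLE for all later lockers.  */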
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
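
      /* Priority-inheritance mutexes.  The lock word holds the owner's
         TID: an uncontended acquisition is a single CAS from 0 to our
         TID, and contention is handed to the kernel via FUTEX_LOCK_PI,
         which lends the highest waiter's priority to the owner while
         we wait.  */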
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  Let the kernel take care of
               everything.  FUTEX_LOCK_PI interprets its timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME is passed
               through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
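
                    /* Neither error may be reported to the caller:
                       POSIX has no error code for a dead owner of a
                       non-robust mutex, and for a normal
                       (non-error-checking) mutex a self-deadlock has
                       to surface as a timeout rather than EDEADLK.  */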
                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
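
      /* Priority-protection (priority ceiling) mutexes.  The ceiling
         lives in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits of the lock
         word; the low bits encode the state: ceilval means unlocked,
         ceilval | 1 locked, ceilval | 2 locked with waiters.  */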
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
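
        /* Acquisition loop: read the current ceiling from the lock
           word, raise this thread to that priority through the TPP
           machinery, then try to CAS the lock word from "unlocked at
           this ceiling" to "locked".  If the ceiling changed in the
           meantime, the outer loop retries with the new value.  */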
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
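
/* Illustrative caller-side sketch (not part of the original file; the
   helper name is hypothetical).  POSIX measures the timeout against
   CLOCK_REALTIME, so a caller builds an absolute deadline from the
   current wall-clock time.  Guarded by #if 0 so it is never compiled.  */
#if 0
#include <pthread.h>
#include <errno.h>
#include <time.h>

static int
lock_with_timeout (pthread_mutex_t *m, time_t seconds)
{
  struct timespec abstime;

  /* Deadline = now (CLOCK_REALTIME) + SECONDS.  */
  if (clock_gettime (CLOCK_REALTIME, &abstime) != 0)
    return errno;
  abstime.tv_sec += seconds;

  /* Returns 0 on success, or ETIMEDOUT if the deadline passes before
     the mutex becomes available.  */
  return pthread_mutex_timedlock (m, &abstime);
}
#endif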