pthread_mutex_timedlock.c

/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

/* We need to build this function with optimization to avoid
   lll_timedlock erroring out with
   "error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’".  */
int
attribute_optimize("Os")
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME need not be a valid value.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
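
      /* Adaptive mutex.  On SMP systems, spin for a bounded number of
         iterations on the assumption that the current owner is running
         on another CPU and will release the lock soon; fall back to
         blocking in the kernel once the spin budget is exhausted.  The
         budget itself adapts to the contention observed so far.  */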
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
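
      /* Robust mutexes.  Before attempting the lock, the thread
         records the mutex in robust_head.list_op_pending so that the
         kernel's robust-futex cleanup can find it if this thread dies
         mid-operation.  A lock word with FUTEX_OWNER_DIED set means
         the previous owner died; the winner of the CAS below takes
         the mutex over and reports EOWNERDEAD.  */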
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
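
      /* Priority-inheritance mutexes.  The fast path is a single CAS
         from 0 to our TID; on contention, the FUTEX_LOCK_PI operation
         makes the kernel do the blocking, the timeout handling, and
         the priority boosting of the lock owner.  */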
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI interprets the timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME can be passed
               to the kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
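
      /* Priority-protection ("priority ceiling") mutexes.  Before
         taking the lock the thread raises its own priority to the
         mutex's ceiling via __pthread_tpp_change_priority.  The
         ceiling value lives in the upper bits of the lock word; the
         low two bits encode locked (1) and locked-with-waiters (2).  */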
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
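
/* A minimal usage sketch (not part of this file): how a caller
   typically builds the absolute CLOCK_REALTIME deadline that
   pthread_mutex_timedlock expects.  The two-second deadline and the
   `mtx' variable are illustrative assumptions only.  */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

static int
lock_with_deadline (void)
{
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;          /* absolute deadline: now + 2s */

  int err = pthread_mutex_timedlock (&mtx, &abstime);
  /* err is 0 on success, ETIMEDOUT if the deadline passed, or EINVAL
     for a malformed ABSTIME; a robust mutex can additionally return
     EOWNERDEAD, after which pthread_mutex_consistent_np applies.  */
  return err;
}
#endif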