/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
/* Lock MUTEX, blocking at most until the absolute CLOCK_REALTIME time
   ABSTIME.  Returns 0 on success, or an errno-style code: ETIMEDOUT,
   EDEADLK, EAGAIN (recursion-count overflow), EINVAL, EOWNERDEAD /
   ENOTRECOVERABLE (robust mutexes).  Dispatches on the mutex kind:
   plain, recursive, errorcheck, adaptive, robust, priority-inheritance
   (PI) and priority-protect (PP) variants are each handled by their own
   case below.  */
int
pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Spinning only pays off on SMP; on UP fall back to a plain
	 timed lock.  */
      if (! __is_smp)
	goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  /* Spin budget adapts to past contention, capped at
	     MAX_ADAPTIVE_COUNT.  */
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  /* Spin budget exhausted; block in the kernel.  */
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_trylock (mutex->__data.__lock) != 0);

	  /* Exponential moving average of the spin count (step 1/8).  */
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* Publish the pending operation so the kernel can clean up if we
	 die before enqueueing the mutex on our robust list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  /* CAS lost the race; retry with the value we saw.  */
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  /* On success lll_robust_timedlock returns the lock word it
	     observed; loop again if the owner-died bit is set in it.  */
	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Fast path: try to grab an uncontended lock in user space.  */
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  The timeout value must be a relative value.
	       Convert it.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    /* The kernel reports EDEADLK for self-deadlock; that
		       must not happen for errorcheck/recursive kinds,
		       which were already handled above.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      /* Restart nanosleep until the full interval has
			 elapsed (it is not a cancellation point here).  */
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Priority-protect protocol: raise our priority to the ceiling
	   stored in the high bits of the lock word, then try to take
	   the lock.  The outer loop restarts if the ceiling changed
	   while we were waiting.  */
	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		/* Undo any priority boost before failing.  */
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    /* Lock word states: ceilval == unlocked, ceilval|1 ==
	       locked, ceilval|2 == locked with waiters.  */
	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  /* Ceiling changed; restart the outer loop.  */
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}