pthread_mutex_timedlock.c

/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

/* We need to build this function with optimization to avoid
   lll_timedlock erroring out with
     error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’  */
int
#ifndef __OPTIMIZE__
attribute_optimize("Os")
#endif
pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
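  /* POSIX semantics: like pthread_mutex_lock, but if the mutex cannot
     be acquired before the absolute CLOCK_REALTIME deadline ABSTIME,
     give up and return ETIMEDOUT.  A return value of 0 means the lock
     was acquired.  */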
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
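      /* The second argument of __builtin_expect above is the value the
         mutex type is predicted to have, so the plain timed mutex is
         the fast-path case of this switch.  */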
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
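      /* Adaptive mutex: on SMP machines, spin briefly in user space in
         the hope that the current holder releases the lock soon, and
         only fall back to sleeping in the kernel once the spin budget
         is exhausted.  On uniprocessors spinning cannot help, so take
         the normal path immediately.  */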
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
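          /* Feed the spin count just used back into __spins as an
             exponential moving average with weight 1/8, so later
             acquisitions adapt their spin budget to the observed hold
             times.  */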
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
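      /* Robust mutexes: before touching the lock word the thread
         publishes the mutex via list_op_pending in its robust list, so
         the kernel can set FUTEX_OWNER_DIED in the lock word if this
         thread dies mid-operation.  Seeing FUTEX_OWNER_DIED here means
         a previous owner died while holding the mutex.  */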
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
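      /* Priority-inheritance mutexes: the lock word stores the owner's
         TID, and contended acquisitions are handed to the kernel with
         FUTEX_LOCK_PI, which boosts the owner's priority to that of
         the highest-priority waiter until the lock is released.  */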
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes its timeout as an
               absolute CLOCK_REALTIME value, so abstime can be passed
               through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            int e = INTERNAL_SYSCALL (futex_time64, __err, 4,
                                      &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
#else
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
#endif
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_clock_gettime64)
                    INTERNAL_SYSCALL (clock_gettime64, __err, 2,
                                      CLOCK_REALTIME, &now);
#else
                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
#endif
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#else
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#endif

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
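      /* Priority-protection (ceiling) mutexes: the upper bits of the
         lock word store the ceiling (PTHREAD_MUTEX_PRIO_CEILING_MASK),
         the low bits the lock state (0 free, 1 locked, 2 locked with
         waiters).  A thread first raises its own priority to the
         ceiling with __pthread_tpp_change_priority, then competes for
         the low bits with compare-and-exchange.  */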
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
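
/* Usage sketch (illustrative only, not part of this file): a caller
   builds an absolute CLOCK_REALTIME deadline and distinguishes a
   timeout from other errors.  The names `mtx` and `handle_timeout`
   are hypothetical.

     struct timespec abstime;
     clock_gettime (CLOCK_REALTIME, &abstime);
     abstime.tv_sec += 2;                  // give up after ~2 seconds

     int err = pthread_mutex_timedlock (&mtx, &abstime);
     if (err == 0)
       {
         // ... critical section ...
         pthread_mutex_unlock (&mtx);
       }
     else if (err == ETIMEDOUT)
       handle_timeout ();                  // hypothetical handler
*/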