pthread_mutex_timedlock.c

/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#if defined(__UCLIBC_USE_TIME64__)
#include "internal/time64_helpers.h"
#endif

/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
#ifndef __OPTIMIZE__
attribute_optimize("Os")
#endif
pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
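          /* Fold this acquisition's spin count into the running
             average: __spins moves 1/8 of the way toward cnt on each
             contended acquisition (an exponential moving average).  */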
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
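      /* Record the mutex we are about to acquire in list_op_pending
         first, so that if this thread dies mid-acquisition the
         robust-mutex cleanup can still find the lock.  */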
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
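        /* Try to take an uncontended mutex entirely in user space with
           a single CAS of 0 -> our TID; the kernel gets involved only
           when the lock word is already non-zero.  */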
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes the timeout as an
               absolute CLOCK_REALTIME value, so abstime is passed
               through unconverted.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
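            /* With 64-bit time_t support, use the time64 variant of
               the futex syscall so the absolute timeout is not
               truncated beyond 2038.  */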
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            int e = INTERNAL_SYSCALL (futex_time64, __err, 4,
                                      &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      TO_TS64_P(abstime));
#else
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
#endif
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes
                       where the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
#if defined(__UCLIBC_USE_TIME64__)
                    struct __ts64_struct __now64;
#endif
                    struct timespec now = {.tv_sec = 0, .tv_nsec = 0};
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_clock_gettime64)
                    int __r = INTERNAL_SYSCALL (clock_gettime64, __err, 2,
                                                CLOCK_REALTIME, &__now64);
                    if (__r == 0)
                      {
                        now.tv_sec = __now64.tv_sec;
                        now.tv_nsec = __now64.tv_nsec;
                      }
#else
                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
#endif
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#else
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#endif
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
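        /* The PP lock word encodes the priority ceiling in the bits
           covered by PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock
           state in the low bits: ceilval means unlocked, ceilval | 1
           locked, ceilval | 2 locked with waiters.  */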
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
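
/* Usage sketch (illustrative, not part of the library build): how a
   caller might use pthread_mutex_timedlock with an absolute
   CLOCK_REALTIME deadline, including the robust-mutex EOWNERDEAD
   path.  The EXAMPLE_USAGE guard and the function name are ours,
   purely hypothetical.  */
#ifdef EXAMPLE_USAGE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static int
lock_with_deadline (pthread_mutex_t *m)
{
  struct timespec deadline;

  /* pthread_mutex_timedlock expects an absolute time measured
     against CLOCK_REALTIME; build one two seconds from now.  */
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;

  int err = pthread_mutex_timedlock (m, &deadline);
  switch (err)
    {
    case 0:
      /* Lock acquired; the critical section would go here.  */
      pthread_mutex_unlock (m);
      return 0;

    case ETIMEDOUT:
      fprintf (stderr, "lock not acquired within 2 seconds\n");
      return err;

    case EOWNERDEAD:
      /* Robust mutex: the previous owner died while holding the
         lock.  We own it now, but the protected state may be
         inconsistent; repair it, then mark the mutex consistent
         before unlocking.  */
      pthread_mutex_consistent (m);
      pthread_mutex_unlock (m);
      return 0;

    default:
      return err;
    }
}
#endif /* EXAMPLE_USAGE */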