pthread_mutex_lock.c

/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
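
/* These macros are only defaults: a variant build can predefine them to
   substitute different low-level primitives.  In glibc the condvar
   internal-lock file (pthread_mutex_cond_lock.c) is understood to do
   exactly that and to compile this file with NO_INCR defined.  */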
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif


static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;


int
#ifdef NO_INCR
attribute_hidden internal_function
#else
attribute_protected
#endif
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
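
  /* Note the idiom below: __builtin_expect returns its first argument
     unchanged and only hints to the compiler that TYPE is most likely
     PTHREAD_MUTEX_TIMED_NP; the comparison against that constant
     happens afterwards.  */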
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);
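
          /* Adapt the spin limit: move __spins one eighth of the way
             toward the number of iterations this acquisition needed,
             so the limit tracks observed contention.  */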
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
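
/* Illustrative caller-side sketch (not part of this file): an
   error-checking mutex is one way to observe the EDEADLK path above.

     pthread_mutex_t m;
     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &a);
     pthread_mutex_lock (&m);     -> returns 0, ownership recorded
     pthread_mutex_lock (&m);     -> returns EDEADLK, no self-deadlock
     pthread_mutex_unlock (&m);
*/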

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
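      /* Publish the pending operation in the robust list head before
         touching the lock word: should this thread die anywhere below,
         the kernel and the next locker can still find the mutex and
         mark its owner as dead.  */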
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif
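              /* Why the NO_INCR build forces FUTEX_WAITERS on is not
                 stated here; presumably the condvar-lock variant wants
                 the eventual unlock to go through the kernel so that
                 waiters are woken.  Treat this rationale as an
                 assumption.  */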
              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
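
          /* Block until we acquire the lock.  The low-level routine is
             expected to return zero after a clean acquisition, or the
             lock word with FUTEX_OWNER_DIED set, in which case the
             loop condition below restarts the recovery code above.  */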
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
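
                /* Neither case is recoverable: the dead owner of a
                   non-robust PI mutex leaves the lock in a state that
                   can never be repaired, so the thread is parked
                   forever instead of failing with an unexpected
                   error.  */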
                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
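        /* The lock word of a priority-protected mutex encodes the
           ceiling in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits and the
           lock state in the low bits: CEILVAL means free, CEILVAL | 1
           locked, and CEILVAL | 2 locked with waiters.  */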
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}

#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif


#ifdef NO_INCR
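/* Used only in the NO_INCR (condvar) build: after a PI futex requeue
   the kernel may already have acquired the mutex on this thread's
   behalf, leaving only the user-level bookkeeping (owner, recursion
   count) to fix up.  This description of the caller is an assumption
   based on how requeue-PI condvar code typically uses such a helper,
   not something stated in this file.  */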
void
attribute_hidden internal_function
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif