/* pthread_mutex_unlock.c — glibc/NPTL mutex unlock implementation.  */
  1. /* Copyright (C) 2002, 2003, 2005-2008, 2009 Free Software Foundation, Inc.
  2. This file is part of the GNU C Library.
  3. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
  4. The GNU C Library is free software; you can redistribute it and/or
  5. modify it under the terms of the GNU Lesser General Public
  6. License as published by the Free Software Foundation; either
  7. version 2.1 of the License, or (at your option) any later version.
  8. The GNU C Library is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. Lesser General Public License for more details.
  12. You should have received a copy of the GNU Lesser General Public
  13. License along with the GNU C Library; if not, write to the Free
  14. Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  15. 02111-1307 USA. */
  16. #include <assert.h>
  17. #include <errno.h>
  18. #include <stdlib.h>
  19. #include "pthreadP.h"
  20. #include <lowlevellock.h>
/* Slow-path unlock for robust, priority-inheritance and
   priority-protection mutexes, defined below.  Declared noinline so
   the common-case fast path in __pthread_mutex_unlock_usercnt stays
   small.  */
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
/* Unlock MUTEX.  If DECR is nonzero, also decrement the user count
   (pthread_mutex_unlock passes 1; internal callers such as the
   condition-variable code pass 0 because they account for the user
   elsewhere).  Handles only the plain kinds (timed, recursive,
   adaptive, error-checking); anything robust/PI/PP is delegated to
   __pthread_mutex_unlock_full.  Returns 0 on success, or EPERM when
   the calling thread does not own a recursive or error-checking
   mutex.  */
int
internal_function attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  int type = PTHREAD_MUTEX_TYPE (mutex);
  /* Any bit outside the kind mask flags a robust/PI/PP mutex; take
     the (unlikely) slow path.  */
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  /* Fast path: a normal (timed) mutex is the expected kind.  */
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
      return 0;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex: only the owner may unlock, and the lock is
         released only when the recursion count drops to zero.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto normal;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    /* Adaptive mutexes unlock exactly like normal ones.  */
    goto normal;
  else
    {
      /* Error checking mutex: verify the caller actually holds it.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      goto normal;
    }
}
/* Slow-path unlock for robust, priority-inheritance (PI) and
   priority-protection (PP) mutexes.  DECR has the same meaning as in
   __pthread_mutex_unlock_usercnt.  Returns 0 on success, EPERM when
   the caller does not own the mutex, ENOTRECOVERABLE when a robust
   recursive mutex is inconsistent and still recursively held, or
   EINVAL for an unknown type.  */
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  If we inherited the lock from a dead owner
         and the state was never made consistent, the mutex is doomed:
         once the recursion count reaches zero, mark it
         not-recoverable.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* The futex word holds the owner TID; verify ownership.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  list_op_pending is set before
         the dequeue/unlock and cleared after, so the kernel can
         finish or undo the operation if this thread dies midway.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  Same dead-owner/inconsistent handling as
         the non-PI robust recursive case above.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
       newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Try the cheap release first: if there are no waiters
         we can CAS our TID out of the futex word in user space;
         otherwise the kernel must hand the lock over and adjust
         priorities via FUTEX_UNLOCK_PI.  */
      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
          || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
                                                   THREAD_GETMEM (THREAD_SELF,
                                                                  tid)))
        {
          int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
          int private = (robust
                         ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                         : PTHREAD_MUTEX_PSHARED (mutex));
          INTERNAL_SYSCALL_DECL (__err);
          INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                            __lll_private_flag (FUTEX_UNLOCK_PI, private));
        }

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  The low bits of __lock (below the
         priority-ceiling field) are the lock state proper.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Clear the lock bits while preserving the stored
         priority ceiling; retry the CAS if the word changed under
         us.  */
      int newval, oldval;
      do
        {
          oldval = mutex->__data.__lock;
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
                                                   newval, oldval));

      /* A lock value > 1 means there were waiters; wake one.  */
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      /* Drop this thread's priority back now that the ceiling no
         longer applies.  */
      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return 0;
}
  239. int
  240. __pthread_mutex_unlock (
  241. pthread_mutex_t *mutex)
  242. {
  243. return __pthread_mutex_unlock_usercnt (mutex, 1);
  244. }
  245. strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
  246. strong_alias (__pthread_mutex_unlock, __pthread_mutex_unlock_internal)