rwlock.c

/* Read-write lock implementation.
   Copyright (C) 1998, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
   and Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <bits/libc-lock.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "internals.h"
#include "queue.h"
#include "spinlock.h"
#include "restart.h"

/* Function called by pthread_cancel to remove the thread from
   waiting inside pthread_rwlock_timedrdlock or pthread_rwlock_timedwrlock.  */

static int rwlock_rd_extricate_func(void *obj, pthread_descr th)
{
  pthread_rwlock_t *rwlock = obj;
  int did_remove = 0;

  __pthread_lock(&rwlock->__rw_lock, NULL);
  did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
  __pthread_unlock(&rwlock->__rw_lock);

  return did_remove;
}

static int rwlock_wr_extricate_func(void *obj, pthread_descr th)
{
  pthread_rwlock_t *rwlock = obj;
  int did_remove = 0;

  __pthread_lock(&rwlock->__rw_lock, NULL);
  did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
  __pthread_unlock(&rwlock->__rw_lock);

  return did_remove;
}

/*
 * Check whether the calling thread already owns one or more read locks on the
 * specified lock.  If so, return a pointer to the read lock info structure
 * corresponding to that lock.
 */

static pthread_readlock_info *
rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info;

  for (info = THREAD_GETMEM (self, p_readlock_list); info != NULL;
       info = info->pr_next)
    {
      if (info->pr_lock == rwlock)
        return info;
    }

  return NULL;
}

/*
 * Add a new lock to the thread's list of locks for which it has a read lock.
 * A new info node must be allocated for this, which is taken from the thread's
 * free list, or by calling malloc.  If malloc fails, a null pointer is
 * returned.  Otherwise the lock info structure is initialized and pushed
 * onto the thread's list.
 */

static pthread_readlock_info *
rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info = THREAD_GETMEM (self, p_readlock_free);

  if (info != NULL)
    THREAD_SETMEM (self, p_readlock_free, info->pr_next);
  else
    info = malloc(sizeof *info);

  if (info == NULL)
    return NULL;

  info->pr_lock_count = 1;
  info->pr_lock = rwlock;
  info->pr_next = THREAD_GETMEM (self, p_readlock_list);
  THREAD_SETMEM (self, p_readlock_list, info);

  return info;
}

/*
 * If the thread owns a read lock over the given pthread_rwlock_t,
 * and this read lock is tracked in the thread's lock list,
 * this function returns a pointer to the info node in that list.
 * It also decrements the lock count within that node, and if
 * it reaches zero, it removes the node from the list.
 * If nothing is found, it returns a null pointer.
 */

static pthread_readlock_info *
rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info **pinfo;

  for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
    {
      if ((*pinfo)->pr_lock == rwlock)
        {
          pthread_readlock_info *info = *pinfo;
          if (--info->pr_lock_count == 0)
            *pinfo = info->pr_next;
          return info;
        }
    }

  return NULL;
}

/*
 * This function checks whether the conditions are right to place a read lock.
 * It returns 1 if so, otherwise zero.  The rwlock's internal lock must be
 * locked upon entry.
 */

static int
rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
{
  /* Can't readlock; it is write locked. */
  if (rwlock->__rw_writer != NULL)
    return 0;

  /* Lock prefers readers; get it. */
  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
    return 1;

  /* Lock prefers writers, but none are waiting. */
  if (queue_is_empty(&rwlock->__rw_write_waiting))
    return 1;

  /* Writers are waiting, but this thread already has a read lock. */
  if (have_lock_already)
    return 1;

  /* Writers are waiting, and this is a new lock. */
  return 0;
}

/*
 * This function helps support brain-damaged recursive read locking
 * semantics required by Unix 98, while maintaining write priority.
 * It determines whether this thread already holds a read lock on the
 * given rwlock, returning 1 if so and 0 otherwise.
 *
 * If the thread has any ``untracked read locks'' then it just assumes
 * that this lock is among them, just to be safe, and returns 1.
 *
 * Also, if it finds the thread's lock in the list, it sets the pointer
 * referenced by pexisting to refer to the list entry.
 *
 * If the thread has no untracked locks, and the lock is not found
 * in its list, then it is added to the list.  If this fails,
 * then *pout_of_mem is set to 1.
 */

static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
                    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      if (!self)
        *pself = self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL
          || THREAD_GETMEM (self, p_untracked_readlock_count) > 0)
        have_lock_already = 1;
      else
        {
          existing = rwlock_add_to_list(self, rwlock);
          if (existing == NULL)
            out_of_mem = 1;
        }
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;

  return have_lock_already;
}
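
/*
 * Note on the recursive case handled above: with writer preference, a
 * thread that already holds a read lock must still be able to acquire
 * another read lock on the same rwlock even while writers are queued.
 * Otherwise that thread would block behind a writer which in turn waits
 * for the thread's first read lock to be released, and neither could
 * ever proceed.  The per-thread read-lock list (with the untracked count
 * as a conservative fallback) is what lets rwlock_can_rdlock grant such
 * recursive requests via its have_lock_already argument.
 */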

int
__pthread_rwlock_init (pthread_rwlock_t *rwlock,
                       const pthread_rwlockattr_t *attr)
{
  __pthread_init_lock(&rwlock->__rw_lock);
  rwlock->__rw_readers = 0;
  rwlock->__rw_writer = NULL;
  rwlock->__rw_read_waiting = NULL;
  rwlock->__rw_write_waiting = NULL;

  if (attr == NULL)
    {
      rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
      rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
    }
  else
    {
      rwlock->__rw_kind = attr->__lockkind;
      rwlock->__rw_pshared = attr->__pshared;
    }

  return 0;
}
strong_alias (__pthread_rwlock_init, pthread_rwlock_init)

int
__pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  int readers;
  _pthread_descr writer;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  readers = rwlock->__rw_readers;
  writer = rwlock->__rw_writer;
  __pthread_unlock (&rwlock->__rw_lock);

  if (readers > 0 || writer != NULL)
    return EBUSY;

  return 0;
}
strong_alias (__pthread_rwlock_destroy, pthread_rwlock_destroy)

int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;
      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
        ++existing->pr_lock_count;
      else
        ++self->p_untracked_readlock_count;
    }

  return 0;
}
strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
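
/*
 * The timed variants below register an extricate callback so that
 * pthread_cancel can remove the thread from the wait queue, and they use
 * timedsuspend instead of suspend.  When timedsuspend reports a timeout,
 * an unlocker may already have dequeued the thread and be about to
 * restart it; in that case the pending restart must be consumed with
 * suspend() before retrying, so it is not mistaken for a later wakeup.
 * Only if the thread is still found on the queue does the call really
 * time out and return ETIMEDOUT.
 */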

int
__pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
                              const struct timespec *abstime)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  pthread_extricate_if extr;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_rd_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;
      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
      if (timedsuspend (self, abstime) == 0)
        {
          int was_on_queue;

          __pthread_lock (&rwlock->__rw_lock, self);
          was_on_queue = remove_from_queue (&rwlock->__rw_read_waiting, self);
          __pthread_unlock (&rwlock->__rw_lock);

          if (was_on_queue)
            {
              __pthread_set_own_extricate_if (self, 0);
              return ETIMEDOUT;
            }

          /* Eat the outstanding restart() from the signaller */
          suspend (self);
        }
    }

  __pthread_set_own_extricate_if (self, 0);

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
        ++existing->pr_lock_count;
      else
        ++self->p_untracked_readlock_count;
    }

  return 0;
}
strong_alias (__pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock)

int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self();
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  int retval = EBUSY;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  __pthread_lock (&rwlock->__rw_lock, self);

  /* 0 is passed to here instead of have_lock_already.
     This is to meet Single Unix Spec requirements:
     if writers are waiting, pthread_rwlock_tryrdlock
     does not acquire a read lock, even if the caller has
     one or more read locks already. */

  if (rwlock_can_rdlock(rwlock, 0))
    {
      ++rwlock->__rw_readers;
      retval = 0;
    }

  __pthread_unlock (&rwlock->__rw_lock);

  if (retval == 0)
    {
      if (have_lock_already || out_of_mem)
        {
          if (existing != NULL)
            ++existing->pr_lock_count;
          else
            ++self->p_untracked_readlock_count;
        }
    }

  return retval;
}
strong_alias (__pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock)

int
__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self ();

  while (1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
          __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }
}
strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)

int
__pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
                              const struct timespec *abstime)
{
  pthread_descr self;
  pthread_extricate_if extr;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_wr_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  while (1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
          __pthread_set_own_extricate_if (self, 0);
          __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
      if (timedsuspend (self, abstime) == 0)
        {
          int was_on_queue;

          __pthread_lock (&rwlock->__rw_lock, self);
          was_on_queue = remove_from_queue (&rwlock->__rw_write_waiting, self);
          __pthread_unlock (&rwlock->__rw_lock);

          if (was_on_queue)
            {
              __pthread_set_own_extricate_if (self, 0);
              return ETIMEDOUT;
            }

          /* Eat the outstanding restart() from the signaller */
          suspend (self);
        }
    }
}
strong_alias (__pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock)

int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
    {
      rwlock->__rw_writer = thread_self ();
      result = 0;
    }
  __pthread_unlock (&rwlock->__rw_lock);

  return result;
}
strong_alias (__pthread_rwlock_trywrlock, pthread_rwlock_trywrlock)

int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock. */
      if (rwlock->__rw_writer != thread_self ())
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }
      rwlock->__rw_writer = NULL;

      if ((rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
           && !queue_is_empty(&rwlock->__rw_read_waiting))
          || (th = dequeue(&rwlock->__rw_write_waiting)) == NULL)
        {
          /* Restart all waiting readers.  */
          torestart = rwlock->__rw_read_waiting;
          rwlock->__rw_read_waiting = NULL;
          __pthread_unlock (&rwlock->__rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
      else
        {
          /* Restart one waiting writer.  */
          __pthread_unlock (&rwlock->__rw_lock);
          restart (th);
        }
    }
  else
    {
      /* Unlocking a read lock. */
      if (rwlock->__rw_readers == 0)
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
        /* Restart one waiting writer, if any.  */
        th = dequeue (&rwlock->__rw_write_waiting);
      else
        th = NULL;
      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
        restart (th);

      /* Recursive lock fixup */

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
        {
          pthread_descr self = thread_self();
          pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

          if (victim != NULL)
            {
              if (victim->pr_lock_count == 0)
                {
                  victim->pr_next = THREAD_GETMEM (self, p_readlock_free);
                  THREAD_SETMEM (self, p_readlock_free, victim);
                }
            }
          else
            {
              int val = THREAD_GETMEM (self, p_untracked_readlock_count);
              if (val > 0)
                THREAD_SETMEM (self, p_untracked_readlock_count, val - 1);
            }
        }
    }

  return 0;
}
strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)

int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
  attr->__lockkind = 0;
  attr->__pshared = PTHREAD_PROCESS_PRIVATE;

  return 0;
}

int
__pthread_rwlockattr_destroy (pthread_rwlockattr_t *attr)
{
  return 0;
}
strong_alias (__pthread_rwlockattr_destroy, pthread_rwlockattr_destroy)

int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  *pshared = attr->__pshared;
  return 0;
}

int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;

  /* For now it is not possible to share a read-write lock across
     processes.  */
  if (pshared != PTHREAD_PROCESS_PRIVATE)
    return ENOSYS;

  attr->__pshared = pshared;

  return 0;
}

int
pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *attr, int *pref)
{
  *pref = attr->__lockkind;
  return 0;
}

int
pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
{
  if (pref != PTHREAD_RWLOCK_PREFER_READER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
      && pref != PTHREAD_RWLOCK_DEFAULT_NP)
    return EINVAL;

  attr->__lockkind = pref;

  return 0;
}
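
/*
 * A minimal usage sketch of the interface implemented above, not part of
 * the library itself.  It assumes a hosted environment with <stdio.h> and
 * is compiled only when RWLOCK_USAGE_EXAMPLE is defined; the macro, the
 * thread functions and the shared counter are illustrative names only.
 */
#ifdef RWLOCK_USAGE_EXAMPLE
#include <stdio.h>

static pthread_rwlock_t example_lock;
static int shared_counter;

/* Readers may hold the lock concurrently with each other. */
static void *example_reader (void *arg)
{
  (void) arg;
  pthread_rwlock_rdlock (&example_lock);
  printf ("reader sees %d\n", shared_counter);
  pthread_rwlock_unlock (&example_lock);
  return NULL;
}

/* A writer excludes both readers and other writers. */
static void *example_writer (void *arg)
{
  (void) arg;
  pthread_rwlock_wrlock (&example_lock);
  ++shared_counter;
  pthread_rwlock_unlock (&example_lock);
  return NULL;
}

int main (void)
{
  pthread_t r, w;

  pthread_rwlock_init (&example_lock, NULL);
  pthread_create (&w, NULL, example_writer, NULL);
  pthread_create (&r, NULL, example_reader, NULL);
  pthread_join (w, NULL);
  pthread_join (r, NULL);
  pthread_rwlock_destroy (&example_lock);
  return 0;
}
#endif /* RWLOCK_USAGE_EXAMPLE */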