rwlock.c
  1. /* Read-write lock implementation.
  2. Copyright (C) 1998, 2000 Free Software Foundation, Inc.
  3. This file is part of the GNU C Library.
  4. Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
  5. and Ulrich Drepper <drepper@cygnus.com>, 1998.
  6. The GNU C Library is free software; you can redistribute it and/or
  7. modify it under the terms of the GNU Lesser General Public License as
  8. published by the Free Software Foundation; either version 2.1 of the
  9. License, or (at your option) any later version.
  10. The GNU C Library is distributed in the hope that it will be useful,
  11. but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. Lesser General Public License for more details.
  14. You should have received a copy of the GNU Lesser General Public
  15. License along with the GNU C Library; see the file COPYING.LIB. If not,
  16. write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17. Boston, MA 02111-1307, USA. */
  18. #include <errno.h>
  19. #include <pthread.h>
  20. #include <stdlib.h>
  21. #include "internals.h"
  22. #include "queue.h"
  23. #include "spinlock.h"
  24. #include "restart.h"
  25. /* Function called by pthread_cancel to remove the thread from
  26. waiting inside pthread_rwlock_timedrdlock or pthread_rwlock_timedwrlock. */
  27. static int rwlock_rd_extricate_func(void *obj, pthread_descr th)
  28. {
  29. pthread_rwlock_t *rwlock = obj;
  30. int did_remove = 0;
  31. __pthread_lock(&rwlock->__rw_lock, NULL);
  32. did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
  33. __pthread_unlock(&rwlock->__rw_lock);
  34. return did_remove;
  35. }
  36. static int rwlock_wr_extricate_func(void *obj, pthread_descr th)
  37. {
  38. pthread_rwlock_t *rwlock = obj;
  39. int did_remove = 0;
  40. __pthread_lock(&rwlock->__rw_lock, NULL);
  41. did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
  42. __pthread_unlock(&rwlock->__rw_lock);
  43. return did_remove;
  44. }
  45. /*
  46. * Check whether the calling thread already owns one or more read locks on the
  47. * specified lock. If so, return a pointer to the read lock info structure
  48. * corresponding to that lock.
  49. */
  50. static pthread_readlock_info *
  51. rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
  52. {
  53. pthread_readlock_info *info;
  54. for (info = THREAD_GETMEM (self, p_readlock_list); info != NULL;
  55. info = info->pr_next)
  56. {
  57. if (info->pr_lock == rwlock)
  58. return info;
  59. }
  60. return NULL;
  61. }
  62. /*
  63. * Add a new lock to the thread's list of locks for which it has a read lock.
  64. * A new info node must be allocated for this, which is taken from the thread's
  65. * free list, or by calling malloc. If malloc fails, a null pointer is
  66. * returned. Otherwise the lock info structure is initialized and pushed
  67. * onto the thread's list.
  68. */
  69. static pthread_readlock_info *
  70. rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
  71. {
  72. pthread_readlock_info *info = THREAD_GETMEM (self, p_readlock_free);
  73. if (info != NULL)
  74. THREAD_SETMEM (self, p_readlock_free, info->pr_next);
  75. else
  76. info = malloc(sizeof *info);
  77. if (info == NULL)
  78. return NULL;
  79. info->pr_lock_count = 1;
  80. info->pr_lock = rwlock;
  81. info->pr_next = THREAD_GETMEM (self, p_readlock_list);
  82. THREAD_SETMEM (self, p_readlock_list, info);
  83. return info;
  84. }
  85. /*
  86. * If the thread owns a read lock over the given pthread_rwlock_t,
  87. * and this read lock is tracked in the thread's lock list,
  88. * this function returns a pointer to the info node in that list.
  89. * It also decrements the lock count within that node, and if
  90. * it reaches zero, it removes the node from the list.
  91. * If nothing is found, it returns a null pointer.
  92. */
  93. static pthread_readlock_info *
  94. rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
  95. {
  96. pthread_readlock_info **pinfo;
  97. for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
  98. {
  99. if ((*pinfo)->pr_lock == rwlock)
  100. {
  101. pthread_readlock_info *info = *pinfo;
  102. if (--info->pr_lock_count == 0)
  103. *pinfo = info->pr_next;
  104. return info;
  105. }
  106. }
  107. return NULL;
  108. }
  109. /*
  110. * This function checks whether the conditions are right to place a read lock.
  111. * It returns 1 if so, otherwise zero. The rwlock's internal lock must be
  112. * locked upon entry.
  113. */
  114. static int
  115. rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
  116. {
  117. /* Can't readlock; it is write locked. */
  118. if (rwlock->__rw_writer != NULL)
  119. return 0;
  120. /* Lock prefers readers; get it. */
  121. if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
  122. return 1;
  123. /* Lock prefers writers, but none are waiting. */
  124. if (queue_is_empty(&rwlock->__rw_write_waiting))
  125. return 1;
  126. /* Writers are waiting, but this thread already has a read lock */
  127. if (have_lock_already)
  128. return 1;
  129. /* Writers are waiting, and this is a new lock */
  130. return 0;
  131. }
/*
 * Support for the recursive read-locking semantics required by Unix 98
 * while preserving writer priority.  Determines whether the calling
 * thread should be treated as already holding a read lock on RWLOCK;
 * returns 1 if so, 0 otherwise.
 *
 * Only relevant for PTHREAD_RWLOCK_PREFER_WRITER_NP locks — for any
 * other kind this returns 0 without touching the tracking list.
 *
 * If the thread has any ``untracked read locks'' it conservatively
 * assumes RWLOCK may be among them and returns 1.
 *
 * *pself may be resolved as a side effect: if the caller passed a NULL
 * descriptor, thread_self() is called and stored back through pself.
 *
 * *pexisting receives the tracking-list node for RWLOCK: the one found,
 * or a freshly added node (pr_lock_count == 1) when none existed.
 * *pout_of_mem is set to 1 when adding that node failed for lack of
 * memory (in which case *pexisting is NULL).
 */
static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      /* Resolve the descriptor lazily; callers avoid thread_self()
	 until it is actually needed. */
      if (!self)
	*pself = self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL
	  || THREAD_GETMEM (self, p_untracked_readlock_count) > 0)
	have_lock_already = 1;
      else
	{
	  /* First read lock on RWLOCK: start tracking it now so a
	     later recursive rdlock is recognized. */
	  existing = rwlock_add_to_list(self, rwlock);
	  if (existing == NULL)
	    out_of_mem = 1;
	}
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;

  return have_lock_already;
}
  174. int
  175. pthread_rwlock_init (pthread_rwlock_t *rwlock,
  176. const pthread_rwlockattr_t *attr)
  177. {
  178. __pthread_init_lock(&rwlock->__rw_lock);
  179. rwlock->__rw_readers = 0;
  180. rwlock->__rw_writer = NULL;
  181. rwlock->__rw_read_waiting = NULL;
  182. rwlock->__rw_write_waiting = NULL;
  183. if (attr == NULL)
  184. {
  185. rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
  186. rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
  187. }
  188. else
  189. {
  190. rwlock->__rw_kind = attr->__lockkind;
  191. rwlock->__rw_pshared = attr->__pshared;
  192. }
  193. return 0;
  194. }
  195. int
  196. pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
  197. {
  198. int readers;
  199. _pthread_descr writer;
  200. __pthread_lock (&rwlock->__rw_lock, NULL);
  201. readers = rwlock->__rw_readers;
  202. writer = rwlock->__rw_writer;
  203. __pthread_unlock (&rwlock->__rw_lock);
  204. if (readers > 0 || writer != NULL)
  205. return EBUSY;
  206. return 0;
  207. }
/* Acquire a read lock on RWLOCK, blocking until it can be granted.
   Always returns 0.  */
int
pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  /* For writer-preference locks, find out whether this thread already
     holds a read lock (so it may recurse past waiting writers).  May
     resolve SELF and may allocate a tracking node into EXISTING;
     OUT_OF_MEM is set when that allocation failed. */
  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock_can_rdlock(rwlock, have_lock_already))
	break;	/* NOTE: we exit the loop still holding __rw_lock.  */

      /* Lock unavailable: queue ourselves, drop the spinlock, and
	 sleep until an unlocking thread restart()s us, then retry. */
      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }

  /* Granted: count ourselves as a reader, then release the spinlock
     taken before the break above. */
  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  /* Recursive-lock bookkeeping: bump the tracked count, or fall back
     to the untracked counter when no node could be allocated. */
  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
	++existing->pr_lock_count;
      else
	++self->p_untracked_readlock_count;
    }

  return 0;
}
/* Acquire a read lock on RWLOCK, giving up once the absolute deadline
   ABSTIME has passed.  Returns 0 on success, EINVAL for a malformed
   timeout, or ETIMEDOUT.  */
int
pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  pthread_extricate_if extr;

  /* Validate the timeout before doing any work. */
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  /* Recursive-read-lock detection; see pthread_rwlock_rdlock. */
  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_rd_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock_can_rdlock(rwlock, have_lock_already))
	break;	/* NOTE: we exit the loop still holding __rw_lock.  */

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
      if (timedsuspend (self, abstime) == 0)
	{
	  int was_on_queue;

	  /* Deadline expired.  If we are still on the wait queue no
	     one is about to wake us, so we can bail out cleanly. */
	  __pthread_lock (&rwlock->__rw_lock, self);
	  was_on_queue = remove_from_queue (&rwlock->__rw_read_waiting, self);
	  __pthread_unlock (&rwlock->__rw_lock);

	  if (was_on_queue)
	    {
	      __pthread_set_own_extricate_if (self, 0);
	      return ETIMEDOUT;
	    }

	  /* We were already dequeued by an unlocker, so a wake-up is
	     in flight and must be consumed before looping again.
	     Eat the outstanding restart() from the signaller */
	  suspend (self);
	}
    }

  /* Granted (spinlock still held from the break above). */
  __pthread_set_own_extricate_if (self, 0);

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  /* Recursive-lock bookkeeping, as in pthread_rwlock_rdlock. */
  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
	++existing->pr_lock_count;
      else
	++self->p_untracked_readlock_count;
    }

  return 0;
}
  292. int
  293. pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
  294. {
  295. pthread_descr self = thread_self();
  296. pthread_readlock_info *existing;
  297. int out_of_mem, have_lock_already;
  298. int retval = EBUSY;
  299. have_lock_already = rwlock_have_already(&self, rwlock,
  300. &existing, &out_of_mem);
  301. __pthread_lock (&rwlock->__rw_lock, self);
  302. /* 0 is passed to here instead of have_lock_already.
  303. This is to meet Single Unix Spec requirements:
  304. if writers are waiting, pthread_rwlock_tryrdlock
  305. does not acquire a read lock, even if the caller has
  306. one or more read locks already. */
  307. if (rwlock_can_rdlock(rwlock, 0))
  308. {
  309. ++rwlock->__rw_readers;
  310. retval = 0;
  311. }
  312. __pthread_unlock (&rwlock->__rw_lock);
  313. if (retval == 0)
  314. {
  315. if (have_lock_already || out_of_mem)
  316. {
  317. if (existing != NULL)
  318. ++existing->pr_lock_count;
  319. else
  320. ++self->p_untracked_readlock_count;
  321. }
  322. }
  323. return retval;
  324. }
  325. int
  326. pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
  327. {
  328. pthread_descr self = thread_self ();
  329. while(1)
  330. {
  331. __pthread_lock (&rwlock->__rw_lock, self);
  332. if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
  333. {
  334. rwlock->__rw_writer = self;
  335. __pthread_unlock (&rwlock->__rw_lock);
  336. return 0;
  337. }
  338. /* Suspend ourselves, then try again */
  339. enqueue (&rwlock->__rw_write_waiting, self);
  340. __pthread_unlock (&rwlock->__rw_lock);
  341. suspend (self); /* This is not a cancellation point */
  342. }
  343. }
/* Acquire the write lock on RWLOCK, giving up once the absolute
   deadline ABSTIME has passed.  Returns 0 on success, EINVAL for a
   malformed timeout, or ETIMEDOUT.  */
int
pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
  pthread_descr self;
  pthread_extricate_if extr;

  /* Validate the timeout before doing any work. */
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_wr_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  while(1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
	{
	  /* Free: claim ownership, deregister, release the spinlock. */
	  rwlock->__rw_writer = self;
	  __pthread_set_own_extricate_if (self, 0);
	  __pthread_unlock (&rwlock->__rw_lock);
	  return 0;
	}

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
      if (timedsuspend (self, abstime) == 0)
	{
	  int was_on_queue;

	  /* Deadline expired.  If we are still on the wait queue no
	     one is about to wake us, so we can bail out cleanly. */
	  __pthread_lock (&rwlock->__rw_lock, self);
	  was_on_queue = remove_from_queue (&rwlock->__rw_write_waiting, self);
	  __pthread_unlock (&rwlock->__rw_lock);

	  if (was_on_queue)
	    {
	      __pthread_set_own_extricate_if (self, 0);
	      return ETIMEDOUT;
	    }

	  /* We were already dequeued by an unlocker, so a wake-up is
	     in flight and must be consumed before looping again.
	     Eat the outstanding restart() from the signaller */
	  suspend (self);
	}
    }
}
  388. int
  389. pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
  390. {
  391. int result = EBUSY;
  392. __pthread_lock (&rwlock->__rw_lock, NULL);
  393. if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
  394. {
  395. rwlock->__rw_writer = thread_self ();
  396. result = 0;
  397. }
  398. __pthread_unlock (&rwlock->__rw_lock);
  399. return result;
  400. }
/* Release RWLOCK, whether held for reading or writing, and wake the
   appropriate waiter(s).  Returns 0, or EPERM when the caller does not
   hold the lock.  */
int
pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock. */
      if (rwlock->__rw_writer != thread_self ())
	{
	  /* Only the owning writer may unlock. */
	  __pthread_unlock (&rwlock->__rw_lock);
	  return EPERM;
	}
      rwlock->__rw_writer = NULL;

      /* Prefer readers when the lock kind says so and readers are
	 waiting; otherwise hand off to one queued writer.  The
	 dequeue() in the condition also pops that writer. */
      if ((rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
	   && !queue_is_empty(&rwlock->__rw_read_waiting))
	  || (th = dequeue(&rwlock->__rw_write_waiting)) == NULL)
	{
	  /* Restart all waiting readers.  The queue is detached under
	     the spinlock, then drained outside it. */
	  torestart = rwlock->__rw_read_waiting;
	  rwlock->__rw_read_waiting = NULL;
	  __pthread_unlock (&rwlock->__rw_lock);
	  while ((th = dequeue (&torestart)) != NULL)
	    restart (th);
	}
      else
	{
	  /* Restart one waiting writer. */
	  __pthread_unlock (&rwlock->__rw_lock);
	  restart (th);
	}
    }
  else
    {
      /* Unlocking a read lock. */
      if (rwlock->__rw_readers == 0)
	{
	  /* No read lock outstanding: caller cannot hold this lock. */
	  __pthread_unlock (&rwlock->__rw_lock);
	  return EPERM;
	}

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
	/* Restart one waiting writer, if any. */
	th = dequeue (&rwlock->__rw_write_waiting);
      else
	th = NULL;

      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
	restart (th);

      /* Recursive lock fixup */

      /* NOTE(review): __rw_kind is read here after __pthread_unlock,
	 i.e. without the internal lock held — presumably safe because
	 the kind never changes after init, but worth confirming. */
      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
	{
	  pthread_descr self = thread_self();
	  pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

	  if (victim != NULL)
	    {
	      /* Count hit zero: node was unlinked; recycle it onto the
		 thread's free list. */
	      if (victim->pr_lock_count == 0)
		{
		  victim->pr_next = THREAD_GETMEM (self, p_readlock_free);
		  THREAD_SETMEM (self, p_readlock_free, victim);
		}
	    }
	  else
	    {
	      /* No tracked node: this must have been one of the
		 untracked read locks. */
	      int val = THREAD_GETMEM (self, p_untracked_readlock_count);

	      if (val > 0)
		THREAD_SETMEM (self, p_untracked_readlock_count, val - 1);
	    }
	}
    }

  return 0;
}
  474. int
  475. pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
  476. {
  477. attr->__lockkind = 0;
  478. attr->__pshared = PTHREAD_PROCESS_PRIVATE;
  479. return 0;
  480. }
/* Destroy ATTR.  Nothing to release — the attribute object owns no
   resources — so this always returns 0.  */
int
pthread_rwlockattr_destroy (pthread_rwlockattr_t *attr)
{
  return 0;
}
/* Store ATTR's process-shared disposition in *PSHARED.  Always
   returns 0.  */
int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  *pshared = attr->__pshared;
  return 0;
}
  492. int
  493. pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
  494. {
  495. if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
  496. return EINVAL;
  497. /* For now it is not possible to shared a conditional variable. */
  498. if (pshared != PTHREAD_PROCESS_PRIVATE)
  499. return ENOSYS;
  500. attr->__pshared = pshared;
  501. return 0;
  502. }
/* Store ATTR's lock-kind preference (reader/writer priority) in
   *PREF.  Always returns 0.  */
int
pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *attr, int *pref)
{
  *pref = attr->__lockkind;
  return 0;
}
  509. int
  510. pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
  511. {
  512. if (pref != PTHREAD_RWLOCK_PREFER_READER_NP
  513. && pref != PTHREAD_RWLOCK_PREFER_WRITER_NP
  514. && pref != PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
  515. && pref != PTHREAD_RWLOCK_DEFAULT_NP)
  516. return EINVAL;
  517. attr->__lockkind = pref;
  518. return 0;
  519. }