spinlock.h

/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU Library General Public License for more details. */

#include <bits/initspin.h>
/* There are 2 compare and swap synchronization primitives with
   different semantics:

     1. compare_and_swap, which has acquire semantics (i.e. it
        completes before subsequent writes.)

     2. compare_and_swap_with_release_semantics, which has release
        semantics (it completes after previous writes.)

   For those platforms on which they are the same, HAS_COMPARE_AND_SWAP
   should be defined.  For those platforms on which they are different,
   HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS has to be defined. */
#ifndef HAS_COMPARE_AND_SWAP
#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
#define HAS_COMPARE_AND_SWAP
#endif
#endif
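
/* Three configurations follow.  With TEST_FOR_COMPARE_AND_SWAP the choice
   between the hardware CAS and the fallback is made at run time via
   __pthread_has_cas; with HAS_COMPARE_AND_SWAP alone the hardware
   __compare_and_swap is always used; otherwise everything goes through the
   extern __pthread_compare_and_swap, which also takes the spinlock address
   so the operation can be emulated. */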
#if defined(TEST_FOR_COMPARE_AND_SWAP)

extern int __pthread_has_cas;
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                                      int * spinlock);

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
  if (__builtin_expect (__pthread_has_cas, 1))
    return __compare_and_swap(ptr, oldval, newval);
  else
    return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
}
#elif defined(HAS_COMPARE_AND_SWAP)

#ifdef IMPLEMENT_TAS_WITH_CAS
#define testandset(p) !__compare_and_swap((long int *) p, 0, 1)
#endif

#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS

static inline int
compare_and_swap_with_release_semantics (long * ptr, long oldval,
                                         long newval, int * spinlock)
{
  return __compare_and_swap_with_release_semantics (ptr, oldval,
                                                    newval);
}

#endif

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
  return __compare_and_swap(ptr, oldval, newval);
}

#else

extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                                      int * spinlock);

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
  return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
}

#endif
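
/* If the platform does not provide a distinct release-semantics CAS,
   fall back to the plain (acquire) compare and swap for releases too. */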
#ifndef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
#define compare_and_swap_with_release_semantics compare_and_swap
#define __compare_and_swap_with_release_semantics __compare_and_swap
#endif
/* Internal locks */

extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                             pthread_descr self);
extern int __pthread_unlock(struct _pthread_fastlock *lock);

static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
  lock->__status = 0;
  lock->__spinlock = __LT_SPINLOCK_INIT;
}
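
/* Try to take the internal lock without blocking.  Without a hardware CAS
   the spinlock word itself is the lock and testandset decides the outcome;
   with CAS the __status word is atomically moved from 0 (free) to 1 (locked).
   Returns 0 on success, EBUSY if the lock is already held. */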
static inline int __pthread_trylock (struct _pthread_fastlock * lock)
{
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      return (testandset(&lock->__spinlock) ? EBUSY : 0);
    }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    if (lock->__status != 0) return EBUSY;
  } while(! __compare_and_swap(&lock->__status, 0, 1));
  return 0;
#endif
}
/* Variation of internal lock used for pthread_mutex_t, supporting
   timed-out waits.  Warning: do not mix these operations with the above ones
   over the same lock object! */

extern void __pthread_alt_lock(struct _pthread_fastlock * lock,
                               pthread_descr self);
extern int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                                   pthread_descr self,
                                   const struct timespec *abstime);
extern void __pthread_alt_unlock(struct _pthread_fastlock *lock);

static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
{
  lock->__status = 0;
  lock->__spinlock = __LT_SPINLOCK_INIT;
}
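
/* Non-blocking acquire for the alternate lock.  In the testandset case the
   spinlock only guards the inspection and update of __status and is dropped
   again before returning; WRITE_MEMORY_BARRIER makes the new __status value
   visible before the spinlock is released. */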
static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
{
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      int res = EBUSY;

      if (testandset(&lock->__spinlock) == 0)
        {
          if (lock->__status == 0)
            {
              lock->__status = 1;
              WRITE_MEMORY_BARRIER();
              res = 0;
            }
          lock->__spinlock = __LT_SPINLOCK_INIT;
        }
      return res;
    }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    if (lock->__status != 0) return EBUSY;
  } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
  return 0;
#endif
}
/* Operations on pthread_atomic, which is defined in internals.h */

static inline long atomic_increment(struct pthread_atomic *pa)
{
  long oldval;

  do {
    oldval = pa->p_count;
  } while (!compare_and_swap(&pa->p_count, oldval, oldval + 1, &pa->p_spinlock));

  return oldval;
}

static inline long atomic_decrement(struct pthread_atomic *pa)
{
  long oldval;

  do {
    oldval = pa->p_count;
  } while (!compare_and_swap(&pa->p_count, oldval, oldval - 1, &pa->p_spinlock));

  return oldval;
}
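
/* Install or clear this thread's extricate interface, the hook that
   pthread_cancel invokes to restart a waiting thread. */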
static inline void
__pthread_set_own_extricate_if (pthread_descr self, pthread_extricate_if *peif)
{
  /* Only store a non-null peif if the thread has cancellation enabled.
     Otherwise pthread_cancel will unconditionally call the extricate handler,
     and restart the thread, giving rise to forbidden spurious wakeups. */
  if (peif == NULL
      || THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)
    {
      /* If we are removing the extricate interface, we need to synchronize
         against pthread_cancel so that it does not continue with a pointer
         to a deallocated pthread_extricate_if struct!  The thread lock
         is (ab)used for this synchronization purpose. */
      if (peif == NULL)
        __pthread_lock (THREAD_GETMEM(self, p_lock), self);
      THREAD_SETMEM(self, p_extricate, peif);
      if (peif == NULL)
        __pthread_unlock (THREAD_GETMEM(self, p_lock));
    }
}