/* pthread_barrier_wait.S */
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
  15. #include <sysdep.h>
  16. #include <lowlevellock.h>
  17. #include <lowlevelbarrier.h>
  18. .text
  19. .globl pthread_barrier_wait
  20. .type pthread_barrier_wait,@function
  21. .align 16
  22. pthread_barrier_wait:
  23. cfi_startproc
  24. pushl %ebx
  25. cfi_adjust_cfa_offset(4)
  26. cfi_offset(%ebx, -8)
  27. movl 8(%esp), %ebx
  28. /* Get the mutex. */
  29. movl $1, %edx
  30. xorl %eax, %eax
  31. LOCK
  32. cmpxchgl %edx, MUTEX(%ebx)
  33. jnz 1f
  34. /* One less waiter. If this was the last one needed wake
  35. everybody. */
  36. 2: subl $1, LEFT(%ebx)
  37. je 3f
  38. /* There are more threads to come. */
  39. pushl %esi
  40. cfi_adjust_cfa_offset(4)
  41. cfi_offset(%esi, -12)
  42. #if CURR_EVENT == 0
  43. movl (%ebx), %edx
  44. #else
  45. movl CURR_EVENT(%ebx), %edx
  46. #endif
  47. /* Release the mutex. */
  48. LOCK
  49. subl $1, MUTEX(%ebx)
  50. jne 6f
  51. /* Wait for the remaining threads. The call will return immediately
  52. if the CURR_EVENT memory has meanwhile been changed. */
  53. 7:
  54. #if FUTEX_WAIT == 0
  55. movl PRIVATE(%ebx), %ecx
  56. #else
  57. movl $FUTEX_WAIT, %ecx
  58. orl PRIVATE(%ebx), %ecx
  59. #endif
  60. xorl %esi, %esi
  61. 8: movl $SYS_futex, %eax
  62. ENTER_KERNEL
  63. /* Don't return on spurious wakeups. The syscall does not change
  64. any register except %eax so there is no need to reload any of
  65. them. */
  66. #if CURR_EVENT == 0
  67. cmpl %edx, (%ebx)
  68. #else
  69. cmpl %edx, CURR_EVENT(%ebx)
  70. #endif
  71. je 8b
  72. /* Increment LEFT. If this brings the count back to the
  73. initial count unlock the object. */
  74. movl $1, %edx
  75. movl INIT_COUNT(%ebx), %ecx
  76. LOCK
  77. xaddl %edx, LEFT(%ebx)
  78. subl $1, %ecx
  79. cmpl %ecx, %edx
  80. jne 10f
  81. /* Release the mutex. We cannot release the lock before
  82. waking the waiting threads since otherwise a new thread might
  83. arrive and gets waken up, too. */
  84. LOCK
  85. subl $1, MUTEX(%ebx)
  86. jne 9f
  87. /* Note: %esi is still zero. */
  88. 10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
  89. popl %esi
  90. cfi_adjust_cfa_offset(-4)
  91. cfi_restore(%esi)
  92. popl %ebx
  93. cfi_adjust_cfa_offset(-4)
  94. cfi_restore(%ebx)
  95. ret
  96. cfi_adjust_cfa_offset(4)
  97. cfi_offset(%ebx, -8)
  98. /* The necessary number of threads arrived. */
  99. 3:
  100. #if CURR_EVENT == 0
  101. addl $1, (%ebx)
  102. #else
  103. addl $1, CURR_EVENT(%ebx)
  104. #endif
  105. /* Wake up all waiters. The count is a signed number in the kernel
  106. so 0x7fffffff is the highest value. */
  107. movl $0x7fffffff, %edx
  108. movl $FUTEX_WAKE, %ecx
  109. orl PRIVATE(%ebx), %ecx
  110. movl $SYS_futex, %eax
  111. ENTER_KERNEL
  112. /* Increment LEFT. If this brings the count back to the
  113. initial count unlock the object. */
  114. movl $1, %edx
  115. movl INIT_COUNT(%ebx), %ecx
  116. LOCK
  117. xaddl %edx, LEFT(%ebx)
  118. subl $1, %ecx
  119. cmpl %ecx, %edx
  120. jne 5f
  121. /* Release the mutex. We cannot release the lock before
  122. waking the waiting threads since otherwise a new thread might
  123. arrive and gets waken up, too. */
  124. LOCK
  125. subl $1, MUTEX(%ebx)
  126. jne 4f
  127. 5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
  128. popl %ebx
  129. cfi_adjust_cfa_offset(-4)
  130. cfi_restore(%ebx)
  131. ret
  132. cfi_adjust_cfa_offset(4)
  133. cfi_offset(%ebx, -8)
  134. 1: movl PRIVATE(%ebx), %ecx
  135. leal MUTEX(%ebx), %edx
  136. xorl $LLL_SHARED, %ecx
  137. call __lll_lock_wait
  138. jmp 2b
  139. 4: movl PRIVATE(%ebx), %ecx
  140. leal MUTEX(%ebx), %eax
  141. xorl $LLL_SHARED, %ecx
  142. call __lll_unlock_wake
  143. jmp 5b
  144. cfi_adjust_cfa_offset(4)
  145. cfi_offset(%esi, -12)
  146. 6: movl PRIVATE(%ebx), %ecx
  147. leal MUTEX(%ebx), %eax
  148. xorl $LLL_SHARED, %ecx
  149. call __lll_unlock_wake
  150. jmp 7b
  151. 9: movl PRIVATE(%ebx), %ecx
  152. leal MUTEX(%ebx), %eax
  153. xorl $LLL_SHARED, %ecx
  154. call __lll_unlock_wake
  155. jmp 10b
  156. cfi_endproc
  157. .size pthread_barrier_wait,.-pthread_barrier_wait