/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
  ({ \
    unsigned int __tmp; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%1\n" \
        "       subf.   %0,%2,%0\n" \
        "       bne     2f\n" \
        "       stwcx.  %3,0,%1\n" \
        "       bne-    1b\n" \
        "2:     " __ARCH_ACQ_INSTR \
        : "=&r" (__tmp) \
        : "b" (mem), "r" (oldval), "r" (newval) \
        : "cr0", "memory"); \
    __tmp != 0; \
  })

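/* Illustrative sketch (not part of this header): the boolean form above
   yields 0 when the exchange succeeded and nonzero when *MEM did not
   contain OLDVAL.  For example, with a hypothetical flag:

     int hypothetical_flag = 0;
     if (__arch_compare_and_exchange_bool_32_acq (&hypothetical_flag, 1, 0)
         == 0)
       ...   we installed 1, with acquire ordering via __ARCH_ACQ_INSTR
*/
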
# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
  ({ \
    unsigned int __tmp; \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
        "1:     lwarx   %0,0,%1\n" \
        "       subf.   %0,%2,%0\n" \
        "       bne     2f\n" \
        "       stwcx.  %3,0,%1\n" \
        "       bne-    1b\n" \
        "2:     " \
        : "=&r" (__tmp) \
        : "b" (mem), "r" (oldval), "r" (newval) \
        : "cr0", "memory"); \
    __tmp != 0; \
  })

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })

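/* Illustrative note (not part of this header): the size-dispatching
   wrappers at the bottom of this file route 8-byte objects to the stubs
   above, so code such as

     int64_t hypothetical_counter = 0;
     atomic_exchange_and_add (&hypothetical_counter, 1);

   compiles on powerpc32 but calls abort () at run time.  */
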
#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync).
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()  __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR      "lwsync"
# endif
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()  __asm__ ("sync" ::: "memory")
#endif

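/* Illustrative sketch (not part of this header): atomic_read_barrier
   keeps a flag load ordered before the dependent data loads that follow
   it, as in the consumer half of a hypothetical flag/data pair:

     extern int hypothetical_data;
     extern volatile int hypothetical_ready;

     if (hypothetical_ready)
       {
         atomic_read_barrier ();         lwsync if _ARCH_PWR4, else sync
         consume (hypothetical_data);    consume () is hypothetical
       }
*/
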
#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR       ""
# define __ARCH_REL_INSTR       ""
#else
# define __ARCH_ACQ_INSTR       "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR      "sync"
# endif
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier()   __asm__ ("sync" ::: "memory")
#define atomic_write_barrier()  __asm__ ("eieio" ::: "memory")

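/* Illustrative sketch (not part of this header): the producer half of
   the hypothetical flag/data pair shown earlier; atomic_write_barrier
   (eieio) orders the data store before the flag store:

     hypothetical_data = 42;
     atomic_write_barrier ();
     hypothetical_ready = 1;
*/
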
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%1\n" \
        "       cmpw    %0,%2\n" \
        "       bne     2f\n" \
        "       stwcx.  %3,0,%1\n" \
        "       bne-    1b\n" \
        "2:     " __ARCH_ACQ_INSTR \
        : "=&r" (__tmp) \
        : "b" (__memp), "r" (oldval), "r" (newval) \
        : "cr0", "memory"); \
    __tmp; \
  })

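/* Illustrative sketch (not part of this header): unlike the boolean
   form, the value form returns the previous contents of *MEM, so the
   caller tests for success by comparing against OLDVAL:

     int hypothetical_lock = 0;
     if (__arch_compare_and_exchange_val_32_acq (&hypothetical_lock, 1, 0)
         == 0)
       ...   old value was 0, so we now own the lock
*/
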
#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
        "1:     lwarx   %0,0,%1\n" \
        "       cmpw    %0,%2\n" \
        "       bne     2f\n" \
        "       stwcx.  %3,0,%1\n" \
        "       bne-    1b\n" \
        "2:     " \
        : "=&r" (__tmp) \
        : "b" (__memp), "r" (oldval), "r" (newval) \
        : "cr0", "memory"); \
    __tmp; \
  })

#define __arch_atomic_exchange_32_acq(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%2\n" \
        "       stwcx.  %3,0,%2\n" \
        "       bne-    1b\n" \
        "       " __ARCH_ACQ_INSTR \
        : "=&r" (__val), "=m" (*mem) \
        : "b" (mem), "r" (value), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_exchange_32_rel(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
        "1:     lwarx   %0,0,%2\n" \
        "       stwcx.  %3,0,%2\n" \
        "       bne-    1b" \
        : "=&r" (__val), "=m" (*mem) \
        : "b" (mem), "r" (value), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%3\n" \
        "       add     %1,%0,%4\n" \
        "       stwcx.  %1,0,%3\n" \
        "       bne-    1b" \
        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
        : "b" (mem), "r" (value), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

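/* Illustrative sketch (not part of this header): the macro above is a
   fetch-and-add, returning the value *MEM held before the addition, so
   e.g. a hypothetical reference count can detect the final release:

     int hypothetical_refs = 1;
     if (__arch_atomic_exchange_and_add_32 (&hypothetical_refs, -1) == 1)
       ...   the count just dropped from 1 to 0
*/
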
#define __arch_atomic_increment_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%2\n" \
        "       addi    %0,%0,1\n" \
        "       stwcx.  %0,0,%2\n" \
        "       bne-    1b" \
        : "=&b" (__val), "=m" (*mem) \
        : "b" (mem), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_decrement_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%2\n" \
        "       subi    %0,%0,1\n" \
        "       stwcx.  %0,0,%2\n" \
        "       bne-    1b" \
        : "=&b" (__val), "=m" (*mem) \
        : "b" (mem), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ \
    int __val, __tmp; \
    __asm__ __volatile__ ( \
        "1:     lwarx   %0,0,%3\n" \
        "       cmpwi   0,%0,0\n" \
        "       addi    %1,%0,-1\n" \
        "       ble     2f\n" \
        "       stwcx.  %1,0,%3\n" \
        "       bne-    1b\n" \
        "2:     " __ARCH_ACQ_INSTR \
        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
        : "b" (mem), "m" (*mem) \
        : "cr0", "memory"); \
    __val; \
  })

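/* Illustrative sketch (not part of this header): the macro above only
   stores the decremented value when the old value was positive, which
   is the core of a counting-semaphore trywait:

     int hypothetical_sem = 4;
     if (__arch_atomic_decrement_if_positive_32 (&hypothetical_sem) > 0)
       ...   acquired one unit
     else
       ...   old value was <= 0; *MEM was left unchanged
*/
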
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_compare_and_exchange_val_32_rel (mem, newval, oldval); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_compare_and_exchange_val_64_rel (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })

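/* Illustrative sketch (not part of this header): a minimal spin lock
   built from the size-dispatching wrappers, taking acquire ordering on
   the way in and release ordering on the way out:

     static int hypothetical_lock;

     while (atomic_compare_and_exchange_val_acq (&hypothetical_lock, 1, 0)
            != 0)
       ;                                  spin until the old value was 0
     ...                                  critical section
     atomic_exchange_rel (&hypothetical_lock, 0);
*/
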
#define atomic_exchange_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_32_acq (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })

#define atomic_exchange_rel(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_32_rel (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_64_rel (mem, value); \
    else \
      abort (); \
    __result; \
  })

#define atomic_exchange_and_add(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_and_add_32 (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_and_add_64 (mem, value); \
    else \
      abort (); \
    __result; \
  })

#define atomic_increment_val(mem) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_increment_val_32 (mem); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_increment_val_64 (mem); \
    else \
      abort (); \
    __result; \
  })

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_decrement_val_32 (mem); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_decrement_val_64 (mem); \
    else \
      abort (); \
    __result; \
  })

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })

/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_decrement_if_positive_32 (mem); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_decrement_if_positive_64 (mem); \
    else \
      abort (); \
    __result; \
  })
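
/* Illustrative sketch (not part of this header): on powerpc32 only the
   4-byte branches of the wrappers above are usable, so int- and
   pointer-sized counters work while 8-byte ones abort:

     unsigned int hypothetical_count;

     atomic_increment (&hypothetical_count);          result discarded
     if (atomic_decrement_val (&hypothetical_count) == 0)
       ...   the new (decremented) value is returned; here it reached 0
*/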