/* Copyright (C) 2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _BITS_ATOMIC_H
#define _BITS_ATOMIC_H 1

#include <bits/xtensa-config.h>
#include <inttypes.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
/* Xtensa has only a 32-bit form of a store-conditional instruction,
   so the 8- and 16-bit compare-and-exchange operations always abort.  */

#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)
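
/* Illustrative sketch: a sub-word CAS could be emulated on top of the
   32-bit CAS by operating on the aligned word that contains the byte.
   __emulated_cas_bool_8 is a hypothetical helper (it assumes a
   little-endian core; a big-endian core would flip the shift), shown
   only to record the idea:

     static int
     __emulated_cas_bool_8 (volatile uint8_t *mem, uint8_t newval,
                            uint8_t oldval)
     {
       volatile uint32_t *word = (volatile uint32_t *)
         ((uintptr_t) mem & ~(uintptr_t) 3);
       int shift = ((uintptr_t) mem & 3) * 8;
       uint32_t mask = (uint32_t) 0xff << shift;
       uint32_t old_word = *word;

       if ((uint8_t) ((old_word & mask) >> shift) != oldval)
         return 1;

       return __arch_compare_and_exchange_bool_32_acq
         (word, (old_word & ~mask) | ((uint32_t) newval << shift),
          old_word);
     }

   As with the macros below, the result is zero iff the store went
   through; a concurrent change to any other byte of the word makes the
   inner CAS fail, so a real implementation would retry.  */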
#if XCHAL_HAVE_EXCLUSIVE
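
/* The loops below follow the usual load-exclusive/store-exclusive
   pattern.  Roughly (a sketch of the instruction semantics):

     l32ex  a, mem     a = *mem, and start monitoring mem
     s32ex  a, mem     store a to *mem only if the monitor still holds
     getex  a          a = 1 if the last s32ex stored, 0 otherwise

   Each sequence retries from label 1 until GETEX reports success; the
   surrounding MEMW instructions provide the memory ordering.  */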
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       bne    %0, %4, 2f  \n" \
       "       mov    %1, %3      \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       memw               \n" \
       "2:                        \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem), "a" (newval), "a" (oldval) \
       : "memory"); \
     __value; \
  })

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */

#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       sub    %0, %4, %0  \n" \
       "       bnez   %0, 2f      \n" \
       "       mov    %1, %3      \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       movi   %0, 0       \n" \
       "       memw               \n" \
       "2:                        \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem), "a" (newval), "a" (oldval) \
       : "memory"); \
     __value != 0; \
  })

/* Store NEWVALUE in *MEM and return the old value.  */

#define __arch_exchange_32_acq(mem, newval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       mov    %1, %3      \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       memw               \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem), "a" (newval) \
       : "memory"); \
     __value; \
  })
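
/* Illustrative sketch: this exchange is enough to build a simple
   test-and-set spinlock (0 means free, 1 means held).  The __demo_*
   names are hypothetical:

     typedef volatile int32_t __demo_spinlock_t;

     static void
     __demo_spin_lock (__demo_spinlock_t *lock)
     {
       while (__arch_exchange_32_acq (lock, 1) != 0)
         continue;
     }

     static void
     __demo_spin_unlock (__demo_spinlock_t *lock)
     {
       *lock = 0;
     }

   The MEMW barriers inside the macro give the lock acquire semantics;
   the plain store in __demo_spin_unlock would still need its own MEMW
   to get release semantics.  */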
/* Add VALUE to *MEM and return the old value of *MEM.  */

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       add    %1, %0, %3  \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       memw               \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem), "a" (value) \
       : "memory"); \
     __value; \
  })

/* Subtract VALUE from *MEM and return the old value of *MEM.  */

#define __arch_atomic_exchange_and_sub_32(mem, value) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       sub    %1, %0, %3  \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       memw               \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem), "a" (value) \
       : "memory"); \
     /* After GETEX, __tmp holds the store status; the old value is in __value.  */ \
     __value; \
  })

/* Decrement *MEM if it is > 0, and return the old value.  */

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "       memw               \n" \
       "1:     l32ex  %0, %2      \n" \
       "       blti   %0, 1, 2f   \n" \
       "       addi   %1, %0, -1  \n" \
       "       s32ex  %1, %2      \n" \
       "       getex  %1          \n" \
       "       beqz   %1, 1b      \n" \
       "       memw               \n" \
       "2:                        \n" \
       : "=&a" (__value), "=&a" (__tmp) \
       : "a" (mem) \
       : "memory"); \
     __value; \
  })
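
/* Illustrative sketch: decrement-if-positive is the natural primitive
   for a non-blocking semaphore "trywait".  __demo_sem_trywait is
   hypothetical:

     static int
     __demo_sem_trywait (volatile int32_t *count)
     {
       return __arch_atomic_decrement_if_positive_32 (count) > 0
              ? 0 : -1;
     }

   The old value is returned, so a positive result means this caller
   performed the decrement; zero or below means the semaphore was
   unavailable and *count was left untouched.  */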
#elif XCHAL_HAVE_S32C1I
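
/* Roughly, S32C1I behaves as follows (a sketch), with the expected old
   value placed in the SCOMPARE1 special register via WSR:

     s32c1i  a, mem    atomically: tmp = *mem;
                                   if (tmp == SCOMPARE1) *mem = a;
                                   a = tmp;

   Register A always receives the previous *mem value, and the store
   happened iff that value equals SCOMPARE1, which is what the
   "bne %0, %1, 1b" retry tests in the loops below.  */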
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %1, %2         \n" \
       "       bne     %1, %4, 2f     \n" \
       "       wsr     %1, SCOMPARE1  \n" \
       "       mov     %0, %1         \n" \
       "       mov     %1, %3         \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       "2:                            \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       : "a" (newval), "a" (oldval) \
       : "memory"); \
     __tmp; \
  })

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */

#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %0, %2         \n" \
       "       sub     %1, %4, %0     \n" \
       "       bnez    %1, 2f         \n" \
       "       wsr     %0, SCOMPARE1  \n" \
       "       mov     %1, %3         \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       "       movi    %1, 0          \n" \
       "2:                            \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       : "a" (newval), "a" (oldval) \
       : "memory"); \
     __tmp != 0; \
  })

/* Store NEWVALUE in *MEM and return the old value.  */

#define __arch_exchange_32_acq(mem, newval) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %0, %2         \n" \
       "       wsr     %0, SCOMPARE1  \n" \
       "       mov     %1, %3         \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       : "a" (newval) \
       : "memory"); \
     __tmp; \
  })

/* Add VALUE to *MEM and return the old value of *MEM.  */

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %0, %2         \n" \
       "       wsr     %0, SCOMPARE1  \n" \
       "       add     %1, %0, %3     \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       : "a" (value) \
       : "memory"); \
     __tmp; \
  })

/* Subtract VALUE from *MEM and return the old value of *MEM.  */

#define __arch_atomic_exchange_and_sub_32(mem, value) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %0, %2         \n" \
       "       wsr     %0, SCOMPARE1  \n" \
       "       sub     %1, %0, %3     \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       : "a" (value) \
       : "memory"); \
     __tmp; \
  })

/* Decrement *MEM if it is > 0, and return the old value.  */

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ __typeof__ (*(mem)) __tmp, __value; \
     __asm__ __volatile__ ( \
       "1:     l32i    %0, %2         \n" \
       "       blti    %0, 1, 2f      \n" \
       "       wsr     %0, SCOMPARE1  \n" \
       "       addi    %1, %0, -1     \n" \
       "       s32c1i  %1, %2         \n" \
       "       bne     %0, %1, 1b     \n" \
       "2:                            \n" \
       : "=&a" (__value), "=&a" (__tmp), "+m" (*(mem)) \
       :: "memory"); \
     __value; \
  })

#else

#error No hardware atomic operations

#endif
/* These are the preferred public interfaces: */

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ \
    if (sizeof (*(mem)) != 4) \
      abort (); \
    __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  })

#define atomic_exchange_acq(mem, newval) \
  ({ \
    if (sizeof (*(mem)) != 4) \
      abort (); \
    __arch_exchange_32_acq (mem, newval); \
  })

#define atomic_exchange_and_add(mem, value) \
  ({ \
    if (sizeof (*(mem)) != 4) \
      abort (); \
    __arch_atomic_exchange_and_add_32 (mem, value); \
  })

#define atomic_exchange_and_sub(mem, value) \
  ({ \
    if (sizeof (*(mem)) != 4) \
      abort (); \
    __arch_atomic_exchange_and_sub_32 (mem, value); \
  })

#define atomic_decrement_if_positive(mem) \
  ({ \
    if (sizeof (*(mem)) != 4) \
      abort (); \
    __arch_atomic_decrement_if_positive_32 (mem); \
  })
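
/* Usage sketch: the public macros above are enough for, e.g., a simple
   reference count.  __demo_release and __demo_destroy are hypothetical:

     static void
     __demo_release (volatile int32_t *refcount)
     {
       if (atomic_exchange_and_sub (refcount, 1) == 1)
         __demo_destroy ();
     }

   atomic_exchange_and_sub returns the value before the subtraction, so
   a result of 1 means this call dropped the last reference.  */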
/* The 64-bit operations are not supported; any use aborts at run time.  */

#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*(mem))) 0)
#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)
#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*(mem))) 0)
#define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*(mem)) = (value); })
#define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*(mem)) = (value); })
#define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*(mem)) = (value); })
#define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*(mem))++; })
#define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*(mem))--; })
#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*(mem))--; })

#endif /* _BITS_ATOMIC_H */