atomic.h

/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <bits/wordsize.h>

#if __WORDSIZE == 64

/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64 register values are 64-bit by default, including oldval.
   The value in oldval may have been sign extended, but lwarx loads the
   32-bit value as unsigned.  So we explicitly clear the high 32 bits
   in oldval.  */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ ("   clrldi  %1,%1,32\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   subf.   %0,%1,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %4,0,%2\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "   clrldi  %1,%1,32\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   subf.   %0,%1,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %4,0,%2\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})
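
/* A minimal standalone sketch (hypothetical code, not used by the
   library) of the sign-extension hazard the clrldi above guards
   against: a negative 32-bit oldval arrives sign extended in its
   64-bit register, while lwarx zero extends the word it loads, so a
   64-bit subtraction would report a mismatch even when the low 32
   bits agree.  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t oldval = (int64_t) (int32_t) 0x80000001;  /* sign extended: 0xffffffff80000001 */
  uint64_t loaded = (uint32_t) 0x80000001;           /* lwarx zero extends: 0x0000000080000001 */

  /* Without clearing the high bits the two values compare unequal.  */
  printf ("raw compare:  %s\n", loaded - oldval == 0 ? "equal" : "not equal");
  /* clrldi %1,%1,32 keeps only the low 32 bits, so the compare succeeds.  */
  printf ("after clrldi: %s\n", loaded - (oldval & 0xffffffffULL) == 0 ? "equal" : "not equal");
  return 0;
}
#endif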

/*
 * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
 * and Store doubleword conditional indexed (stdcx.) instructions.  So here
 * we define the 64-bit forms.
 */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ ( \
                        "1: ldarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1: ldarx   %0,0,%1\n" \
                        "   cmpd    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%1\n" \
                        "   cmpd    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

# define __arch_atomic_exchange_64_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%2\n" \
                        "   stdcx.  %3,0,%2\n" \
                        "   bne-    1b\n" \
                        "   " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_64_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%2\n" \
                        "   stdcx.  %3,0,%2\n" \
                        "   bne-    1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_and_add_64(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%3\n" \
                        "   add     %1,%0,%4\n" \
                        "   stdcx.  %1,0,%3\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_increment_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%2\n" \
                        "   addi    %0,%0,1\n" \
                        "   stdcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%2\n" \
                        "   subi    %0,%0,1\n" \
                        "   stdcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_if_positive_64(mem) \
({ \
  int __val, __tmp; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%3\n" \
                        "   cmpdi   0,%0,0\n" \
                        "   addi    %1,%0,-1\n" \
                        "   ble     2f\n" \
                        "   stdcx.  %1,0,%3\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR "lwsync"
# endif

#else

/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
 * (load word and zero the high 32 bits) load.  So powerpc64 has a
 * slightly different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })

#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync),
 * so if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR "lwsync"
# endif
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier() __asm__ ("sync" ::: "memory")
#endif

#endif

#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR ""
# define __ARCH_REL_INSTR ""
#else
# define __ARCH_ACQ_INSTR "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR "sync"
# endif
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier() __asm__ ("sync" ::: "memory")
#define atomic_write_barrier() __asm__ ("eieio" ::: "memory")
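
/* Illustrative sketch (hypothetical code, not part of this header) of
   how the write and read barriers defined above are meant to pair up:
   the producer orders its data store before the flag store, and the
   consumer orders the flag load before the data load.  */
#if 0
static int __example_data;
static volatile int __example_ready;

static void
__example_publish (int value)
{
  __example_data = value;
  atomic_write_barrier ();	/* eieio: data store is ordered before the flag store */
  __example_ready = 1;
}

static int
__example_consume (void)
{
  while (__example_ready == 0)
    ;				/* spin until the producer sets the flag */
  atomic_read_barrier ();	/* flag load is ordered before the data load */
  return __example_data;
}
#endif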

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%1\n" \
                        "   cmpw    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%1\n" \
                        "   cmpw    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_atomic_exchange_32_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%2\n" \
                        "   stwcx.  %3,0,%2\n" \
                        "   bne-    1b\n" \
                        "   " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_32_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   stwcx.  %3,0,%2\n" \
                        "   bne-    1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_and_add_32(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%3\n" \
                        "   add     %1,%0,%4\n" \
                        "   stwcx.  %1,0,%3\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_increment_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%2\n" \
                        "   addi    %0,%0,1\n" \
                        "   stwcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%2\n" \
                        "   subi    %0,%0,1\n" \
                        "   stwcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_if_positive_32(mem) \
({ \
  int __val, __tmp; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%3\n" \
                        "   cmpwi   0,%0,0\n" \
                        "   addi    %1,%0,-1\n" \
                        "   ble     2f\n" \
                        "   stwcx.  %1,0,%3\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_rel (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_rel (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_acq(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_acq (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_acq (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_rel(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_rel (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_rel (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_and_add(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_and_add_32 (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_and_add_64 (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_increment_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_increment_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_increment_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_decrement_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_decrement_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })

/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_decrement_if_positive_32 (mem); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_decrement_if_positive_64 (mem); \
  else \
    abort (); \
  __result; \
})
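
/* Illustrative sketch (hypothetical code, not part of this header) of
   the size-dispatching macros above: the same generic macro works on
   32-bit objects (and, on powerpc64, 64-bit objects), selecting the
   matching __arch_* implementation from sizeof (*mem).  */
#if 0
static volatile int __example_counter;
static volatile int __example_lock;

static void
__example (void)
{
  /* Fetch-and-add: returns the value *before* the addition.  */
  int __old = atomic_exchange_and_add (&__example_counter, 1);

  /* Compare-and-swap with acquire semantics: store 1 only if the lock
     currently holds 0; the macro returns the previous contents.  */
  if (atomic_compare_and_exchange_val_acq (&__example_lock, 1, 0) == 0)
    {
      /* ... critical section ... */
      atomic_exchange_rel (&__example_lock, 0);	/* release the lock */
    }

  (void) __old;
}
#endif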