/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <bits/wordsize.h>

#if __WORDSIZE == 64

/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   In powerpc64 register values are 64-bit by default, including oldval.
   The high 32 bits of oldval may hold unknown sign extension, while lwarx
   loads the 32-bit value as unsigned.  So we explicitly clear the high
   32 bits in oldval.  */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ ("    clrldi %1,%1,32\n" \
                        "1:  lwarx  %0,0,%2\n" \
                        "    subf.  %0,%1,%0\n" \
                        "    bne    2f\n" \
                        "    stwcx. %4,0,%2\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "    clrldi %1,%1,32\n" \
                        "1:  lwarx  %0,0,%2\n" \
                        "    subf.  %0,%1,%0\n" \
                        "    bne    2f\n" \
                        "    stwcx. %4,0,%2\n" \
                        "    bne-   1b\n" \
                        "2:  " \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})
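
/* Usage sketch (illustrative only; __lockword and __try_acquire are
   hypothetical names, not defined by this header): the bool forms return
   zero when the exchange succeeded and nonzero when the word in memory did
   not match oldval, so a try-acquire on a word-sized lock could read:

     static unsigned int __lockword;

     static int
     __try_acquire (void)
     {
       return __arch_compare_and_exchange_bool_32_acq (&__lockword, 1, 0) == 0;
     }
*/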
/*
 * Only powerpc64 processors support Load doubleword and reserve indexed
 * (ldarx) and Store doubleword conditional indexed (stdcx.) instructions.
 * So here we define the 64-bit forms.
 */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ ( \
                        "1:  ldarx  %0,0,%1\n" \
                        "    subf.  %0,%2,%0\n" \
                        "    bne    2f\n" \
                        "    stdcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  ldarx  %0,0,%1\n" \
                        "    subf.  %0,%2,%0\n" \
                        "    bne    2f\n" \
                        "    stdcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1:  ldarx  %0,0,%1\n" \
                        "    cmpd   %0,%2\n" \
                        "    bne    2f\n" \
                        "    stdcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  ldarx  %0,0,%1\n" \
                        "    cmpd   %0,%2\n" \
                        "    bne    2f\n" \
                        "    stdcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})
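
/* Usage sketch (illustrative; __counter64 and the retry loop are
   hypothetical, not part of this header): the _val_ forms return the value
   that was observed in memory, so the exchange succeeded exactly when the
   return value equals oldval.  A retry loop for a 64-bit counter might be:

     static unsigned long __counter64;

     unsigned long __old, __seen;
     do
       {
         __old = __counter64;
         __seen = __arch_compare_and_exchange_val_64_acq (&__counter64,
                                                          __old + 1, __old);
       }
     while (__seen != __old);
*/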
# define __arch_atomic_exchange_64_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  ldarx  %0,0,%2\n" \
                        "    stdcx. %3,0,%2\n" \
                        "    bne-   1b\n" \
                        "    " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_64_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  ldarx  %0,0,%2\n" \
                        "    stdcx. %3,0,%2\n" \
                        "    bne-   1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_and_add_64(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1:  ldarx  %0,0,%3\n" \
                        "    add    %1,%0,%4\n" \
                        "    stdcx. %1,0,%3\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_increment_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1:  ldarx  %0,0,%2\n" \
                        "    addi   %0,%0,1\n" \
                        "    stdcx. %0,0,%2\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1:  ldarx  %0,0,%2\n" \
                        "    subi   %0,%0,1\n" \
                        "    stdcx. %0,0,%2\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_if_positive_64(mem) \
({ \
  int __val, __tmp; \
  __asm__ __volatile__ ("1:  ldarx  %0,0,%3\n" \
                        "    cmpdi  0,%0,0\n" \
                        "    addi   %1,%0,-1\n" \
                        "    ble    2f\n" \
                        "    stdcx. %1,0,%3\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR "lwsync"
# endif

#else

/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
 * (load word and zero the high 32 bits) load.  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ ( \
                        "1:  lwarx  %0,0,%1\n" \
                        "    subf.  %0,%2,%0\n" \
                        "    bne    2f\n" \
                        "    stwcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  lwarx  %0,0,%1\n" \
                        "    subf.  %0,%2,%0\n" \
                        "    bne    2f\n" \
                        "    stwcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })
#ifdef _ARCH_PWR4
/*
 * Newer powerpc processors support the new "light weight" sync (lwsync),
 * so if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR "lwsync"
# endif
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier() __asm__ ("sync" ::: "memory")
#endif
#endif

#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR ""
# define __ARCH_REL_INSTR ""
#else
# define __ARCH_ACQ_INSTR "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR "sync"
# endif
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier() __asm__ ("sync" ::: "memory")
#define atomic_write_barrier() __asm__ ("eieio" ::: "memory")
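
/* Usage sketch (illustrative only; __payload, __ready, compute and use are
   hypothetical names, not defined by this header): atomic_write_barrier
   orders the data store before the flag store on the producer side, and
   atomic_read_barrier orders the flag load before the data load on the
   consumer side:

     producer:
       __payload = compute ();
       atomic_write_barrier ();
       __ready = 1;

     consumer:
       while (__ready == 0)
         ;
       atomic_read_barrier ();
       use (__payload);
*/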
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1:  lwarx  %0,0,%1\n" \
                        "    cmpw   %0,%2\n" \
                        "    bne    2f\n" \
                        "    stwcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  lwarx  %0,0,%1\n" \
                        "    cmpw   %0,%2\n" \
                        "    bne    2f\n" \
                        "    stwcx. %3,0,%1\n" \
                        "    bne-   1b\n" \
                        "2:  " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_atomic_exchange_32_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ ( \
                        "1:  lwarx  %0,0,%2\n" \
                        "    stwcx. %3,0,%2\n" \
                        "    bne-   1b\n" \
                        "    " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_32_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1:  lwarx  %0,0,%2\n" \
                        "    stwcx. %3,0,%2\n" \
                        "    bne-   1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_and_add_32(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1:  lwarx  %0,0,%3\n" \
                        "    add    %1,%0,%4\n" \
                        "    stwcx. %1,0,%3\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_increment_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1:  lwarx  %0,0,%2\n" \
                        "    addi   %0,%0,1\n" \
                        "    stwcx. %0,0,%2\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1:  lwarx  %0,0,%2\n" \
                        "    subi   %0,%0,1\n" \
                        "    stwcx. %0,0,%2\n" \
                        "    bne-   1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_if_positive_32(mem) \
({ \
  int __val, __tmp; \
  __asm__ __volatile__ ("1:  lwarx  %0,0,%3\n" \
                        "    cmpwi  0,%0,0\n" \
                        "    addi   %1,%0,-1\n" \
                        "    ble    2f\n" \
                        "    stwcx. %1,0,%3\n" \
                        "    bne-   1b\n" \
                        "2:  " __ARCH_ACQ_INSTR \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})
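
/* Usage sketch (illustrative; struct node, __list_head and __push are
   hypothetical names, not part of this header): because the macro
   dispatches on sizeof (*mem), pointer-sized objects work on both
   powerpc32 and powerpc64.  A lock-free stack push can retry until the
   head pointer is swung successfully:

     struct node { struct node *next; };
     static struct node *__list_head;

     static void
     __push (struct node *n)
     {
       struct node *old;
       do
         {
           old = __list_head;
           n->next = old;
         }
       while (atomic_compare_and_exchange_val_acq (&__list_head, n, old)
              != old);
     }
*/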
#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_rel (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_rel (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_acq(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_acq (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_acq (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_rel(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_rel (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_rel (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_and_add(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_and_add_32 (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_and_add_64 (mem, value); \
  else \
    abort (); \
  __result; \
})
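
/* Usage sketch (illustrative; __ticket and __refcount are hypothetical
   names): atomic_exchange_and_add returns the value *MEM held before the
   addition, which is what a fetch-and-add user normally wants, e.g. to
   take a ticket or to detect that a reference count was previously zero:

     int my_ticket = atomic_exchange_and_add (&__ticket, 1);
     int was_zero  = (atomic_exchange_and_add (&__refcount, 1) == 0);
*/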
#define atomic_increment_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_increment_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_increment_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_decrement_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_decrement_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })

/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_decrement_if_positive_32 (mem); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_decrement_if_positive_64 (mem); \
  else \
    abort (); \
  __result; \
})
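
/* Usage sketch (illustrative; __sem_value and __sem_trywait are
   hypothetical names): atomic_decrement_if_positive performs the decrement
   only when the old value was positive and always returns that old value,
   so a semaphore-style trywait can be written as:

     static int __sem_value;

     static int
     __sem_trywait (void)
     {
       return atomic_decrement_if_positive (&__sem_value) > 0 ? 0 : -1;
     }
*/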