atomic.h

/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <bits/wordsize.h>

#if __WORDSIZE == 64

/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   In powerpc64 register values are 64-bit by default, including oldval.
   The high 32 bits of oldval may hold a stale sign extension, while lwarx
   loads the 32-bit value zero-extended, so we explicitly clear the high
   32 bits in oldval before the compare.  */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ ("   clrldi  %1,%1,32\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   subf.   %0,%1,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %4,0,%2\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "   clrldi  %1,%1,32\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   subf.   %0,%1,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %4,0,%2\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp), "=r" (__tmp2) \
                        : "b" (mem), "1" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})
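/* Illustrative sketch (not part of the original header): why the leading
   clrldi matters.  Without it, a sign-extended 32-bit oldval such as -1
   occupies a full 64-bit register (0xffffffffffffffff) while lwarx loads
   the memory word zero-extended (0x00000000ffffffff), so subf. would
   report a spurious mismatch.  The function name below is hypothetical.  */
#if 0
static int
__example_clear_flag (int *flag)
{
  /* With *flag == -1, this returns 0 ("not different", exchange done)
     only because the macro first clears the high 32 bits of oldval.  */
  return __arch_compare_and_exchange_bool_32_acq (flag, 0, -1);
}
#endif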
/*
 * Only powerpc64 processors support Load doubleword and reserve indexed
 * (ldarx) and Store doubleword conditional indexed (stdcx.) instructions.
 * So here we define the 64-bit forms.
 */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ ( \
                        "1: ldarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1: ldarx   %0,0,%1\n" \
                        "   cmpd    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%1\n" \
                        "   cmpd    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stdcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

# define __arch_atomic_exchange_64_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%2\n" \
                        "   stdcx.  %3,0,%2\n" \
                        "   bne-    1b\n" \
                        "  " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_64_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: ldarx   %0,0,%2\n" \
                        "   stdcx.  %3,0,%2\n" \
                        "   bne-    1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_exchange_and_add_64(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%3\n" \
                        "   add     %1,%0,%4\n" \
                        "   stdcx.  %1,0,%3\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_increment_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%2\n" \
                        "   addi    %0,%0,1\n" \
                        "   stdcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_val_64(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: ldarx   %0,0,%2\n" \
                        "   subi    %0,%0,1\n" \
                        "   stdcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

# define __arch_atomic_decrement_if_positive_64(mem) \
({ int __val, __tmp; \
   __asm__ __volatile__ ("1: ldarx   %0,0,%3\n" \
                         "   cmpdi   0,%0,0\n" \
                         "   addi    %1,%0,-1\n" \
                         "   ble     2f\n" \
                         "   stdcx.  %1,0,%3\n" \
                         "   bne-    1b\n" \
                         "2: " __ARCH_ACQ_INSTR \
                         : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                         : "b" (mem), "m" (*mem) \
                         : "cr0", "memory"); \
   __val; \
})
/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")

/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR "lwsync"
# endif

#else
/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
 * (a load word and zero (high 32) form).  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%1\n" \
                        "   subf.   %0,%2,%0\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (mem), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp != 0; \
})
/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier() __asm__ ("sync" ::: "memory")

#endif
#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)
#ifdef UP
# define __ARCH_ACQ_INSTR ""
# define __ARCH_REL_INSTR ""
#else
# define __ARCH_ACQ_INSTR "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR "sync"
# endif
#endif
#define atomic_full_barrier()  __asm__ ("sync" ::: "memory")
#define atomic_write_barrier() __asm__ ("eieio" ::: "memory")
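/* Illustrative sketch (not part of the original header): how these barriers
   pair up in a simple publish/consume pattern.  The variables and function
   names below are hypothetical.  */
#if 0
extern int __example_data;
extern volatile int __example_ready;

static void
__example_publish (void)
{
  __example_data = 42;
  atomic_write_barrier ();   /* eieio: order the data store before the flag store.  */
  __example_ready = 1;
}

static int
__example_consume (void)
{
  while (__example_ready == 0)
    ;
  atomic_read_barrier ();    /* Order the flag load before the data load.  */
  return __example_data;
}
#endif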
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%1\n" \
                        "   cmpw    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " __ARCH_ACQ_INSTR \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%1\n" \
                        "   cmpw    %0,%2\n" \
                        "   bne     2f\n" \
                        "   stwcx.  %3,0,%1\n" \
                        "   bne-    1b\n" \
                        "2: " \
                        : "=&r" (__tmp) \
                        : "b" (__memp), "r" (oldval), "r" (newval) \
                        : "cr0", "memory"); \
  __tmp; \
})
#define __arch_atomic_exchange_32_acq(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ ( \
                        "1: lwarx   %0,0,%2\n" \
                        "   stwcx.  %3,0,%2\n" \
                        "   bne-    1b\n" \
                        "  " __ARCH_ACQ_INSTR \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_32_rel(mem, value) \
({ \
  __typeof (*mem) __val; \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \
                        "1: lwarx   %0,0,%2\n" \
                        "   stwcx.  %3,0,%2\n" \
                        "   bne-    1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_exchange_and_add_32(mem, value) \
({ \
  __typeof (*mem) __val, __tmp; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%3\n" \
                        "   add     %1,%0,%4\n" \
                        "   stwcx.  %1,0,%3\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                        : "b" (mem), "r" (value), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})
#define __arch_atomic_increment_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%2\n" \
                        "   addi    %0,%0,1\n" \
                        "   stwcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_val_32(mem) \
({ \
  __typeof (*(mem)) __val; \
  __asm__ __volatile__ ("1: lwarx   %0,0,%2\n" \
                        "   subi    %0,%0,1\n" \
                        "   stwcx.  %0,0,%2\n" \
                        "   bne-    1b" \
                        : "=&b" (__val), "=m" (*mem) \
                        : "b" (mem), "m" (*mem) \
                        : "cr0", "memory"); \
  __val; \
})

#define __arch_atomic_decrement_if_positive_32(mem) \
({ int __val, __tmp; \
   __asm__ __volatile__ ("1: lwarx   %0,0,%3\n" \
                         "   cmpwi   0,%0,0\n" \
                         "   addi    %1,%0,-1\n" \
                         "   ble     2f\n" \
                         "   stwcx.  %1,0,%3\n" \
                         "   bne-    1b\n" \
                         "2: " __ARCH_ACQ_INSTR \
                         : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                         : "b" (mem), "m" (*mem) \
                         : "cr0", "memory"); \
   __val; \
})
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})
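/* Illustrative sketch (not part of the original header): a typical CAS loop
   built on atomic_compare_and_exchange_val_acq, which returns the value
   *MEM held before the operation (equal to oldval when the exchange
   succeeded).  The function name is hypothetical.  */
#if 0
static int
__example_fetch_max (int *mem, int val)
{
  int old = *mem;
  while (old < val)
    {
      int prev = atomic_compare_and_exchange_val_acq (mem, val, old);
      if (prev == old)
        break;        /* The exchange took effect.  */
      old = prev;     /* Someone else updated *mem; retry with the new value.  */
    }
  return old;
}
#endif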
#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_rel (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_rel (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})
#define atomic_exchange_acq(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_acq (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_acq (mem, value); \
  else \
    abort (); \
  __result; \
})

#define atomic_exchange_rel(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_32_rel (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_64_rel (mem, value); \
  else \
    abort (); \
  __result; \
})
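/* Illustrative sketch (not part of the original header): a test-and-set
   spinlock built on the acquire form for lock and the release form for
   unlock.  The function names are hypothetical.  */
#if 0
static void
__example_lock (int *lock)
{
  /* atomic_exchange_acq returns the previous value; 0 means we took the lock.  */
  while (atomic_exchange_acq (lock, 1) != 0)
    ;
}

static void
__example_unlock (int *lock)
{
  atomic_exchange_rel (lock, 0);
}
#endif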
#define atomic_exchange_and_add(mem, value) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_atomic_exchange_and_add_32 (mem, value); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_atomic_exchange_and_add_64 (mem, value); \
  else \
    abort (); \
  __result; \
})
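/* Illustrative sketch (not part of the original header):
   atomic_exchange_and_add is a fetch-and-add, so it returns the value *MEM
   held before the addition.  The function name is hypothetical.  */
#if 0
static unsigned int
__example_take_ticket (unsigned int *next_ticket)
{
  /* Each caller gets a distinct ticket number.  */
  return atomic_exchange_and_add (next_ticket, 1);
}
#endif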
#define atomic_increment_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_increment_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_increment_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*(mem)) == 4) \
    __result = __arch_atomic_decrement_val_32 (mem); \
  else if (sizeof (*(mem)) == 8) \
    __result = __arch_atomic_decrement_val_64 (mem); \
  else \
    abort (); \
  __result; \
})

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
({ __typeof (*(mem)) __result; \
   if (sizeof (*mem) == 4) \
     __result = __arch_atomic_decrement_if_positive_32 (mem); \
   else if (sizeof (*mem) == 8) \
     __result = __arch_atomic_decrement_if_positive_64 (mem); \
   else \
     abort (); \
   __result; \
})
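/* Illustrative sketch (not part of the original header): a non-blocking
   semaphore "trywait" built on atomic_decrement_if_positive, which only
   performs the decrement when the old value was positive.  The function
   name is hypothetical.  */
#if 0
static int
__example_sem_trywait (int *count)
{
  /* Returns 0 when a unit was taken, -1 when the count was already 0.  */
  return atomic_decrement_if_positive (count) > 0 ? 0 : -1;
}
#endif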