
arc: add acq/rel variants for atomic cmpxchg/xchg

Add acquire/release variants of the atomic cmpxchg/xchg functions and
provide a memory barrier after (acquire) or before (release) the
exchange. For cmpxchg, use compiler builtins; for the xchg functions,
add the memory barrier explicitly.

These barriers are required to maintain memory consistency across
ARCv3 CPU cores in SMP systems.
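
To illustrate why the ordering matters, here is a toy lock built on
the macros this commit adds (a minimal sketch, not part of the patch;
toy_lock/toy_unlock are hypothetical names):

/* 0 = unlocked, 1 = locked. */
static void toy_lock(int *l)
{
	/* Acquire: the barrier after the exchange keeps the critical
	   section's accesses from moving before we own the lock. */
	while (atomic_compare_and_exchange_val_acq(l, 1, 0) != 0)
		/* spin */;
}

static void toy_unlock(int *l)
{
	/* Release: the barrier before the exchange keeps the critical
	   section's accesses from moving past the unlock. */
	atomic_exchange_rel(l, 0);
}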

For ARC700 the barriers are not required, and the compiler doesn't
provide the __atomic_compare_exchange* builtins, so keep the existing
asm implementation without acquire/release variants for ARC700.
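
For reference, the builtin used on non-ARC700 cores is
__atomic_compare_exchange_n(ptr, expected, desired, weak,
success_order, failure_order); on failure it writes the observed value
back into *expected, which is why the new macros can return __oldval.
A standalone sketch of what the val_acq macro boils down to
(toy_cmpxchg_acq is a hypothetical name):

/* Returns the value observed in *mem; it equals 'oldval' iff the
   exchange succeeded. */
static inline int toy_cmpxchg_acq(int *mem, int newval, int oldval)
{
	int expected = oldval;
	__atomic_compare_exchange_n(mem, &expected, newval,
				    0 /* strong */,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	return expected;
}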

Signed-off-by: Pavel Kozlov <pavel.kozlov@synopsys.com>
Pavel Kozlov, 6 months ago
Commit dfa72192f1
1 changed file with 74 additions and 2 deletions:
libc/sysdeps/linux/arc/bits/atomic.h

@@ -26,8 +26,10 @@ void __arc_link_error (void);
 
 #ifdef __A7__
 #define atomic_full_barrier() __asm__ __volatile__("": : :"memory")
+#define ARC_BARRIER_INSTR 	""
 #else
 #define atomic_full_barrier() __asm__ __volatile__("dmb 3": : :"memory")
+#define ARC_BARRIER_INSTR 	"dmb 3"
 #endif
 
 /* Atomic compare and exchange. */
@@ -38,11 +40,12 @@ void __arc_link_error (void);
 #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
   ({ __arc_link_error (); oldval; })
 
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval)	\
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   ({ __arc_link_error (); oldval; })
 
 #ifdef __CONFIG_ARC_HAS_ATOMICS__
 
+#ifdef __A7__
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)     \
   ({									\
 	__typeof(oldval) prev;						\
@@ -60,8 +63,55 @@ void __arc_link_error (void);
 									\
 	prev;								\
   })
+#else /* !__A7__ */
+#define USE_ATOMIC_COMPILER_BUILTINS 1
 
-#else
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)	\
+  ({									\
+    __typeof(*mem) __oldval = (oldval);					\
+    __atomic_compare_exchange_n(mem, (void *) &__oldval, newval, 0,	\
+                                 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);	\
+    __oldval;								\
+  })
+
+#define __arch_compare_and_exchange_val_8_rel(mem, newval, oldval) \
+  ({ __arc_link_error (); oldval; })
+
+#define __arch_compare_and_exchange_val_16_rel(mem, newval, oldval) \
+  ({ __arc_link_error (); oldval; })
+
+#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+  ({ __arc_link_error (); oldval; })
+
+#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval)	\
+  ({									\
+    __typeof(*mem) __oldval = (oldval);					\
+    __atomic_compare_exchange_n(mem, (void *) &__oldval, newval, 0,	\
+                                 __ATOMIC_RELEASE, __ATOMIC_RELAXED);	\
+    __oldval;								\
+  })
+
+/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */
+#define atomic_compare_and_exchange_val_acq(mem, new, old)		\
+  __atomic_val_bysize(__arch_compare_and_exchange_val, acq,		\
+                       mem, new, old)
+
+/* Compare and exchange with "release" semantics, i.e. barrier before.  */
+#define atomic_compare_and_exchange_val_rel(mem, new, old)		\
+  __atomic_val_bysize(__arch_compare_and_exchange_val, rel,		\
+                       mem, new, old)
+
+/* Explicitly define here to use release semantics.  */
+#define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+  ({									\
+     __typeof (oldval) __atg3_old = (oldval);				\
+     atomic_compare_and_exchange_val_rel (mem, newval, __atg3_old)	\
+       != __atg3_old;							\
+  })
+
+#endif /* __A7__ */
+
+#else /* !__CONFIG_ARC_HAS_ATOMICS__ */
 
 #ifndef __NR_arc_usr_cmpxchg
 #error "__NR_arc_usr_cmpxchg missing: Please upgrade to kernel 4.9+ headers"
@@ -101,6 +151,21 @@ void __arc_link_error (void);
 	__typeof__(*(mem)) val = newval;				\
 									\
 	__asm__ __volatile__(						\
+	"ex %0, [%1]\n"							\
+	ARC_BARRIER_INSTR						\
+	: "+r" (val)							\
+	: "r" (mem)							\
+	: "memory" );							\
+									\
+	val;								\
+  })
+
+#define __arch_exchange_32_rel(mem, newval)				\
+  ({									\
+	__typeof__(*(mem)) val = newval;				\
+									\
+	__asm__ __volatile__(						\
+	ARC_BARRIER_INSTR"\n"						\
 	"ex %0, [%1]"							\
 	: "+r" (val)							\
 	: "r" (mem)							\
@@ -115,3 +180,10 @@ void __arc_link_error (void);
 		abort();						\
 	__arch_exchange_32_acq(mem, newval);				\
   })
+
+#define atomic_exchange_rel(mem, newval)				\
+  ({									\
+	if (sizeof(*(mem)) != 4)					\
+		abort();						\
+	__arch_exchange_32_rel(mem, newval);				\
+  })
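
Usage note: the new bool_rel variant returns 0 on a successful
exchange. An illustrative caller (hypothetical toy_try_release),
handing ownership back with release semantics:

/* Returns 0 if *owner was 'me' and has been reset to 0 (free); the
   release barrier makes our prior writes visible to the next owner. */
static int toy_try_release(int *owner, int me)
{
	return atomic_compare_and_exchange_bool_rel(owner, 0, me);
}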