This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH 3/3] sysdeps/arm/bits/atomic.h: Use relaxed atomics for catomic_*
- From: Will Newton <will dot newton at linaro dot org>
- To: libc-alpha at sourceware dot org
- Date: Fri, 3 Oct 2014 16:11:26 +0100
- Subject: [PATCH 3/3] sysdeps/arm/bits/atomic.h: Use relaxed atomics for catomic_*
- Authentication-results: sourceware.org; auth=none
- References: <1412349086-11473-1-git-send-email-will dot newton at linaro dot org>
Using the relaxed memory model for atomics when single-threaded allows
a reduction in the number of barriers (dmb) executed and an improvement in
single-threaded performance on the malloc benchtest:
Before: 259.073
After: 246.749
ChangeLog:
2014-10-03 Will Newton <will.newton@linaro.org>
* sysdeps/arm/bits/atomic.h [__GNUC_PREREQ (4, 7) &&
__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4]
(__atomic_is_single_thread): New define.
(atomic_exchange_and_add_relaxed): Likewise.
(catomic_exchange_and_add): Use relaxed memory model
if single-threaded.
(atomic_and_relaxed): New define.
(catomic_and): Use relaxed memory model
if single-threaded.
(atomic_or_relaxed): New define.
(catomic_or): Use relaxed memory model
if single-threaded.
---
sysdeps/arm/bits/atomic.h | 55 ++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/sysdeps/arm/bits/atomic.h b/sysdeps/arm/bits/atomic.h
index be314e4..0fbd82b 100644
--- a/sysdeps/arm/bits/atomic.h
+++ b/sysdeps/arm/bits/atomic.h
@@ -52,6 +52,19 @@ void __arm_link_error (void);
a pattern to do this efficiently. */
#if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+# if defined IS_IN_libpthread || !defined NOT_IN_libc
+# ifdef IS_IN_libpthread
+extern int __pthread_multiple_threads attribute_hidden;
+# define __atomic_is_single_thread (__pthread_multiple_threads == 0)
+# else
+extern int __libc_multiple_threads attribute_hidden;
+# define __atomic_is_single_thread (__libc_multiple_threads == 0)
+# endif
+# else
+# define __atomic_is_single_thread 0
+# endif
+
+
/* Compare and exchange.
For all "bool" routines, we return FALSE if exchange successful. */
@@ -180,7 +193,19 @@ void __arm_link_error (void);
__atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
__ATOMIC_RELEASE)
-# define catomic_exchange_and_add atomic_exchange_and_add
+# define atomic_exchange_and_add_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_RELAXED)
+
+# define catomic_exchange_and_add(mem, value) \
+ ({ \
+ __typeof (*(mem)) __res; \
+ if (__atomic_is_single_thread) \
+ __res = atomic_exchange_and_add_relaxed (mem, value); \
+ else \
+ __res = atomic_exchange_and_add_acq (mem, value); \
+ __res; \
+ })
/* Atomically bitwise and value and return the previous value. */
@@ -200,9 +225,21 @@ void __arm_link_error (void);
__atomic_val_bysize (__arch_exchange_and_and, int, mem, value, \
__ATOMIC_ACQUIRE)
+# define atomic_and_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_and, int, mem, value, \
+ __ATOMIC_RELAXED)
+
# define atomic_and_val atomic_and
-# define catomic_and atomic_and
+# define catomic_and(mem, value) \
+ ({ \
+ __typeof (*(mem)) __res; \
+ if (__atomic_is_single_thread) \
+ __res = atomic_and_relaxed (mem, value); \
+ else \
+ __res = atomic_and (mem, value); \
+ __res; \
+ })
/* Atomically bitwise or value and return the previous value. */
@@ -222,9 +259,21 @@ void __arm_link_error (void);
__atomic_val_bysize (__arch_exchange_and_or, int, mem, value, \
__ATOMIC_ACQUIRE)
+# define atomic_or_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_or, int, mem, value, \
+ __ATOMIC_RELAXED)
+
# define atomic_or_val atomic_or
-# define catomic_or atomic_or
+# define catomic_or(mem, value) \
+ ({ \
+ __typeof (*(mem)) __res; \
+ if (__atomic_is_single_thread) \
+ __res = atomic_or_relaxed (mem, value); \
+ else \
+ __res = atomic_or (mem, value); \
+ __res; \
+ })
#elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
--
1.9.3