author     trav90 <travawine@palemoon.org>   2022-04-14 09:13:29 -0500
committer  trav90 <travawine@palemoon.org>   2022-04-15 08:30:16 -0500
commit     7abed0e35276b50c01c65501d22890d933b3bc56 (patch)
tree       30dca392848b089d2eea4fce7689119b8a2af1b2 /js/src/jit
parent     d430c6398bf6bd1122f401aec7937a2ad0df99a5 (diff)
Issue #1818 - Part 2: Remove the __sync-based implementation of atomics
All supported compilers provide C++11 <atomic> natively, so implementing these operations with GCC's old __sync functions is no longer necessary.
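
For reference, a minimal standalone sketch of the __atomic pattern these headers now use unconditionally, in place of the removed __sync branches. This is not the jit header itself: the free-function names below merely mirror the AtomicOperations methods for illustration, and it builds only with GCC or Clang since it relies on their __atomic builtins.

// sketch.cpp -- illustrative only; compile with g++ or clang++
#include <cstdint>
#include <cstdio>

template<typename T>
inline T loadSeqCst(T* addr) {
    T v;
    __atomic_load(addr, &v, __ATOMIC_SEQ_CST);    // replaces: fence; *addr; fence
    return v;
}

template<typename T>
inline void storeSeqCst(T* addr, T val) {
    __atomic_store(addr, &val, __ATOMIC_SEQ_CST); // replaces: fence; *addr = val; fence
}

template<typename T>
inline T exchangeSeqCst(T* addr, T val) {
    T v;
    __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST); // replaces the CAS retry loop
    return v;
}

template<typename T>
inline T compareExchangeSeqCst(T* addr, T oldval, T newval) {
    // On failure, __atomic_compare_exchange writes the observed value into
    // oldval; on success, oldval already holds it. Returning oldval therefore
    // matches the old __sync_val_compare_and_swap result.
    __atomic_compare_exchange(addr, &oldval, &newval, false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
}

template<typename T>
inline T fetchAddSeqCst(T* addr, T val) {
    return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST); // replaces __sync_fetch_and_add
}

int main() {
    uint32_t x = 0;
    storeSeqCst(&x, 1u);
    uint32_t prev = exchangeSeqCst(&x, 2u);              // prev == 1
    uint32_t seen = compareExchangeSeqCst(&x, 2u, 3u);   // seen == 2, x == 3
    fetchAddSeqCst(&x, 1u);                              // x == 4
    std::printf("%u %u %u\n", (unsigned)prev, (unsigned)seen, (unsigned)loadSeqCst(&x));
    return 0;
}

The diff below applies exactly this substitution in each backend header and drops the ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS escape hatch.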
Diffstat (limited to 'js/src/jit')
-rw-r--r--   js/src/jit/arm/AtomicOperations-arm.h                   |  76 ----
-rw-r--r--   js/src/jit/mips-shared/AtomicOperations-mips-shared.h   |  76 ----
-rw-r--r--   js/src/jit/none/AtomicOperations-ppc.h                  |  76 ----
-rw-r--r--   js/src/jit/none/AtomicOperations-sparc.h                |  76 ----
-rw-r--r--   js/src/jit/x86-shared/AtomicOperations-x86-shared.h     | 108 ----
5 files changed, 0 insertions, 412 deletions
diff --git a/js/src/jit/arm/AtomicOperations-arm.h b/js/src/jit/arm/AtomicOperations-arm.h
index e10b348c0a..f9ca690c07 100644
--- a/js/src/jit/arm/AtomicOperations-arm.h
+++ b/js/src/jit/arm/AtomicOperations-arm.h
@@ -12,18 +12,6 @@
#if defined(__clang__) || defined(__GNUC__)
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>. Where that
-// isn't available, we use GCC's older __sync functions instead.
-//
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions. This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
-
-//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
inline bool
js::jit::AtomicOperations::isLockfree8()
{
@@ -37,24 +25,16 @@ js::jit::AtomicOperations::isLockfree8()
//
// For now, make the JIT defer to the C++ compiler when we know what
// the C++ compiler will do, otherwise assume a lock is needed.
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
return HasLDSTREXBHD() && __atomic_always_lock_free(sizeof(int64_t), 0);
-# else
- return false;
-# endif
}
inline void
js::jit::AtomicOperations::fenceSeqCst()
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
-# else
__atomic_thread_fence(__ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -62,14 +42,8 @@ inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- T v = *addr;
- __sync_synchronize();
-# else
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-# endif
return v;
}
@@ -78,13 +52,7 @@ inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- *addr = val;
- __sync_synchronize();
-# else
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -92,18 +60,9 @@ inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- T v;
- __sync_synchronize();
- do {
- v = *addr;
- } while (__sync_val_compare_and_swap(addr, v, val) != v);
- return v;
-# else
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
-# endif
}
template<typename T>
@@ -111,12 +70,8 @@ inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_val_compare_and_swap(addr, oldval, newval);
-# else
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
-# endif
}
template<typename T>
@@ -124,11 +79,7 @@ inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_add(addr, val);
-# else
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -136,11 +87,7 @@ inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_sub(addr, val);
-# else
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -148,11 +95,7 @@ inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_and(addr, val);
-# else
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -160,11 +103,7 @@ inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_or(addr, val);
-# else
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -172,11 +111,7 @@ inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_xor(addr, val);
-# else
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -209,17 +144,12 @@ template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
- ;
-# else
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
zero = 0;
continue;
}
-# endif
}
template<size_t nbytes>
@@ -227,16 +157,10 @@ inline void
js::jit::RegionLock::release(void* addr)
{
MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_sub_and_fetch(&spinlock, 1);
-# else
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
}
-# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"
diff --git a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
index dee9302300..7441a27e87 100644
--- a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -13,22 +13,9 @@
#if defined(__clang__) || defined(__GNUC__)
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>. Where that
-// isn't available, we use GCC's older __sync functions instead.
-//
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions. This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
-
-//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
inline bool
js::jit::AtomicOperations::isLockfree8()
{
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
@@ -36,19 +23,12 @@ js::jit::AtomicOperations::isLockfree8()
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
# endif
return true;
-# else
- return false;
-# endif
}
inline void
js::jit::AtomicOperations::fenceSeqCst()
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
-# else
__atomic_thread_fence(__ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -56,14 +36,8 @@ inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- T v = *addr;
- __sync_synchronize();
-# else
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-# endif
return v;
}
@@ -72,13 +46,7 @@ inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- *addr = val;
- __sync_synchronize();
-# else
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -86,12 +54,8 @@ inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_val_compare_and_swap(addr, oldval, newval);
-# else
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
-# endif
}
template<typename T>
@@ -99,11 +63,7 @@ inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_add(addr, val);
-# else
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -111,11 +71,7 @@ inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_sub(addr, val);
-# else
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -123,11 +79,7 @@ inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_and(addr, val);
-# else
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -135,11 +87,7 @@ inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_or(addr, val);
-# else
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -147,11 +95,7 @@ inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_xor(addr, val);
-# else
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -185,35 +129,21 @@ inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- T v;
- __sync_synchronize();
- do {
- v = *addr;
- } while (__sync_val_compare_and_swap(addr, v, val) != v);
- return v;
-# else
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
-# endif
}
template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
- ;
-# else
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
zero = 0;
continue;
}
-# endif
}
template<size_t nbytes>
@@ -221,16 +151,10 @@ inline void
js::jit::RegionLock::release(void* addr)
{
MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_sub_and_fetch(&spinlock, 1);
-# else
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
}
-# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"
diff --git a/js/src/jit/none/AtomicOperations-ppc.h b/js/src/jit/none/AtomicOperations-ppc.h
index 182de9f8ac..465c0cf41b 100644
--- a/js/src/jit/none/AtomicOperations-ppc.h
+++ b/js/src/jit/none/AtomicOperations-ppc.h
@@ -13,22 +13,9 @@
#if defined(__clang__) || defined(__GNUC__)
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>. Where that
-// isn't available, we use GCC's older __sync functions instead.
-//
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions. This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
-
-//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
inline bool
js::jit::AtomicOperations::isLockfree8()
{
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
@@ -37,19 +24,12 @@ js::jit::AtomicOperations::isLockfree8()
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
# endif
return true;
-# else
- return false;
-# endif
}
inline void
js::jit::AtomicOperations::fenceSeqCst()
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
-# else
__atomic_thread_fence(__ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -57,14 +37,8 @@ inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- T v = *addr;
- __sync_synchronize();
-# else
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-# endif
return v;
}
@@ -73,13 +47,7 @@ inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- *addr = val;
- __sync_synchronize();
-# else
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -87,12 +55,8 @@ inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_val_compare_and_swap(addr, oldval, newval);
-# else
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
-# endif
}
template<typename T>
@@ -100,11 +64,7 @@ inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_add(addr, val);
-# else
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -112,11 +72,7 @@ inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_sub(addr, val);
-# else
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -124,11 +80,7 @@ inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_and(addr, val);
-# else
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -136,11 +88,7 @@ inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_or(addr, val);
-# else
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -148,11 +96,7 @@ inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_xor(addr, val);
-# else
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -186,35 +130,21 @@ inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- T v;
- __sync_synchronize();
- do {
- v = *addr;
- } while (__sync_val_compare_and_swap(addr, v, val) != v);
- return v;
-# else
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
-# endif
}
template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
- ;
-# else
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
zero = 0;
continue;
}
-# endif
}
template<size_t nbytes>
@@ -222,16 +152,10 @@ inline void
js::jit::RegionLock::release(void* addr)
{
MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_sub_and_fetch(&spinlock, 1);
-# else
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
}
-# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
# error "Either disable JS shared memory, use GCC or Clang, or add code here"
diff --git a/js/src/jit/none/AtomicOperations-sparc.h b/js/src/jit/none/AtomicOperations-sparc.h
index ca689da7e2..c962a524e7 100644
--- a/js/src/jit/none/AtomicOperations-sparc.h
+++ b/js/src/jit/none/AtomicOperations-sparc.h
@@ -13,22 +13,9 @@
#if defined(__clang__) || defined(__GNUC__)
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>. Where that
-// isn't available, we use GCC's older __sync functions instead.
-//
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions. This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
-
-//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
inline bool
js::jit::AtomicOperations::isLockfree8()
{
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
@@ -36,19 +23,12 @@ js::jit::AtomicOperations::isLockfree8()
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
# endif
return true;
-# else
- return false;
-# endif
}
inline void
js::jit::AtomicOperations::fenceSeqCst()
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
-# else
__atomic_thread_fence(__ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -56,14 +36,8 @@ inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- T v = *addr;
- __sync_synchronize();
-# else
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-# endif
return v;
}
@@ -72,13 +46,7 @@ inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
- *addr = val;
- __sync_synchronize();
-# else
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -86,12 +54,8 @@ inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_val_compare_and_swap(addr, oldval, newval);
-# else
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
-# endif
}
template<typename T>
@@ -101,11 +65,7 @@ js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
#if !defined( __LP64__)
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
#endif
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_add(addr, val);
-# else
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -115,11 +75,7 @@ js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
#if !defined( __LP64__)
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
#endif
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_sub(addr, val);
-# else
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -129,11 +85,7 @@ js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
#if !defined( __LP64__)
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
#endif
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_and(addr, val);
-# else
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -143,11 +95,7 @@ js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
#if !defined( __LP64__)
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
#endif
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_or(addr, val);
-# else
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -157,11 +105,7 @@ js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
#if !defined( __LP64__)
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
#endif
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_xor(addr, val);
-# else
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -195,35 +139,21 @@ inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- T v;
- __sync_synchronize();
- do {
- v = *addr;
- } while (__sync_val_compare_and_swap(addr, v, val) != v);
- return v;
-# else
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
-# endif
}
template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
- ;
-# else
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
zero = 0;
continue;
}
-# endif
}
template<size_t nbytes>
@@ -231,16 +161,10 @@ inline void
js::jit::RegionLock::release(void* addr)
{
MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_sub_and_fetch(&spinlock, 1);
-# else
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
}
-# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
# error "Either disable JS shared memory, use GCC or Clang, or add code here"
diff --git a/js/src/jit/x86-shared/AtomicOperations-x86-shared.h b/js/src/jit/x86-shared/AtomicOperations-x86-shared.h
index 1230c7febd..4eee00a3de 100644
--- a/js/src/jit/x86-shared/AtomicOperations-x86-shared.h
+++ b/js/src/jit/x86-shared/AtomicOperations-x86-shared.h
@@ -45,18 +45,6 @@
#if defined(__clang__) || defined(__GNUC__)
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>. Where that
-// isn't available, we use GCC's older __sync functions instead.
-//
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions. This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
-
-// #define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-
// Lock-free 8-byte atomics are assumed on x86 but must be disabled in
// corner cases, see comments below and in isLockfree8().
@@ -77,15 +65,11 @@
inline bool
js::jit::AtomicOperations::isLockfree8()
{
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
-# endif
# ifdef LOCKFREE8
-# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
-# endif
return true;
# else
return false;
@@ -95,11 +79,7 @@ js::jit::AtomicOperations::isLockfree8()
inline void
js::jit::AtomicOperations::fenceSeqCst()
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_synchronize();
-# else
__atomic_thread_fence(__ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -107,16 +87,8 @@ inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- // Inhibit compiler reordering with a volatile load. The x86 does
- // not reorder loads with respect to subsequent loads or stores
- // and no ordering barrier is required here. See more elaborate
- // comments in storeSeqCst.
- T v = *static_cast<T volatile*>(addr);
-# else
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-# endif
return v;
}
@@ -141,43 +113,7 @@ inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- // Inhibit compiler reordering with a volatile store. The x86 may
- // reorder a store with respect to a subsequent load from a
- // different location, hence there is an ordering barrier here to
- // prevent that.
- //
- // By way of background, look to eg
- // http://bartoszmilewski.com/2008/11/05/who-ordered-memory-fences-on-an-x86/
- //
- // Consider:
- //
- // uint8_t x = 0, y = 0; // to start
- //
- // thread1:
- // sx: AtomicOperations::store(&x, 1);
- // gy: uint8_t obs1 = AtomicOperations::loadSeqCst(&y);
- //
- // thread2:
- // sy: AtomicOperations::store(&y, 1);
- // gx: uint8_t obs2 = AtomicOperations::loadSeqCst(&x);
- //
- // Sequential consistency requires a total global ordering of
- // operations: sx-gy-sy-gx, sx-sy-gx-gy, sx-sy-gy-gx, sy-gx-sx-gy,
- // sy-sx-gy-gx, or sy-sx-gx-gy. In every ordering at least one of
- // sx-before-gx or sy-before-gy happens, so *at least one* of
- // obs1/obs2 is 1.
- //
- // If AtomicOperations::{load,store}SeqCst were just volatile
- // {load,store}, x86 could reorder gx/gy before each thread's
- // prior load. That would permit gx-gy-sx-sy: both loads would be
- // 0! Thus after a volatile store we must synchronize to ensure
- // the store happens before the load.
- *static_cast<T volatile*>(addr) = val;
- __sync_synchronize();
-# else
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-# endif
}
# ifndef LOCKFREE8
@@ -201,19 +137,9 @@ inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- T v;
- do {
- // Here I assume the compiler will not hoist the load. It
- // shouldn't, because the CAS could affect *addr.
- v = *addr;
- } while (!__sync_bool_compare_and_swap(addr, v, val));
- return v;
-# else
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
-# endif
}
# ifndef LOCKFREE8
@@ -237,12 +163,8 @@ inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_val_compare_and_swap(addr, oldval, newval);
-# else
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
-# endif
}
# ifndef LOCKFREE8
@@ -266,11 +188,7 @@ inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_add(addr, val);
-# else
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -278,11 +196,7 @@ inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_sub(addr, val);
-# else
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -290,11 +204,7 @@ inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_and(addr, val);
-# else
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -302,11 +212,7 @@ inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_or(addr, val);
-# else
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -314,11 +220,7 @@ inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- return __sync_fetch_and_xor(addr, val);
-# else
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-# endif
}
template<typename T>
@@ -351,17 +253,12 @@ template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
- continue;
-# else
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
zero = 0;
continue;
}
-# endif
}
template<size_t nbytes>
@@ -369,15 +266,10 @@ inline void
js::jit::RegionLock::release(void* addr)
{
MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
- __sync_sub_and_fetch(&spinlock, 1); // Should turn into LOCK XADD
-# else
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
}
-# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
# undef LOCKFREE8
#elif defined(_MSC_VER)