diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake
index ae2a262a14a9..0da98b9a489d 100644
--- a/compiler-rt/cmake/config-ix.cmake
+++ b/compiler-rt/cmake/config-ix.cmake
@@ -179,7 +179,7 @@ set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}
 set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
 set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64})
-set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64})
+set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)

 if(APPLE)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index 38363e875560..47581282a789 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -71,16 +71,26 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
   return v;
 }

-template<typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a,
-                                           typename T::Type *cmp,
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                            typename T::Type xchg,
                                            memory_order mo) {
   typedef typename T::Type Type;
   Type cmpv = *cmp;
-  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
-  if (prev == cmpv)
-    return true;
+  Type prev;
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+  if (sizeof(*a) == 8) {
+    Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
+    prev = __mips_sync_val_compare_and_swap(
+        reinterpret_cast<u64 volatile *>(val_ptr), reinterpret_cast<u64>(cmpv),
+        reinterpret_cast<u64>(xchg));
+  } else {
+    prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+  }
+#else
+  prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#endif
+  if (prev == cmpv) return true;
   *cmp = prev;
   return false;
 }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index 099b9f7ec8c6..2825c7400d91 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,6 +17,56 @@
 namespace __sanitizer {

+// MIPS32 does not support atomics larger than 4 bytes. To address this lack
+// of functionality, the sanitizer library provides helper methods which use
+// an internal spin lock mechanism to emulate atomic operations when the size
+// is 8 bytes.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+static void __spin_lock(volatile int *lock) {
+  while (__sync_lock_test_and_set(lock, 1))
+    while (*lock) {
+    }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+  int lock;
+  char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0};
+
+template <typename T>
+T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
+  T ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret + val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+template <typename T>
+T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
+  T ret;
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  if (ret == oldval) *ptr = newval;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+#endif
+
 INLINE void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
 }
@@ -53,8 +103,15 @@ INLINE typename T::Type atomic_load(
     // 64-bit load on 32-bit platform.
     // Gross, but simple and reliable.
     // Assume that it is not in read-only memory.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+    typename T::Type volatile *val_ptr =
+        const_cast<typename T::Type volatile *>(&a->val_dont_use);
+    v = __mips_sync_fetch_and_add<u64>(
+        reinterpret_cast<u64 volatile *>(val_ptr), 0);
+#else
     v = __sync_fetch_and_add(
         const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+#endif
   }
   return v;
 }
@@ -84,7 +141,15 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
     typename T::Type cmp = a->val_dont_use;
     typename T::Type cur;
     for (;;) {
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+      typename T::Type volatile *val_ptr =
+          const_cast<typename T::Type volatile *>(&a->val_dont_use);
+      cur = __mips_sync_val_compare_and_swap(
+          reinterpret_cast<u64 volatile *>(val_ptr), reinterpret_cast<u64>(cmp),
+          reinterpret_cast<u64>(v));
+#else
       cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#endif
       if (cmp == v) break;
       cmp = cur;
     }
diff --git a/compiler-rt/test/scudo/random_shuffle.cpp b/compiler-rt/test/scudo/random_shuffle.cpp
index 41e67ded67e9..05a432615017 100644
--- a/compiler-rt/test/scudo/random_shuffle.cpp
+++ b/compiler-rt/test/scudo/random_shuffle.cpp
@@ -7,7 +7,7 @@
 // RUN: %run %t 10000 > %T/random_shuffle_tmp_dir/out2
 // RUN: not diff %T/random_shuffle_tmp_dir/out?
 // RUN: rm -rf %T/random_shuffle_tmp_dir
-// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux
+// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux

 // Tests that the allocator shuffles the chunks before returning to the user.
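
Note: the core idea in the sanitizer_atomic_clang_other.h change is to funnel every 8-byte atomic operation on MIPS32 (O32 ABI) through a single global spin lock, since that ABI has no native 64-bit atomic instructions. The standalone sketch below illustrates the same pattern outside the sanitizer codebase; the names (g_lock, spin_lock, emulated_cas64) are made up for illustration and are not part of the patch, which uses the __mips_sync_* helpers shown above.

// Standalone sketch of spin-lock-based 64-bit CAS emulation, mirroring the
// MIPS32/O32 helpers added by this patch. Illustrative names only.
#include <cstdint>
#include <cstdio>

// Keep the lock on its own (assumed 32-byte) cache line to avoid false
// sharing, mirroring the padded struct in the patch.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) g_lock = {0};

static void spin_lock(volatile int *lock) {
  // __sync_lock_test_and_set returns the previous value; loop until we are
  // the thread that flipped 0 -> 1. The inner loop spins on plain reads so
  // the cache line is not hammered by repeated atomic writes.
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Emulated 64-bit compare-and-swap. This is only correct if every 64-bit
// "atomic" access in the process takes the same lock, which is why the patch
// routes atomic_load, atomic_store, and atomic_compare_exchange_strong for
// 8-byte types through the shared helpers when _MIPS_SIM == _ABIO32.
static uint64_t emulated_cas64(volatile uint64_t *ptr, uint64_t oldval,
                               uint64_t newval) {
  spin_lock(&g_lock.lock);
  uint64_t prev = *ptr;
  if (prev == oldval) *ptr = newval;
  spin_unlock(&g_lock.lock);
  return prev;
}

int main() {
  volatile uint64_t v = 41;
  uint64_t prev = emulated_cas64(&v, 41, 42);  // succeeds, returns 41
  std::printf("prev=%llu, v=%llu\n", (unsigned long long)prev,
              (unsigned long long)v);
  return 0;
}

With 64-bit atomics emulated this way, Scudo's atomic_uint64_t operations work on MIPS32, which is what allows ALL_SCUDO_SUPPORTED_ARCH to gain ${MIPS32} and ${MIPS64} in the cmake change.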