diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index 9d65b86832f5..e490a469d875 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -250,19 +250,11 @@ struct QuarantineCallback {
 typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
 typedef ScudoQuarantine::Cache ScudoQuarantineCache;
 COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
-               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));
+               sizeof(ScudoTSD::QuarantineCachePlaceHolder));
 
-AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
-  return &ThreadContext->Cache;
-}
-
-ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
-  return reinterpret_cast<
-      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
-}
-
-ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
-  return &ThreadContext->Prng;
+ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
+  return reinterpret_cast<ScudoQuarantineCache *>(
+      TSD->QuarantineCachePlaceHolder);
 }
 
 struct ScudoAllocator {
@@ -381,12 +373,11 @@ struct ScudoAllocator {
     uptr AllocSize;
     if (FromPrimary) {
       AllocSize = AlignedSize;
-      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
-      if (LIKELY(ThreadContext)) {
-        Salt = getPrng(ThreadContext)->getU8();
-        Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
-                                               AllocSize);
-        ThreadContext->unlock();
+      ScudoTSD *TSD = getTSDAndLock();
+      if (LIKELY(TSD)) {
+        Salt = TSD->Prng.getU8();
+        Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+        TSD->unlock();
       } else {
         SpinMutexLock l(&FallbackMutex);
         Salt = FallbackPrng.getU8();
@@ -454,11 +445,10 @@ struct ScudoAllocator {
       Chunk->eraseHeader();
       void *Ptr = Chunk->getAllocBeg(Header);
       if (Header->FromPrimary) {
-        ScudoThreadContext *ThreadContext = getThreadContextAndLock();
-        if (LIKELY(ThreadContext)) {
-          getBackendAllocator().deallocatePrimary(
-              getAllocatorCache(ThreadContext), Ptr);
-          ThreadContext->unlock();
+        ScudoTSD *TSD = getTSDAndLock();
+        if (LIKELY(TSD)) {
+          getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+          TSD->unlock();
         } else {
           SpinMutexLock Lock(&FallbackMutex);
           getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
@@ -476,13 +466,12 @@ struct ScudoAllocator {
       UnpackedHeader NewHeader = *Header;
       NewHeader.State = ChunkQuarantine;
       Chunk->compareExchangeHeader(&NewHeader, Header);
-      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
-      if (LIKELY(ThreadContext)) {
-        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
-                                QuarantineCallback(
-                                    getAllocatorCache(ThreadContext)),
+      ScudoTSD *TSD = getTSDAndLock();
+      if (LIKELY(TSD)) {
+        AllocatorQuarantine.Put(getQuarantineCache(TSD),
+                                QuarantineCallback(&TSD->Cache),
                                 Chunk, EstimatedSize);
-        ThreadContext->unlock();
+        TSD->unlock();
       } else {
         SpinMutexLock l(&FallbackMutex);
         AllocatorQuarantine.Put(&FallbackQuarantineCache,
@@ -607,11 +596,10 @@ struct ScudoAllocator {
     return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
   }
 
-  void commitBack(ScudoThreadContext *ThreadContext) {
-    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
-    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
-                              QuarantineCallback(Cache));
-    BackendAllocator.destroyCache(Cache);
+  void commitBack(ScudoTSD *TSD) {
+    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
+                              QuarantineCallback(&TSD->Cache));
+    BackendAllocator.destroyCache(&TSD->Cache);
   }
 
   uptr getStats(AllocatorStat StatType) {
@@ -637,13 +625,13 @@ static void initScudoInternal(const AllocatorOptions &Options) {
   Instance.init(Options);
 }
 
-void ScudoThreadContext::init() {
+void ScudoTSD::init() {
   getBackendAllocator().initCache(&Cache);
   Prng.init();
   memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
 }
 
-void ScudoThreadContext::commitBack() {
+void ScudoTSD::commitBack() {
   Instance.commitBack(this);
 }
diff --git a/compiler-rt/lib/scudo/scudo_tls.h b/compiler-rt/lib/scudo/scudo_tls.h
index 4784f6a305c9..a3992e264eca 100644
--- a/compiler-rt/lib/scudo/scudo_tls.h
+++ b/compiler-rt/lib/scudo/scudo_tls.h
@@ -28,7 +28,7 @@ namespace __scudo {
 #include "scudo_tls_context_android.inc"
 #include "scudo_tls_context_linux.inc"
 
-struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
+struct ALIGNED(64) ScudoTSD : public ScudoTSDPlatform {
   AllocatorCache Cache;
   ScudoPrng Prng;
   uptr QuarantineCachePlaceHolder[4];
@@ -38,7 +38,7 @@ struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
 
 void initThread(bool MinimalInit);
 
-// Platform specific dastpath functions definitions.
+// Platform specific fastpath functions definitions.
 #include "scudo_tls_android.inc"
 #include "scudo_tls_linux.inc"
 
diff --git a/compiler-rt/lib/scudo/scudo_tls_android.cpp b/compiler-rt/lib/scudo/scudo_tls_android.cpp
index c0ea417ab864..3f215a72f0b2 100644
--- a/compiler-rt/lib/scudo/scudo_tls_android.cpp
+++ b/compiler-rt/lib/scudo/scudo_tls_android.cpp
@@ -24,9 +24,9 @@ namespace __scudo {
 static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
 static pthread_key_t PThreadKey;
 
-static atomic_uint32_t ThreadContextCurrentIndex;
-static ScudoThreadContext *ThreadContexts;
-static uptr NumberOfContexts;
+static atomic_uint32_t CurrentIndex;
+static ScudoTSD *TSDs;
+static u32 NumberOfTSDs;
 
 // sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used as they allocate memory.
 static uptr getNumberOfCPUs() {
@@ -42,52 +42,55 @@ static void initOnce() {
   // TODO(kostyak): remove and restrict to N and above.
   CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
   initScudo();
-  NumberOfContexts = getNumberOfCPUs();
-  ThreadContexts = reinterpret_cast<ScudoThreadContext *>(
-      MmapOrDie(sizeof(ScudoThreadContext) * NumberOfContexts, __func__));
-  for (uptr i = 0; i < NumberOfContexts; i++)
-    ThreadContexts[i].init();
+  NumberOfTSDs = getNumberOfCPUs();
+  if (NumberOfTSDs == 0)
+    NumberOfTSDs = 1;
+  if (NumberOfTSDs > 32)
+    NumberOfTSDs = 32;
+  TSDs = reinterpret_cast<ScudoTSD *>(
+      MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
+  for (u32 i = 0; i < NumberOfTSDs; i++)
+    TSDs[i].init();
 }
 
 void initThread(bool MinimalInit) {
   pthread_once(&GlobalInitialized, initOnce);
   // Initial context assignment is done in a plain round-robin fashion.
-  u32 Index = atomic_fetch_add(&ThreadContextCurrentIndex, 1,
-                               memory_order_relaxed);
-  ScudoThreadContext *ThreadContext =
-      &ThreadContexts[Index % NumberOfContexts];
-  *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
+  u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
+  ScudoTSD *TSD = &TSDs[Index % NumberOfTSDs];
+  *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
 }
 
-ScudoThreadContext *getThreadContextAndLockSlow() {
-  ScudoThreadContext *ThreadContext;
-  // Go through all the contexts and find the first unlocked one.
-  for (u32 i = 0; i < NumberOfContexts; i++) {
-    ThreadContext = &ThreadContexts[i];
-    if (ThreadContext->tryLock()) {
-      *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
-      return ThreadContext;
+ScudoTSD *getTSDAndLockSlow() {
+  ScudoTSD *TSD;
+  if (NumberOfTSDs > 1) {
+    // Go through all the contexts and find the first unlocked one.
+    for (u32 i = 0; i < NumberOfTSDs; i++) {
+      TSD = &TSDs[i];
+      if (TSD->tryLock()) {
+        *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+        return TSD;
+      }
+    }
+    // No luck, find the one with the lowest Precedence, and slow lock it.
+    u64 LowestPrecedence = UINT64_MAX;
+    for (u32 i = 0; i < NumberOfTSDs; i++) {
+      u64 Precedence = TSDs[i].getPrecedence();
+      if (Precedence && Precedence < LowestPrecedence) {
+        TSD = &TSDs[i];
+        LowestPrecedence = Precedence;
+      }
+    }
+    if (LIKELY(LowestPrecedence != UINT64_MAX)) {
+      TSD->lock();
+      *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+      return TSD;
     }
   }
-  // No luck, find the one with the lowest precedence, and slow lock it.
-  u64 Precedence = UINT64_MAX;
-  for (u32 i = 0; i < NumberOfContexts; i++) {
-    u64 SlowLockPrecedence = ThreadContexts[i].getSlowLockPrecedence();
-    if (SlowLockPrecedence && SlowLockPrecedence < Precedence) {
-      ThreadContext = &ThreadContexts[i];
-      Precedence = SlowLockPrecedence;
-    }
-  }
-  if (LIKELY(Precedence != UINT64_MAX)) {
-    ThreadContext->lock();
-    *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
-    return ThreadContext;
-  }
-  // Last resort (can this happen?), stick with the current one.
-  ThreadContext =
-      reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
-  ThreadContext->lock();
-  return ThreadContext;
+  // Last resort, stick with the current one.
+  TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+  TSD->lock();
+  return TSD;
 }
 
 }  // namespace __scudo
diff --git a/compiler-rt/lib/scudo/scudo_tls_android.inc b/compiler-rt/lib/scudo/scudo_tls_android.inc
index 6b82e49f55a0..9f3ef1a234a7 100644
--- a/compiler-rt/lib/scudo/scudo_tls_android.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_android.inc
@@ -26,17 +26,16 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   initThread(MinimalInit);
 }
 
-ScudoThreadContext *getThreadContextAndLockSlow();
+ScudoTSD *getTSDAndLockSlow();
 
-ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
-  ScudoThreadContext *ThreadContext =
-      reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
-  CHECK(ThreadContext);
+ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
+  ScudoTSD *TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+  CHECK(TSD);
   // Try to lock the currently associated context.
-  if (ThreadContext->tryLock())
-    return ThreadContext;
+  if (TSD->tryLock())
+    return TSD;
   // If it failed, go the slow path.
-  return getThreadContextAndLockSlow();
+  return getTSDAndLockSlow();
 }
 
 #endif  // SANITIZER_LINUX && SANITIZER_ANDROID
diff --git a/compiler-rt/lib/scudo/scudo_tls_context_android.inc b/compiler-rt/lib/scudo/scudo_tls_context_android.inc
index f1951319d487..4787ec7b613b 100644
--- a/compiler-rt/lib/scudo/scudo_tls_context_android.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_context_android.inc
@@ -20,33 +20,33 @@
 
 #if SANITIZER_LINUX && SANITIZER_ANDROID
 
-struct ScudoThreadContextPlatform {
+struct ScudoTSDPlatform {
   INLINE bool tryLock() {
     if (Mutex.TryLock()) {
-      atomic_store_relaxed(&SlowLockPrecedence, 0);
+      atomic_store_relaxed(&Precedence, 0);
       return true;
     }
-    if (atomic_load_relaxed(&SlowLockPrecedence) == 0)
-      atomic_store_relaxed(&SlowLockPrecedence, NanoTime());
+    if (atomic_load_relaxed(&Precedence) == 0)
+      atomic_store_relaxed(&Precedence, NanoTime());
    return false;
  }
 
   INLINE void lock() {
     Mutex.Lock();
-    atomic_store_relaxed(&SlowLockPrecedence, 0);
+    atomic_store_relaxed(&Precedence, 0);
   }
 
   INLINE void unlock() {
     Mutex.Unlock();
   }
 
-  INLINE u64 getSlowLockPrecedence() {
-    return atomic_load_relaxed(&SlowLockPrecedence);
+  INLINE u64 getPrecedence() {
+    return atomic_load_relaxed(&Precedence);
   }
 
 private:
   StaticSpinMutex Mutex;
-  atomic_uint64_t SlowLockPrecedence;
+  atomic_uint64_t Precedence;
 };
 
 #endif  // SANITIZER_LINUX && SANITIZER_ANDROID
diff --git a/compiler-rt/lib/scudo/scudo_tls_context_linux.inc b/compiler-rt/lib/scudo/scudo_tls_context_linux.inc
index 8d292bdbc932..9a24256f2d7d 100644
--- a/compiler-rt/lib/scudo/scudo_tls_context_linux.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_context_linux.inc
@@ -20,7 +20,7 @@
 
 #if SANITIZER_LINUX && !SANITIZER_ANDROID
 
-struct ScudoThreadContextPlatform {
+struct ScudoTSDPlatform {
   ALWAYS_INLINE void unlock() {}
 };
 
diff --git a/compiler-rt/lib/scudo/scudo_tls_linux.cpp b/compiler-rt/lib/scudo/scudo_tls_linux.cpp
index f2592266d060..84553945797f 100644
--- a/compiler-rt/lib/scudo/scudo_tls_linux.cpp
+++ b/compiler-rt/lib/scudo/scudo_tls_linux.cpp
@@ -28,7 +28,7 @@ static pthread_key_t PThreadKey;
 __attribute__((tls_model("initial-exec")))
 THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
 __attribute__((tls_model("initial-exec")))
-THREADLOCAL ScudoThreadContext ThreadLocalContext;
+THREADLOCAL ScudoTSD TSD;
 
 static void teardownThread(void *Ptr) {
   uptr I = reinterpret_cast<uptr>(Ptr);
@@ -43,7 +43,7 @@ static void teardownThread(void *Ptr) {
         reinterpret_cast<void *>(I - 1)) == 0))
       return;
   }
-  ThreadLocalContext.commitBack();
+  TSD.commitBack();
   ScudoThreadState = ThreadTornDown;
 }
 
@@ -59,7 +59,7 @@ void initThread(bool MinimalInit) {
     return;
   CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
       GetPthreadDestructorIterations())), 0);
-  ThreadLocalContext.init();
+  TSD.init();
   ScudoThreadState = ThreadInitialized;
 }
 
diff --git a/compiler-rt/lib/scudo/scudo_tls_linux.inc b/compiler-rt/lib/scudo/scudo_tls_linux.inc
index 53b804485403..492807c5806c 100644
--- a/compiler-rt/lib/scudo/scudo_tls_linux.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_linux.inc
@@ -29,7 +29,7 @@ enum ThreadState : u8 {
 __attribute__((tls_model("initial-exec")))
 extern THREADLOCAL ThreadState ScudoThreadState;
 __attribute__((tls_model("initial-exec")))
-extern THREADLOCAL ScudoThreadContext ThreadLocalContext;
+extern THREADLOCAL ScudoTSD TSD;
 
 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   if (LIKELY(ScudoThreadState != ThreadNotInitialized))
@@ -37,10 +37,10 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   initThread(MinimalInit);
 }
 
-ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
   if (UNLIKELY(ScudoThreadState != ThreadInitialized))
     return nullptr;
-  return &ThreadLocalContext;
+  return &TSD;
 }
 
 #endif  // SANITIZER_LINUX && !SANITIZER_ANDROID