[scudo] Scudo thread specific data refactor, part 2

Summary:
Following D38139, we now consolidate the TSD definition, merging the shared
TSD definition with the exclusive TSD definition. We introduce a boolean set
at initialization denoting the need for the TSD to be unlocked or not. This
adds some unused members to the exclusive TSD, but increases consistency and
reduces the definitions fragmentation.

We remove the fallback mechanism from `scudo_allocator.cpp` and add a fallback
TSD in the non-shared version. Since the shared version doesn't require one,
this makes more sense overall.

There are a couple of additional cosmetic changes: removing the header guards
from the remaining `.inc` files, and adding an error string to a `CHECK`.

Question to reviewers: I thought about friending `getTSDAndLock` in `ScudoTSD`
so that the `FallbackTSD` could call `Mutex.Lock()` directly instead of `lock()`,
which involves zeroing out `Precedence` (unused otherwise). Is it
worth doing?

Reviewers: alekseyshl, dvyukov, kcc

Reviewed By: dvyukov

Subscribers: srhines, llvm-commits

Differential Revision: https://reviews.llvm.org/D38183

llvm-svn: 314110
This commit is contained in:
Kostya Kortchinsky 2017-09-25 15:12:08 +00:00
parent 4a97df01c4
commit 22396c2f47
8 changed files with 59 additions and 145 deletions

View File

@ -269,14 +269,6 @@ struct ScudoAllocator {
StaticSpinMutex GlobalPrngMutex;
ScudoPrng GlobalPrng;
// The fallback caches are used when the thread local caches have been
// 'destroyed' on thread tear-down. They are protected by a Mutex as they can
// be accessed by different threads.
StaticSpinMutex FallbackMutex;
AllocatorCache FallbackAllocatorCache;
ScudoQuarantineCache FallbackQuarantineCache;
ScudoPrng FallbackPrng;
u32 QuarantineChunksUpToSize;
bool DeallocationTypeMismatch;
@ -284,8 +276,7 @@ struct ScudoAllocator {
bool DeleteSizeMismatch;
explicit ScudoAllocator(LinkerInitialized)
: AllocatorQuarantine(LINKER_INITIALIZED),
FallbackQuarantineCache(LINKER_INITIALIZED) {}
: AllocatorQuarantine(LINKER_INITIALIZED) {}
void init(const AllocatorOptions &Options) {
// Verify that the header offset field can hold the maximum offset. In the
@ -329,8 +320,6 @@ struct ScudoAllocator {
QuarantineChunksUpToSize = Options.QuarantineChunksUpToSize;
GlobalPrng.init();
Cookie = GlobalPrng.getU64();
BackendAllocator.initCache(&FallbackAllocatorCache);
FallbackPrng.init();
}
// Helper function that checks for a valid Scudo chunk. nullptr isn't.
@ -374,16 +363,9 @@ struct ScudoAllocator {
if (FromPrimary) {
AllocSize = AlignedSize;
ScudoTSD *TSD = getTSDAndLock();
if (LIKELY(TSD)) {
Salt = TSD->Prng.getU8();
Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getU8();
Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
AllocSize);
}
Salt = TSD->Prng.getU8();
Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
TSD->unlock();
} else {
{
SpinMutexLock l(&GlobalPrngMutex);
@ -446,13 +428,8 @@ struct ScudoAllocator {
void *Ptr = Chunk->getAllocBeg(Header);
if (Header->FromPrimary) {
ScudoTSD *TSD = getTSDAndLock();
if (LIKELY(TSD)) {
getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
TSD->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
}
getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
TSD->unlock();
} else {
getBackendAllocator().deallocateSecondary(Ptr);
}
@ -467,17 +444,10 @@ struct ScudoAllocator {
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, Header);
ScudoTSD *TSD = getTSDAndLock();
if (LIKELY(TSD)) {
AllocatorQuarantine.Put(getQuarantineCache(TSD),
QuarantineCallback(&TSD->Cache),
Chunk, EstimatedSize);
TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
QuarantineCallback(&FallbackAllocatorCache),
Chunk, EstimatedSize);
}
AllocatorQuarantine.Put(getQuarantineCache(TSD),
QuarantineCallback(&TSD->Cache),
Chunk, EstimatedSize);
TSD->unlock();
}
}
@ -625,7 +595,8 @@ static void initScudoInternal(const AllocatorOptions &Options) {
Instance.init(Options);
}
void ScudoTSD::init() {
void ScudoTSD::init(bool Shared) {
UnlockRequired = Shared;
getBackendAllocator().initCache(&Cache);
Prng.init();
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));

View File

@ -24,16 +24,43 @@
namespace __scudo {
// Platform specific base thread context definitions.
#include "scudo_tls_context_android.inc"
#include "scudo_tls_context_linux.inc"
struct ALIGNED(64) ScudoTSD : public ScudoTSDPlatform {
struct ALIGNED(64) ScudoTSD {
AllocatorCache Cache;
ScudoPrng Prng;
uptr QuarantineCachePlaceHolder[4];
void init();
void init(bool Shared);
void commitBack();
INLINE bool tryLock() {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
atomic_store_relaxed(&Precedence, NanoTime());
return false;
}
INLINE void lock() {
Mutex.Lock();
atomic_store_relaxed(&Precedence, 0);
}
INLINE void unlock() {
if (!UnlockRequired)
return;
Mutex.Unlock();
}
INLINE u64 getPrecedence() {
return atomic_load_relaxed(&Precedence);
}
private:
bool UnlockRequired;
StaticSpinMutex Mutex;
atomic_uint64_t Precedence;
};
void initThread(bool MinimalInit);

View File

@ -50,7 +50,7 @@ static void initOnce() {
TSDs = reinterpret_cast<ScudoTSD *>(
MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
for (u32 i = 0; i < NumberOfTSDs; i++)
TSDs[i].init();
TSDs[i].init(/*Shared=*/true);
}
void initThread(bool MinimalInit) {

View File

@ -11,9 +11,6 @@
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TLS_ANDROID_H_
#define SCUDO_TLS_ANDROID_H_
#ifndef SCUDO_TLS_H_
# error "This file must be included inside scudo_tls.h."
#endif // SCUDO_TLS_H_
@ -30,7 +27,7 @@ ScudoTSD *getTSDAndLockSlow();
ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
ScudoTSD *TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
CHECK(TSD);
CHECK(TSD && "No TSD associated with the current thread!");
// Try to lock the currently associated context.
if (TSD->tryLock())
return TSD;
@ -39,5 +36,3 @@ ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
}
#endif // SANITIZER_LINUX && SANITIZER_ANDROID
#endif // SCUDO_TLS_ANDROID_H_

View File

@ -1,54 +0,0 @@
//===-- scudo_tls_context_android.inc ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Android specific base thread context definition.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TLS_CONTEXT_ANDROID_INC_
#define SCUDO_TLS_CONTEXT_ANDROID_INC_
#ifndef SCUDO_TLS_H_
# error "This file must be included inside scudo_tls.h."
#endif // SCUDO_TLS_H_
#if SANITIZER_LINUX && SANITIZER_ANDROID
struct ScudoTSDPlatform {
INLINE bool tryLock() {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
atomic_store_relaxed(&Precedence, NanoTime());
return false;
}
INLINE void lock() {
Mutex.Lock();
atomic_store_relaxed(&Precedence, 0);
}
INLINE void unlock() {
Mutex.Unlock();
}
INLINE u64 getPrecedence() {
return atomic_load_relaxed(&Precedence);
}
private:
StaticSpinMutex Mutex;
atomic_uint64_t Precedence;
};
#endif // SANITIZER_LINUX && SANITIZER_ANDROID
#endif // SCUDO_TLS_CONTEXT_ANDROID_INC_

View File

@ -1,29 +0,0 @@
//===-- scudo_tls_context_linux.inc -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Linux specific base thread context definition.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TLS_CONTEXT_LINUX_INC_
#define SCUDO_TLS_CONTEXT_LINUX_INC_
#ifndef SCUDO_TLS_H_
# error "This file must be included inside scudo_tls.h."
#endif // SCUDO_TLS_H_
#if SANITIZER_LINUX && !SANITIZER_ANDROID
struct ScudoTSDPlatform {
ALWAYS_INLINE void unlock() {}
};
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // SCUDO_TLS_CONTEXT_LINUX_INC_

View File

@ -30,6 +30,10 @@ THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
__attribute__((tls_model("initial-exec")))
THREADLOCAL ScudoTSD TSD;
// Fallback TSD for when the thread isn't initialized yet or is torn down. It
// can be shared between multiple threads and as such must be locked.
ScudoTSD FallbackTSD;
static void teardownThread(void *Ptr) {
uptr I = reinterpret_cast<uptr>(Ptr);
// The glibc POSIX thread-local-storage deallocation routine calls user
@ -51,6 +55,7 @@ static void teardownThread(void *Ptr) {
static void initOnce() {
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
initScudo();
FallbackTSD.init(/*Shared=*/true);
}
void initThread(bool MinimalInit) {
@ -59,7 +64,7 @@ void initThread(bool MinimalInit) {
return;
CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
GetPthreadDestructorIterations())), 0);
TSD.init();
TSD.init(/*Shared=*/false);
ScudoThreadState = ThreadInitialized;
}

View File

@ -12,9 +12,6 @@
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TLS_LINUX_H_
#define SCUDO_TLS_LINUX_H_
#ifndef SCUDO_TLS_H_
# error "This file must be included inside scudo_tls.h."
#endif // SCUDO_TLS_H_
@ -31,6 +28,8 @@ extern THREADLOCAL ThreadState ScudoThreadState;
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL ScudoTSD TSD;
extern ScudoTSD FallbackTSD;
ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
if (LIKELY(ScudoThreadState != ThreadNotInitialized))
return;
@ -38,11 +37,11 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
}
ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
if (UNLIKELY(ScudoThreadState != ThreadInitialized))
return nullptr;
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
return &FallbackTSD;
}
return &TSD;
}
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // SCUDO_TLS_LINUX_H_