[scudo] Simplify internal names (NFC)

Summary:
There is currently too much redundancy in the class, variable, and other
names in Scudo:
- we are in the namespace `__scudo`, so there is no point in naming something
  `ScudoX` only to end up with a final name of `__scudo::ScudoX`;
- a lot of types have `Allocator` in their name; given that Scudo is an
  allocator, this doubles up as well.

So change a bunch of the Scudo names to make them shorter, less redundant, and
overall simpler. They should still be pretty self-explanatory (or at least they
look so to me).
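
As a small self-contained sketch of the point (placeholder types, not the
actual Scudo sources), the before/after difference is just:

namespace __scudo {
// Before: the class name repeats what the namespace already says.
struct ScudoAllocator {};   // final name: __scudo::ScudoAllocator

// After: shorter, and just as clear once qualified.
struct Allocator {};        // final name: __scudo::Allocator
}  // namespace __scudo

int main() {
  __scudo::ScudoAllocator Old;  // "scudo" ends up in the name twice
  __scudo::Allocator New;       // same meaning, less noise
  (void)Old;
  (void)New;
  return 0;
}

Nothing outside the namespace changes in the sketch, which is the point of an
NFC rename.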

The TSD part (e.g. `__scudo::ScudoTSD`) will be done in another CL.

Reviewers: alekseyshl, eugenis

Reviewed By: alekseyshl

Subscribers: delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D49505

llvm-svn: 337557
Kostya Kortchinsky 2018-07-20 15:07:17 +00:00
parent 534f4e6dd0
commit cccd21d42c
5 changed files with 51 additions and 56 deletions

View File

@@ -62,7 +62,7 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
 #endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
 }
-static ScudoBackendAllocator &getBackendAllocator();
+static BackendT &getBackend();
 namespace Chunk {
   static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
@@ -92,9 +92,9 @@ namespace Chunk {
   static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr ClassId = Header->ClassId;
     if (ClassId)
-      return PrimaryAllocator::ClassIdToSize(ClassId) - getHeaderSize() -
+      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
           (Header->Offset << MinAlignmentLog);
-    return SecondaryAllocator::GetActuallyAllocatedSize(
+    return SecondaryT::GetActuallyAllocatedSize(
         getBackendPtr(Ptr, Header)) - getHeaderSize();
   }
@@ -103,7 +103,7 @@ namespace Chunk {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (Header->ClassId)
       return SizeOrUnusedBytes;
-    return SecondaryAllocator::GetActuallyAllocatedSize(
+    return SecondaryT::GetActuallyAllocatedSize(
         getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
   }
@@ -175,7 +175,7 @@ namespace Chunk {
 }  // namespace Chunk
 struct QuarantineCallback {
-  explicit QuarantineCallback(AllocatorCache *Cache)
+  explicit QuarantineCallback(AllocatorCacheT *Cache)
     : Cache_(Cache) {}
   // Chunk recycling function, returns a quarantined chunk to the backend,
@@ -188,10 +188,9 @@ struct QuarantineCallback {
     Chunk::eraseHeader(Ptr);
     void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
     if (Header.ClassId)
-      getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
-                                              Header.ClassId);
+      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
     else
-      getBackendAllocator().deallocateSecondary(BackendPtr);
+      getBackend().deallocateSecondary(BackendPtr);
   }
   // Internal quarantine allocation and deallocation functions. We first check
@@ -199,34 +198,33 @@ struct QuarantineCallback {
   // TODO(kostyak): figure out the best way to protect the batches.
   void *Allocate(uptr Size) {
     const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
-    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
+    return getBackend().allocatePrimary(Cache_, BatchClassId);
   }
   void Deallocate(void *Ptr) {
     const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
-    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
+    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
   }
-  AllocatorCache *Cache_;
+  AllocatorCacheT *Cache_;
   COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
 };
-typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
-typedef ScudoQuarantine::Cache ScudoQuarantineCache;
-COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
                sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
-  return reinterpret_cast<ScudoQuarantineCache *>(
-      TSD->QuarantineCachePlaceHolder);
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
 }
-struct ScudoAllocator {
+struct Allocator {
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
-  ScudoBackendAllocator BackendAllocator;
-  ScudoQuarantine AllocatorQuarantine;
+  BackendT Backend;
+  QuarantineT Quarantine;
   u32 QuarantineChunksUpToSize;
@@ -240,8 +238,8 @@ struct ScudoAllocator {
   atomic_uint8_t RssLimitExceeded;
   atomic_uint64_t RssLastCheckedAtNS;
-  explicit ScudoAllocator(LinkerInitialized)
-    : AllocatorQuarantine(LINKER_INITIALIZED) {}
+  explicit Allocator(LinkerInitialized)
+    : Quarantine(LINKER_INITIALIZED) {}
   NOINLINE void performSanityChecks();
@@ -260,10 +258,10 @@ struct ScudoAllocator {
       atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
     SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
-    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
+    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
     HardRssLimitMb = common_flags()->hard_rss_limit_mb;
     SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
-    AllocatorQuarantine.Init(
+    Quarantine.Init(
         static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
         static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
     QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
@@ -329,18 +327,18 @@ struct ScudoAllocator {
     void *BackendPtr;
     uptr BackendSize;
     u8 ClassId;
-    if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
+    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-      BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
+      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
     } else {
      BackendSize = NeededSize;
      ClassId = 0;
-      BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
+      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
     }
     if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
@@ -351,7 +349,7 @@ struct ScudoAllocator {
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && ClassId)
-      memset(BackendPtr, 0, PrimaryAllocator::ClassIdToSize(ClassId));
+      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
     UnpackedHeader Header = {};
     uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
@@ -391,7 +389,7 @@ struct ScudoAllocator {
   // quarantine chunk size threshold.
   void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                    uptr Size) {
-    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
+    const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
      Chunk::eraseHeader(Ptr);
@@ -399,12 +397,12 @@ struct ScudoAllocator {
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-        getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
-                                                Header->ClassId);
+        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
-        getBackendAllocator().deallocateSecondary(BackendPtr);
+        getBackend().deallocateSecondary(BackendPtr);
      }
     } else {
      // If a small memory amount was allocated with a larger alignment, we want
@@ -418,9 +416,8 @@ struct ScudoAllocator {
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-      AllocatorQuarantine.Put(getQuarantineCache(TSD),
-                              QuarantineCallback(&TSD->Cache), Ptr,
-                              EstimatedSize);
+      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
     }
@@ -530,15 +527,14 @@ struct ScudoAllocator {
   }
   void commitBack(ScudoTSD *TSD) {
-    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
-                              QuarantineCallback(&TSD->Cache));
-    BackendAllocator.destroyCache(&TSD->Cache);
+    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+    Backend.destroyCache(&TSD->Cache);
   }
   uptr getStats(AllocatorStat StatType) {
     initThreadMaybe();
     uptr stats[AllocatorStatCount];
-    BackendAllocator.getStats(stats);
+    Backend.getStats(stats);
     return stats[StatType];
   }
@@ -557,11 +553,11 @@ struct ScudoAllocator {
   void printStats() {
     initThreadMaybe();
-    BackendAllocator.printStats();
+    Backend.printStats();
   }
 };
-NOINLINE void ScudoAllocator::performSanityChecks() {
+NOINLINE void Allocator::performSanityChecks() {
   // Verify that the header offset field can hold the maximum offset. In the
   // case of the Secondary allocator, it takes care of alignment and the
   // offset will always be 0. In the case of the Primary, the worst case
@@ -596,7 +592,7 @@ NOINLINE void ScudoAllocator::performSanityChecks() {
 // Opportunistic RSS limit check. This will update the RSS limit status, if
 // it can, every 100ms, otherwise it will just return the current one.
-NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
+NOINLINE bool Allocator::isRssLimitExceeded() {
   u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
   const u64 CurrentCheck = MonotonicNanoTime();
   if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
@@ -626,10 +622,10 @@ NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
   return atomic_load_relaxed(&RssLimitExceeded);
 }
-static ScudoAllocator Instance(LINKER_INITIALIZED);
+static Allocator Instance(LINKER_INITIALIZED);
-static ScudoBackendAllocator &getBackendAllocator() {
-  return Instance.BackendAllocator;
+static BackendT &getBackend() {
+  return Instance.Backend;
 }
 void initScudo() {
@@ -637,7 +633,7 @@ void initScudo() {
 }
 void ScudoTSD::init() {
-  getBackendAllocator().initCache(&Cache);
+  getBackend().initCache(&Cache);
   memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
 }
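
The last hunks above keep the access pattern the file already had, just under
shorter names: one static, linker-initialized `Instance`, plus a file-local
`getBackend()` that hands out a reference to its backend member. A rough
self-contained sketch of that shape (the `Backend::init` body, the member name
`TheBackend`, and the plain default construction are stand-ins; the real code
uses `LINKER_INITIALIZED` so that no constructor has to run before malloc is
usable):

#include <cstdio>

// Stand-in for the backend allocator type (hypothetical).
struct Backend {
  void init(int ReleaseToOSIntervalMs) {
    std::printf("backend init, interval=%d ms\n", ReleaseToOSIntervalMs);
  }
};

// Stand-in for __scudo::Allocator: it owns the backend as a plain member.
struct Allocator {
  Backend TheBackend;
};

// File-scope singleton, mirroring `static Allocator Instance(LINKER_INITIALIZED);`.
static Allocator Instance;

// Mirrors getBackend(): helpers (quarantine callbacks, ScudoTSD::init, ...)
// reach the backend through this accessor rather than through the global name.
static Backend &getBackend() {
  return Instance.TheBackend;
}

int main() {
  getBackend().init(1000);
  return 0;
}

Keeping the accessor means only its name and return type had to change in this
patch; every caller simply switched from `getBackendAllocator()` to
`getBackend()`.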

View File

@@ -82,7 +82,7 @@ struct AP64 {
   static const uptr kFlags =
       SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
 };
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+typedef SizeClassAllocator64<AP64> PrimaryT;
 #else
 static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
 # if SANITIZER_WORDSIZE == 32
@@ -102,16 +102,15 @@ struct AP32 {
       SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
 };
-typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+typedef SizeClassAllocator32<AP32> PrimaryT;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 #include "scudo_allocator_secondary.h"
 #include "scudo_allocator_combined.h"
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
-    SecondaryAllocator> ScudoBackendAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryT> AllocatorCacheT;
+typedef LargeMmapAllocator SecondaryT;
+typedef CombinedAllocator<PrimaryT, AllocatorCacheT, SecondaryT> BackendT;
 void initScudo();

View File

@@ -21,7 +21,7 @@
 template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>
-class ScudoCombinedAllocator {
+class CombinedAllocator {
  public:
   void init(s32 ReleaseToOSIntervalMs) {
     Primary.Init(ReleaseToOSIntervalMs);

View File

@@ -66,7 +66,7 @@ namespace LargeChunk {
   }
 }  // namespace LargeChunk
-class ScudoLargeMmapAllocator {
+class LargeMmapAllocator {
  public:
   void Init() {
     internal_memset(this, 0, sizeof(*this));

View File

@@ -24,7 +24,7 @@
 namespace __scudo {
 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
-  AllocatorCache Cache;
+  AllocatorCacheT Cache;
   uptr QuarantineCachePlaceHolder[4];
   void init();
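
The `QuarantineCachePlaceHolder` member here pairs with the
`COMPILER_CHECK(sizeof(QuarantineCacheT) <= sizeof(ScudoTSD::QuarantineCachePlaceHolder))`
in the first file: the TSD only reserves raw storage, and the allocator
reinterprets it as the cache type where that type is actually defined. A
minimal sketch of the same placeholder-buffer idea, with hypothetical names
(`Cache`, `TSD`, `getCache`), not the real Scudo types:

#include <cstring>

// Stand-in for uptr (hypothetical; the real one comes from sanitizer_common).
typedef unsigned long uptr;

// Stand-in for the quarantine cache type (hypothetical layout).
struct Cache {
  uptr Chunks[3];
  uptr Size;
};

// Stand-in for ScudoTSD: the header only reserves opaque storage, so it does
// not need the full Cache definition.
struct TSD {
  uptr QuarantineCachePlaceHolder[4];
};

// The placeholder must be large enough for the type it will later hold; the
// real code enforces this with COMPILER_CHECK (a compile-time assertion).
static_assert(sizeof(Cache) <= sizeof(TSD::QuarantineCachePlaceHolder),
              "placeholder too small for Cache");

// Mirrors getQuarantineCache(): reinterpret the raw storage as the cache.
static Cache *getCache(TSD *T) {
  return reinterpret_cast<Cache *>(T->QuarantineCachePlaceHolder);
}

int main() {
  TSD T;
  std::memset(T.QuarantineCachePlaceHolder, 0,
              sizeof(T.QuarantineCachePlaceHolder));  // like ScudoTSD::init()
  getCache(&T)->Size = 0;
  return 0;
}

The upside, as far as the diff shows, is that `scudo_tsd.h` stays lightweight:
only the source file that defines `QuarantineCacheT` needs the cast and the
size check.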