Revert r369472 and r369441

check-sanitizer does not work on Linux

llvm-svn: 369495
Vitaly Buka 2019-08-21 05:06:21 +00:00
parent 7719495e2c
commit 93a3cbc746
11 changed files with 38 additions and 346 deletions

compiler-rt/lib/asan/asan_allocator.cpp View File

@@ -48,6 +48,8 @@ static u32 RZSize2Log(u32 rz_size) {
return res;
}
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -111,7 +113,7 @@ enum {
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != get_allocator().KMaxSize())
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
get_allocator().GetMetaData(AllocBeg(locked_version)));
@@ -428,7 +430,7 @@ struct Allocator {
bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
if (!get_allocator().CanAllocate(needed_size, alignment)) {
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
needed_size += rz_size;
using_primary_allocator = false;
}
@@ -497,7 +499,7 @@ struct Allocator {
CHECK(allocator.FromPrimary(allocated));
} else {
CHECK(!allocator.FromPrimary(allocated));
m->user_requested_size = get_allocator().KMaxSize();
m->user_requested_size = SizeClassMap::kMaxSize;
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
meta[0] = size;
meta[1] = chunk_beg;
@@ -522,10 +524,10 @@ struct Allocator {
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
if (needed_size > get_allocator().KMaxSize())
if (needed_size > SizeClassMap::kMaxSize)
thread_stats.malloc_large++;
else
thread_stats.malloced_by_size[get_allocator().ClassID(needed_size)]++;
thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
void *res = reinterpret_cast<void *>(user_beg);
if (can_fill && fl.max_malloc_fill_size) {
@@ -789,7 +791,7 @@ struct Allocator {
static Allocator instance(LINKER_INITIALIZED);
AsanAllocator &get_allocator() {
static AsanAllocator &get_allocator() {
return instance.allocator;
}
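
The UsedSize() hunk above restores a sentinel convention: chunks served by the primary allocator keep the requested size inline in the chunk header, while large chunks from the secondary allocator set user_requested_size to SizeClassMap::kMaxSize and store the real size in allocator metadata (meta[0], written at allocation time). A minimal sketch of that pattern, with illustrative names and layout rather than the actual ASan chunk:

#include <cstdint>

// Sketch only: in real ASan the sentinel is SizeClassMap::kMaxSize and the
// metadata lives in the secondary allocator, not in the chunk itself.
constexpr uintptr_t kMaxSize = 1 << 17;  // largest primary class (illustrative)

struct Chunk {
  uintptr_t user_requested_size;  // == kMaxSize for secondary allocations
  uintptr_t meta[2];              // meta[0] then holds the real size
  uintptr_t UsedSize() const {
    if (user_requested_size != kMaxSize)
      return user_requested_size;  // primary chunk: size fits inline
    return meta[0];                // secondary chunk: size in metadata
  }
};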

compiler-rt/lib/asan/asan_allocator.h View File

@@ -118,76 +118,39 @@ struct AsanMapUnmapCallback {
void OnUnmap(uptr p, uptr size) const;
};
#if defined(__aarch64__)
// AArch64 supports 39, 42 and 48-bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
#if SANITIZER_ANDROID
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap64;
#else
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap64;
#endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap64 SizeClassMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
using AddressSpaceView = AddressSpaceViewTy;
};
template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
typedef CompactSizeClassMap SizeClassMap32;
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 16;
typedef __asan::SizeClassMap32 SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32ASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using Allocator32 = Allocator32ASVT<LocalAddressSpaceView>;
using Allocator32or64 = RuntimeSelectAllocator<Allocator32, Allocator64>;
static const uptr kMaxNumberOfSizeClasses =
SizeClassMap32::kNumClasses < SizeClassMap64::kNumClasses
? SizeClassMap64::kNumClasses
: SizeClassMap32::kNumClasses;
template <typename AddressSpaceView>
using PrimaryAllocatorASVT =
RuntimeSelectAllocator<Allocator32ASVT<AddressSpaceView>,
Allocator64ASVT<AddressSpaceView>>;
#elif SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_FUCHSIA
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# elif defined(__sparc__)
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA,
// so there is no need for different values for different VMAs.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
typedef DefaultSizeClassMap SizeClassMap;
#elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
static const uptr kMaxNumberOfSizeClasses = SizeClassMap::kNumClasses;
# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
@@ -201,9 +164,9 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else // Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
static const uptr kMaxNumberOfSizeClasses = SizeClassMap::kNumClasses;
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
@@ -217,14 +180,16 @@ struct AP32 {
};
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
template <typename AddressSpaceView>
using AsanAllocatorASVT =
CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = AsanAllocator::AllocatorCache;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
@@ -261,7 +226,5 @@ void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
AsanAllocator &get_allocator();
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
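
The AP64/AP32 structs above follow the sanitizer_common allocator-parameter convention: every tuning knob (space base and size, metadata size, size-class map, callbacks, flags) is a static member of one traits struct passed as a single template argument, so adding a knob never changes the allocator's template signature. A stripped-down sketch of the pattern; GenericAllocator and MyParams are hypothetical stand-ins, not the real SizeClassAllocator64:

#include <cstdint>
using uptr = uintptr_t;

struct MyParams {
  static const uptr kSpaceBeg = 0x600000000000ULL;  // illustrative values
  static const uptr kSpaceSize = 0x40000000000ULL;  // 4T
  static const uptr kMetadataSize = 0;
};

template <class Params>
class GenericAllocator {
 public:
  // All configuration is read off the traits struct at compile time.
  static bool Contains(uptr p) {
    return p >= Params::kSpaceBeg &&
           p < Params::kSpaceBeg + Params::kSpaceSize;
  }
};

using MyAllocator = GenericAllocator<MyParams>;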

compiler-rt/lib/asan/asan_stats.cpp View File

@@ -10,7 +10,6 @@
//
// Code related to statistics collected by AddressSanitizer.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stats.h"
@@ -31,9 +30,9 @@ void AsanStats::Clear() {
}
static void PrintMallocStatsArray(const char *prefix,
uptr *array, uptr size) {
uptr (&array)[kNumberOfSizeClasses]) {
Printf("%s", prefix);
for (uptr i = 0; i < size; i++) {
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (!array[i]) continue;
Printf("%zu:%zu; ", i, array[i]);
}
@@ -51,8 +50,7 @@ void AsanStats::Print() {
(mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
mmaps, munmaps);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size,
get_allocator().KNumClasses());
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
Printf("Stats: malloc large: %zu\n", malloc_large);
}
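
The restored PrintMallocStatsArray signature uses the array-by-reference idiom uptr (&array)[N]: the extent is part of the parameter type, so the separate size argument (and the KNumClasses() accessor it required) becomes unnecessary, and passing an array of the wrong length fails to compile. A self-contained sketch; the value 53 is illustrative, the real bound comes from SizeClassMap::kNumClasses:

#include <cstddef>
#include <cstdio>
using uptr = std::size_t;

constexpr uptr kNumberOfSizeClasses = 53;  // illustrative bound

// The reference-to-array parameter carries its extent in the type.
static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  std::printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++)
    if (array[i]) std::printf("%zu:%zu; ", i, array[i]);
  std::printf("\n");
}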

compiler-rt/lib/asan/asan_stats.h View File

@@ -38,7 +38,7 @@ struct AsanStats {
uptr munmaps;
uptr munmaped;
uptr malloc_large;
uptr malloced_by_size[kMaxNumberOfSizeClasses];
uptr malloced_by_size[kNumberOfSizeClasses];
// Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { }

compiler-rt/lib/lsan/lsan_allocator.h View File

@@ -49,46 +49,8 @@ struct ChunkMetadata {
u32 stack_trace_id;
};
#if defined(__aarch64__)
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
using AddressSpaceView = AddressSpaceViewTy;
};
template <typename AddressSpaceView>
using Allocator32ASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator32 = Allocator32ASVT<LocalAddressSpaceView>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
template <typename AddressSpaceView>
using PrimaryAllocatorASVT =
RuntimeSelectAllocator<Allocator32ASVT<AddressSpaceView>,
Allocator64ASVT<AddressSpaceView>>;
#elif defined(__mips64) || defined(__i386__) || defined(__arm__)
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
defined(__arm__)
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
@@ -102,6 +64,7 @@ struct AP32 {
};
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#elif defined(__x86_64__) || defined(__powerpc64__)
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
@@ -123,13 +86,13 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif
template <typename AddressSpaceView>
using AllocatorASVT = CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using Allocator = AllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = Allocator::AllocatorCache;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
Allocator::AllocatorCache *GetAllocatorCache();
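
Both this file and asan_allocator.h use the *ASVT naming pattern: each allocator is templated on an AddressSpaceView, and LocalAddressSpaceView instantiates the in-process case, so out-of-process tools can substitute a view that fetches memory from a target process. A rough sketch of the mechanism with simplified placeholder names (the real LocalAddressSpaceView::Load also takes an element count):

struct LocalView {
  // In-process view: loading target memory is just a dereference.
  template <typename T>
  static const T *Load(const T *target_address) { return target_address; }
};

template <class AddressSpaceView>
class RegionMap {
 public:
  unsigned ClassOf(const unsigned *by_region, unsigned idx) const {
    // All bookkeeping reads go through the view type.
    return *AddressSpaceView::Load(&by_region[idx]);
  }
};

using LocalRegionMap = RegionMap<LocalView>;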

compiler-rt/lib/sanitizer_common/sanitizer_allocator.h View File

@@ -75,7 +75,6 @@ INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
#include "sanitizer_runtime_select_allocator.h"
} // namespace __sanitizer

compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h View File

@@ -41,10 +41,6 @@ class CombinedAllocator {
secondary_.Init();
}
bool CanAllocate(uptr size, uptr alignment) {
return primary_.CanAllocate(size, alignment);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
@@ -198,10 +194,6 @@ class CombinedAllocator {
secondary_.ForEachChunk(callback, arg);
}
uptr KNumClasses() { return primary_.KNumClasses(); }
uptr KMaxSize() { return primary_.KMaxSize(); }
uptr ClassID(uptr size) { return primary_.ClassID(size); }
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
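
For context on the CanAllocate hunks: CombinedAllocator routes a request to the primary size-class allocator when the size and alignment fit, and to the secondary (large-object) allocator otherwise; the revert just moves that predicate back to a static call on the primary. Roughly, with placeholder names and error handling omitted:

using uptr = unsigned long;

// Sketch of the routing; not the actual CombinedAllocator::Allocate.
template <class Primary, class Secondary, class Cache>
void *CombinedAllocate(Primary &primary, Secondary &secondary, Cache *cache,
                       uptr size, uptr alignment) {
  if (Primary::CanAllocate(size, alignment))
    return cache->Allocate(&primary, Primary::ClassID(size));
  return secondary.Allocate(size, alignment);  // large or over-aligned
}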

compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h View File

@@ -271,9 +271,6 @@ class SizeClassAllocator32 {
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static uptr KNumClasses() { return SizeClassMap::kNumClasses; }
static uptr KMaxSize() { return SizeClassMap::kMaxSize; }
private:
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h View File

@@ -319,9 +319,6 @@ class SizeClassAllocator64 {
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
static uptr KNumClasses() { return SizeClassMap::kNumClasses; }
static uptr KMaxSize() { return SizeClassMap::kMaxSize; }
// A packed array of counters. Each counter occupies 2^n bits, enough to store
// counter's max_value. Ctor will try to allocate the required buffer via
// mapper->MapPackedCounterArrayBuffer and the caller is expected to check
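
Since the packed-counter comment above is cut off at the hunk boundary, here is a sketch of the structure it describes: each counter occupies a power-of-two number of bits chosen to fit its maximum value. This sketch hard-codes 4-bit saturating counters instead of sizing them from max_value and a mapper-provided buffer:

#include <cstddef>
#include <cstdint>
#include <vector>

class PackedCounters {
  enum : uint64_t { kBits = 4, kMax = (1ULL << kBits) - 1 };
  std::vector<uint64_t> buf;

 public:
  explicit PackedCounters(size_t n) : buf((n * kBits + 63) / 64) {}
  void Inc(size_t i) {
    uint64_t &w = buf[i * kBits / 64];
    uint64_t shift = (i * kBits) % 64;
    if (((w >> shift) & kMax) < kMax)  // saturate instead of overflowing
      w += (uint64_t)1 << shift;
  }
  uint64_t Get(size_t i) const {
    return (buf[i * kBits / 64] >> (i * kBits % 64)) & kMax;
  }
};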

compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h View File

@@ -1,179 +0,0 @@
//===-- sanitizer_runtime_select_allocator.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Select one of the two allocators at runtime.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_RUNTIME_SELECT_ALLOCATOR_H
#define SANITIZER_RUNTIME_SELECT_ALLOCATOR_H
template <class Allocator1, class Allocator2>
class RuntimeSelectAllocator {
Allocator1 a1;
Allocator2 a2;
public:
bool use_first_allocator;
class RuntimeSelectAllocatorCache {
typename Allocator1::AllocatorCache a1;
typename Allocator2::AllocatorCache a2;
public:
void Init(AllocatorGlobalStats *s) {
if (this->use_first_allocator)
a1.Init(s);
else
a2.Init(s);
}
void *Allocate(RuntimeSelectAllocator *allocator, uptr class_id) {
if (allocator->use_first_allocator)
return a1.Allocate(&allocator->a1, class_id);
return a2.Allocate(&allocator->a2, class_id);
}
void Deallocate(RuntimeSelectAllocator *allocator, uptr class_id, void *p) {
if (allocator->use_first_allocator)
a1.Deallocate(&allocator->a1, class_id, p);
else
a2.Deallocate(&allocator->a2, class_id, p);
}
void Drain(RuntimeSelectAllocator *allocator) {
if (allocator->use_first_allocator)
a1.Drain(&allocator->a1);
else
a2.Drain(&allocator->a2);
}
void Destroy(RuntimeSelectAllocator *allocator, AllocatorGlobalStats *s) {
if (allocator->use_first_allocator)
a1.Destroy(&allocator->a1, s);
else
a2.Destroy(&allocator->a2, s);
}
};
using MapUnmapCallback = typename Allocator1::MapUnmapCallback;
using AddressSpaceView = typename Allocator1::AddressSpaceView;
using AllocatorCache = RuntimeSelectAllocatorCache;
void Init(s32 release_to_os_interval_ms) {
// Use the first allocator when the address
// space is too small for the 64-bit allocator.
use_first_allocator = GetMaxVirtualAddress() < (((uptr)1ULL << 48) - 1);
if (use_first_allocator)
a1.Init(release_to_os_interval_ms);
else
a2.Init(release_to_os_interval_ms);
}
bool CanAllocate(uptr size, uptr alignment) {
if (use_first_allocator)
return Allocator1::CanAllocate(size, alignment);
return Allocator2::CanAllocate(size, alignment);
}
uptr ClassID(uptr size) {
if (use_first_allocator)
return Allocator1::ClassID(size);
return Allocator2::ClassID(size);
}
uptr KNumClasses() {
if (use_first_allocator)
return Allocator1::KNumClasses();
return Allocator2::KNumClasses();
}
uptr KMaxSize() {
if (use_first_allocator)
return Allocator1::KMaxSize();
return Allocator2::KMaxSize();
}
bool PointerIsMine(const void *p) {
if (use_first_allocator)
return a1.PointerIsMine(p);
return a2.PointerIsMine(p);
}
void *GetMetaData(const void *p) {
if (use_first_allocator)
return a1.GetMetaData(p);
return a2.GetMetaData(p);
}
uptr GetSizeClass(const void *p) {
if (use_first_allocator)
return a1.GetSizeClass(p);
return a2.GetSizeClass(p);
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
if (use_first_allocator)
a1.ForEachChunk(callback, arg);
else
a2.ForEachChunk(callback, arg);
}
void TestOnlyUnmap() {
if (use_first_allocator)
a1.TestOnlyUnmap();
else
a2.TestOnlyUnmap();
}
void ForceLock() {
if (use_first_allocator)
a1.ForceLock();
else
a2.ForceLock();
}
void ForceUnlock() {
if (use_first_allocator)
a1.ForceUnlock();
else
a2.ForceUnlock();
}
void *GetBlockBegin(const void *p) {
if (use_first_allocator)
return a1.GetBlockBegin(p);
return a2.GetBlockBegin(p);
}
uptr GetActuallyAllocatedSize(void *p) {
if (use_first_allocator)
return a1.GetActuallyAllocatedSize(p);
return a2.GetActuallyAllocatedSize(p);
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
if (use_first_allocator)
a1.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
else
a2.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
s32 ReleaseToOSIntervalMs() const {
if (use_first_allocator)
return a1.ReleaseToOSIntervalMs();
return a2.ReleaseToOSIntervalMs();
}
void ForceReleaseToOS() {
if (use_first_allocator)
a1.ForceReleaseToOS();
else
a2.ForceReleaseToOS();
}
void PrintStats() {
if (use_first_allocator)
a1.PrintStats();
else
a2.PrintStats();
}
};
#endif // SANITIZER_RUNTIME_SELECT_ALLOCATOR_H
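
The deleted class wraps two statically-typed allocators and dispatches on a flag set once in Init(), picking the 32-bit-style allocator when the usable address space is below 48 bits (the 39- and 42-bit AArch64 VMAs this patch set targeted). A stripped-down sketch of the same idea; MaxUserAddress() and the single Allocate() entry point are placeholders, not sanitizer_common APIs:

#include <cstdint>
using uptr = uintptr_t;

uptr MaxUserAddress();  // hypothetical platform query

template <class A1, class A2>
class SelectAllocator {
  A1 a1;
  A2 a2;
  bool use_first;

 public:
  void Init() {
    // Prefer the compact allocator when the VMA is smaller than 48 bits.
    use_first = MaxUserAddress() < (((uptr)1 << 48) - 1);
    use_first ? a1.Init() : a2.Init();
  }
  void *Allocate(uptr size) {
    return use_first ? a1.Allocate(size) : a2.Allocate(size);
  }
};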

compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp View File

@@ -160,9 +160,6 @@ using Allocator32CompactASVT =
SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
using Allocator32or64Compact =
RuntimeSelectAllocator<Allocator32Compact, Allocator64Compact>;
template <class SizeClassMap>
void TestSizeClassMap() {
typedef SizeClassMap SCMap;
@@ -277,13 +274,6 @@ TEST(SanitizerCommon, SizeClassAllocator64Compact) {
TestSizeClassAllocator<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64Compact) {
Allocator32or64Compact::UseAllocator1 = false;
TestSizeClassAllocator<Allocator32or64Compact>();
Allocator32or64Compact::UseAllocator1 = true;
TestSizeClassAllocator<Allocator32or64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator64Dense) {
TestSizeClassAllocator<Allocator64Dense>();
}
@@ -367,12 +357,6 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactMetadataStress) {
Allocator32or64Compact::UseAllocator1 = false;
SizeClassAllocatorMetadataStress<Allocator32or64Compact>();
Allocator32or64Compact::UseAllocator1 = true;
SizeClassAllocatorMetadataStress<Allocator32or64Compact>();
}
#endif
#endif
@@ -420,12 +404,6 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactGetBlockBegin) {
Allocator32or64Compact::UseAllocator1 = false;
SizeClassAllocatorGetBlockBeginStress<Allocator32or64Compact>(1ULL << 33);
Allocator32or64Compact::UseAllocator1 = true;
SizeClassAllocatorGetBlockBeginStress<Allocator32or64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
// Does not have > 4Gb for each class.
@@ -716,12 +694,6 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
TEST(SanitizerCommon, CombinedAllocator64Compact) {
TestCombinedAllocator<Allocator64Compact>();
}
TEST(SanitizerCommon, CombinedRuntimeSelectAllocator) {
Allocator32or64Compact::UseAllocator1 = false;
TestCombinedAllocator<Allocator32or64Compact>();
Allocator32or64Compact::UseAllocator1 = true;
TestCombinedAllocator<Allocator32or64Compact>();
}
#endif
TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
@@ -783,12 +755,6 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
TestSizeClassAllocatorLocalCache<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactLocalCache) {
Allocator32or64Compact::UseAllocator1 = false;
TestSizeClassAllocatorLocalCache<Allocator32or64Compact>();
Allocator32or64Compact::UseAllocator1 = true;
TestSizeClassAllocatorLocalCache<Allocator32or64Compact>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
@@ -1367,12 +1333,6 @@ TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactReleaseFreeMemoryToOS) {
Allocator32or64Compact::UseAllocator1 = false;
TestReleaseFreeMemoryToOS<Allocator32or64Compact>();
Allocator32or64Compact::UseAllocator1 = true;
TestReleaseFreeMemoryToOS<Allocator32or64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();