[Sanitizers] New sanitizer API to purge allocator quarantine.

Summary:
Purging the allocator quarantine and returning memory to the OS between
fuzzer iterations can be desirable: the quarantine is unlikely to catch
bugs in the code under fuzz across iterations, while the reduced RSS can
significantly prolong the fuzzing session.
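
As a rough sketch of the intended use (not part of this change; the fuzz
target body below is made up for illustration), a libFuzzer-style harness
could purge the quarantine after each iteration:

#include <sanitizer/allocator_interface.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  // Stand-in for the code under fuzz: allocate, copy and free a buffer.
  char *p = static_cast<char *>(malloc(size + 1));
  if (!p) return 0;
  if (size) memcpy(p, data, size);
  p[size] = '\0';
  free(p);  // The freed chunk sits in the ASan quarantine and still
            // counts towards RSS.

  // Drain the calling thread's and the global quarantines and return freed
  // pages to the OS before the next iteration (ASan-only at this point).
  __sanitizer_purge_allocator();
  return 0;
}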

Reviewers: cryptoad

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D39153

llvm-svn: 316347
Alex Shlyapnikov 2017-10-23 17:12:07 +00:00
parent b791802aef
commit 028c4cddad
9 changed files with 89 additions and 26 deletions


@@ -76,6 +76,13 @@ extern "C" {
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));
/* Drains allocator quarantines (calling thread's and global ones), returns
freed memory back to the OS and releases other non-essential internal allocator
resources in an attempt to reduce process RSS.
Currently available with ASan only.
*/
void __sanitizer_purge_allocator();
#ifdef __cplusplus
} // extern "C"
#endif
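
For code that must also compile without a sanitizer runtime, the call can be
guarded; a minimal sketch, assuming hypothetical HAS_ASAN_RUNTIME and
MAYBE_PURGE_ALLOCATOR helper macros:

#if defined(__has_feature)
#  if __has_feature(address_sanitizer)
#    define HAS_ASAN_RUNTIME 1
#  endif
#elif defined(__SANITIZE_ADDRESS__)
#  define HAS_ASAN_RUNTIME 1
#endif

#if defined(HAS_ASAN_RUNTIME)
#  include <sanitizer/allocator_interface.h>
  // Expands to the real interface call only when ASan is enabled.
#  define MAYBE_PURGE_ALLOCATOR() __sanitizer_purge_allocator()
#else
#  define MAYBE_PURGE_ALLOCATOR() ((void)0)
#endif

Call sites can then invoke MAYBE_PURGE_ALLOCATOR() unconditionally.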


@@ -716,6 +716,22 @@ struct Allocator {
return AsanChunkView(m1);
}
void Purge() {
AsanThread *t = GetCurrentThread();
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
quarantine.DrainAndRecycle(GetQuarantineCache(ms),
QuarantineCallback(GetAllocatorCache(ms)));
}
{
SpinMutexLock l(&fallback_mutex);
quarantine.DrainAndRecycle(&fallback_quarantine_cache,
QuarantineCallback(&fallback_allocator_cache));
}
allocator.ForceReleaseToOS();
}
void PrintStats() {
allocator.PrintStats();
quarantine.PrintStats();
@@ -1011,6 +1027,10 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
void __sanitizer_purge_allocator() {
instance.Purge();
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,


@@ -77,6 +77,10 @@ class CombinedAllocator {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
void ForceReleaseToOS() {
primary_.ForceReleaseToOS();
}
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))


@@ -38,6 +38,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"


@@ -120,6 +120,10 @@ class SizeClassAllocator32 {
// This is empty here. Currently only implemented in 64-bit allocator.
}
void ForceReleaseToOS() {
// Currently implemented in 64-bit allocator only.
}
void *MapWithCallback(uptr size) {
void *res = MmapOrDie(size, "SizeClassAllocator32");
MapUnmapCallback().OnMap((uptr)res, size);


@@ -92,6 +92,13 @@ class SizeClassAllocator64 {
memory_order_relaxed);
}
void ForceReleaseToOS() {
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
MaybeReleaseToOS(class_id, true /*force*/);
}
}
static bool CanAllocate(uptr size, uptr alignment) {
return size <= SizeClassMap::kMaxSize &&
alignment <= SizeClassMap::kMaxSize;
@@ -116,7 +123,7 @@ class SizeClassAllocator64 {
region->num_freed_chunks = new_num_freed_chunks;
region->stats.n_freed += n_chunks;
MaybeReleaseToOS(class_id);
MaybeReleaseToOS(class_id, false /*force*/);
}
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -786,7 +793,7 @@ class SizeClassAllocator64 {
// Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked.
void MaybeReleaseToOS(uptr class_id) {
void MaybeReleaseToOS(uptr class_id, bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
@@ -799,12 +806,16 @@ class SizeClassAllocator64 {
return; // Nothing new to release.
}
s32 interval_ms = ReleaseToOSIntervalMs();
if (interval_ms < 0)
return;
if (!force) {
s32 interval_ms = ReleaseToOSIntervalMs();
if (interval_ms < 0)
return;
if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
return; // Memory was returned recently.
if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
NanoTime()) {
return; // Memory was returned recently.
}
}
MemoryMapper memory_mapper(*this, class_id);


@@ -34,6 +34,7 @@ INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)


@@ -87,15 +87,14 @@ class Quarantine {
// is zero (it allows us to perform just one atomic read per Put() call).
CHECK((size == 0 && cache_size == 0) || cache_size != 0);
atomic_store(&max_size_, size, memory_order_relaxed);
atomic_store(&min_size_, size / 10 * 9,
memory_order_relaxed); // 90% of max size.
atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
atomic_store_relaxed(&max_size_, size);
atomic_store_relaxed(&min_size_, size / 10 * 9); // 90% of max size.
atomic_store_relaxed(&max_cache_size_, cache_size);
}
uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
uptr GetCacheSize() const {
return atomic_load(&max_cache_size_, memory_order_relaxed);
return atomic_load_relaxed(&max_cache_size_);
}
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
@@ -117,7 +116,16 @@ class Quarantine {
cache_.Transfer(c);
}
if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
Recycle(cb);
Recycle(atomic_load_relaxed(&min_size_), cb);
}
void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
{
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
recycle_mutex_.Lock();
Recycle(0, cb);
}
void PrintStats() const {
@@ -139,9 +147,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
void NOINLINE Recycle(Callback cb) {
void NOINLINE Recycle(uptr min_size, Callback cb) {
Cache tmp;
uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
{
SpinMutexLock l(&cache_mutex_);
// Go over the batches and merge partially filled ones to
@@ -201,7 +208,7 @@ class QuarantineCache {
// Total memory used, including internal accounting.
uptr Size() const {
return atomic_load(&size_, memory_order_relaxed);
return atomic_load_relaxed(&size_);
}
// Memory used for internal accounting.
@@ -225,7 +232,7 @@ class QuarantineCache {
list_.append_back(&from_cache->list_);
SizeAdd(from_cache->Size());
atomic_store(&from_cache->size_, 0, memory_order_relaxed);
atomic_store_relaxed(&from_cache->size_, 0);
}
void EnqueueBatch(QuarantineBatch *b) {
@@ -296,10 +303,10 @@ class QuarantineCache {
atomic_uintptr_t size_;
void SizeAdd(uptr add) {
atomic_store(&size_, Size() + add, memory_order_relaxed);
atomic_store_relaxed(&size_, Size() + add);
}
void SizeSub(uptr sub) {
atomic_store(&size_, Size() - sub, memory_order_relaxed);
atomic_store_relaxed(&size_, Size() - sub);
}
};


@@ -1,18 +1,21 @@
// Tests ASAN_OPTIONS=allocator_release_to_os=1
//
// RUN: %clangxx_asan -std=c++11 %s -o %t
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
//
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t force 2>&1 | FileCheck %s --check-prefix=FORCE_RELEASE
// REQUIRES: x86_64-target-arch
#include <stdlib.h>
#include <stdio.h>
#include <algorithm>
#include <stdint.h>
#include <assert.h>
#include <random>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sanitizer/allocator_interface.h>
#include <sanitizer/asan_interface.h>
void MallocReleaseStress() {
@@ -39,10 +42,13 @@ void MallocReleaseStress() {
delete[] p;
}
int main() {
int main(int argc, char **argv) {
MallocReleaseStress();
if (argc > 1 && !strcmp("force", argv[1]))
__sanitizer_purge_allocator();
__asan_print_accumulated_stats();
}
// RELEASE: mapped:{{.*}}releases: {{[1-9]}}
// NO_RELEASE: mapped:{{.*}}releases: 0
// FORCE_RELEASE: mapped:{{.*}}releases: {{[1-9]}}