[asan] asan_allocator2: by default use the StackDepot to store the stack traces instead of storing them in the redzones

llvm-svn: 171099
Kostya Serebryany 2012-12-26 06:30:02 +00:00
parent a1d2436b5f
commit a11cd57a78
6 changed files with 59 additions and 10 deletions


@@ -26,6 +26,7 @@
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 namespace __asan {
@@ -162,9 +163,8 @@ struct AsanChunk: ChunkBase {
   }
   void *AllocBeg() {
     if (from_memalign)
-      return reinterpret_cast<uptr>(
-          allocator.GetBlockBegin(reinterpret_cast<void *>(this)));
-    return Beg() - ComputeRZSize(0);
+      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+    return reinterpret_cast<void*>(Beg() - ComputeRZSize(0));
   }
   // We store the alloc/free stack traces in the chunk itself.
   u32 *AllocStackBeg() {
@@ -189,14 +189,29 @@ uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+  CHECK(id);
+  uptr size = 0;
+  const uptr *trace = StackDepotGet(id, &size);
+  CHECK_LT(size, kStackTraceMax);
+  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
+  stack->size = size;
+}
 void AsanChunkView::GetAllocStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
-                              chunk_->AllocStackSize());
+  if (flags()->use_stack_depot)
+    GetStackTraceFromId(chunk_->alloc_context_id, stack);
+  else
+    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
+                                chunk_->AllocStackSize());
 }
 void AsanChunkView::GetFreeStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
-                              chunk_->FreeStackSize());
+  if (flags()->use_stack_depot)
+    GetStackTraceFromId(chunk_->free_context_id, stack);
+  else
+    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
+                                chunk_->FreeStackSize());
 }
 class Quarantine: public AsanChunkFifoList {
@@ -341,7 +356,13 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
     m->user_requested_size = SizeClassMap::kMaxSize;
     *reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)) = size;
   }
-  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+  if (flags()->use_stack_depot) {
+    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+  } else {
+    m->alloc_context_id = 0;
+    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+  }
   uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
   // Unpoison the bulk of the memory region.
@@ -391,7 +412,12 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
   CHECK_EQ(m->free_tid, kInvalidTid);
   AsanThread *t = asanThreadRegistry().GetCurrent();
   m->free_tid = t ? t->tid() : 0;
-  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+  if (flags()->use_stack_depot) {
+    m->free_context_id = StackDepotPut(stack->trace, stack->size);
+  } else {
+    m->free_context_id = 0;
+    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+  }
   CHECK(m->chunk_state == CHUNK_QUARANTINE);
   // Poison the region.
   PoisonShadow(m->Beg(),
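
A rough sketch of the data-structure consequence (illustrative only; the field order and full set of ChunkBase fields are assumptions, not copied from this commit): each heap chunk header now carries two 32-bit depot ids, so by default the alloc/free stacks no longer have to be compressed into the redzone.

// Illustrative layout, not the actual ChunkBase from the allocator source.
struct ChunkBaseSketch {
  u32  alloc_tid;
  u32  free_tid;
  u32  alloc_context_id;   // id returned by StackDepotPut() in Allocate()
  u32  free_context_id;    // id returned by StackDepotPut() in Deallocate()
  uptr user_requested_size;
};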


@@ -104,6 +104,8 @@ struct Flags {
   bool poison_heap;
   // Report errors on malloc/delete, new/free, new/delete[], etc.
   bool alloc_dealloc_mismatch;
+  // Use stack depot instead of storing stacks in the redzones.
+  bool use_stack_depot;
 };
 Flags *flags();


@@ -108,6 +108,7 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
   ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
   ParseFlag(str, &f->poison_heap, "poison_heap");
   ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
+  ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
 }
 void InitializeFlags(Flags *f, const char *env) {
@@ -145,6 +146,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->fast_unwind_on_malloc = true;
   f->poison_heap = true;
   f->alloc_dealloc_mismatch = false;
+  f->use_stack_depot = true;  // Only affects allocator2.
   // Override from user-specified string.
   ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
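
The new flag defaults to true, so the old redzone-based behavior stays available as an opt-out. A minimal sketch (not part of this commit), assuming the usual ASAN_OPTIONS / __asan_default_options() override path that MaybeCallAsanDefaultOptions() and ParseFlagsFromString() consume:

// In the instrumented application; equivalent to running with
// ASAN_OPTIONS=use_stack_depot=0 in the environment.
extern "C" const char *__asan_default_options() {
  return "use_stack_depot=0";  // keep storing stacks in the redzones
}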


@@ -17,6 +17,7 @@
 #include "asan_stats.h"
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 namespace __asan {
@@ -62,6 +63,9 @@ static void PrintAccumulatedStats() {
   // Use lock to keep reports from mixing up.
   ScopedLock lock(&print_lock);
   stats.Print();
+  StackDepotStats *stack_depot_stats = StackDepotGetStats();
+  Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
+         stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
 }
 }  // namespace __asan


@@ -42,6 +42,12 @@ static struct {
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
 } depot;
+static StackDepotStats stats;
+StackDepotStats *StackDepotGetStats() {
+  return &stats;
+}
 static u32 hash(const uptr *stack, uptr size) {
   // murmur2
   const u32 m = 0x5bd1e995;
@@ -77,7 +83,7 @@ static StackDesc *tryallocDesc(uptr memsz) {
 }
 static StackDesc *allocDesc(uptr size) {
-  // Frist, try to allocate optimisitically.
+  // First, try to allocate optimisitically.
   uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
   StackDesc *s = tryallocDesc(memsz);
   if (s)
@@ -93,6 +99,7 @@ static StackDesc *allocDesc(uptr size) {
     if (allocsz < memsz)
      allocsz = memsz;
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    stats.mapped += allocsz;
    atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
    atomic_store(&depot.region_pos, mem, memory_order_release);
  }
@@ -156,6 +163,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) {
   }
   uptr part = (h % kTabSize) / kPartSize;
   id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+  stats.n_uniq_ids++;
   CHECK_LT(id, kMaxId);
   id |= part << kPartShift;
   CHECK_NE(id, 0);


@@ -24,6 +24,13 @@ u32 StackDepotPut(const uptr *stack, uptr size);
 // Retrieves a stored stack trace by the id.
 const uptr *StackDepotGet(u32 id, uptr *size);
+struct StackDepotStats {
+  uptr n_uniq_ids;
+  uptr mapped;
+};
+StackDepotStats *StackDepotGetStats();
 }  // namespace __sanitizer
 #endif  // SANITIZER_STACKDEPOT_H
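
For reference, a minimal sketch of the depot round trip that the allocator now relies on, using only the API declared above (the function name and PC values below are illustrative, not from the commit):

#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __sanitizer {

// Store a trace once, keep only the 32-bit id, and recover the trace later.
void StackDepotRoundTripSketch() {
  uptr pcs[3] = { 0x400100, 0x400200, 0x400300 };  // fake PCs for illustration
  u32 id = StackDepotPut(pcs, 3);                  // deduplicates identical traces
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);    // expect size == 3
  (void)trace;
  (void)size;
}

}  // namespace __sanitizer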