[Sanitizer] Make StackTrace a lightweight reference to an array of PCs, and
introduce a BufferedStackTrace class, which owns this array.

Summary:
This change splits the __sanitizer::StackTrace class into a lightweight
__sanitizer::StackTrace, which doesn't own the array of PCs, and a
BufferedStackTrace, which does. This will let us simplify the StackDepot
interface and eventually merge __sanitizer::StackTrace with
__tsan::StackTrace.
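
For reference, a minimal standalone sketch of the resulting layout. The member
names (trace, size, trace_buffer, top_frame_bp) and kStackTraceMax mirror the
new sanitizer_stacktrace.h below; the uptr typedef and the Print() body are
simplified placeholders, not the runtime's code:

#include <cstddef>
#include <cstdio>

typedef std::size_t uptr;
static const uptr kStackTraceMax = 256;

// Lightweight view: references a PC array it does not own.
struct StackTrace {
  const uptr *trace;
  uptr size;
  StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
  void Print() const {
    // Simplified: the real Print() symbolizes each PC.
    for (uptr i = 0; i < size; i++)
      std::printf("  #%zu 0x%zx\n", i, trace[i]);
  }
};

// Owning variant: embeds the buffer and keeps the unwinding state.
struct BufferedStackTrace : public StackTrace {
  uptr trace_buffer[kStackTraceMax];
  uptr top_frame_bp;  // Optional bp of a top frame.
  BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
};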

Test Plan: regression test suite.

Reviewers: kcc, dvyukov

Reviewed By: dvyukov

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D5985

llvm-svn: 220635
Author: Alexey Samsonov
Date:   2014-10-26 03:35:14 +00:00
Parent: 106c8e0898
Commit: 9c85927012

26 changed files with 241 additions and 272 deletions


@@ -45,8 +45,8 @@ class AsanChunkView {
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
void GetAllocStack(StackTrace *stack);
void GetFreeStack(StackTrace *stack);
StackTrace GetAllocStack();
StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -139,20 +139,20 @@ struct AsanThreadLocalMallocStorage {
AsanThreadLocalMallocStorage() {}
};
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);
void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
BufferedStackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
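
With the by-value accessors, the caller no longer supplies storage. A hedged
sketch of the new pattern (assuming a valid AsanChunkView `chunk`, as in the
AsanGetStack hunk below):

// Before: StackTrace stack; chunk.GetAllocStack(&stack);  // copied the PCs
// After: `stack` is a view over the depot-owned PC array; nothing is copied.
StackTrace stack = chunk.GetAllocStack();
stack.Print();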


@@ -182,20 +182,19 @@ uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static void GetStackTraceFromId(u32 id, StackTrace *stack) {
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
uptr size = 0;
const uptr *trace = StackDepotGet(id, &size);
CHECK(trace);
stack->CopyFrom(trace, size);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
void AsanChunkView::GetAllocStack(StackTrace *stack) {
GetStackTraceFromId(chunk_->alloc_context_id, stack);
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
}
void AsanChunkView::GetFreeStack(StackTrace *stack) {
GetStackTraceFromId(chunk_->free_context_id, stack);
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
}
struct QuarantineCallback;
@@ -263,7 +262,7 @@ void ReInitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
@@ -391,15 +390,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
return res;
}
static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
static void ReportInvalidFree(void *ptr, u8 chunk_state,
BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
static void AtomicallySetQuarantineFlag(AsanChunk *m,
void *ptr, StackTrace *stack) {
static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
@@ -410,8 +410,8 @@ static void AtomicallySetQuarantineFlag(AsanChunk *m,
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
StackTrace *stack, AllocType alloc_type) {
static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
@@ -447,7 +447,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
}
}
static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
@@ -464,7 +464,8 @@ static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
QuarantineChunk(m, ptr, stack, alloc_type);
}
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
static void *Reallocate(void *old_ptr, uptr new_size,
BufferedStackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
@@ -577,25 +578,25 @@ void PrintInternalAllocatorStats() {
allocator.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
return Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
Deallocate(ptr, 0, stack, alloc_type);
}
void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
Deallocate(ptr, size, stack, alloc_type);
}
void *asan_malloc(uptr size, StackTrace *stack) {
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return AllocatorReturnNull();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
@@ -606,7 +607,7 @@ void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
return ptr;
}
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
@@ -616,11 +617,11 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
return Reallocate(p, size, stack);
}
void *asan_valloc(uptr size, StackTrace *stack) {
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
@@ -631,7 +632,7 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
BufferedStackTrace *stack) {
void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;


@@ -86,22 +86,19 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
StackTrace stack;
StackTrace stack(nullptr, 0);
if (alloc_stack) {
if (chunk.AllocTid() == kInvalidTid) return 0;
chunk.GetAllocStack(&stack);
stack = chunk.GetAllocStack();
if (thread_id) *thread_id = chunk.AllocTid();
} else {
if (chunk.FreeTid() == kInvalidTid) return 0;
chunk.GetFreeStack(&stack);
stack = chunk.GetFreeStack();
if (thread_id) *thread_id = chunk.FreeTid();
}
if (trace && size) {
if (size > kStackTraceMax)
size = kStackTraceMax;
if (size > stack.size)
size = stack.size;
size = Min(size, Min(stack.size, kStackTraceMax));
for (uptr i = 0; i < size; i++)
trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);


@@ -440,16 +440,15 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from entirely different place
// (e.g. use-after-scope, or different thread's stack).
StackTrace alloca_stack;
#if defined(__powerpc64__) && defined(__BIG_ENDIAN__)
// On PowerPC64 ELFv1, the address of a function actually points to a
// three-doubleword data structure with the first field containing
// the address of the function's code.
access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
#endif
alloca_stack.trace[0] = access.frame_pc + 16;
alloca_stack.size = 1;
access.frame_pc += 16;
Printf("%s", d.EndLocation());
StackTrace alloca_stack(&access.frame_pc, 1);
alloca_stack.Print();
InternalMmapVector<StackVarDescr> vars(16);
@@ -519,8 +518,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread =
GetThreadContextByTidLocked(chunk.AllocTid());
StackTrace alloc_stack;
chunk.GetAllocStack(&alloc_stack);
StackTrace alloc_stack = chunk.GetAllocStack();
char tname[128];
Decorator d;
AsanThreadContext *free_thread = 0;
@@ -530,8 +528,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
StackTrace free_stack;
chunk.GetFreeStack(&free_stack);
StackTrace free_stack = chunk.GetFreeStack();
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid,
@@ -581,9 +578,7 @@ void DescribeThread(AsanThreadContext *context) {
" created by T%d%s here:\n", context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
Printf("%s", str.data());
uptr stack_size;
const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
StackTrace::PrintStack(stack_trace, stack_size);
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@@ -684,7 +679,7 @@ void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
ReportErrorSummary("SEGV", &stack);
}
void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -703,7 +698,7 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
}
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
StackTrace *free_stack) {
BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -726,7 +721,7 @@ void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
"ASAN_OPTIONS=new_delete_type_mismatch=0\n");
}
void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -743,7 +738,7 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
ReportErrorSummary("bad-free", &stack);
}
void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
static const char *alloc_names[] =
@@ -766,7 +761,7 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -779,7 +774,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
ReportErrorSummary("bad-malloc_usable_size", stack);
}
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -792,9 +788,10 @@ void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
}
void ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack) {
void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset1, uptr length1,
const char *offset2, uptr length2,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
char bug_type[100];
@@ -811,7 +808,7 @@ void ReportStringFunctionMemoryRangesOverlap(
}
void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
StackTrace *stack) {
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
const char *bug_type = "negative-size-param";
@@ -825,7 +822,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
StackTrace *stack) {
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Report("ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_annotate_contiguous_container:\n"
@@ -855,12 +852,9 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
if (stack_id1 && stack_id2) {
Printf("These globals were registered at these points:\n");
Printf(" [1]:\n");
uptr stack_size;
const uptr *stack_trace = StackDepotGet(stack_id1, &stack_size);
StackTrace::PrintStack(stack_trace, stack_size);
StackDepotGet(stack_id1).Print();
Printf(" [2]:\n");
stack_trace = StackDepotGet(stack_id2, &stack_size);
StackTrace::PrintStack(stack_trace, stack_size);
StackDepotGet(stack_id2).Print();
}
Report("HINT: if you don't care about these warnings you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
@@ -900,8 +894,8 @@ static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
}
// ----------------------- Mac-specific reports ----------------- {{{1
void WarnMacFreeUnallocated(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack) {
// Just print a warning here.
Printf("free_common(%p) -- attempting to free unallocated memory.\n"
"AddressSanitizer is ignoring this error on Mac OS now.\n",
@@ -911,8 +905,8 @@ void WarnMacFreeUnallocated(
DescribeHeapAddress(addr, 1);
}
void ReportMacMzReallocUnknown(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",
@@ -922,8 +916,8 @@ void ReportMacMzReallocUnknown(
DescribeHeapAddress(addr, 1);
}
void ReportMacCfReallocUnknown(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",


@@ -57,35 +57,41 @@ void NORETURN
void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
void *context, uptr addr);
void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
StackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
BufferedStackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type);
void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
StackTrace *stack);
void NORETURN
ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack);
void NORETURN ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack);
ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
void NORETURN
ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack);
ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack);
void NORETURN
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid,
uptr new_mid, StackTrace *stack);
ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset1, uptr length1,
const char *offset2, uptr length2,
BufferedStackTrace *stack);
void NORETURN ReportStringFunctionSizeOverflow(uptr offset, uptr size,
BufferedStackTrace *stack);
void NORETURN
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
BufferedStackTrace *stack);
void NORETURN
ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
void NORETURN ReportMacMzReallocUnknown(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
void NORETURN ReportMacCfReallocUnknown(
uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack);
void NORETURN ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
const char *zone_name,
BufferedStackTrace *stack);
void NORETURN ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
const char *zone_name,
BufferedStackTrace *stack);
} // namespace __asan


@@ -25,8 +25,9 @@ namespace __asan {
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
uptr bp, void *context, bool fast) {
void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr pc, uptr bp, void *context,
bool fast) {
#if SANITIZER_WINDOWS
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
@@ -53,14 +54,14 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
// don't want stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE(max_size, fast) \
StackTrace stack; \
BufferedStackTrace stack; \
if (max_size <= 2) { \
stack.size = max_size; \
if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace[0] = StackTrace::GetCurrentPc(); \
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) \
stack.trace[1] = GET_CALLER_PC(); \
stack.trace_buffer[1] = GET_CALLER_PC(); \
} \
} else { \
GetStackTraceWithPcBpAndContext(&stack, max_size, \
@@ -69,12 +70,12 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
}
#define GET_STACK_TRACE_FATAL(pc, bp) \
StackTrace stack; \
BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
StackTrace stack; \
BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
common_flags()->fast_unwind_on_fatal)
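
Call sites of these macros are unchanged, but each expansion now places a
whole BufferedStackTrace, including its kStackTraceMax-entry buffer, on the
caller's stack. A sketch of a hypothetical caller (the function name is
illustrative, not from this patch):

void ReportSomethingFatal(uptr pc, uptr bp) {
  // Declares `BufferedStackTrace stack` and unwinds into its buffer.
  GET_STACK_TRACE_FATAL(pc, bp);
  stack.Print();
}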


@@ -52,19 +52,19 @@ TEST(AddressSanitizer, InternalSimpleDeathTest) {
static void MallocStress(size_t n) {
u32 seed = my_rand();
StackTrace stack1;
stack1.trace[0] = 0xa123;
stack1.trace[1] = 0xa456;
BufferedStackTrace stack1;
stack1.trace_buffer[0] = 0xa123;
stack1.trace_buffer[1] = 0xa456;
stack1.size = 2;
StackTrace stack2;
stack2.trace[0] = 0xb123;
stack2.trace[1] = 0xb456;
BufferedStackTrace stack2;
stack2.trace_buffer[0] = 0xb123;
stack2.trace_buffer[1] = 0xb456;
stack2.size = 2;
StackTrace stack3;
stack3.trace[0] = 0xc123;
stack3.trace[1] = 0xc456;
BufferedStackTrace stack3;
stack3.trace_buffer[0] = 0xc123;
stack3.trace_buffer[1] = 0xc456;
stack3.size = 2;
std::vector<void *> vec;
@@ -140,8 +140,8 @@ TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
}
TEST(AddressSanitizer, QuarantineTest) {
StackTrace stack;
stack.trace[0] = 0x890;
BufferedStackTrace stack;
stack.trace_buffer[0] = 0x890;
stack.size = 1;
const int size = 1024;
@@ -161,8 +161,8 @@ TEST(AddressSanitizer, QuarantineTest) {
void *ThreadedQuarantineTestWorker(void *unused) {
(void)unused;
u32 seed = my_rand();
StackTrace stack;
stack.trace[0] = 0x890;
BufferedStackTrace stack;
stack.trace_buffer[0] = 0x890;
stack.size = 1;
for (size_t i = 0; i < 1000; i++) {
@@ -188,8 +188,8 @@ TEST(AddressSanitizer, ThreadedQuarantineTest) {
void *ThreadedOneSizeMallocStress(void *unused) {
(void)unused;
StackTrace stack;
stack.trace[0] = 0x890;
BufferedStackTrace stack;
stack.trace_buffer[0] = 0x890;
stack.size = 1;
const size_t kNumMallocs = 1000;
for (int iter = 0; iter < 1000; iter++) {
@@ -241,8 +241,8 @@ TEST(AddressSanitizer, LoadStoreCallbacks) {
uptr buggy_ptr;
__asan_test_only_reported_buggy_pointer = &buggy_ptr;
StackTrace stack;
stack.trace[0] = 0x890;
BufferedStackTrace stack;
stack.trace_buffer[0] = 0x890;
stack.size = 1;
for (uptr len = 16; len <= 32; len++) {


@@ -15,17 +15,17 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#define GET_STACK_TRACE(max_size, fast) \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
#define GET_STACK_TRACE(max_size, fast) \
BufferedStackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
}
#define GET_STACK_TRACE_FATAL \


@@ -355,9 +355,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size);
StackTrace::PrintStack(trace, size);
StackDepotGet(stack_trace_id).Print();
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
@@ -372,10 +370,9 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
uptr resolution = flags()->resolution;
u32 stack_trace_id = 0;
if (resolution > 0) {
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
stack_trace_id = StackDepotPut(trace, size);
StackTrace stack = StackDepotGet(m.stack_trace_id());
uptr size = Min(stack.size, resolution);
stack_trace_id = StackDepotPut(stack.trace, size);
} else {
stack_trace_id = m.stack_trace_id();
}
@@ -487,11 +484,10 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size);
for (uptr i = 0; i < size; i++) {
Suppression *s =
GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
StackTrace stack = StackDepotGet(stack_trace_id);
for (uptr i = 0; i < stack.size; i++) {
Suppression *s = GetSuppressionForAddr(
StackTrace::GetPreviousInstructionPc(stack.trace[i]));
if (s) return s;
}
return 0;


@@ -187,7 +187,7 @@ static void InitializeFlags(Flags *f, const char *options) {
ParseFlagsFromString(f, options);
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
bool request_fast_unwind) {
MsanThread *t = GetCurrentThread();
if (!t || !StackTrace::WillUseFastUnwind(request_fast_unwind)) {


@@ -72,7 +72,7 @@ void MsanDie();
void PrintWarning(uptr pc, uptr bp);
void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin);
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
bool request_fast_unwind);
void ReportUMR(StackTrace *stack, u32 origin);
@@ -97,27 +97,24 @@ void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack);
// the previous origin id.
u32 ChainOrigin(u32 id, StackTrace *stack);
#define GET_MALLOC_STACK_TRACE \
StackTrace stack; \
stack.size = 0; \
if (__msan_get_track_origins() && msan_inited) \
GetStackTrace(&stack, common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
common_flags()->fast_unwind_on_malloc)
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
StackTrace stack; \
stack.size = 0; \
if (__msan_get_track_origins() > 1 && msan_inited) \
GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
if (__msan_get_track_origins() && msan_inited) \
GetStackTrace(&stack, common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
common_flags()->fast_unwind_on_malloc)
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
StackTrace stack; \
stack.size = 0; \
if (msan_inited) \
GetStackTrace(&stack, kStackTraceMax, pc, bp, \
common_flags()->fast_unwind_on_fatal)
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (__msan_get_track_origins() > 1 && msan_inited) \
GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
common_flags()->fast_unwind_on_malloc)
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (msan_inited) \
GetStackTrace(&stack, kStackTraceMax, pc, bp, \
common_flags()->fast_unwind_on_fatal)
#define GET_STORE_STACK_TRACE \
GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
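
The explicit `stack.size = 0;` lines can go away because the
BufferedStackTrace constructor already initializes size to 0 (see
StackTrace(trace_buffer, 0) in sanitizer_stacktrace.h below). A sketch of the
consequence when the macro's condition is false and no unwind happens:

BufferedStackTrace stack;  // size == 0 straight from the constructor
stack.Print();             // prints "<empty stack>" rather than reading garbage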


@@ -54,7 +54,7 @@ static void DescribeStackOrigin(const char *so, uptr pc) {
// For some reason function address in LLVM IR is 1 less then the address
// of the first instruction.
pc += 1;
StackTrace::PrintStack(&pc, 1);
StackTrace(&pc, 1).Print();
}
}
@@ -77,20 +77,16 @@ static void DescribeOrigin(u32 id) {
DescribeStackOrigin(so, pc);
break;
} else if (prev_o.isHeapRoot()) {
uptr size = 0;
const uptr *trace = StackDepotGet(stack_id, &size);
Printf(" %sUninitialized value was created by a heap allocation%s\n",
d.Origin(), d.End());
StackTrace::PrintStack(trace, size);
StackDepotGet(stack_id).Print();
break;
} else {
// chained origin
uptr size = 0;
const uptr *trace = StackDepotGet(stack_id, &size);
// FIXME: copied? modified? passed through? observed?
Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
d.End());
StackTrace::PrintStack(trace, size);
StackDepotGet(stack_id).Print();
id = prev_id;
}
}


@@ -18,6 +18,7 @@
namespace __sanitizer {
// FIXME: Get rid of this class in favor of StackTrace.
struct StackDepotDesc {
const uptr *stack;
uptr size;
@@ -122,10 +123,9 @@ StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
return theDepot.Put(desc);
}
const uptr *StackDepotGet(u32 id, uptr *size) {
StackTrace StackDepotGet(u32 id) {
StackDepotDesc desc = theDepot.Get(id);
*size = desc.size;
return desc.stack;
return StackTrace(desc.stack, desc.size);
}
void StackDepotLockAll() {


@@ -15,6 +15,7 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
@@ -35,10 +36,11 @@ struct StackDepotHandle {
const int kStackDepotMaxUseCount = 1U << 20;
StackDepotStats *StackDepotGetStats();
// FIXME: Pass StackTrace as an input argument here.
u32 StackDepotPut(const uptr *stack, uptr size);
StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size);
StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
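
The depot round trip under the new interface, mirroring the updated unit tests
below (a sketch; CHECK_EQ and ARRAY_SIZE are the sanitizer-internal helpers):

uptr pcs[] = {1, 2, 3, 4, 5};
u32 id = StackDepotPut(pcs, ARRAY_SIZE(pcs));
StackTrace stack = StackDepotGet(id);  // a view into depot-owned storage
CHECK_EQ(stack.size, ARRAY_SIZE(pcs));
stack.Print();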


@@ -65,11 +65,10 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
#endif
}
void StackTrace::FastUnwindStack(uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom,
uptr max_depth) {
void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, uptr max_depth) {
CHECK_GE(max_depth, 2);
trace[0] = pc;
trace_buffer[0] = pc;
size = 1;
if (stack_top < 4096) return; // Sanity check for stack top.
uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
@@ -82,7 +81,7 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp,
size < max_depth) {
uhwptr pc1 = frame[1];
if (pc1 != pc) {
trace[size++] = (uptr) pc1;
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
@@ -93,15 +92,15 @@ static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
}
void StackTrace::PopStackFrames(uptr count) {
void BufferedStackTrace::PopStackFrames(uptr count) {
CHECK_LT(count, size);
size -= count;
for (uptr i = 0; i < size; ++i) {
trace[i] = trace[i + count];
trace_buffer[i] = trace_buffer[i + count];
}
}
uptr StackTrace::LocatePcInTrace(uptr pc) {
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
const int kPcThreshold = 288;


@@ -30,25 +30,13 @@ static const uptr kStackTraceMax = 256;
#endif
struct StackTrace {
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
uptr top_frame_bp;
const uptr *trace;
uptr size;
uptr trace[kStackTraceMax];
StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
// Prints a symbolized stacktrace, followed by an empty line.
static void PrintStack(const uptr *addr, uptr size);
void Print() const {
PrintStack(trace, size);
}
void CopyFrom(const uptr *src, uptr src_size) {
top_frame_bp = 0;
size = src_size;
if (size > kStackTraceMax) size = kStackTraceMax;
for (uptr i = 0; i < size; i++)
trace[i] = src[i];
}
void Print() const;
static bool WillUseFastUnwind(bool request_fast_unwind) {
// Check if fast unwind is available. Fast unwind is the only option on Mac.
@@ -62,11 +50,21 @@ struct StackTrace {
return request_fast_unwind;
}
void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc);
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
};
// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
uptr trace_buffer[kStackTraceMax];
uptr top_frame_bp; // Optional bp of a top frame.
BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
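
A typical unwind-and-print sequence against the split classes (a sketch; the
zero context/stack bounds and the slow-unwind flag follow the final
MaybePrintStackTrace hunk at the end of this diff):

BufferedStackTrace stack;  // owns trace_buffer[kStackTraceMax]
stack.Unwind(kStackTraceMax, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(),
             /* context */ 0, /* stack_top */ 0, /* stack_bottom */ 0,
             /* request_fast_unwind */ false);
stack.Print();  // inherited from StackTrace; reads the owned buffer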


@@ -22,18 +22,18 @@ static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
buffer->append(" #%zu 0x%zx", frame_num, pc);
}
void StackTrace::PrintStack(const uptr *addr, uptr size) {
if (addr == 0 || size == 0) {
void StackTrace::Print() const {
if (trace == nullptr || size == 0) {
Printf(" <empty stack>\n\n");
return;
}
InternalScopedBuffer<AddressInfo> addr_frames(64);
InternalScopedString frame_desc(GetPageSizeCached() * 2);
uptr frame_num = 0;
for (uptr i = 0; i < size && addr[i]; i++) {
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(addr[i]);
uptr pc = GetPreviousInstructionPc(trace[i]);
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
pc, addr_frames.data(), addr_frames.size());
if (addr_frames_num == 0) {
@@ -68,9 +68,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size) {
Printf("\n");
}
void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
uptr stack_top, uptr stack_bottom,
bool request_fast_unwind) {
void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
uptr stack_top, uptr stack_bottom,
bool request_fast_unwind) {
top_frame_bp = (max_depth > 0) ? bp : 0;
// Avoid doing any work for small max_depth.
if (max_depth == 0) {
@@ -79,7 +79,7 @@ void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
}
if (max_depth == 1) {
size = 1;
trace[0] = pc;
trace_buffer[0] = pc;
return;
}
if (!WillUseFastUnwind(request_fast_unwind)) {


@@ -95,7 +95,7 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
}
struct UnwindTraceArg {
StackTrace *stack;
BufferedStackTrace *stack;
uptr max_depth;
};
@@ -103,27 +103,27 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
UnwindTraceArg *arg = (UnwindTraceArg*)param;
CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = Unwind_GetIP(ctx);
arg->stack->trace[arg->stack->size++] = pc;
arg->stack->trace_buffer[arg->stack->size++] = pc;
if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
return UNWIND_CONTINUE;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2);
size = 0;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg);
// We need to pop a few frames so that pc is on top.
uptr to_pop = LocatePcInTrace(pc);
// trace[0] belongs to the current function so we always pop it.
// trace_buffer[0] belongs to the current function so we always pop it.
if (to_pop == 0)
to_pop = 1;
PopStackFrames(to_pop);
trace[0] = pc;
trace_buffer[0] = pc;
}
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
CHECK_GE(max_depth, 2);
if (!unwind_backtrace_signal_arch) {
SlowUnwindStack(pc, max_depth);
@@ -145,7 +145,7 @@ void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
// +2 compensate for libcorkscrew unwinder returning addresses of call
// instructions instead of raw return addresses.
for (sptr i = 0; i < res; ++i)
trace[size++] = frames[i].absolute_pc + 2;
trace_buffer[size++] = frames[i].absolute_pc + 2;
}
} // namespace __sanitizer


@@ -444,7 +444,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
}
#if !SANITIZER_GO
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
@@ -459,8 +459,8 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
PopStackFrames(pc_location);
}
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
CONTEXT ctx = *(CONTEXT *)context;
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));


@@ -20,30 +20,26 @@ namespace __sanitizer {
TEST(SanitizerCommon, StackDepotBasic) {
uptr s1[] = {1, 2, 3, 4, 5};
u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
uptr sz1 = 0;
const uptr *sp1 = StackDepotGet(i1, &sz1);
EXPECT_NE(sp1, (uptr*)0);
EXPECT_EQ(sz1, ARRAY_SIZE(s1));
EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0);
StackTrace stack = StackDepotGet(i1);
EXPECT_NE(stack.trace, (uptr*)0);
EXPECT_EQ(ARRAY_SIZE(s1), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1)));
}
TEST(SanitizerCommon, StackDepotAbsent) {
uptr sz1 = 0;
const uptr *sp1 = StackDepotGet((1 << 30) - 1, &sz1);
EXPECT_EQ(sp1, (uptr*)0);
StackTrace stack = StackDepotGet((1 << 30) - 1);
EXPECT_EQ((uptr*)0, stack.trace);
}
TEST(SanitizerCommon, StackDepotEmptyStack) {
u32 i1 = StackDepotPut(0, 0);
uptr sz1 = 0;
const uptr *sp1 = StackDepotGet(i1, &sz1);
EXPECT_EQ(sp1, (uptr*)0);
StackTrace stack = StackDepotGet(i1);
EXPECT_EQ((uptr*)0, stack.trace);
}
TEST(SanitizerCommon, StackDepotZeroId) {
uptr sz1 = 0;
const uptr *sp1 = StackDepotGet(0, &sz1);
EXPECT_EQ(sp1, (uptr*)0);
StackTrace stack = StackDepotGet(0);
EXPECT_EQ((uptr*)0, stack.trace);
}
TEST(SanitizerCommon, StackDepotSame) {
@@ -51,11 +47,10 @@ TEST(SanitizerCommon, StackDepotSame) {
u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
u32 i2 = StackDepotPut(s1, ARRAY_SIZE(s1));
EXPECT_EQ(i1, i2);
uptr sz1 = 0;
const uptr *sp1 = StackDepotGet(i1, &sz1);
EXPECT_NE(sp1, (uptr*)0);
EXPECT_EQ(sz1, ARRAY_SIZE(s1));
EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0);
StackTrace stack = StackDepotGet(i1);
EXPECT_NE(stack.trace, (uptr*)0);
EXPECT_EQ(ARRAY_SIZE(s1), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1)));
}
TEST(SanitizerCommon, StackDepotSeveral) {
@@ -80,12 +75,12 @@ TEST(SanitizerCommon, StackDepotReverseMap) {
StackDepotReverseMap map;
for (uptr i = 0; i < 4; i++) {
uptr sz_depot, sz_map;
const uptr *sp_depot, *sp_map;
sp_depot = StackDepotGet(ids[i], &sz_depot);
uptr sz_map;
const uptr *sp_map;
StackTrace stack = StackDepotGet(ids[i]);
sp_map = map.Get(ids[i], &sz_map);
EXPECT_EQ(sz_depot, sz_map);
EXPECT_EQ(sp_depot, sp_map);
EXPECT_EQ(stack.size, sz_map);
EXPECT_EQ(stack.trace, sp_map);
}
}


@@ -35,7 +35,7 @@ class FastUnwindTest : public ::testing::Test {
uptr start_pc;
uptr fake_top;
uptr fake_bottom;
StackTrace trace;
BufferedStackTrace trace;
};
static uptr PC(uptr idx) {
@@ -139,7 +139,7 @@ TEST_F(FastUnwindTest, FPBelowPrevFP) {
TEST(SlowUnwindTest, ShortStackTrace) {
if (StackTrace::WillUseFastUnwind(false))
return;
StackTrace stack;
BufferedStackTrace stack;
uptr pc = StackTrace::GetCurrentPc();
uptr bp = GET_CURRENT_FRAME();
stack.Unwind(0, pc, bp, 0, 0, 0, false);


@@ -475,12 +475,11 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
uptr dummy_pc = 0x42;
for (int i = 0; i < r->n; i++) {
uptr size;
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j];
if (stk) {
const uptr *trace = StackDepotGet(stk, &size);
stacks[i].Init(const_cast<uptr *>(trace), size);
__sanitizer::StackTrace stack = StackDepotGet(stk);
stacks[i].Init(const_cast<uptr *>(stack.trace), stack.size);
} else {
// Sometimes we fail to extract the stack trace (FIXME: investigate),
// but we should still produce some stack trace in the report.


@@ -107,12 +107,11 @@ static void StackStripMain(ReportStack *stack) {
ReportStack *SymbolizeStackId(u32 stack_id) {
if (stack_id == 0)
return 0;
uptr ssz = 0;
const uptr *stack = StackDepotGet(stack_id, &ssz);
if (stack == 0)
__sanitizer::StackTrace stack = StackDepotGet(stack_id);
if (stack.trace == nullptr)
return 0;
StackTrace trace;
trace.Init(stack, ssz);
trace.Init(stack.trace, stack.size);
return SymbolizeStack(trace);
}
@@ -691,14 +690,15 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
void PrintCurrentStackSlow() {
#ifndef TSAN_GO
__sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
__sanitizer::BufferedStackTrace *ptrace = new(
internal_alloc(MBlockStackTrace, sizeof(__sanitizer::BufferedStackTrace)))
__sanitizer::BufferedStackTrace();
ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
uptr tmp = ptrace->trace[i];
ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
ptrace->trace[ptrace->size - i - 1] = tmp;
uptr tmp = ptrace->trace_buffer[i];
ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
StackTrace trace;
trace.Init(ptrace->trace, ptrace->size);


@@ -10,7 +10,6 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
//#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_stack_trace.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
@@ -87,11 +86,6 @@ void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
}
}
void StackTrace::CopyFrom(const StackTrace& other) {
Reset();
Init(other.Begin(), other.Size());
}
bool StackTrace::IsEmpty() const {
return n_ == 0;
}


@@ -13,16 +13,11 @@
#ifndef TSAN_STACK_TRACE_H
#define TSAN_STACK_TRACE_H
//#include "sanitizer_common/sanitizer_atomic.h"
//#include "sanitizer_common/sanitizer_common.h"
//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
//#include "tsan_clock.h"
//#include "tsan_mutex.h"
//#include "tsan_dense_alloc.h"
namespace __tsan {
// FIXME: Delete this class in favor of __sanitizer::StackTrace.
class StackTrace {
public:
StackTrace();
@@ -38,7 +33,6 @@ class StackTrace {
uptr Size() const;
uptr Get(uptr i) const;
const uptr *Begin() const;
void CopyFrom(const StackTrace& other);
private:
uptr n_;


@@ -33,7 +33,7 @@ static void MaybePrintStackTrace(uptr pc, uptr bp) {
// under ASan).
if (StackTrace::WillUseFastUnwind(false))
return;
StackTrace stack;
BufferedStackTrace stack;
stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
stack.Print();
}