[sanitizer] Add a fast version of StackDepotGet() for use in LSan.

Add a class that holds a snapshot of the StackDepot optimized for querying by
ID. This allows us to speed up LSan dramatically.

llvm-svn: 189217
Sergey Matveev 2013-08-26 13:24:43 +00:00
parent ef7db73e11
commit 9e3e80208f
6 changed files with 129 additions and 5 deletions
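The idea, sketched with standard containers for anyone reading the diff below without the sanitizer sources at hand: copy every (id, stack) pair out of the depot once, sort the copy by id, and answer each lookup with a binary search instead of going back to the depot. The Snapshot and Entry names in this sketch are invented for illustration; the real class is StackDepotReverseMap, whose storage is an InternalMmapVector sorted with InternalSort and searched with the new InternalBinarySearch helper.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct Entry {
  uint32_t id;                    // stack trace id
  std::vector<uintptr_t> frames;  // frames stored under that id
};

class Snapshot {
 public:
  // Build once: copy out the (id, frames) pairs and sort them by id.
  explicit Snapshot(std::vector<Entry> entries) : entries_(std::move(entries)) {
    std::sort(entries_.begin(), entries_.end(),
              [](const Entry &a, const Entry &b) { return a.id < b.id; });
  }
  // Query many times: an O(log N) binary search over the sorted copy.
  const std::vector<uintptr_t> *Get(uint32_t id) const {
    auto it = std::lower_bound(
        entries_.begin(), entries_.end(), id,
        [](const Entry &e, uint32_t want) { return e.id < want; });
    if (it == entries_.end() || it->id != id) return nullptr;
    return &it->frames;
  }

 private:
  std::vector<Entry> entries_;
};

int main() {
  Snapshot snap({{7, {1, 2, 3}}, {3, {4, 5}}, {11, {6}}});
  if (const std::vector<uintptr_t> *frames = snap.Get(3))
    std::printf("id 3 has %zu frames\n", frames->size());
  if (!snap.Get(42)) std::printf("id 42 is not in the snapshot\n");
}

In the LSan change below, one StackDepotReverseMap is built in ProcessPlatformSpecificAllocations() and queried once per chunk from the ForEachChunk callback.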

@@ -90,26 +90,34 @@ void ProcessGlobalRegions(Frontier *frontier) {
   dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
 }
 
-static uptr GetCallerPC(u32 stack_id) {
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
   CHECK(stack_id);
   uptr size = 0;
-  const uptr *trace = StackDepotGet(stack_id, &size);
+  const uptr *trace = map->Get(stack_id, &size);
   // The top frame is our malloc/calloc/etc. The next frame is the caller.
   if (size >= 2)
     return trace[1];
   return 0;
 }
 
+struct ProcessPlatformAllocParam {
+  Frontier *frontier;
+  StackDepotReverseMap *stack_depot_reverse_map;
+};
+
 // ForEachChunk callback. Identifies unreachable chunks which must be treated as
 // reachable. Marks them as reachable and adds them to the frontier.
 static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
   CHECK(arg);
+  ProcessPlatformAllocParam *param =
+      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
-    if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
+    if (linker->containsAddress(
+            GetCallerPC(m.stack_trace_id(), param->stack_depot_reverse_map))) {
       m.set_tag(kReachable);
-      reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+      param->frontier->push_back(chunk);
     }
   }
 }
@@ -119,7 +127,9 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
+  StackDepotReverseMap stack_depot_reverse_map;
+  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
+  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
 }
 
 }  // namespace __lsan
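One mechanical detail worth noting: ForEachChunk hands its callback a single opaque void* argument, so the change bundles the frontier and the reverse map into ProcessPlatformAllocParam and unpacks the bundle inside the callback. A minimal standalone sketch of that pattern, with invented names (ForEach, Visit, VisitParams):

#include <cstdio>

// Stand-ins for the real types; invented for this sketch.
struct Frontier { int pushed = 0; };
struct ReverseMap {};

// A ForEachChunk-style iterator: it only knows how to hand the callback an
// opaque void* context.
typedef void (*ChunkCallback)(unsigned long chunk, void *arg);
static void ForEach(ChunkCallback cb, void *arg) {
  for (unsigned long chunk = 0; chunk < 3; chunk++) cb(chunk, arg);
}

// Bundle everything the callback needs into one struct...
struct VisitParams {
  Frontier *frontier;
  ReverseMap *map;
};

// ...and unpack it on the other side of the void*.
static void Visit(unsigned long chunk, void *arg) {
  VisitParams *params = reinterpret_cast<VisitParams *>(arg);
  params->frontier->pushed++;  // both pieces of state are available here
  (void)params->map;
  std::printf("visited chunk %lu\n", chunk);
}

int main() {
  Frontier frontier;
  ReverseMap map;
  VisitParams params = {&frontier, &map};
  ForEach(Visit, &params);
  std::printf("pushed %d chunks\n", frontier.pushed);
}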

@@ -380,6 +380,22 @@ void InternalSort(Container *v, uptr size, Compare comp) {
   }
 }
 
+template<class Container, class Value, class Compare>
+uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
+                          const Value &val, Compare comp) {
+  uptr not_found = last + 1;
+  while (last >= first) {
+    uptr mid = (first + last) / 2;
+    if (comp(v[mid], val))
+      first = mid + 1;
+    else if (comp(val, v[mid]))
+      last = mid - 1;
+    else
+      return mid;
+  }
+  return not_found;
+}
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_COMMON_H
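To make the new helper's contract concrete, here is the same algorithm restated as a standalone program, with uptr swapped for size_t so it builds outside the runtime and with the values borrowed from the unit test added further down: a hit returns the matching element's index, and a miss returns last + 1.

#include <cstddef>
#include <cstdio>

// Same algorithm as InternalBinarySearch above, renamed and using size_t.
template<class Container, class Value, class Compare>
size_t BinarySearch(const Container &v, size_t first, size_t last,
                    const Value &val, Compare comp) {
  size_t not_found = last + 1;  // sentinel: one past 'last'
  while (last >= first) {
    size_t mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else if (comp(val, v[mid]))
      last = mid - 1;
    else
      return mid;               // exact match
  }
  return not_found;
}

static bool Less(size_t a, size_t b) { return a < b; }

int main() {
  // Mirrors the InternalBinarySearch unit test: arr[i] = i * i.
  const size_t sz = 5;
  size_t arr[sz];
  for (size_t i = 0; i < sz; i++) arr[i] = i * i;
  for (size_t i = 0; i < sz; i++)
    std::printf("search %zu -> index %zu\n", i * i,
                BinarySearch(arr, 0, sz, i * i, Less));
  // An absent value yields the 'not found' sentinel, sz + 1.
  std::printf("search 7 -> %zu (not found is %zu)\n",
              BinarySearch(arr, 0, sz, (size_t)7, Less), sz + 1);
}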

@@ -201,4 +201,38 @@ const uptr *StackDepotGet(u32 id, uptr *size) {
   return 0;
 }
 
+bool StackDepotReverseMap::IdDescPair::IdComparator(
+    const StackDepotReverseMap::IdDescPair &a,
+    const StackDepotReverseMap::IdDescPair &b) {
+  return a.id < b.id;
+}
+
+StackDepotReverseMap::StackDepotReverseMap()
+    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
+  for (int idx = 0; idx < kTabSize; idx++) {
+    atomic_uintptr_t *p = &depot.tab[idx];
+    uptr v = atomic_load(p, memory_order_consume);
+    StackDesc *s = (StackDesc*)(v & ~1);
+    for (; s; s = s->link) {
+      IdDescPair pair = {s->id, s};
+      map_.push_back(pair);
+    }
+  }
+  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
+}
+
+const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
+  if (!map_.size()) return 0;
+  IdDescPair pair = {id, 0};
+  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
+                                  IdDescPair::IdComparator);
+  if (idx > map_.size()) {
+    *size = 0;
+    return 0;
+  }
+  StackDesc *desc = map_[idx].desc;
+  *size = desc->size;
+  return desc->stack;
+}
+
 }  // namespace __sanitizer

@@ -13,6 +13,7 @@
 #ifndef SANITIZER_STACKDEPOT_H
 #define SANITIZER_STACKDEPOT_H
 
+#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 
 namespace __sanitizer {
@@ -31,6 +32,31 @@ struct StackDepotStats {
 
 StackDepotStats *StackDepotGetStats();
 
+struct StackDesc;
+
+// Instantiating this class creates a snapshot of StackDepot which can be
+// efficiently queried with StackDepotGet(). You can use it concurrently with
+// StackDepot, but the snapshot is only guaranteed to contain those stack traces
+// which were stored before it was instantiated.
+class StackDepotReverseMap {
+ public:
+  StackDepotReverseMap();
+  const uptr *Get(u32 id, uptr *size);
+
+ private:
+  struct IdDescPair {
+    u32 id;
+    StackDesc *desc;
+
+    static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
+  };
+
+  InternalMmapVector<IdDescPair> map_;
+
+  // Disallow evil constructors.
+  StackDepotReverseMap(const StackDepotReverseMap&);
+  void operator=(const StackDepotReverseMap&);
+};
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_STACKDEPOT_H
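The comment above fixes the concurrency contract: the map is a snapshot, so stack traces stored after it is constructed are not guaranteed to be visible through it. A toy standalone illustration of that property, with invented stand-ins (Depot as a std::map, DepotSnapshot copying it at construction); the real implementation is the sorted InternalMmapVector shown earlier:

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

// Toy stand-ins, invented for this sketch.
using Trace = std::vector<uintptr_t>;
using Depot = std::map<uint32_t, Trace>;  // the "live" depot

class DepotSnapshot {
 public:
  // Copies the depot's contents once, at construction time.
  explicit DepotSnapshot(const Depot &depot) : copy_(depot) {}
  const Trace *Get(uint32_t id) const {
    auto it = copy_.find(id);
    return it == copy_.end() ? nullptr : &it->second;
  }
 private:
  Depot copy_;
};

int main() {
  Depot depot;
  depot[1] = {0x1, 0x2};
  DepotSnapshot snap(depot);  // snapshot taken here
  depot[2] = {0x3};           // stored after the snapshot
  std::printf("id 1 via snapshot: %s\n", snap.Get(1) ? "found" : "missing");
  std::printf("id 2 via snapshot: %s\n", snap.Get(2) ? "found" : "missing");
  std::printf("id 2 via depot:    %s\n", depot.count(2) ? "found" : "missing");
}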

@@ -158,4 +158,19 @@ TEST(SanitizerCommon, ThreadStackTlsWorker) {
   pthread_join(t, 0);
 }
 
+bool UptrLess(uptr a, uptr b) {
+  return a < b;
+}
+
+TEST(SanitizerCommon, InternalBinarySearch) {
+  const uptr sz = 5;
+  uptr arr[sz];
+  for (uptr i = 0; i < sz; i++) arr[i] = i * i;
+
+  for (uptr i = 0; i < sz; i++)
+    ASSERT_EQ(InternalBinarySearch(arr, 0, sz, i * i, UptrLess), i);
+
+  ASSERT_EQ(InternalBinarySearch(arr, 0, sz, 7, UptrLess), sz + 1);
+}
+
 }  // namespace __sanitizer

@@ -66,4 +66,27 @@ TEST(SanitizerCommon, StackDepotSeveral) {
   EXPECT_NE(i1, i2);
 }
 
+TEST(SanitizerCommon, StackDepotReverseMap) {
+  uptr s1[] = {1, 2, 3, 4, 5};
+  uptr s2[] = {7, 1, 3, 0};
+  uptr s3[] = {10, 2, 5, 3};
+  uptr s4[] = {1, 3, 2, 5};
+  u32 ids[4] = {0};
+  ids[0] = StackDepotPut(s1, ARRAY_SIZE(s1));
+  ids[1] = StackDepotPut(s2, ARRAY_SIZE(s2));
+  ids[2] = StackDepotPut(s3, ARRAY_SIZE(s3));
+  ids[3] = StackDepotPut(s4, ARRAY_SIZE(s4));
+
+  StackDepotReverseMap map;
+
+  for (uptr i = 0; i < 4; i++) {
+    uptr sz_depot, sz_map;
+    const uptr *sp_depot, *sp_map;
+    sp_depot = StackDepotGet(ids[i], &sz_depot);
+    sp_map = map.Get(ids[i], &sz_map);
+    EXPECT_EQ(sz_depot, sz_map);
+    EXPECT_EQ(sp_depot, sp_map);
+  }
+}
+
 }  // namespace __sanitizer