[msan] Allocator statistics interface and malloc hooks.

llvm-svn: 187653
This commit is contained in:
Evgeniy Stepanov 2013-08-02 14:26:58 +00:00
parent 0389d883fc
commit 44b77c26e4
7 changed files with 211 additions and 5 deletions

View File

@ -83,6 +83,57 @@ extern "C" {
Memory will be marked uninitialized, with origin at the call site. */
void __msan_allocated_memory(const void* data, size_t size);
/* This function may be optionally provided by user and should return
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
/***********************************/
/* Allocator statistics interface. */
/* Returns the estimated number of bytes that will be reserved by allocator
for request of "size" bytes. If Msan allocator can't allocate that much
memory, returns the maximal possible allocation size, otherwise returns
"size". */
size_t __msan_get_estimated_allocated_size(size_t size);
/* Returns true if p was returned by the Msan allocator and
is not yet freed. */
bool __msan_get_ownership(const void *p);
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
size_t __msan_get_allocated_size(const void *p);
/* Number of bytes allocated and not yet freed by the application. */
size_t __msan_get_current_allocated_bytes();
/* Number of bytes mmap-ed by the msan allocator to fulfill allocation requests.
Generally, for request of X bytes, allocator can reserve and add to free
lists a large number of chunks of size X to use them for future requests.
All these chunks count toward the heap size. Currently, allocator never
releases memory to OS (instead, it just puts freed chunks to free
lists). */
size_t __msan_get_heap_size();
/* Number of bytes, mmaped by msan allocator, which can be used to fulfill
allocation requests. When a user program frees memory chunk, it can first
fall into quarantine and will count toward __msan_get_free_bytes()
later. */
size_t __msan_get_free_bytes();
/* Number of bytes in unmapped pages that have been released to the OS.
Currently not implemented; always returns 1. */
size_t __msan_get_unmapped_bytes();
/* Malloc hooks that may be optionally provided by user.
__msan_malloc_hook(ptr, size) is called immediately after
allocation of "size" bytes, which returned "ptr".
__msan_free_hook(ptr) is called immediately before
deallocation of "ptr". */
void __msan_malloc_hook(void *ptr, size_t size);
void __msan_free_hook(void *ptr);
#else // __has_feature(memory_sanitizer)
#define __msan_get_origin_descr_if_stack(id) ((const char*)0)

View File

@ -0,0 +1,36 @@
// RUN: %clangxx_msan -O2 %s -o %t
// RUN: %t 2>&1 | FileCheck %s
#include <stdlib.h>
#include <unistd.h>
extern "C" {
bool __msan_get_ownership(const void *p);
void *global_ptr;
// Note: avoid calling functions that allocate memory in malloc/free
// to avoid infinite recursion.
// Called by the MSan runtime immediately after each heap allocation:
// 'ptr' is the block just returned for a request of 'sz' bytes.
// Records the pointer so main() can verify the hook fired for its
// allocation.  Must not allocate (would recurse into the hook).
void __msan_malloc_hook(void *ptr, size_t sz) {
  if (__msan_get_ownership(ptr)) {
    // sizeof(literal) includes the terminating NUL; subtract 1 so a
    // stray '\0' byte is not written to stdout.
    write(1, "MallocHook\n", sizeof("MallocHook\n") - 1);
    global_ptr = ptr;
  }
}
// Called by the MSan runtime immediately before 'ptr' is deallocated.
// Reports only the block that the malloc hook recorded in global_ptr.
void __msan_free_hook(void *ptr) {
  if (__msan_get_ownership(ptr) && ptr == global_ptr)
    // sizeof(literal) - 1: do not emit the trailing NUL to stdout.
    write(1, "FreeHook\n", sizeof("FreeHook\n") - 1);
}
} // extern "C"
// Driver: allocate, check the malloc hook saw our pointer, then free.
// FileCheck verifies the hooks printed in allocation order.
int main() {
  // volatile keeps the store/load below from being optimized away.
  volatile int *x = new int;
  // CHECK: MallocHook
  // Check that malloc hook was called with correct argument.
  if (global_ptr != (void*)x) {
    _exit(1);
  }
  *x = 0;
  delete x;
  // CHECK: FreeHook
  return 0;
}

View File

@ -87,4 +87,9 @@ void UnpoisonParam(uptr n);
} // namespace __msan
// Invoke the user-supplied malloc/free hooks if present.  The hooks are
// declared with SANITIZER_WEAK_ATTRIBUTE, so taking their address yields
// non-null only when the user actually provided a definition.
#define MSAN_MALLOC_HOOK(ptr, size) \
  if (&__msan_malloc_hook) __msan_malloc_hook(ptr, size)
#define MSAN_FREE_HOOK(ptr) \
  if (&__msan_free_hook) __msan_free_hook(ptr)
#endif // MSAN_H

View File

@ -61,14 +61,17 @@ static void *MsanAllocate(StackTrace *stack, uptr size,
CHECK_EQ((stack_id >> 31), 0); // Higher bit is occupied by stack origins.
__msan_set_origin(res, size, stack_id);
}
MSAN_MALLOC_HOOK(res, size);
return res;
}
void MsanDeallocate(void *p) {
CHECK(p);
Init();
MSAN_FREE_HOOK(p);
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(p));
uptr size = meta->requested_size;
meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
__msan_poison(p, size);
@ -104,4 +107,52 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return new_p;
}
// Returns the number of bytes originally requested for the heap block
// starting exactly at 'p', or 0 for null, interior, or foreign pointers.
static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  // Only a pointer to the very beginning of a block is considered owned.
  const void *block_start = allocator.GetBlockBegin(p);
  if (block_start != p)
    return 0;
  Metadata *meta = (Metadata*)allocator.GetMetaData(p);
  return meta->requested_size;
}
} // namespace __msan
using namespace __msan;
// Bytes currently allocated and not yet freed by the application,
// computed from the allocator's malloc/free counters.
uptr __msan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator.GetStats(stats);
  const u64 malloced = stats[AllocatorStatMalloced];
  const u64 freed = stats[AllocatorStatFreed];
  // If the counters are transiently inconsistent (freed > malloced),
  // report 1 rather than a huge unsigned difference.
  if (malloced < freed)
    return 1;
  return malloced - freed;
}
// Bytes currently mmap-ed by the msan allocator (mapped minus unmapped).
uptr __msan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator.GetStats(stats);
  const u64 mapped = stats[AllocatorStatMmapped];
  const u64 unmapped = stats[AllocatorStatUnmapped];
  // Guard against a transiently inconsistent counter pair: report 1
  // instead of an underflowed unsigned difference.
  if (mapped < unmapped)
    return 1;
  return mapped - unmapped;
}
// Free-bytes accounting is not implemented yet; always reports 1.
uptr __msan_get_free_bytes() {
  return 1;
}
// Unmapped-bytes accounting is not implemented yet; always reports 1.
// NOTE(review): the public header comment says this "always returns 0" —
// the stub and the comment disagree; confirm which is intended.
uptr __msan_get_unmapped_bytes() {
  return 1;
}
// Currently a pass-through: the estimated reservation for a request of
// 'size' bytes is exactly 'size'.
uptr __msan_get_estimated_allocated_size(uptr size) {
  return size;
}
// True iff 'p' points at the start of a live block that the msan
// allocator handed out (AllocationSize reports 0 otherwise).
bool __msan_get_ownership(const void *p) {
  return AllocationSize(p) > 0;
}
// Bytes originally requested for heap pointer 'p'; 0 when 'p' is null
// or not a live msan heap allocation.
uptr __msan_get_allocated_size(const void *p) {
  return AllocationSize(p);
}

View File

@ -141,6 +141,33 @@ void __sanitizer_unaligned_store32(uu32 *p, u32 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
bool __msan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE
uptr __msan_get_unmapped_bytes();
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
/* OPTIONAL */ void __msan_malloc_hook(void *ptr, uptr size);
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
/* OPTIONAL */ void __msan_free_hook(void *ptr);
} // extern "C"
#endif // MSAN_INTERFACE_INTERNAL_H

View File

@ -2785,3 +2785,39 @@ TEST(MemorySanitizer, CallocOverflow) {
// Stress test: recursive allocation to depth 22.  The DISABLED_ prefix
// keeps it out of the default gtest run; enable with
// --gtest_also_run_disabled_tests.
TEST(MemorySanitizerStress, DISABLED_MallocStackTrace) {
  RecursiveMalloc(22);
}
// __msan_get_estimated_allocated_size() is a pass-through: the estimate
// must equal the requested size across a spread of request sizes.
TEST(MemorySanitizerAllocator, get_estimated_allocated_size) {
  const size_t kSizes[] = {0, 20, 5000, 1<<20};
  const size_t kCount = sizeof(kSizes) / sizeof(kSizes[0]);
  for (size_t idx = 0; idx < kCount; ++idx) {
    const size_t estimated = __msan_get_estimated_allocated_size(kSizes[idx]);
    EXPECT_EQ(estimated, kSizes[idx]);
  }
}
// Ownership and size queries for live, wild, interior, null, and freed
// pointers.
TEST(MemorySanitizerAllocator, get_allocated_size_and_ownership) {
  char *heap_buf = reinterpret_cast<char*>(malloc(100));
  int *heap_int = new int;

  // Live allocations are owned and report their requested size.
  EXPECT_TRUE(__msan_get_ownership(heap_buf));
  EXPECT_EQ(100, __msan_get_allocated_size(heap_buf));
  EXPECT_TRUE(__msan_get_ownership(heap_int));
  EXPECT_EQ(sizeof(*heap_int), __msan_get_allocated_size(heap_int));

  // An address the allocator never returned is not owned.
  void *wild_addr = reinterpret_cast<void*>(0x1);
  EXPECT_FALSE(__msan_get_ownership(wild_addr));
  EXPECT_EQ(0, __msan_get_allocated_size(wild_addr));

  // Neither is an interior pointer into a live block.
  EXPECT_FALSE(__msan_get_ownership(heap_buf + 50));
  EXPECT_EQ(0, __msan_get_allocated_size(heap_buf + 50));

  // NULL is a valid argument for GetAllocatedSize but is not owned.
  EXPECT_FALSE(__msan_get_ownership(NULL));
  EXPECT_EQ(0, __msan_get_allocated_size(NULL));

  // After free, the block is no longer owned.
  free(heap_buf);
  EXPECT_FALSE(__msan_get_ownership(heap_buf));
  EXPECT_EQ(0, __msan_get_allocated_size(heap_buf));
  delete heap_int;
}

View File

@ -377,7 +377,7 @@ class SizeClassAllocator64 {
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
@ -640,7 +640,7 @@ class SizeClassAllocator32 {
alignment <= SizeClassMap::kMaxSize;
}
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
@ -1014,7 +1014,7 @@ class LargeMmapAllocator {
}
// At least page_size_/2 metadata bytes is available.
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
// Too slow: CHECK_EQ(p, GetBlockBegin(p));
CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
return GetHeader(p) + 1;
@ -1127,7 +1127,7 @@ class LargeMmapAllocator {
CHECK(IsAligned(p, page_size_));
return reinterpret_cast<Header*>(p - page_size_);
}
Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
Header *GetHeader(const void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
void *GetUser(Header *h) {
CHECK(IsAligned((uptr)h, page_size_));
@ -1222,7 +1222,7 @@ class CombinedAllocator {
return primary_.PointerIsMine(p);
}
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
return secondary_.GetMetaData(p);