Implement standalone lsan interceptors for OS X

Summary:
Mimics the existing tsan and asan implementations of
Darwin interception.

Reviewers: kubamracek, kcc, glider

Subscribers: llvm-commits, mgorny

Differential Revision: https://reviews.llvm.org/D31889

llvm-svn: 299979
Francis Ricci 2017-04-11 20:05:02 +00:00
parent 84f17f32ad
commit 03b2a8e47e
8 changed files with 131 additions and 32 deletions

CMakeLists.txt

@@ -13,6 +13,7 @@ set(LSAN_SOURCES
lsan_allocator.cc
lsan_linux.cc
lsan_interceptors.cc
lsan_malloc_mac.cc
lsan_preinit.cc
lsan_thread.cc)

lsan.cc

@@ -76,6 +76,7 @@ extern "C" void __lsan_init() {
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();

lsan.h

@@ -41,6 +41,13 @@
namespace __lsan {
void InitializeInterceptors();
void ReplaceSystemMalloc();
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
} // namespace __lsan
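
The ENSURE_LSAN_INITED guard moves into lsan.h so that both the libc interceptors and the new Darwin malloc-zone code can lazily initialize the runtime before servicing their first allocation. Below is a minimal, self-contained sketch of the same lazy-init pattern; all demo_* names are illustrative stand-ins, not the real LSan symbols.

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins for lsan_inited, lsan_init_is_running and __lsan_init().
static bool demo_inited = false;
static bool demo_init_is_running = false;

static void demo_init() {
  demo_init_is_running = true;
  // ... initialize flags, allocator and interceptors here ...
  demo_init_is_running = false;
  demo_inited = true;
}

// Same shape as ENSURE_LSAN_INITED: never recurse into the initializer, and
// run it lazily from whichever entry point happens to be hit first.
#define ENSURE_DEMO_INITED do {    \
    assert(!demo_init_is_running); \
    if (!demo_inited)              \
      demo_init();                 \
  } while (0)

// Toy entry point: a bump allocator standing in for the LSan allocator.
void *demo_allocate(size_t size) {
  ENSURE_DEMO_INITED;  // every interceptor / zone callback guards itself like this
  static char arena[1 << 16];
  static size_t used = 0;
  if (used + size > sizeof(arena)) return nullptr;
  void *p = arena + used;
  used += size;
  return p;
}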

lsan_allocator.cc

@@ -117,6 +117,37 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
void *lsan_malloc(uptr size, const StackTrace &stack) {
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
void lsan_free(void *p) {
Deallocate(p);
}
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
return Reallocate(stack, p, size, 1);
}
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
size *= nmemb;
return Allocate(stack, size, 1, true);
}
void *lsan_valloc(uptr size, const StackTrace &stack) {
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
uptr lsan_mz_size(const void *p) {
return GetMallocUsableSize(p);
}
///// Interface to the common LSan module. /////
void LockAllocator() {
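
These lsan_* wrappers give the Linux interceptors and the Darwin malloc zone a single entry point per allocation primitive. Note that lsan_calloc multiplies nmemb * size without its own overflow check; the caller is expected to reject overflow first, as the calloc interceptor does via CallocShouldReturnNullDueToOverflow. A sketch of such a pre-check follows, illustrative rather than the sanitizer's actual implementation.

#include <cstddef>
#include <limits>

// Illustrative pre-check in the spirit of CallocShouldReturnNullDueToOverflow:
// reject the request when nmemb * size cannot be represented in size_t.
static bool calloc_would_overflow(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > std::numeric_limits<size_t>::max() / size;
}

// A caller would bail out before reaching lsan_calloc, e.g.:
//   if (calloc_would_overflow(nmemb, size)) return nullptr;
//   return lsan_calloc(nmemb, size, stack);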

lsan_allocator.h

@@ -36,6 +36,8 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
const bool kAlwaysClearMemory = true;
struct ChunkMetadata {
u8 allocated : 8; // Must be first.
ChunkTag tag : 2;
@@ -72,6 +74,15 @@ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
AllocatorCache *GetAllocatorCache();
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
void *lsan_valloc(uptr size, const StackTrace &stack);
uptr lsan_mz_size(const void *p);
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H

lsan_interceptors.cc

@@ -39,29 +39,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
///// Malloc/free interceptors. /////
const bool kAlwaysClearMemory = true;
namespace std {
struct nothrow_t;
}
#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, 1, kAlwaysClearMemory);
return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
Deallocate(p);
lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
@@ -79,28 +72,42 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
size *= nmemb;
return Allocate(stack, size, 1, true);
return lsan_calloc(nmemb, size, stack);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return Reallocate(stack, q, size, 1);
return lsan_realloc(q, size, stack);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
*memptr = lsan_memalign(alignment, size, stack);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
}
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_valloc(size, stack);
}
#endif
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
@@ -114,29 +121,13 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
#else
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
#endif
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
}
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
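
After this change each interceptor is a thin shim: it runs ENSURE_LSAN_INITED, captures a stack trace, and forwards to the matching lsan_* wrapper, while posix_memalign and valloc move inside the #if !SANITIZER_MAC block since Darwin services those calls through the malloc zone instead. A self-contained sketch of that delegation shape follows; the DEMO_* names are illustrative, and the real INTERCEPTOR macro comes from sanitizer_common's interception support.

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for the lsan_* wrappers (the real ones also take a StackTrace).
static void *demo_lsan_malloc(size_t size) { return std::malloc(size); }
static void demo_lsan_free(void *p) { std::free(p); }

// Rough shape of the delegation pattern. The real INTERCEPTOR macro rebinds the
// libc symbol; here it just defines an ordinary function so the sketch stays
// self-contained and compilable.
#define DEMO_INTERCEPTOR(ret, name, ...) ret demo_##name(__VA_ARGS__)

DEMO_INTERCEPTOR(void *, malloc, size_t size) {
  // ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC;  -- done first in the real interceptor
  return demo_lsan_malloc(size);  // all allocation logic lives in the wrapper
}

DEMO_INTERCEPTOR(void, free, void *p) {
  demo_lsan_free(p);
}

int main() {
  void *p = demo_malloc(32);
  std::printf("allocated %p\n", p);
  demo_free(p);
  return 0;
}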

lsan_linux.cc

@@ -26,6 +26,8 @@ void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
void ReplaceSystemMalloc() {}
} // namespace __lsan
#endif // SANITIZER_LINUX

lsan_malloc_mac.cc (new file)

@@ -0,0 +1,55 @@
//===-- lsan_malloc_mac.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer (LSan), a memory leak detector.
//
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
using namespace __lsan;
#define COMMON_MALLOC_ZONE_NAME "lsan"
#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
#define COMMON_MALLOC_FORCE_LOCK()
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_memalign(alignment, size, stack)
#define COMMON_MALLOC_MALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_malloc(size, stack)
#define COMMON_MALLOC_REALLOC(ptr, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_realloc(ptr, size, stack)
#define COMMON_MALLOC_CALLOC(count, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_calloc(count, size, stack)
#define COMMON_MALLOC_VALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_valloc(size, stack)
#define COMMON_MALLOC_FREE(ptr) \
lsan_free(ptr)
#define COMMON_MALLOC_SIZE(ptr) \
uptr size = lsan_mz_size(ptr)
#define COMMON_MALLOC_FILL_STATS(zone, stats)
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
(void)zone_name; \
Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
#define COMMON_MALLOC_NAMESPACE __lsan
#include "sanitizer_common/sanitizer_malloc_mac.inc"
#endif // SANITIZER_MAC
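
The new file supplies only the COMMON_MALLOC_* hooks; registering a malloc zone and routing malloc/free/realloc through it is handled by the shared sanitizer_malloc_mac.inc, the same define-then-include scheme asan and tsan already use on Darwin. Below is a toy sketch of that pattern; the "consumer" functions stand in for the real .inc and everything here is illustrative only.

#include <cstdio>
#include <cstdlib>

// Toy version of the "define COMMON_MALLOC_* hooks, then include the shared
// template" pattern. The real sanitizer_malloc_mac.inc builds a malloc_zone_t
// from these hooks and swaps it in for the default zone.
#define COMMON_MALLOC_ZONE_NAME "demo"
#define COMMON_MALLOC_MALLOC(size) void *p = std::malloc(size)
#define COMMON_MALLOC_FREE(ptr) std::free(ptr)

// --- what an .inc-style consumer of those hooks might look like ---
static void *zone_malloc(size_t size) {
  COMMON_MALLOC_MALLOC(size);  // expands to the sanitizer-specific allocation
  return p;
}

static void zone_free(void *ptr) {
  COMMON_MALLOC_FREE(ptr);     // expands to the sanitizer-specific deallocation
}

int main() {
  std::printf("zone name: %s\n", COMMON_MALLOC_ZONE_NAME);
  void *p = zone_malloc(16);
  zone_free(p);
  return 0;
}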