[sanitizer] Use mmap to zero-fill large shadow regions.

This is covered by an existing ASan test.
This does not change anything for TSan by default, but provides a flag to
change the threshold size.
Based on a patch by florent.bruneau here:
  https://code.google.com/p/address-sanitizer/issues/detail?id=256

llvm-svn: 201400
commit 05938a23f5 (parent 81a2b466e9)
Author: Evgeniy Stepanov
Date:   2014-02-14 11:41:26 +00:00

6 changed files with 86 additions and 9 deletions
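
The win comes from two properties of anonymous mappings: freshly mapped anonymous memory reads as zero, and mapping with MAP_FIXED over an already-mapped range replaces it, so the kernel can drop the dirty pages instead of every byte being rewritten. A minimal sketch of that effect with plain POSIX mmap (this is not the runtime's MmapFixedNoReserve wrapper; the size and flags below are illustrative assumptions):

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main() {
  const size_t kSize = 1 << 20;  // 1 MiB, illustrative
  char *p = (char *)mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != (char *)MAP_FAILED);
  memset(p, 0xab, kSize);  // dirty every page
  // Remap anonymous pages over the same range: the contents read as zero
  // without writing each byte, and the old dirty pages can be reclaimed.
  void *q = mmap(p, kSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                 -1, 0);
  assert(q == (void *)p);
  assert(p[0] == 0 && p[kSize - 1] == 0);
  return 0;
}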


@@ -15,6 +15,7 @@
 #include "asan_interceptors.h"
 #include "asan_internal.h"
 #include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_flags.h"

 namespace __asan {
@@ -34,10 +35,29 @@ void PoisonShadowPartialRightRedzone(uptr addr,
 ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
                                     u8 value) {
   DCHECK(flags()->poison_heap);
+  uptr PageSize = GetPageSizeCached();
   uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
   uptr shadow_end = MEM_TO_SHADOW(
       aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
-  REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+  if (value ||
+      shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+    REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+  } else {
+    uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+    uptr page_end = RoundDownTo(shadow_end, PageSize);
+    if (page_beg >= page_end) {
+      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+    } else {
+      if (page_beg != shadow_beg) {
+        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+      }
+      if (page_end != shadow_end) {
+        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+      }
+      MmapFixedNoReserve(page_beg, page_end - page_beg);
+    }
+  }
 }

 ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(

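The split above matters: memset still handles the unaligned edges, and only the page-aligned interior of the shadow range is remapped. A compilable restatement of the page_beg/page_end arithmetic (the rounding helpers, the 4 KiB page size, and the example addresses are illustrative; the runtime uses its own RoundUpTo/RoundDownTo):

#include <cstdint>
#include <cstdio>

// Power-of-two rounding as used for page_beg/page_end above (illustrative
// reimplementation for this sketch only).
static uint64_t RoundUpTo(uint64_t x, uint64_t b) { return (x + b - 1) & ~(b - 1); }
static uint64_t RoundDownTo(uint64_t x, uint64_t b) { return x & ~(b - 1); }

int main() {
  const uint64_t kPageSize = 4096;                            // assumed page size
  uint64_t shadow_beg = 0x10000123, shadow_end = 0x10040050;  // made-up range
  uint64_t page_beg = RoundUpTo(shadow_beg, kPageSize);       // 0x10001000
  uint64_t page_end = RoundDownTo(shadow_end, kPageSize);     // 0x10040000
  // memset covers the two unaligned edges; the aligned middle is re-mmapped.
  std::printf("edge bytes: %llu + %llu, mmapped bytes: %llu\n",
              (unsigned long long)(page_beg - shadow_beg),
              (unsigned long long)(shadow_end - page_end),
              (unsigned long long)(page_end - page_beg));
  return 0;
}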

@@ -1279,28 +1279,53 @@ void *fast_memcpy(void *dst, const void *src, SIZE_T n) {
   return internal_memcpy(dst, src, n);
 }

+static void PoisonShadow(uptr ptr, uptr size, u8 value) {
+  uptr PageSize = GetPageSizeCached();
+  uptr shadow_beg = MEM_TO_SHADOW(ptr);
+  uptr shadow_end = MEM_TO_SHADOW(ptr + size);
+  if (value ||
+      shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+    fast_memset((void*)shadow_beg, value, shadow_end - shadow_beg);
+  } else {
+    uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+    uptr page_end = RoundDownTo(shadow_end, PageSize);
+    if (page_beg >= page_end) {
+      fast_memset((void *)shadow_beg, 0, shadow_end - shadow_beg);
+    } else {
+      if (page_beg != shadow_beg) {
+        fast_memset((void *)shadow_beg, 0, page_beg - shadow_beg);
+      }
+      if (page_end != shadow_end) {
+        fast_memset((void *)page_end, 0, shadow_end - page_end);
+      }
+      MmapFixedNoReserve(page_beg, page_end - page_beg);
+    }
+  }
+}
+
 // These interface functions reside here so that they can use
 // fast_memset, etc.
 void __msan_unpoison(const void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  fast_memset((void*)MEM_TO_SHADOW((uptr)a), 0, size);
+  PoisonShadow((uptr)a, size, 0);
 }

 void __msan_poison(const void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  fast_memset((void*)MEM_TO_SHADOW((uptr)a),
-              __msan::flags()->poison_heap_with_zeroes ? 0 : -1, size);
+  PoisonShadow((uptr)a, size,
+               __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
 }

 void __msan_poison_stack(void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  fast_memset((void*)MEM_TO_SHADOW((uptr)a),
-              __msan::flags()->poison_stack_with_zeroes ? 0 : -1, size);
+  PoisonShadow((uptr)a, size,
+               __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
 }

 void __msan_clear_and_unpoison(void *a, uptr size) {
   fast_memset(a, 0, size);
-  fast_memset((void*)MEM_TO_SHADOW((uptr)a), 0, size);
+  PoisonShadow((uptr)a, size, 0);
 }

 u32 get_origin_if_poisoned(uptr a, uptr size) {


@@ -326,10 +326,27 @@ TEST(MemorySanitizer, Realloc) {
 TEST(MemorySanitizer, Calloc) {
   S4 *x = (int*)Ident(calloc(1, sizeof(S4)));
   EXPECT_NOT_POISONED(*x); // Should not be poisoned.
-  // EXPECT_EQ(0, *x);
+  EXPECT_EQ(0, *x);
   free(x);
 }

+TEST(MemorySanitizer, CallocReturnsZeroMem) {
+  size_t sizes[] = {16, 1000, 10000, 100000, 2100000};
+  for (size_t s = 0; s < sizeof(sizes)/sizeof(sizes[0]); s++) {
+    size_t size = sizes[s];
+    for (size_t iter = 0; iter < 5; iter++) {
+      char *x = Ident((char*)calloc(1, size));
+      EXPECT_EQ(x[0], 0);
+      EXPECT_EQ(x[size - 1], 0);
+      EXPECT_EQ(x[size / 2], 0);
+      EXPECT_EQ(x[size / 3], 0);
+      EXPECT_EQ(x[size / 4], 0);
+      memset(x, 0x42, size);
+      free(Ident(x));
+    }
+  }
+}
+
 TEST(MemorySanitizer, AndOr) {
   U4 *p = GetPoisoned<U4>();
   // We poison two bytes in the midle of a 4-byte word to make the test


@@ -40,6 +40,7 @@ void SetCommonFlagsDefaults(CommonFlags *f) {
   f->handle_segv = SANITIZER_NEEDS_SEGV;
   f->allow_user_segv_handler = false;
   f->use_sigaltstack = false;
+  f->clear_shadow_mmap_threshold = 64 * 1024;
 }

 void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
@@ -61,6 +62,8 @@ void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
   ParseFlag(str, &f->handle_segv, "handle_segv");
   ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
   ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
+  ParseFlag(str, &f->clear_shadow_mmap_threshold,
+            "clear_shadow_mmap_threshold");

   // Do a sanity check for certain flags.
   if (f->malloc_context_size < 1)
@@ -138,6 +141,14 @@ void ParseFlag(const char *env, int *flag, const char *name) {
   *flag = static_cast<int>(internal_atoll(value));
 }

+void ParseFlag(const char *env, uptr *flag, const char *name) {
+  const char *value;
+  int value_length;
+  if (!GetFlagValue(env, name, &value, &value_length))
+    return;
+  *flag = static_cast<uptr>(internal_atoll(value));
+}
+
 static LowLevelAllocator allocator_for_flags;

 void ParseFlag(const char *env, const char **flag, const char *name) {


@@ -20,6 +20,7 @@ namespace __sanitizer {
 void ParseFlag(const char *env, bool *flag, const char *name);
 void ParseFlag(const char *env, int *flag, const char *name);
+void ParseFlag(const char *env, uptr *flag, const char *name);
 void ParseFlag(const char *env, const char **flag, const char *name);

 struct CommonFlags {
@@ -70,6 +71,9 @@ struct CommonFlags {
   bool allow_user_segv_handler;
   // If set, uses alternate stack for signal handling.
   bool use_sigaltstack;
+  // Large shadow regions are zero-filled using mmap(NORESERVE) instead of
+  // memset. This is the threshold size in bytes.
+  uptr clear_shadow_mmap_threshold;
 };

 inline CommonFlags *common_flags() {

@@ -618,7 +618,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
   size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
   // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
   // so we do it only for C/C++.
-  if (kGoMode || size < 64*1024) {
+  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
     u64 *p = (u64*)MemToShadow(addr);
     CHECK(IsShadowMem((uptr)p));
     CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));