Revert r315533 "Reland "[sanitizer] Introduce ReservedAddressRange to sanitizer_common""

The SanitizerCommon.ReservedAddressRangeUnmap test fails on Windows:

FAIL: SanitizerCommon-Unit :: ./Sanitizer-x86_64-Test.exe/SanitizerCommon.ReservedAddressRangeUnmap (34003 of 35554)
******************** TEST 'SanitizerCommon-Unit :: ./Sanitizer-x86_64-Test.exe/SanitizerCommon.ReservedAddressRangeUnmap' FAILED ********************
Note: Google Test filter = SanitizerCommon.ReservedAddressRangeUnmap
[==========] Running 1 test from 1 test case.
[----------] Global test environment set-up.
[----------] 1 test from SanitizerCommon
[ RUN      ] SanitizerCommon.ReservedAddressRangeUnmap
==3780==ERROR: SanitizerTool failed to deallocate 0x1000 (4096) bytes at address 0x0000000c3000 (error code: 487)
==3780==Sanitizer CHECK failed: E:\b\build\slave\win_upload_clang\build\src\third_party\llvm\projects\compiler-rt\lib\sanitizer_common\sanitizer_win.cc:129 (("unable to unmap" && 0)) != (0) (0, 0)
********************
Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
Testing Time: 299.76s
********************
Failing Tests (1):
    SanitizerCommon-Unit :: ./Sanitizer-x86_64-Test.exe/SanitizerCommon.ReservedAddressRangeUnmap

> In Fuchsia, MmapNoAccess/MmapFixedOrDie are implemented using a global
> VMAR, which means that MmapNoAccess can only be called once. This works
> for the sanitizer allocator but *not* for the Scudo allocator.
>
> Hence, this changeset introduces a new ReservedAddressRange object to
> serve as the new API for these calls. In this changeset, the object
> still calls into the old Mmap implementations.
>
> The next two changesets will convert the sanitizer and scudo
> allocators to use the new APIs, respectively. (ReservedAddressRange will
> replace the SecondaryHeader in Scudo.)
>
> Finally, a last changeset will update the Fuchsia implementation.
>
> Patch by Julia Hansbrough
>
> Differential Revision: https://reviews.llvm.org/D38437

llvm-svn: 315553
Hans Wennborg 2017-10-12 03:23:31 +00:00
parent be85529d2b
commit eb2218ceae
5 changed files with 0 additions and 180 deletions
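
For context, a minimal sketch of how the reverted ReservedAddressRange API was meant to be used (illustration only, mirroring the unit tests removed at the bottom of this diff; not part of the original commit):

    ReservedAddressRange range;
    // Reserve four pages of address space with no access permissions.
    uptr base = range.Init(GetPageSizeCached() * 4);
    // Commit the whole reservation, then give the first page back.
    range.Map(base, GetPageSizeCached() * 4);
    range.Unmap(base, GetPageSizeCached());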

@@ -128,20 +128,6 @@ void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size, bool tolerate_enomem = false);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
};

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

@@ -236,37 +236,6 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char* name,
                                uptr fixed_addr) {
  base_ = MmapNoAccess(init_size);
  size_ = init_size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               bool tolerate_enomem) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, map_size,
                                               tolerate_enomem));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}

// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
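
The Unmap bookkeeping above only allows trimming the reservation from either end; a worked example with made-up numbers (an illustration, not from the patch):

    // Suppose base_ == 0x10000 and size_ == 0x4000 (four 4 KiB pages).
    //   Unmap(0x10000, 0x1000)  // start of range: base_ -> 0x11000, size_ -> 0x3000
    //   Unmap(0x13000, 0x1000)  // end of range:   size_ -> 0x2000
    //   Unmap(0x11800, 0x0800)  // neither end:    the CHECK_EQ fires and we die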


@@ -337,42 +337,6 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  return (void *)p;
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  if (fixed_addr) {
    base_ = MmapFixedNoAccess(fixed_addr, size, name);
  } else {
    base_ = MmapNoAccess(size);
  }
  size_ = size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
                               bool tolerate_enomem) {
  if (tolerate_enomem) {
    return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
  }
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
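
The POSIX code above passes through to MmapNoAccess/MmapFixedNoAccess and MmapFixedOrDie, i.e. a reserve-with-PROT_NONE, commit-with-MAP_FIXED pattern. A self-contained sketch of that pattern (an illustration assuming Linux and MAP_ANONYMOUS, not code from the tree):

    #include <assert.h>
    #include <sys/mman.h>

    int main() {
      const size_t size = 1 << 20;
      // Reserve address space without committing backing store.
      void *base = mmap(nullptr, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      assert(base != MAP_FAILED);
      // Commit one page read-write at a fixed address inside the reservation.
      void *page = mmap(base, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      assert(page == base);
      munmap(base, size);
      return 0;
    }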


@@ -235,31 +235,6 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
                               bool tolerate_enomem) {
  if (tolerate_enomem) {
    return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
  }
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);

@@ -277,18 +252,6 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  if (fixed_addr) {
    base_ = MmapFixedNoAccess(fixed_addr, size, name);
  } else {
    base_ = MmapNoAccess(size);
  }
  size_ = size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
(void)name; // unsupported
void *res = VirtualAlloc((LPVOID)fixed_addr, size,
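
Error code 487 in the log at the top is ERROR_INVALID_ADDRESS. One plausible reading of the Windows failure (an assumption; the commit does not diagnose it) is that the page-granular Unmap exercised by the test below does not map cleanly onto VirtualAlloc/VirtualFree, since MEM_RELEASE only accepts a region's original base address with dwSize == 0. A minimal Win32 sketch of that constraint:

    #include <windows.h>
    #include <stdio.h>

    int main() {
      SYSTEM_INFO si;
      GetSystemInfo(&si);
      const SIZE_T page = si.dwPageSize;
      void *base = VirtualAlloc(nullptr, 4 * page, MEM_RESERVE, PAGE_NOACCESS);
      if (!base) return 1;
      // Releasing only the first page of the reservation is rejected:
      // MEM_RELEASE requires dwSize == 0 and the region's original base.
      if (!VirtualFree(base, page, MEM_RELEASE))
        printf("partial release failed, error %lu\n", GetLastError());
      // Releasing the whole region at its base address succeeds.
      VirtualFree(base, 0, MEM_RELEASE);
      return 0;
    }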


@@ -320,66 +320,4 @@ TEST(SanitizerCommon, GetRandom) {
}
#endif

TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);

  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}
TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void*>(res), sizeof(buffer));

  // Invalid mappings should fail.
  EXPECT_DEATH(address_range.Map(res, 0), ".*");

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}
TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 4;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);
  CHECK_EQ(base_addr + PageSize, address_range.base());
  CHECK_EQ(init_size - PageSize, address_range.size());

  // Unmapping at the end should succeed.
  uptr old_size = address_range.size();
  void* old_base = address_range.base();
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);
  CHECK_EQ(old_size - PageSize, address_range.size());
  CHECK_EQ(old_base, address_range.base());

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + 0xf, 0xff), ".*");
}
} // namespace __sanitizer