Inline function to get mac segment address range

Summary:
This function is only called once and is fairly simple. Inline to
keep API simple.

Reviewers: alekseyshl, kubamracek

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D35270

llvm-svn: 307695
Francis Ricci 2017-07-11 19:40:53 +00:00
parent 67b0e53dc1
commit edd53cb652
2 changed files with 58 additions and 63 deletions

lib/sanitizer_common/sanitizer_procmaps.h

@@ -86,8 +86,6 @@ class MemoryMappingLayout {
# elif SANITIZER_MAC
  template <u32 kLCSegment, typename SegmentCommand>
  bool NextSegmentLoad(MemoryMappedSegment *segment);
  void GetSegmentAddrRange(MemoryMappedSegment *segment, uptr vmaddr,
                           uptr vmsize);
  int current_image_;
  u32 current_magic_;
  u32 current_filetype_;

lib/sanitizer_common/sanitizer_procmaps_mac.cc

@@ -88,6 +88,48 @@ void MemoryMappingLayout::LoadFromCache() {
  // No-op on Mac for now.
}
// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recursive
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations.
static mach_header *get_dyld_image_header() {
  mach_port_name_t port;
  if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
      KERN_SUCCESS) {
    return nullptr;
  }
  unsigned depth = 1;
  vm_size_t size = 0;
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
  while (true) {
    struct vm_region_submap_info_64 info;
    err = vm_region_recurse_64(port, &address, &size, &depth,
                               (vm_region_info_t)&info, &count);
    if (err != KERN_SUCCESS) return nullptr;
    if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
      mach_header *hdr = (mach_header *)address;
      if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
          hdr->filetype == MH_DYLINKER) {
        return hdr;
      }
    }
    address += size;
  }
}
const mach_header *get_dyld_hdr() {
  if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
  return dyld_hdr;
}
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, https://github.com/gperftools/gperftools.
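The region-walking approach described in the comment above can be reproduced outside the sanitizer runtime using only the public Mach APIs. The sketch below is illustrative and not part of this patch: it uses mach_task_self() and VM_PROT_READ in place of the task_for_pid() port and kProtectionRead used in the runtime, and the helper name FindDyldHeader is made up.

#include <mach/mach.h>
#include <mach-o/loader.h>

// Walk the task's VM regions until a readable region starts with a Mach-O
// header whose filetype is MH_DYLINKER, i.e. dyld itself.
static const mach_header *FindDyldHeader() {
  vm_address_t address = 0;
  vm_size_t size = 0;
  natural_t depth = 1;
  while (true) {
    vm_region_submap_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
    if (vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
                             (vm_region_info_t)&info, &count) != KERN_SUCCESS)
      return nullptr;  // Ran past the last region without finding dyld.
    if (size >= sizeof(mach_header) && (info.protection & VM_PROT_READ)) {
      const mach_header *hdr = (const mach_header *)address;
      if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
          hdr->filetype == MH_DYLINKER)
        return hdr;
    }
    address += size;  // Advance past this region and keep scanning.
  }
}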
@@ -101,7 +143,22 @@ bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
  if (((const load_command *)lc)->cmd == kLCSegment) {
    const SegmentCommand* sc = (const SegmentCommand *)lc;
    GetSegmentAddrRange(segment, sc->vmaddr, sc->vmsize);
    if (current_image_ == kDyldImageIdx) {
      // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
      // it contains an absolute address rather than an offset for dyld.
      // To make matters even more complicated, this absolute address
      // isn't actually the absolute segment address, but the offset portion
      // of the address is accurate when combined with the dyld base address,
      // and the mask will give just this offset.
      segment->start = (sc->vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
      segment->end = (sc->vmaddr & 0xfffff) + sc->vmsize + (uptr)get_dyld_hdr();
    } else {
      const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
      segment->start = sc->vmaddr + dlloff;
      segment->end = sc->vmaddr + sc->vmsize + dlloff;
    }
    // Return the initial protection.
    segment->protection = sc->initprot;
    segment->offset =
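With made-up numbers, the dyld branch above computes the segment range as follows; all addresses here are hypothetical and chosen only to illustrate the masking described in the comment.

// Hypothetical values, purely illustrative.
uptr vmaddr = 0x7fff5fc31000;    // absolute address reported by pre-10.12 dyld
uptr vmsize = 0x5000;            // segment size from the same load command
uptr dyld_base = 0x10f2c4000;    // address returned by get_dyld_hdr()

uptr offset = vmaddr & 0xfffff;          // keep the low 20 bits -> 0x31000
uptr start = offset + dyld_base;         // 0x10f2f5000
uptr end = offset + vmsize + dyld_base;  // 0x10f2fa000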
@@ -174,66 +231,6 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
  return false;
}
// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recursive
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations.
static mach_header *get_dyld_image_header() {
  mach_port_name_t port;
  if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
      KERN_SUCCESS) {
    return nullptr;
  }
  unsigned depth = 1;
  vm_size_t size = 0;
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
  while (true) {
    struct vm_region_submap_info_64 info;
    err = vm_region_recurse_64(port, &address, &size, &depth,
                               (vm_region_info_t)&info, &count);
    if (err != KERN_SUCCESS) return nullptr;
    if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
      mach_header *hdr = (mach_header *)address;
      if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
          hdr->filetype == MH_DYLINKER) {
        return hdr;
      }
    }
    address += size;
  }
}
const mach_header *get_dyld_hdr() {
  if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
  return dyld_hdr;
}
void MemoryMappingLayout::GetSegmentAddrRange(MemoryMappedSegment *segment,
                                              uptr vmaddr, uptr vmsize) {
  if (current_image_ == kDyldImageIdx) {
    // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
    // it contains an absolute address rather than an offset for dyld.
    // To make matters even more complicated, this absolute address
    // isn't actually the absolute segment address, but the offset portion
    // of the address is accurate when combined with the dyld base address,
    // and the mask will give just this offset.
    segment->start = (vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
    segment->end = (vmaddr & 0xfffff) + vmsize + (uptr)get_dyld_hdr();
  } else {
    const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
    segment->start = vmaddr + dlloff;
    segment->end = vmaddr + vmsize + dlloff;
  }
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
  for (; current_image_ >= kDyldImageIdx; current_image_--) {
    const mach_header *hdr = (current_image_ == kDyldImageIdx)
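For reference, the iterator touched by this patch is typically consumed along the lines of the sketch below. It assumes the MemoryMappingLayout constructor takes a cache_enabled flag and that MemoryMappedSegment is default-constructible, which may differ from the exact interface in the header.

// Hedged usage sketch: count readable segments across all loaded images.
MemoryMappingLayout memory_mapping(/*cache_enabled=*/false);
MemoryMappedSegment segment;
uptr n_readable = 0;
while (memory_mapping.Next(&segment)) {
  // Each segment reports its address range and initial protection; dyld's
  // segments are included because Next() falls back to get_dyld_hdr().
  if (segment.protection & kProtectionRead) n_readable++;
}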