Removed platform-specific ifdefs from sanitizer_procmaps.h

Summary: Removed platform-specific ifdefs for linux, mac, freebsd and netbsd from sanitizer_procmaps.h

Patch by Yicheng Wang <yichengfb@fb.com>

Reviewers: kcc, kubamracek, alekseyshl, fjricci, vitalybuka

Reviewed By: fjricci, vitalybuka

Subscribers: vitalybuka, emaste, krytarowski, llvm-commits

Differential Revision: https://reviews.llvm.org/D38098

llvm-svn: 313999
This commit is contained in:
Francis Ricci 2017-09-22 17:48:24 +00:00
parent a1e7ecc734
commit fbccb0a8d2
7 changed files with 132 additions and 132 deletions

View File

@@ -28,6 +28,19 @@ namespace __sanitizer {
// the one in <dirent.h>, which is used by readdir().
struct linux_dirent;
// Holds a raw, mmap-backed snapshot of the process memory map
// (e.g. /proc/self/maps); filled in by ReadProcMaps() below.
struct ProcSelfMapsBuff {
char *data;       // start of the mmap'ed buffer (freed via UnmapOrDie)
uptr mmaped_size; // size of the underlying mapping; may exceed len
uptr len;         // number of valid bytes in data
};
// Linux/FreeBSD/NetBSD-specific iteration state for MemoryMappingLayout:
// the maps snapshot plus a cursor used while parsing entries.
struct MemoryMappingLayoutData {
ProcSelfMapsBuff proc_self_maps; // snapshot of the process memory map
const char *current;             // parse cursor into proc_self_maps.data
};
void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);

View File

@@ -20,6 +20,17 @@
namespace __sanitizer {
// Mach-O (macOS) specific iteration state for MemoryMappingLayout's walk
// over dyld images and their load commands.
struct MemoryMappingLayoutData {
int current_image;             // dyld image index; iterated downward
u32 current_magic;             // mach header magic (MH_MAGIC / MH_MAGIC_64)
u32 current_filetype;          // mach header filetype (e.g. MH_EXECUTE)
ModuleArch current_arch;       // derived from hdr->cputype/cpusubtype
u8 current_uuid[kModuleUUIDSize];  // UUID of the current image
int current_load_cmd_count;    // load commands left; < 0 => image not set up
char *current_load_cmd_addr;   // next load command to examine
bool current_instrumented;     // whether the current module is instrumented
};
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,

View File

@@ -16,20 +16,12 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
#include "sanitizer_mac.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
uptr len;
};
// Reads process memory map in an OS-specific way.
void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
// Memory protection masks.
static const uptr kProtectionRead = 1;
@@ -87,25 +79,7 @@ class MemoryMappingLayout {
// FIXME: Hide implementation details for different platforms in
// platform-specific files.
# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
ProcSelfMapsBuff proc_self_maps_;
const char *current_;
// Static mappings cache.
static ProcSelfMapsBuff cached_proc_self_maps_;
static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
# elif SANITIZER_MAC
template <u32 kLCSegment, typename SegmentCommand>
bool NextSegmentLoad(MemoryMappedSegment *segment);
int current_image_;
u32 current_magic_;
u32 current_filetype_;
ModuleArch current_arch_;
u8 current_uuid_[kModuleUUIDSize];
int current_load_cmd_count_;
char *current_load_cmd_addr_;
bool current_instrumented_;
# endif
MemoryMappingLayoutData data_;
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,

View File

@@ -20,9 +20,8 @@
namespace __sanitizer {
// Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
static ProcSelfMapsBuff cached_proc_self_maps;
static StaticSpinMutex cache_lock;
static int TranslateDigit(char c) {
if (c >= '0' && c <= '9')
@@ -71,14 +70,14 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
}
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
ReadProcMaps(&proc_self_maps_);
ReadProcMaps(&data_.proc_self_maps);
if (cache_enabled) {
if (proc_self_maps_.mmaped_size == 0) {
if (data_.proc_self_maps.mmaped_size == 0) {
LoadFromCache();
CHECK_GT(proc_self_maps_.len, 0);
CHECK_GT(data_.proc_self_maps.len, 0);
}
} else {
CHECK_GT(proc_self_maps_.mmaped_size, 0);
CHECK_GT(data_.proc_self_maps.mmaped_size, 0);
}
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
@@ -89,24 +88,22 @@ MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
MemoryMappingLayout::~MemoryMappingLayout() {
// Only unmap the buffer if it is different from the cached one. Otherwise
// it will be unmapped when the cache is refreshed.
if (proc_self_maps_.data != cached_proc_self_maps_.data) {
UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
if (data_.proc_self_maps.data != cached_proc_self_maps.data) {
UnmapOrDie(data_.proc_self_maps.data, data_.proc_self_maps.mmaped_size);
}
}
void MemoryMappingLayout::Reset() {
current_ = proc_self_maps_.data;
}
void MemoryMappingLayout::Reset() { data_.current = data_.proc_self_maps.data; }
// static
void MemoryMappingLayout::CacheMemoryMappings() {
SpinMutexLock l(&cache_lock_);
SpinMutexLock l(&cache_lock);
// Don't invalidate the cache if the mappings are unavailable.
ProcSelfMapsBuff old_proc_self_maps;
old_proc_self_maps = cached_proc_self_maps_;
ReadProcMaps(&cached_proc_self_maps_);
if (cached_proc_self_maps_.mmaped_size == 0) {
cached_proc_self_maps_ = old_proc_self_maps;
old_proc_self_maps = cached_proc_self_maps;
ReadProcMaps(&cached_proc_self_maps);
if (cached_proc_self_maps.mmaped_size == 0) {
cached_proc_self_maps = old_proc_self_maps;
} else {
if (old_proc_self_maps.mmaped_size) {
UnmapOrDie(old_proc_self_maps.data,
@@ -116,9 +113,9 @@ void MemoryMappingLayout::CacheMemoryMappings() {
}
void MemoryMappingLayout::LoadFromCache() {
SpinMutexLock l(&cache_lock_);
if (cached_proc_self_maps_.data) {
proc_self_maps_ = cached_proc_self_maps_;
SpinMutexLock l(&cache_lock);
if (cached_proc_self_maps.data) {
data_.proc_self_maps = cached_proc_self_maps;
}
}

View File

@@ -67,9 +67,9 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
if (data_.current >= last) return false;
struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry *)data_.current;
segment->start = (uptr)VmEntry->kve_start;
segment->end = (uptr)VmEntry->kve_end;
@@ -90,9 +90,9 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
#if SANITIZER_FREEBSD
current_ += VmEntry->kve_structsize;
data_.current += VmEntry->kve_structsize;
#else
current_ += sizeof(*VmEntry);
data_.current += sizeof(*VmEntry);
#endif
return true;

View File

@@ -27,48 +27,48 @@ static bool IsOneOf(char c, char c1, char c2) {
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
if (data_.current >= last) return false;
char *next_line =
(char *)internal_memchr(data_.current, '\n', last - data_.current);
if (next_line == 0)
next_line = last;
// Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
segment->start = ParseHex(&current_);
CHECK_EQ(*current_++, '-');
segment->end = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
CHECK(IsOneOf(*current_, '-', 'r'));
segment->start = ParseHex(&data_.current);
CHECK_EQ(*data_.current++, '-');
segment->end = ParseHex(&data_.current);
CHECK_EQ(*data_.current++, ' ');
CHECK(IsOneOf(*data_.current, '-', 'r'));
segment->protection = 0;
if (*current_++ == 'r') segment->protection |= kProtectionRead;
CHECK(IsOneOf(*current_, '-', 'w'));
if (*current_++ == 'w') segment->protection |= kProtectionWrite;
CHECK(IsOneOf(*current_, '-', 'x'));
if (*current_++ == 'x') segment->protection |= kProtectionExecute;
CHECK(IsOneOf(*current_, 's', 'p'));
if (*current_++ == 's') segment->protection |= kProtectionShared;
CHECK_EQ(*current_++, ' ');
segment->offset = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
ParseHex(&current_);
CHECK_EQ(*current_++, ':');
ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
while (IsDecimal(*current_))
current_++;
if (*data_.current++ == 'r') segment->protection |= kProtectionRead;
CHECK(IsOneOf(*data_.current, '-', 'w'));
if (*data_.current++ == 'w') segment->protection |= kProtectionWrite;
CHECK(IsOneOf(*data_.current, '-', 'x'));
if (*data_.current++ == 'x') segment->protection |= kProtectionExecute;
CHECK(IsOneOf(*data_.current, 's', 'p'));
if (*data_.current++ == 's') segment->protection |= kProtectionShared;
CHECK_EQ(*data_.current++, ' ');
segment->offset = ParseHex(&data_.current);
CHECK_EQ(*data_.current++, ' ');
ParseHex(&data_.current);
CHECK_EQ(*data_.current++, ':');
ParseHex(&data_.current);
CHECK_EQ(*data_.current++, ' ');
while (IsDecimal(*data_.current)) data_.current++;
// Qemu may lack the trailing space.
// https://github.com/google/sanitizers/issues/160
// CHECK_EQ(*current_++, ' ');
// CHECK_EQ(*data_.current++, ' ');
// Skip spaces.
while (current_ < next_line && *current_ == ' ')
current_++;
while (data_.current < next_line && *data_.current == ' ') data_.current++;
// Fill in the filename.
if (segment->filename) {
uptr len = Min((uptr)(next_line - current_), segment->filename_size - 1);
internal_strncpy(segment->filename, current_, len);
uptr len =
Min((uptr)(next_line - data_.current), segment->filename_size - 1);
internal_strncpy(segment->filename, data_.current, len);
segment->filename[len] = 0;
}
current_ = next_line + 1;
data_.current = next_line + 1;
return true;
}

View File

@@ -108,13 +108,13 @@ void MemoryMappingLayout::Reset() {
// _dyld_image_count is thread-unsafe. We need to register callbacks for
// adding and removing images which will invalidate the MemoryMappingLayout
// state.
current_image_ = _dyld_image_count();
current_load_cmd_count_ = -1;
current_load_cmd_addr_ = 0;
current_magic_ = 0;
current_filetype_ = 0;
current_arch_ = kModuleArchUnknown;
internal_memset(current_uuid_, 0, kModuleUUIDSize);
data_.current_image = _dyld_image_count();
data_.current_load_cmd_count = -1;
data_.current_load_cmd_addr = 0;
data_.current_magic = 0;
data_.current_filetype = 0;
data_.current_arch = kModuleArchUnknown;
internal_memset(data_.current_uuid, 0, kModuleUUIDSize);
}
// The dyld load address should be unchanged throughout process execution,
@@ -183,14 +183,14 @@ const mach_header *get_dyld_hdr() {
// segment.
// Note that the segment addresses are not necessarily sorted.
template <u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
const char *lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data, MemoryMappingLayoutData &layout_data) {
const char *lc = layout_data.current_load_cmd_addr;
layout_data.current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
if (current_image_ == kDyldImageIdx) {
if (layout_data.current_image == kDyldImageIdx) {
base_virt_addr = (uptr)get_dyld_hdr();
// vmaddr is masked with 0xfffff because on macOS versions < 10.12,
// it contains an absolute address rather than an offset for dyld.
@@ -200,37 +200,40 @@ bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
// and the mask will give just this offset.
addr_mask = 0xfffff;
} else {
base_virt_addr = (uptr)_dyld_get_image_vmaddr_slide(current_image_);
base_virt_addr =
(uptr)_dyld_get_image_vmaddr_slide(layout_data.current_image);
addr_mask = ~0;
}
segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
segment->end = segment->start + sc->vmsize;
// Most callers don't need section information, so only fill this struct
// when required.
if (segment->data_) {
segment->data_->nsects = sc->nsects;
segment->data_->current_load_cmd_addr =
if (seg_data) {
seg_data->nsects = sc->nsects;
seg_data->current_load_cmd_addr =
(char *)lc + sizeof(SegmentCommand);
segment->data_->lc_type = kLCSegment;
segment->data_->base_virt_addr = base_virt_addr;
segment->data_->addr_mask = addr_mask;
internal_strncpy(segment->data_->name, sc->segname,
ARRAY_SIZE(segment->data_->name));
seg_data->lc_type = kLCSegment;
seg_data->base_virt_addr = base_virt_addr;
seg_data->addr_mask = addr_mask;
internal_strncpy(seg_data->name, sc->segname,
ARRAY_SIZE(seg_data->name));
}
// Return the initial protection.
segment->protection = sc->initprot;
segment->offset =
(current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff;
segment->offset = (layout_data.current_filetype ==
/*MH_EXECUTE*/ 0x2)
? sc->vmaddr
: sc->fileoff;
if (segment->filename) {
const char *src = (current_image_ == kDyldImageIdx)
const char *src = (layout_data.current_image == kDyldImageIdx)
? kDyldPath
: _dyld_get_image_name(current_image_);
: _dyld_get_image_name(layout_data.current_image);
internal_strncpy(segment->filename, src, segment->filename_size);
}
segment->arch = current_arch_;
internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize);
segment->arch = layout_data.current_arch;
internal_memcpy(segment->uuid, layout_data.current_uuid, kModuleUUIDSize);
return true;
}
return false;
@@ -292,50 +295,52 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; current_image_ >= kDyldImageIdx; current_image_--) {
const mach_header *hdr = (current_image_ == kDyldImageIdx)
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
: _dyld_get_image_header(current_image_);
: _dyld_get_image_header(data_.current_image);
if (!hdr) continue;
if (current_load_cmd_count_ < 0) {
if (data_.current_load_cmd_count < 0) {
// Set up for this image;
current_load_cmd_count_ = hdr->ncmds;
current_magic_ = hdr->magic;
current_filetype_ = hdr->filetype;
current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
switch (current_magic_) {
data_.current_load_cmd_count = hdr->ncmds;
data_.current_magic = hdr->magic;
data_.current_filetype = hdr->filetype;
data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
switch (data_.current_magic) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
data_.current_load_cmd_addr = (char *)hdr + sizeof(mach_header_64);
break;
}
#endif
case MH_MAGIC: {
current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
data_.current_load_cmd_addr = (char *)hdr + sizeof(mach_header);
break;
}
default: {
continue;
}
}
FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]);
current_instrumented_ =
IsModuleInstrumented((const load_command *)current_load_cmd_addr_);
FindUUID((const load_command *)data_.current_load_cmd_addr,
data_.current_uuid);
data_.current_instrumented = IsModuleInstrumented(
(const load_command *)data_.current_load_cmd_addr);
}
for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
switch (current_magic_) {
// current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
segment))
segment, segment->data_, data_))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment))
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
segment, segment->data_, data_))
return true;
break;
}
@@ -364,7 +369,7 @@ void MemoryMappingLayout::DumpListOfModules(
modules->push_back(LoadedModule());
cur_module = &modules->back();
cur_module->set(segment.filename, segment.start, segment.arch,
segment.uuid, current_instrumented_);
segment.uuid, data_.current_instrumented);
}
segment.AddAddressRanges(cur_module);
}