[HWASan] Port HWASan to Linux x86-64 (compiler-rt)

Summary:
Porting HWASan to Linux x86-64; this is the first of three patches, covering the compiler-rt part.

The approach is similar to the ARM case: a trap signal is used to communicate
a memory tag check failure. An int3 instruction is used to generate the
signal, and the access parameters are encoded in the nop [eax + offset]
instruction immediately following the int3.
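
For illustration (not part of the patch): for a 4-byte store with
abort-on-error semantics, the offset byte is 0x40 + 0x10 (store) +
0x02 (log2 of the access size) = 0x52, and the emitted check disassembles
roughly as:

  // Hypothetical disassembly of one emitted x86-64 check, 5 bytes total:
  //   cc              int3              // raises SIGTRAP; the saved RIP
  //                                     // points at the following nopl
  //   0f 1f 40 52     nopl 0x52(%rax)   // disp8 encodes the access params;
  //                                     // executes as a no-op on recovery
  // The tagged pointer itself reaches the handler in RDI.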

Had to add HWASan init on malloc because, with far fewer interceptors defined
(most other sanitizers intercept much more and get initialized via one of
those interceptors, or don't care about malloc), HWASan was not yet
initialized when libstdc++ tried to allocate memory for its own fixed-size
heap, which led to a CHECK failure in AllocateFromLocalPool.
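
The resulting interceptor logic (shown in full in the interceptors hunk
below) boils down to the following sketch; all names are from the patch,
comments are added here for exposition:

  INTERCEPTOR(void *, malloc, SIZE_T size) {
    GET_MALLOC_STACK_TRACE;
    // Initialize HWASan lazily on the first malloc call, but not while
    // __hwasan_init itself is running (initialization may call malloc).
    if (UNLIKELY(!hwasan_init_is_running))
      ENSURE_HWASAN_INITED();
    if (UNLIKELY(!hwasan_inited))
      // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
      return AllocateFromLocalPool(size);
    ...
  }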

Also added a CHECK() failure handler with a more detailed message and
stack reporting.
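
With the handler installed (see the HWAsanCheckFailed hunk below), a failed
CHECK now reports along these lines (file, line, condition and values here
are placeholders):

  HWAddressSanitizer CHECK failed: hwasan_allocator.cc:42 "size <= kMaxAllowedMallocSize" (0x80000001, 0x80000000)

followed by the current stack trace printed via PRINT_CURRENT_STACK_CHECK.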

Reviewers: eugenis

Subscribers: kubamracek, dberris, mgorny, kristof.beyls, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D44705

llvm-svn: 328385
Alex Shlyapnikov 2018-03-23 23:38:04 +00:00
parent 5317f2e4c9
commit 5cd35ed4fe
6 changed files with 96 additions and 43 deletions

@@ -204,7 +204,7 @@ else()
set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64})
endif()
set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64})
set(ALL_HWASAN_SUPPORTED_ARCH ${ARM64})
set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64})
set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64}
${MIPS32} ${MIPS64} ${S390X})
set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64})

@@ -143,6 +143,14 @@ void PrintWarning(uptr pc, uptr bp) {
ReportInvalidAccess(&stack, 0);
}
static void HWAsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
line, cond, (uptr)v1, (uptr)v2);
PRINT_CURRENT_STACK_CHECK();
Die();
}
} // namespace __hwasan
// Interface.
@@ -160,6 +168,9 @@ void __hwasan_init() {
CacheBinaryName();
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(HWAsanCheckFailed);
__sanitizer_set_report_path(common_flags()->log_path);
InitializeInterceptors();
@@ -240,11 +251,23 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
template<unsigned X>
__attribute__((always_inline))
static void SigTrap() {
static void SigTrap(uptr p) {
#if defined(__aarch64__)
asm("brk %0\n\t" ::"n"(X));
#elif defined(__x86_64__) || defined(__i386__)
asm("ud2\n\t");
(void)p;
// 0x900 is added so as not to interfere with the kernel's use of lower
// brk immediate values.
// FIXME: Add a constraint to put the pointer into x0, the same as x86 branch.
asm("brk %0\n\t" ::"n"(0x900 + X));
#elif defined(__x86_64__)
// INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
// total. The pointer is passed via rdi.
// 0x40 is added as a safeguard, to help distinguish our trap from others
// and to avoid a zero offset in the instruction (a zero offset would be
// encoded as a different, three-byte nop instruction).
asm volatile(
"int3\n"
"nopl %c0(%%rax)\n"
:: "n"(0x40 + X), "D"(p));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -261,8 +284,8 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(ptr_raw);
if (UNLIKELY(ptr_tag != mem_tag)) {
SigTrap<0x900 + 0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + LogSize>();
SigTrap<0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + LogSize>(p);
if (EA == ErrorAction::Abort) __builtin_unreachable();
}
}
@@ -277,8 +300,8 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
tag_t *shadow_last = (tag_t *)MEM_TO_SHADOW(ptr_raw + sz - 1);
for (tag_t *t = shadow_first; t <= shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
SigTrap<0x900 + 0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + 0xf>();
SigTrap<0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + 0xf>(p);
if (EA == ErrorAction::Abort) __builtin_unreachable();
}
}

@@ -137,6 +137,15 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
#define GET_FATAL_STACK_TRACE_HERE \
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
#define PRINT_CURRENT_STACK_CHECK() \
{ \
GET_FATAL_STACK_TRACE_HERE; \
stack.Print(); \
}
class ScopedThreadLocalStateBackup {
public:
ScopedThreadLocalStateBackup() { Backup(); }

@@ -70,8 +70,8 @@ struct HwasanMapUnmapCallback {
}
};
#if !defined(__aarch64__)
#error unsupported platform
#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
#endif
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G

@@ -258,6 +258,8 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_init_is_running))
ENSURE_HWASAN_INITED();
if (UNLIKELY(!hwasan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);

@@ -69,13 +69,13 @@ static void ProtectGap(uptr addr, uptr size) {
Report(
"ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
"HWASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
// LowMem covers as much of the first 4GB as possible.
const uptr kLowMemEnd = 1UL<<32;
const uptr kLowMemEnd = 1UL << 32;
const uptr kLowShadowEnd = kLowMemEnd >> kShadowScale;
const uptr kLowShadowStart = kLowShadowEnd >> kShadowScale;
static uptr kHighShadowStart;
@@ -85,7 +85,6 @@ static uptr kHighMemStart;
bool InitShadow() {
const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
// HighMem covers the upper part of the address space.
kHighShadowEnd = (maxVirtualAddress >> kShadowScale) + 1;
kHighShadowStart = Max(kLowMemEnd, kHighShadowEnd >> kShadowScale);
@@ -186,43 +185,57 @@ struct AccessInfo {
bool recover;
};
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// Access type is passed in a platform dependent way (see below) and encoded
// as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
// recoverable. Valid values of Y are 0 to 4, which are interpreted as
// log2(access_size), and 0xF, which means that access size is passed via
// platform dependent register (see below).
#if defined(__aarch64__)
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// Access type is encoded in BRK immediate as 0x9XY,
// where X&1 is 1 for store, 0 for load,
// and X&2 is 1 if the error is recoverable.
// Valid values of Y are 0 to 4, which are interpreted as log2(access_size),
// and 0xF, which means that access size is stored in X1 register.
// Access address is always in X0 register.
AccessInfo ai;
// Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
// access size is stored in X1 register. Access address is always in X0
// register.
uptr pc = (uptr)info->si_addr;
unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
if ((code & 0xff00) != 0x900)
return AccessInfo{0, 0, false, false}; // Not ours.
bool is_store = code & 0x10;
bool recover = code & 0x20;
unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
return AccessInfo{0, 0, false, false}; // Not ours.
return AccessInfo{}; // Not ours.
const bool is_store = code & 0x10;
const bool recover = code & 0x20;
const uptr addr = uc->uc_mcontext.regs[0];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
return AccessInfo{}; // Not ours.
const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
#elif defined(__x86_64__)
// Access type is encoded in the instruction following INT3 as
// NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
// RSI register. Access address is always in RDI register.
uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
uint8_t *nop = (uint8_t*)pc;
if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
*(nop + 3) < 0x40)
return AccessInfo{}; // Not ours.
const unsigned code = *(nop + 3);
const bool is_store = code & 0x10;
const bool recover = code & 0x20;
const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
return AccessInfo{}; // Not ours.
const uptr size =
size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
ai.is_store = is_store;
ai.is_load = !is_store;
ai.addr = uc->uc_mcontext.regs[0];
if (size_log == 0xf)
ai.size = uc->uc_mcontext.regs[1];
else
ai.size = 1U << size_log;
ai.recover = recover;
return ai;
}
#else
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
return AccessInfo{0, 0, false, false};
}
# error Unsupported architecture
#endif
return AccessInfo{addr, size, is_store, !is_store, recover};
}
static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
SignalContext sig{info, uc};
AccessInfo ai = GetAccessInfo(info, uc);
if (!ai.is_store && !ai.is_load)
return false;
@@ -230,6 +243,7 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
BufferedStackTrace *stack = stack_buffer.data();
stack->Reset();
SignalContext sig{info, uc};
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, uc,
common_flags()->fast_unwind_on_fatal);
@@ -239,7 +253,12 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
if (flags()->halt_on_error || !ai.recover)
Die();
#if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
return true;
}
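
To make the x86-64 decoding concrete, the check performed by GetAccessInfo
above can be distilled into a self-contained sketch. DecodeHwasanTrap and
the Decoded struct are hypothetical names for exposition only, not part of
the patch:

  #include <cstdint>

  struct Decoded {
    unsigned size_log;  // log2(access size); 0xf means size is passed in RSI
    bool is_store;
    bool recover;
    bool valid;
  };

  // `pc` is the RIP saved in the signal frame. On x86-64, int3 is a trap,
  // so the saved RIP already points past it, at the following nopl.
  static Decoded DecodeHwasanTrap(const uint8_t *pc) {
    // Expect 0f 1f 40 XX with XX >= 0x40, i.e. nopl XX(%rax).
    if (pc[0] != 0x0f || pc[1] != 0x1f || pc[2] != 0x40 || pc[3] < 0x40)
      return {0, false, false, false};  // Not a HWASan trap.
    // The 0x40 safeguard occupies bit 6 only, so it does not overlap the
    // fields extracted below and needs no stripping.
    const unsigned code = pc[3];
    const unsigned size_log = code & 0xf;
    if (size_log > 4 && size_log != 0xf)
      return {0, false, false, false};  // Invalid size encoding.
    return {size_log, (code & 0x10) != 0, (code & 0x20) != 0, true};
  }

This also explains the asymmetric recovery path at the end of the last hunk:
on AArch64 the saved PC still points at the brk instruction and must be
advanced by 4 to resume, while on x86-64 no adjustment is needed because
execution resumes at the nopl, which is harmless.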