tsan: even if races between atomic and plain memory accesses are turned off
(report_atomic_races=0), still report races between atomic accesses and free().

llvm-svn: 174175
Dmitry Vyukov 2013-02-01 14:41:58 +00:00
parent 36a6dd04ef
commit 87c6bb9716
8 changed files with 101 additions and 12 deletions
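
For context, a minimal sketch of the kind of race that report_atomic_races=0 suppresses (illustrative, not part of the commit; TSAN_OPTIONS is the standard way to set tsan flags). The new tests below show the atomic-vs-free races that this change keeps reporting regardless of the flag:

// Illustrative sketch: an atomic/plain race that report_atomic_races=0 hides.
// Build and run: clang++ -fsanitize=thread race.cc && TSAN_OPTIONS=report_atomic_races=0 ./a.out
#include <pthread.h>

static int g;

void *Thread(void *p) {
  (void)p;
  __atomic_fetch_add(&g, 1, __ATOMIC_SEQ_CST);  // atomic side of the race
  return 0;
}

int main() {
  pthread_t t;
  pthread_create(&t, 0, Thread, 0);
  g = 42;  // plain side: atomic-vs-plain race, suppressed under the flag
  pthread_join(t, 0);
}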

View File

@@ -0,0 +1,19 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include <pthread.h>
#include <unistd.h>

void *Thread(void *a) {
  __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
  return 0;
}

int main() {
  int *a = new int(0);
  pthread_t t;
  pthread_create(&t, 0, Thread, a);
  sleep(1);  // let the atomic increment happen first, then free the memory
  delete a;
  pthread_join(t, 0);
}

// CHECK: WARNING: ThreadSanitizer: data race

View File

@@ -0,0 +1,19 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include <pthread.h>
#include <unistd.h>

void *Thread(void *a) {
  sleep(1);  // let main() free the memory first, then access it atomically
  __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
  return 0;
}

int main() {
  int *a = new int(0);
  pthread_t t;
  pthread_create(&t, 0, Thread, a);
  delete a;
  pthread_join(t, 0);
}

// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
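
The two tests above are mirror images: in the first, the atomic increment lands before the delete, so the free's shadow write is the racing access and tsan prints a data-race report; in the second, the atomic increment runs after the delete and hits shadow already marked freed, so it is reported as a heap-use-after-free. Both reports survive report_atomic_races=0, which is the point of this change.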

View File

@@ -0,0 +1,24 @@
// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>

void *Thread(void *x) {
  pthread_mutex_lock((pthread_mutex_t*)x);
  pthread_mutex_unlock((pthread_mutex_t*)x);
  return 0;
}

int main() {
  pthread_mutex_t Mtx;
  pthread_mutex_init(&Mtx, 0);
  pthread_t t;
  pthread_create(&t, 0, Thread, &Mtx);
  sleep(1);  // no synchronization with Thread(): the destroy races with its use
  pthread_mutex_destroy(&Mtx);
  pthread_join(t, 0);
  return 0;
}

// CHECK: WARNING: ThreadSanitizer: data race
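
This test exercises the mutex path: pthread_mutex_destroy ends up in the runtime's MutexDestroy, whose plain write to the mutex memory is now bracketed with thr->is_freeing (see the MutexDestroy hunk below), so destroying a mutex that another thread is still locking is reported even with atomic races disabled.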

View File

@@ -250,8 +250,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
-    if (flags()->report_atomic_races)
-      MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
     return *a;
   }
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
@@ -260,8 +259,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   T v = *a;
   s->mtx.ReadUnlock();
   __sync_synchronize();
-  if (flags()->report_atomic_races)
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
   return v;
 }

@@ -269,8 +267,7 @@ template<typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
   CHECK(IsStoreOrder(mo));
-  if (flags()->report_atomic_races)
-    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -292,8 +289,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,

 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  if (flags()->report_atomic_races)
-    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
@@ -353,8 +349,7 @@ template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
-  if (flags()->report_atomic_races)
-    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
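
The net effect of these hunks: MemoryReadAtomic/MemoryWriteAtomic are now invoked unconditionally, so every atomic access is recorded (and marked atomic) in the shadow; the report_atomic_races check moves from the instrumentation points to report time, where ReportRace (last hunk below) can tell atomic-vs-plain races apart from atomic-vs-free ones.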

View File

@@ -535,7 +535,10 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
 }

 void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+  CHECK_EQ(thr->is_freeing, false);
+  thr->is_freeing = true;
   MemoryAccessRange(thr, pc, addr, size, true);
+  thr->is_freeing = false;
   Shadow s(thr->fast_state);
   s.ClearIgnoreBit();
   s.MarkAsFreed();
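
With thr->is_freeing set around the range access, any race detected while free() scans the freed block is attributable to the free itself; ReportRace's new filter relies on exactly this.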

View File

@@ -280,6 +280,10 @@ class Shadow : public FastState {
     x_ |= kFreedBit;
   }

+  bool IsFreed() const {
+    return x_ & kFreedBit;
+  }
+
   bool GetFreedAndReset() {
     bool res = x_ & kFreedBit;
     x_ &= ~kFreedBit;
@@ -372,6 +376,7 @@ struct ThreadState {
   int in_rtl;
   bool in_symbolizer;
   bool is_alive;
+  bool is_freeing;
   const uptr stk_addr;
   const uptr stk_size;
   const uptr tls_addr;
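
IsFreed exposes the kFreedBit that MarkAsFreed sets in a shadow word, and the new per-thread is_freeing flag marks accesses performed on behalf of free() (or mutex creation/destruction); together they give ReportRace enough state to recognize an atomic-vs-free race.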

View File

@@ -26,8 +26,12 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexCreate);
-  if (!linker_init && IsAppMem(addr))
+  if (!linker_init && IsAppMem(addr)) {
+    CHECK(!thr->is_freeing);
+    thr->is_freeing = true;
     MemoryWrite(thr, pc, addr, kSizeLog1);
+    thr->is_freeing = false;
+  }
   SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   s->is_rw = rw;
   s->is_recursive = recursive;
@@ -49,8 +53,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
   if (s == 0)
     return;
-  if (IsAppMem(addr))
+  if (IsAppMem(addr)) {
+    CHECK(!thr->is_freeing);
+    thr->is_freeing = true;
     MemoryWrite(thr, pc, addr, kSizeLog1);
+    thr->is_freeing = false;
+  }
   if (flags()->report_destroy_locked
       && s->owner_tid != SyncVar::kInvalidTid
       && !s->is_broken) {
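
MutexCreate and MutexDestroy reuse the same marking: their plain MemoryWrite to the mutex memory runs with is_freeing set, so a mutex being initialized or destroyed while another thread operates on it is treated like a free for reporting purposes (this is what the third test above checks).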

View File

@@ -531,11 +531,27 @@ static bool IsJavaNonsense(const ReportDesc *rep) {
   return false;
 }

+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+  Shadow s0(thr->racy_state[0]);
+  Shadow s1(thr->racy_state[1]);
+  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+  if (!s0.IsAtomic() && !s1.IsAtomic())
+    return true;
+  if (s0.IsAtomic() && s1.IsFreed())
+    return true;
+  if (s1.IsAtomic() && thr->is_freeing)
+    return true;
+  return false;
+}
+
 void ReportRace(ThreadState *thr) {
   if (!flags()->report_bugs)
     return;
   ScopedInRtl in_rtl;
+
+  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+    return;
+
   if (thr->in_signal_handler)
     Printf("ThreadSanitizer: printing report from signal handler."
         " Can crash or hang.\n");