sanitizer_common: remove BlockingMutex and RWMutex

Remove the legacy typedefs and use Mutex/Lock types directly.

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D107043
Dmitry Vyukov  2021-07-29 09:44:48 +02:00
commit 4e15ee2867 (parent b8f4232823)
16 changed files with 44 additions and 52 deletions
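The renames are mechanical. A minimal sketch of the mapping, assuming sanitizer_common/sanitizer_mutex.h as it looks after this change (the Writer/Reader helpers are illustrative, not from the diff):

    // Old name                        -> replacement
    // BlockingMutex, RWMutex          -> Mutex (a reader-writer mutex)
    // BlockingMutexLock, RWMutexLock  -> Lock (GenericScopedLock<Mutex>)
    // RWMutexReadLock                 -> ReadLock (GenericScopedReadLock<Mutex>)
    #include "sanitizer_common/sanitizer_mutex.h"

    namespace __sanitizer {

    Mutex mu;  // one type now covers both exclusive and shared locking

    void Writer() {
      Lock l(&mu);  // exclusive; released when l goes out of scope
      // ... mutate shared state ...
    }

    void Reader() {
      ReadLock l(&mu);  // shared; multiple readers may enter concurrently
      // ... read shared state ...
    }

    }  // namespace __sanitizer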

compiler-rt/lib/hwasan/hwasan_report.cpp

@@ -37,7 +37,7 @@ namespace __hwasan {
 class ScopedReport {
  public:
   ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     error_message_ptr_ = fatal ? &error_message_ : nullptr;
     ++hwasan_report_count;
   }
@@ -45,7 +45,7 @@ class ScopedReport {
   ~ScopedReport() {
     void (*report_cb)(const char *);
     {
-      BlockingMutexLock lock(&error_message_lock_);
+      Lock lock(&error_message_lock_);
       report_cb = error_report_callback_;
       error_message_ptr_ = nullptr;
     }
@@ -61,7 +61,7 @@ class ScopedReport {
   }

   static void MaybeAppendToErrorMessage(const char *msg) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     if (!error_message_ptr_)
       return;
     uptr len = internal_strlen(msg);
@@ -72,7 +72,7 @@ class ScopedReport {
   }

   static void SetErrorReportCallback(void (*callback)(const char *)) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     error_report_callback_ = callback;
   }
@@ -82,12 +82,12 @@ class ScopedReport {
   bool fatal;

   static InternalMmapVector<char> *error_message_ptr_;
-  static BlockingMutex error_message_lock_;
+  static Mutex error_message_lock_;
   static void (*error_report_callback_)(const char *);
 };

 InternalMmapVector<char> *ScopedReport::error_message_ptr_;
-BlockingMutex ScopedReport::error_message_lock_;
+Mutex ScopedReport::error_message_lock_;
 void (*ScopedReport::error_report_callback_)(const char *);

 // If there is an active ScopedReport, append to its error message.
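Note the idiom in ~ScopedReport above: the callback pointer is copied while error_message_lock_ is held, but invoked only after the scoped lock's block closes, so the user callback never runs under the lock. A standalone sketch of that pattern (hypothetical names, assuming sanitizer_mutex.h):

    static Mutex mu;
    static void (*callback)(const char *);

    void Fire(const char *msg) {
      void (*cb)(const char *);
      {
        Lock lock(&mu);  // scoped: released at the closing brace
        cb = callback;   // read the shared pointer under the lock
      }
      if (cb)
        cb(msg);  // call outside the lock; the callback may re-enter reporting
    }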

compiler-rt/lib/msan/msan_interceptors.cpp

@@ -1092,7 +1092,7 @@ struct MSanAtExitRecord {
 };

 struct InterceptorContext {
-  BlockingMutex atexit_mu;
+  Mutex atexit_mu;
   Vector<struct MSanAtExitRecord *> AtExitStack;

   InterceptorContext()
@@ -1108,7 +1108,7 @@ InterceptorContext *interceptor_ctx() {
 void MSanAtExitWrapper() {
   MSanAtExitRecord *r;
   {
-    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+    Lock l(&interceptor_ctx()->atexit_mu);

     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
     r = interceptor_ctx()->AtExitStack[element];
@@ -1159,7 +1159,7 @@ static int setup_at_exit_wrapper(void(*f)(), void *arg, void *dso) {
   // NetBSD does not preserve the 2nd argument if dso is equal to 0
   // Store ctx in a local stack-like structure

-  BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+  Lock l(&interceptor_ctx()->atexit_mu);
   res = REAL(__cxa_atexit)((void (*)(void *a))MSanAtExitWrapper, 0, 0);
   if (!res) {

compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h

@@ -56,7 +56,7 @@ class AddrHashMap {
   static const uptr kBucketSize = 3;

   struct Bucket {
-    RWMutex mtx;
+    Mutex mtx;
     atomic_uintptr_t add;
     Cell cells[kBucketSize];
   };
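Mutex is itself a reader-writer lock, so dropping RWMutex changes nothing for this map: lookups can still take a shared lock on the bucket and only inserts need exclusive access. A simplified sketch of that access pattern (hypothetical Access helper; the real logic lives in sanitizer_addrhashmap.h):

    void Access(Bucket *b, uptr addr) {
      {
        ReadLock l(&b->mtx);  // fast path: concurrent readers scan the cells
        // ... return if addr is already present in b->cells ...
      }
      Lock l(&b->mtx);  // slow path: exclusive, to claim a free cell
      // ... re-check under the write lock, then insert addr ...
    }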

compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h

@@ -161,7 +161,7 @@ class SizeClassAllocator64 {
   void ForceReleaseToOS() {
     MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
-      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+      Lock l(&GetRegionInfo(class_id)->mutex);
       MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }
@@ -178,7 +178,7 @@ class SizeClassAllocator64 {
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
     CompactPtrT *free_array = GetFreeArray(region_beg);

-    BlockingMutexLock l(&region->mutex);
+    Lock l(&region->mutex);
     uptr old_num_chunks = region->num_freed_chunks;
     uptr new_num_freed_chunks = old_num_chunks + n_chunks;
     // Failure to allocate free array space while releasing memory is non
@@ -204,7 +204,7 @@ class SizeClassAllocator64 {
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
     CompactPtrT *free_array = GetFreeArray(region_beg);

-    BlockingMutexLock l(&region->mutex);
+    Lock l(&region->mutex);
 #if SANITIZER_WINDOWS
     /* On Windows unmapping of memory during __sanitizer_purge_allocator is
     explicit and immediate, so unmapped regions must be explicitly mapped back
@@ -665,7 +665,7 @@ class SizeClassAllocator64 {
   };

   struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex mutex;
+    Mutex mutex;
     uptr num_freed_chunks;   // Number of elements in the freearray.
     uptr mapped_free_array;  // Bytes mapped for freearray.
     uptr allocated_user;     // Bytes allocated for user memory.

compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp

@@ -89,7 +89,7 @@ class TracePcGuardController final {
   }

   void Dump() {
-    BlockingMutexLock locked(&setup_lock_);
+    Lock locked(&setup_lock_);
     if (array_) {
       CHECK_NE(vmo_, ZX_HANDLE_INVALID);
@@ -125,7 +125,7 @@ class TracePcGuardController final {
   size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }

   u32 Setup(u32 num_guards) {
-    BlockingMutexLock locked(&setup_lock_);
+    Lock locked(&setup_lock_);
     DCHECK(common_flags()->coverage);

     if (next_index_ == 0) {

compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp

@@ -22,7 +22,7 @@ LibIgnore::LibIgnore(LinkerInitialized) {
 }

 void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
-  BlockingMutexLock lock(&mutex_);
+  Lock lock(&mutex_);
   if (count_ >= kMaxLibs) {
     Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
            kMaxLibs);
@@ -36,7 +36,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
 }

 void LibIgnore::OnLibraryLoaded(const char *name) {
-  BlockingMutexLock lock(&mutex_);
+  Lock lock(&mutex_);
   // Try to match suppressions with symlink target.
   InternalMmapVector<char> buf(kMaxPathLength);
   if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&

compiler-rt/lib/sanitizer_common/sanitizer_libignore.h

@@ -77,7 +77,7 @@ class LibIgnore {
   LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];

   // Cold part:
-  BlockingMutex mutex_;
+  Mutex mutex_;
   uptr count_;
   Lib libs_[kMaxLibs];
   bool track_instrumented_libs_;

compiler-rt/lib/sanitizer_common/sanitizer_mutex.h

@@ -372,15 +372,7 @@ class SCOPED_LOCK GenericScopedReadLock {
   void operator=(const GenericScopedReadLock &) = delete;
 };

-// TODO: Temporary measure for incremental migration.
-// These typedefs should be removed and all uses renamed to Mutex.
-typedef Mutex BlockingMutex;
-typedef Mutex RWMutex;
 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-typedef GenericScopedLock<RWMutex> RWMutexLock;
-typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
 typedef GenericScopedLock<Mutex> Lock;
 typedef GenericScopedReadLock<Mutex> ReadLock;
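Only the Mutex-based aliases survive. For reference, a simplified sketch of the RAII wrapper these aliases instantiate (the real GenericScopedLock in sanitizer_mutex.h additionally carries thread-safety-analysis annotations):

    // Lock in the constructor, unlock in the destructor, so a scoped
    // `Lock l(&mu);` can never leak the lock on any exit path.
    template <typename MutexType>
    class GenericScopedLock {
     public:
      explicit GenericScopedLock(MutexType *mu) : mu_(mu) { mu_->Lock(); }
      ~GenericScopedLock() { mu_->Unlock(); }

      GenericScopedLock(const GenericScopedLock &) = delete;
      void operator=(const GenericScopedLock &) = delete;

     private:
      MutexType *mu_;
    };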

compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp

@@ -108,7 +108,7 @@ struct TracerThreadArgument {
   void *callback_argument;
   // The tracer thread waits on this mutex while the parent finishes its
   // preparations.
-  BlockingMutex mutex;
+  Mutex mutex;
   // Tracer thread signals its completion by setting done.
   atomic_uintptr_t done;
   uptr parent_pid;
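The comments describe a plain lock handshake: the parent holds the mutex while it prepares, and the tracer blocks on it until the parent is done. A standalone sketch of the idea (hypothetical names; not the actual stop-the-world code):

    struct TracerArg {
      Mutex mutex;
      atomic_uintptr_t done;
    };

    void ParentThread(TracerArg *arg) {
      arg->mutex.Lock();    // hold the lock before starting the tracer
      // ... spawn the tracer thread, finish preparations ...
      arg->mutex.Unlock();  // let the tracer proceed
      while (!atomic_load(&arg->done, memory_order_acquire)) {
        // ... wait for the tracer to signal completion ...
      }
    }

    void TracerThread(TracerArg *arg) {
      arg->mutex.Lock();    // blocks until the parent is ready
      arg->mutex.Unlock();
      // ... run the stop-the-world callback ...
      atomic_store(&arg->done, 1, memory_order_release);
    }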

compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp

@@ -68,7 +68,7 @@ class SuspendedThreadsListNetBSD final : public SuspendedThreadsList {
 struct TracerThreadArgument {
   StopTheWorldCallback callback;
   void *callback_argument;
-  BlockingMutex mutex;
+  Mutex mutex;
   atomic_uintptr_t done;
   uptr parent_pid;
 };

compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h

@@ -158,7 +158,7 @@ class Symbolizer final {
   // its method should be protected by |mu_|.
   class ModuleNameOwner {
    public:
-    explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+    explicit ModuleNameOwner(Mutex *synchronized_by)
         : last_match_(nullptr), mu_(synchronized_by) {
       storage_.reserve(kInitialCapacity);
     }
@@ -169,7 +169,7 @@ class Symbolizer final {
     InternalMmapVector<const char*> storage_;
     const char *last_match_;

-    BlockingMutex *mu_;
+    Mutex *mu_;
   } module_names_;

   /// Platform-specific function for creating a Symbolizer object.
@@ -192,7 +192,7 @@ class Symbolizer final {
   // Mutex locked from public methods of |Symbolizer|, so that the internals
   // (including individual symbolizer tools and platform-specific methods) are
   // always synchronized.
-  BlockingMutex mu_;
+  Mutex mu_;
   IntrusiveList<SymbolizerTool> tools_;
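The comment above describes a monitor-style design: one mutex serializes every public entry point, so the unlocked private helpers may assume exclusive access. A minimal sketch of the pattern (hypothetical Facade class, assuming sanitizer_mutex.h):

    class Facade {
     public:
      // Every public method takes the same lock before touching internals.
      void DoA() {
        Lock l(&mu_);
        DoALocked();
      }
      void DoB() {
        Lock l(&mu_);
        DoBLocked();
      }

     private:
      // Private helpers run only with mu_ held and need no locking of their own.
      void DoALocked() { /* touch shared state freely */ }
      void DoBLocked() { /* touch shared state freely */ }
      Mutex mu_;
    };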

compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp

@@ -83,7 +83,7 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
 }

 SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   uptr module_offset;
   ModuleArch arch;
@@ -103,7 +103,7 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
 }

 bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   uptr module_offset;
   ModuleArch arch;
@@ -124,7 +124,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
 }

 bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   if (!FindModuleNameAndOffsetForAddress(
           addr, &module_name, &info->module_offset, &info->module_arch))
@@ -141,7 +141,7 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {

 bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
                                              uptr *module_address) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *internal_module_name = nullptr;
   ModuleArch arch;
   if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
@@ -154,7 +154,7 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
 }

 void Symbolizer::Flush() {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   for (auto &tool : tools_) {
     SymbolizerScope sym_scope(this);
     tool.Flush();
@@ -162,7 +162,7 @@ void Symbolizer::Flush() {
 }

 const char *Symbolizer::Demangle(const char *name) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   for (auto &tool : tools_) {
     SymbolizerScope sym_scope(this);
     if (const char *demangled = tool.Demangle(name))

compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp

@@ -119,7 +119,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,

 void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
                                         uptr *alive) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   if (total)
     *total = threads_.size();
   if (running) *running = running_threads_;
@@ -127,13 +127,13 @@ void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
 }

 uptr ThreadRegistry::GetMaxAliveThreads() {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   return max_alive_threads_;
 }

 u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
                                  void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   u32 tid = kInvalidTid;
   ThreadContextBase *tctx = QuarantinePop();
   if (tctx) {
@@ -179,7 +179,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
 }

 u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && cb(tctx, arg))
@@ -211,7 +211,7 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
 }

 void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -220,7 +220,7 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
 }

 void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && tctx->user_id == user_id &&
@@ -232,7 +232,7 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
 }

 void ThreadRegistry::DetachThread(u32 tid, void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   if (tctx->status == ThreadStatusInvalid) {
@@ -252,7 +252,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
   bool destroyed = false;
   do {
     {
-      BlockingMutexLock l(&mtx_);
+      ThreadRegistryLock l(this);
       ThreadContextBase *tctx = threads_[tid];
       CHECK_NE(tctx, 0);
       if (tctx->status == ThreadStatusInvalid) {
@@ -275,7 +275,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
 // thread before trying to create it, and then failed to actually
 // create it, and so never called StartThread.
 ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   CHECK_GT(alive_threads_, 0);
   alive_threads_--;
   ThreadContextBase *tctx = threads_[tid];
@@ -301,7 +301,7 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {

 void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
                                  void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   running_threads_++;
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
@@ -334,7 +334,7 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() {
 }

 void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_NE(tctx->status, ThreadStatusInvalid);
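Here the replacement is ThreadRegistryLock l(this) rather than a bare Lock: the registry exposes Lock()/Unlock() itself, so the registry object is the lockable type and callers outside the class can hold the same lock. A simplified sketch of that pattern (hypothetical Registry/RegistryLock names):

    // A class can be its own lockable object: expose Lock()/Unlock(),
    // then instantiate GenericScopedLock over the class itself.
    class Registry {
     public:
      void Lock() { mtx_.Lock(); }
      void Unlock() { mtx_.Unlock(); }

     private:
      Mutex mtx_;
    };

    typedef GenericScopedLock<Registry> RegistryLock;

    void Example(Registry *registry) {
      RegistryLock l(registry);  // calls registry->Lock(); unlocks on scope exit
      // ... operate on registry state ...
    }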

compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h

@@ -135,7 +135,7 @@ class MUTEX ThreadRegistry {
   const u32 thread_quarantine_size_;
   const u32 max_reuse_;

-  BlockingMutex mtx_;
+  Mutex mtx_;

   u64 total_threads_;  // Total number of created threads. May be greater than
                        // max_threads_ if contexts were reused.

compiler-rt/lib/tsan/dd/dd_rtl.cpp

@@ -38,7 +38,7 @@ static void PrintStackTrace(Thread *thr, u32 stk) {
 static void ReportDeadlock(Thread *thr, DDReport *rep) {
   if (rep == 0)
     return;
-  BlockingMutexLock lock(&ctx->report_mutex);
+  Lock lock(&ctx->report_mutex);
   Printf("==============================\n");
   Printf("WARNING: lock-order-inversion (potential deadlock)\n");
   for (int i = 0; i < rep->n; i++) {

compiler-rt/lib/tsan/dd/dd_rtl.h

@@ -42,7 +42,7 @@ typedef AddrHashMap<UserMutex, 31051> MutexHashMap;

 struct Context {
   DDetector *dd;

-  BlockingMutex report_mutex;
+  Mutex report_mutex;
   MutexHashMap mutex_map;
 };