[XRay] Add atomic fences around non-atomic reads and writes
Summary: We need these fences to ensure that other threads attempting to read bytes in the buffer will see the writes committed before the extents are updated. Without them, the writes may still be un-committed by the time the buffer extents counter is updated; the fences ensure that the records written into the log have completed by the time we observe the buffer extents from different threads.

Reviewers: mboerger

Subscribers: jfb, llvm-commits

Differential Revision: https://reviews.llvm.org/D54291

llvm-svn: 346474
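For context, the writer-side pattern in this change pairs a non-atomic copy with a release fence before the atomic extents update. Below is a minimal standalone sketch of that pattern using standard C++ <atomic> rather than the sanitizer-internal atomic_* wrappers; the simplified Buffer type, the fixed array size, and the function name writeRecord here are hypothetical, and bounds checks are omitted.

#include <atomic>
#include <cstddef>
#include <cstring>

// Hypothetical simplified buffer: raw bytes plus an atomic counter that
// tells readers how many bytes have been published.
struct Buffer {
  char Data[4096];
  std::atomic<std::size_t> Extents{0};
};

// Writer (assumed single-threaded per buffer, as in FDR mode): the record
// bytes are copied non-atomically, so a release fence must separate the
// copy from the extents update. Any thread that later observes the
// incremented extents with acquire semantics is then guaranteed to also
// see the copied bytes.
void writeRecord(Buffer &B, const void *Record, std::size_t Size) {
  std::size_t Offset = B.Extents.load(std::memory_order_relaxed);
  std::memcpy(B.Data + Offset, Record, Size);
  std::atomic_thread_fence(std::memory_order_release);
  B.Extents.fetch_add(Size, std::memory_order_acq_rel);
}

Without the release fence, on weakly ordered architectures the memcpy stores could become visible after the counter update, letting a reader observe an extent that covers bytes which are not yet committed.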
@@ -63,6 +63,10 @@ class FDRLogWriter {
   template <class T> void writeRecord(const T &R) {
     internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
     NextRecord += sizeof(T);
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(T), memory_order_acq_rel);
   }

@@ -89,6 +93,10 @@ public:
     constexpr auto Size = sizeof(MetadataRecord) * N;
     internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
     NextRecord += Size;
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, Size, memory_order_acq_rel);
     return Size;
   }
@@ -129,6 +137,10 @@ public:
     NextRecord = reinterpret_cast<char *>(internal_memcpy(
                      NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                  sizeof(A);
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(R) + sizeof(A),
                      memory_order_acq_rel);
     return true;
@@ -149,6 +161,11 @@ public:
     NextRecord = reinterpret_cast<char *>(
                      internal_memcpy(NextRecord, Event, EventSize)) +
                  EventSize;
+
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(R) + EventSize,
                      memory_order_acq_rel);
     return true;
@@ -167,6 +184,11 @@ public:
     NextRecord = reinterpret_cast<char *>(
                      internal_memcpy(NextRecord, Event, EventSize)) +
                  EventSize;
+
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, EventSize, memory_order_acq_rel);
     return true;
   }
@@ -243,6 +243,13 @@ XRayBuffer fdrIterator(const XRayBuffer B) {
   // out to disk. The difference here would be that we still write "empty"
   // buffers, or at least go through the iterators faithfully to let the
   // handlers see the empty buffers in the queue.
+  //
+  // We need this atomic fence here to ensure that writes happening to the
+  // buffer have been committed before we load the extents atomically. Because
+  // the buffer is not explicitly synchronised across threads, we rely on the
+  // fence ordering to ensure that writes we expect to have been completed
+  // before the fence are fully committed before we read the extents.
+  atomic_thread_fence(memory_order_acquire);
   auto BufferSize = atomic_load(&It->Extents, memory_order_acquire);
   SerializedBufferSize = BufferSize + sizeof(MetadataRecord);
   CurrentBuffer = allocateBuffer(SerializedBufferSize);