Auto merge of #2601 - DrMeepster:windows_init_once, r=RalfJung

Add Windows InitOnceBeginInitialize and InitOnceComplete shims

Fixes #2595
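
For context, this is the Win32 one-time-initialization API the new shims implement, condensed from the test added at the bottom of this PR: the first thread to call `InitOnceBeginInitialize` gets `pending == TRUE` and runs the initializer; every other caller blocks until `InitOnceComplete` publishes the result. A minimal sketch (the `extern` signatures are copied from the new test):

use std::ffi::c_void;
use std::ptr::null_mut;

extern "system" {
    fn InitOnceBeginInitialize(
        init: *mut *mut c_void,
        flags: u32,
        pending: *mut i32,
        context: *mut c_void,
    ) -> i32;
    fn InitOnceComplete(init: *mut *mut c_void, flags: u32, context: *mut c_void) -> i32;
}

fn main() {
    // An INIT_ONCE is a pointer-sized, zero-initialized piece of data.
    let mut init_once: *mut c_void = null_mut();
    let mut pending = 0;
    unsafe {
        assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), 1);
        assert_eq!(pending, 1); // we won the race: run the one-time initialization
        assert_eq!(InitOnceComplete(&mut init_once, 0, null_mut()), 1);
    }
}
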
bors 2022-10-20 22:03:17 +00:00
commit 53e3577486
12 changed files with 616 additions and 194 deletions

View File

@@ -1 +1 @@
538f118da1409759ba198acc0ff62070bc6d2dce
edabf59ca4646b3fc1a961c26431215001043f6a

View File

@@ -0,0 +1,204 @@
use std::collections::VecDeque;
use std::num::NonZeroU32;
use rustc_index::vec::Idx;
use super::sync::EvalContextExtPriv;
use super::thread::MachineCallback;
use super::vector_clock::VClock;
use crate::*;
declare_id!(InitOnceId);
/// A thread waiting on an InitOnce object.
struct InitOnceWaiter<'mir, 'tcx> {
/// The thread that is waiting.
thread: ThreadId,
/// The callback that should be executed after the thread has been woken up.
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>,
}
impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("InitOnce")
.field("thread", &self.thread)
.field("callback", &"dyn MachineCallback")
.finish()
}
}
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
/// The current status of a one-time initialization.
pub enum InitOnceStatus {
#[default]
Uninitialized,
Begun,
Complete,
}
/// The one time initialization state.
#[derive(Default, Debug)]
pub(super) struct InitOnce<'mir, 'tcx> {
status: InitOnceStatus,
waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
data_race: VClock,
}
impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for waiter in self.waiters.iter() {
waiter.callback.visit_tags(visit);
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, InitOnceId> {
let this = self.eval_context_mut();
this.init_once_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
/// Provides the closure with the next `InitOnceId`. Creates that InitOnce if the closure returns
/// `None`, otherwise returns the value from the closure.
#[inline]
fn init_once_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, InitOnceId>
where
F: FnOnce(
&mut MiriInterpCx<'mir, 'tcx>,
InitOnceId,
) -> InterpResult<'tcx, Option<InitOnceId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.init_onces.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.init_onces.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}
#[inline]
fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus {
let this = self.eval_context_ref();
this.machine.threads.sync.init_onces[id].status
}
/// Put the thread into the queue waiting for the initialization.
#[inline]
fn init_once_enqueue_and_block(
&mut self,
id: InitOnceId,
thread: ThreadId,
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>,
) {
let this = self.eval_context_mut();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once");
init_once.waiters.push_back(InitOnceWaiter { thread, callback });
this.block_thread(thread);
}
/// Begin initializing this InitOnce. Must only be called after checking that it is currently
/// uninitialized.
#[inline]
fn init_once_begin(&mut self, id: InitOnceId) {
let this = self.eval_context_mut();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Uninitialized,
"begining already begun or complete init once"
);
init_once.status = InitOnceStatus::Begun;
}
#[inline]
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Begun,
"completing already complete or uninit init once"
);
init_once.status = InitOnceStatus::Complete;
// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
}
// Wake up everyone.
// Need to take the queue to avoid having `this` be borrowed multiple times.
for waiter in std::mem::take(&mut init_once.waiters) {
// End of the wait happens-before woken-up thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(
&this.machine.threads.sync.init_onces[id].data_race,
waiter.thread,
);
}
this.unblock_thread(waiter.thread);
// Call callback, with the woken-up thread as `current`.
this.set_active_thread(waiter.thread);
waiter.callback.call(this)?;
this.set_active_thread(current_thread);
}
Ok(())
}
#[inline]
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Begun,
"failing already completed or uninit init once"
);
// Each failure happens-before the end of the wait.
// FIXME: should this really induce synchronization? If we think of it as a lock, then yes,
// but the docs don't talk about such details.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
}
// Wake up one waiting thread, so they can go ahead and try to init this.
if let Some(waiter) = init_once.waiters.pop_front() {
// End of the wait happens-before woken-up thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(
&this.machine.threads.sync.init_onces[id].data_race,
waiter.thread,
);
}
this.unblock_thread(waiter.thread);
// Call callback, with the woken-up thread as `current`.
this.set_active_thread(waiter.thread);
waiter.callback.call(this)?;
this.set_active_thread(current_thread);
} else {
// Nobody there to take this, so go back to 'uninit'
init_once.status = InitOnceStatus::Uninitialized;
}
Ok(())
}
}
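
The `validate_lock_release`/`validate_lock_acquire` calls above make each completion synchronize-with every woken waiter. A rough sketch of the user-visible guarantee being modeled, using ordinary atomics rather than Miri's vector clocks (the `DATA`/`READY` names are invented for illustration):

use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::thread;

static DATA: AtomicU32 = AtomicU32::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let initializer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed); // the one-time initialization
        READY.store(true, Ordering::Release); // like `init_once_complete`: a release
    });
    // A waiter that observes completion does so with acquire semantics,
    // so the initialized data is guaranteed to be visible to it.
    while !READY.load(Ordering::Acquire) {
        thread::yield_now();
    }
    assert_eq!(DATA.load(Ordering::Relaxed), 42);
    initializer.join().unwrap();
}
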

View File

@@ -1,6 +1,8 @@
pub mod data_race;
mod range_object_map;
#[macro_use]
pub mod sync;
pub mod init_once;
pub mod thread;
mod vector_clock;
pub mod weak_memory;

View File

@@ -7,9 +7,15 @@ use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Idx, IndexVec};
use super::init_once::InitOnce;
use super::vector_clock::VClock;
use crate::*;
pub trait SyncId {
fn from_u32(id: u32) -> Self;
fn to_u32(&self) -> u32;
}
/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
@@ -21,11 +27,14 @@ macro_rules! declare_id {
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct $name(NonZeroU32);
impl $name {
impl SyncId for $name {
// Panics if `id == 0`.
pub fn from_u32(id: u32) -> Self {
fn from_u32(id: u32) -> Self {
Self(NonZeroU32::new(id).unwrap())
}
fn to_u32(&self) -> u32 {
self.0.get()
}
}
impl Idx for $name {
@@ -151,16 +160,58 @@ struct FutexWaiter {
/// The state of all synchronization variables.
#[derive(Default, Debug)]
pub(crate) struct SynchronizationState {
pub(crate) struct SynchronizationState<'mir, 'tcx> {
mutexes: IndexVec<MutexId, Mutex>,
rwlocks: IndexVec<RwLockId, RwLock>,
condvars: IndexVec<CondvarId, Condvar>,
futexes: FxHashMap<u64, Futex>,
pub(super) init_onces: IndexVec<InitOnceId, InitOnce<'mir, 'tcx>>,
}
impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for init_once in self.init_onces.iter() {
init_once.visit_tags(visit);
}
}
}
// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
crate::MiriInterpCxExt<'mir, 'tcx>
{
#[inline]
// Miri sync structures contain zero-initialized IDs stored at some offset behind a pointer.
fn get_or_create_id<Id: SyncId>(
&mut self,
next_id: Id,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, Option<Id>> {
let this = self.eval_context_mut();
let value_place =
this.deref_operand_and_offset(lock_op, offset, this.machine.layouts.u32)?;
let (old, success) = this
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, this.machine.layouts.u32),
Scalar::from_u32(next_id.to_u32()),
AtomicRwOrd::Relaxed, // deliberately *no* synchronization
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(Id::from_u32(old.to_u32().expect("layout is u32")))
})
}
/// Take a reader out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
@@ -210,11 +261,31 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// situations.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
/// Create state for a new mutex.
fn mutex_create(&mut self) -> MutexId {
fn mutex_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
this.machine.threads.sync.mutexes.push(Default::default())
this.mutex_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
fn rwlock_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, RwLockId> {
let this = self.eval_context_mut();
this.rwlock_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
fn condvar_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, CondvarId> {
let this = self.eval_context_mut();
this.condvar_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
#[inline]
@@ -301,8 +372,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[inline]
/// Put the thread into the queue waiting for the mutex.
#[inline]
fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
@@ -310,16 +381,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.block_thread(thread);
}
#[inline]
/// Create state for a new read write lock.
fn rwlock_create(&mut self) -> RwLockId {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks.push(Default::default())
}
#[inline]
/// Provides the closure with the next `RwLockId`. Creates that RwLock if the closure returns
/// `None`, otherwise returns the value from the closure.
#[inline]
fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option<RwLockId>>,
@@ -349,8 +413,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
rwlock.writer.is_some() || rwlock.readers.is_empty().not()
}
#[inline]
/// Check if write locked.
#[inline]
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
@@ -407,8 +471,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
true
}
#[inline]
/// Put the reader in the queue waiting for the lock and block it.
#[inline]
fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
@@ -416,8 +480,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.block_thread(reader);
}
#[inline]
/// Lock by setting the writer that owns the lock.
#[inline]
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
@@ -429,8 +493,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[inline]
/// Try to unlock by removing the writer.
#[inline]
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
let this = self.eval_context_mut();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
@@ -467,8 +531,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[inline]
/// Put the writer in the queue waiting for the lock.
#[inline]
fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
@@ -476,16 +540,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.block_thread(writer);
}
#[inline]
/// Create state for a new conditional variable.
fn condvar_create(&mut self) -> CondvarId {
let this = self.eval_context_mut();
this.machine.threads.sync.condvars.push(Default::default())
}
#[inline]
/// Provides the closure with the next `CondvarId`. Creates that Condvar if the closure returns
/// `None`, otherwise returns the value from the closure.
#[inline]
fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
where
F: FnOnce(
@@ -504,8 +561,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[inline]
/// Is the condition variable awaited?
#[inline]
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
let this = self.eval_context_mut();
!this.machine.threads.sync.condvars[id].waiters.is_empty()
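
The new `get_or_create_id` helper above factors out the scheme the per-primitive functions used to duplicate: the sync object's memory is zero-initialized, so 0 serves as the "no ID assigned yet" sentinel, and a relaxed compare-exchange installs the next free ID at most once. A standalone sketch of that scheme, simplified to plain atomics rather than Miri's interpreter types:

use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};

fn get_or_create_id(word: &AtomicU32, next_id: NonZeroU32) -> NonZeroU32 {
    match word.compare_exchange(
        0,                 // sentinel: no ID assigned yet
        next_id.get(),
        Ordering::Relaxed, // deliberately *no* synchronization, as in the diff
        Ordering::Relaxed,
    ) {
        Ok(_) => next_id,                          // we installed the fresh ID
        Err(old) => NonZeroU32::new(old).unwrap(), // someone else already did
    }
}

fn main() {
    let word = AtomicU32::new(0);
    let id = NonZeroU32::new(1).unwrap();
    assert_eq!(get_or_create_id(&word, id), id); // first call installs the ID
    assert_eq!(get_or_create_id(&word, id), id); // later calls read it back
}
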

View File

@@ -30,8 +30,7 @@ pub enum SchedulingAction {
Stop,
}
/// Timeout callbacks can be created by synchronization primitives to tell the
/// scheduler that they should be called once some period of time passes.
/// Trait for callbacks that can be executed when some event happens, such as after a timeout.
pub trait MachineCallback<'mir, 'tcx>: VisitTags {
fn call(&self, ecx: &mut InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>) -> InterpResult<'tcx>;
}
@@ -269,7 +268,7 @@ pub struct ThreadManager<'mir, 'tcx> {
threads: IndexVec<ThreadId, Thread<'mir, 'tcx>>,
/// This field is pub(crate) because the synchronization primitives
/// (`crate::sync`) need a way to access it.
pub(crate) sync: SynchronizationState,
pub(crate) sync: SynchronizationState<'mir, 'tcx>,
/// A mapping from a thread-local static to an allocation id of a thread
/// specific allocation.
thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), Pointer<Provenance>>>,
@@ -303,7 +302,7 @@ impl VisitTags for ThreadManager<'_, '_> {
timeout_callbacks,
active_thread: _,
yield_active_thread: _,
sync: _,
sync,
} = self;
for thread in threads {
@@ -315,6 +314,7 @@ impl VisitTags for ThreadManager<'_, '_> {
for callback in timeout_callbacks.values() {
callback.callback.visit_tags(visit);
}
sync.visit_tags(visit);
}
}

View File

@@ -83,34 +83,28 @@ pub use crate::shims::EvalContextExt as _;
pub use crate::clock::{Clock, Instant};
pub use crate::concurrency::{
data_race::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd,
EvalContextExt as DataRaceEvalContextExt,
},
sync::{CondvarId, EvalContextExt as SyncEvalContextExt, MutexId, RwLockId},
thread::{
EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager,
ThreadState, Time,
},
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time},
};
pub use crate::diagnostics::{
report_error, EvalContextExt as DiagnosticsEvalContextExt, NonHaltingDiagnostic,
TerminationInfo,
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
};
pub use crate::eval::{
create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith,
};
pub use crate::helpers::{CurrentSpan, EvalContextExt as HelpersEvalContextExt};
pub use crate::helpers::{CurrentSpan, EvalContextExt as _};
pub use crate::intptrcast::ProvenanceMode;
pub use crate::machine::{
AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind, Provenance,
ProvenanceExtra, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::operator::EvalContextExt as _;
pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
CallId, EvalContextExt as StackedBorEvalContextExt, Item, Permission, SbTag, Stack, Stacks,
CallId, EvalContextExt as _, Item, Permission, SbTag, Stack, Stacks,
};
pub use crate::tag_gc::{EvalContextExt as _, VisitTags};
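
A side effect of this reshuffle: all the extension traits are now re-exported anonymously. `pub use ... as _` brings a trait's methods into scope for method resolution without binding its name, which is what lets many traits all called `EvalContextExt` coexist in the prelude. A minimal sketch of the pattern (names invented for illustration):

mod ext {
    pub trait EvalContextExt {
        fn helper(&self) -> u32 {
            42
        }
    }
    impl EvalContextExt for () {}
}

use ext::EvalContextExt as _; // methods usable, name not imported

fn main() {
    assert_eq!(().helper(), 42);
}
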

View File

@@ -274,7 +274,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
this.update_environ()?;
}
Ok(Scalar::from_i32(1)) // return non-zero on success
Ok(this.eval_windows("c", "TRUE")?)
} else {
let value = this.read_os_str_from_wide_str(value_ptr)?;
let var_ptr = alloc_env_var_as_wide_str(&name, &value, this)?;
@@ -282,7 +282,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(Scalar::from_i32(1)) // return non-zero on success
Ok(this.eval_windows("c", "TRUE")?)
}
}
@@ -411,14 +411,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.reject_in_isolation("`SetCurrentDirectoryW`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied)?;
return Ok(Scalar::from_i32(0));
return this.eval_windows("c", "FALSE");
}
match env::set_current_dir(path) {
Ok(()) => Ok(Scalar::from_i32(1)),
Ok(()) => this.eval_windows("c", "TRUE"),
Err(e) => {
this.set_last_error_from_io_error(e.kind())?;
Ok(Scalar::from_i32(0))
this.eval_windows("c", "FALSE")
}
}
}

View File

@@ -19,6 +19,10 @@ use crate::*;
/// in `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
const MUTEX_ID_OFFSET: u64 = 4;
const RWLOCK_ID_OFFSET: u64 = 4;
const CONDVAR_ID_OFFSET: u64 = 4;
fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
kind: Scalar<Provenance>,
@@ -108,33 +112,6 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
)
}
fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, MutexId> {
let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;
ecx.mutex_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
@@ -149,33 +126,6 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
}
fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;
ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
// pthread_condattr_t
// Our chosen memory layout for emulation (does not have to match the platform layout!):
@@ -232,33 +182,6 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
)
}
fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, CondvarId> {
let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;
ecx.condvar_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
fn cond_get_clock_id<'mir, 'tcx: 'mir>(
ecx: &MiriInterpCx<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Provenance>,
@@ -435,7 +358,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.mutex_is_locked(id) {
@@ -475,7 +398,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.mutex_is_locked(id) {
@@ -511,7 +434,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
@@ -545,7 +468,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
if this.mutex_is_locked(id) {
throw_ub_format!("destroyed a locked mutex");
@@ -568,7 +491,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@@ -586,7 +509,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@@ -603,7 +526,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@@ -633,7 +556,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@@ -650,7 +573,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
#[allow(clippy::if_same_then_else)]
@@ -669,7 +592,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
if this.rwlock_is_locked(id) {
throw_ub_format!("destroyed a locked rwlock");
@@ -772,7 +695,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
if let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, mutex)?;
}
@@ -785,7 +708,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
cond_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
while let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, mutex)?;
@@ -801,8 +724,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
release_cond_mutex_and_block(this, active_thread, mutex_id)?;
@@ -822,8 +745,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.check_no_isolation("`pthread_cond_timedwait`")?;
let id = cond_get_or_create_id(this, cond_op)?;
let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
// Extract the timeout.
@@ -899,7 +822,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
if this.condvar_is_awaited(id) {
throw_ub_format!("destroying an awaited conditional variable");
}

View File

@@ -261,6 +261,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let ret = this.TryAcquireSRWLockShared(ptr)?;
this.write_scalar(ret, dest)?;
}
"InitOnceBeginInitialize" => {
let [ptr, flags, pending, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.InitOnceBeginInitialize(ptr, flags, pending, context)?;
this.write_scalar(result, dest)?;
}
"InitOnceComplete" => {
let [ptr, flags, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.InitOnceComplete(ptr, flags, context)?;
this.write_scalar(result, dest)?;
}
// Dynamic symbol loading
"GetProcAddress" => {

View File

@@ -1,41 +1,17 @@
use crate::concurrency::init_once::InitOnceStatus;
use crate::concurrency::thread::MachineCallback;
use crate::*;
// Locks are pointer-sized pieces of data, initialized to 0.
// We use the first 4 bytes to store the RwLockId.
fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?;
ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
const SRWLOCK_ID_OFFSET: u64 = 0;
const INIT_ONCE_ID_OFFSET: u64 = 0;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
#[allow(non_snake_case)]
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[allow(non_snake_case)]
fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@@ -54,13 +30,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(())
}
#[allow(non_snake_case)]
fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@@ -72,10 +47,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[allow(non_snake_case)]
fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if !this.rwlock_writer_unlock(id, active_thread) {
@@ -88,10 +62,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(())
}
#[allow(non_snake_case)]
fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@@ -103,13 +76,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(())
}
#[allow(non_snake_case)]
fn TryAcquireSRWLockShared(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@@ -120,10 +92,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
#[allow(non_snake_case)]
fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if !this.rwlock_reader_unlock(id, active_thread) {
@@ -135,4 +106,119 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(())
}
fn InitOnceBeginInitialize(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
pending_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let pending_place = this.deref_operand(pending_op)?.into();
let context = this.read_pointer(context_op)?;
if flags != 0 {
throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceBeginInitialize`");
}
if !this.ptr_is_null(context)? {
throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`");
}
match this.init_once_status(id) {
InitOnceStatus::Uninitialized => {
this.init_once_begin(id);
this.write_scalar(this.eval_windows("c", "TRUE")?, &pending_place)?;
}
InitOnceStatus::Begun => {
// Someone else is already on it.
// Block this thread until they are done.
// When we are woken up, set the `pending` flag accordingly.
struct Callback<'tcx> {
init_once_id: InitOnceId,
pending_place: PlaceTy<'tcx, Provenance>,
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let Callback { init_once_id: _, pending_place } = self;
pending_place.visit_tags(visit);
}
}
impl<'mir, 'tcx> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
let pending = match this.init_once_status(self.init_once_id) {
InitOnceStatus::Uninitialized =>
unreachable!(
"status should have either been set to begun or complete"
),
InitOnceStatus::Begun => this.eval_windows("c", "TRUE")?,
InitOnceStatus::Complete => this.eval_windows("c", "FALSE")?,
};
this.write_scalar(pending, &self.pending_place)?;
Ok(())
}
}
this.init_once_enqueue_and_block(
id,
active_thread,
Box::new(Callback { init_once_id: id, pending_place }),
)
}
InitOnceStatus::Complete =>
this.write_scalar(this.eval_windows("c", "FALSE")?, &pending_place)?,
}
// This always succeeds (even if the thread is blocked, we will succeed if we ever unblock).
this.eval_windows("c", "TRUE")
}
fn InitOnceComplete(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let context = this.read_pointer(context_op)?;
let success = if flags == 0 {
true
} else if flags == this.eval_windows("c", "INIT_ONCE_INIT_FAILED")?.to_u32()? {
false
} else {
throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceBeginInitialize`");
};
if !this.ptr_is_null(context)? {
throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`");
}
if this.init_once_status(id) != InitOnceStatus::Begun {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling InitOnceComplete on a one time initialization that has not begun or is already completed"
);
}
if success {
this.init_once_complete(id)?;
} else {
this.init_once_fail(id)?;
}
this.eval_windows("c", "TRUE")
}
}
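
Because Miri's scheduler cannot suspend a shim mid-execution, `InitOnceBeginInitialize` blocks a waiter by parking a boxed `MachineCallback` that the completing thread later runs on the waiter's behalf (see `init_once_complete` above). A simplified sketch of that continuation-queue pattern — the `Machine` type here is invented for illustration, not Miri's API:

use std::collections::VecDeque;

type Callback = Box<dyn FnOnce(&mut Machine)>;

struct Machine {
    log: Vec<String>,
    waiters: VecDeque<Callback>,
}

impl Machine {
    fn enqueue_and_block(&mut self, callback: Callback) {
        self.waiters.push_back(callback); // the "thread" is now blocked
    }
    fn complete(&mut self) {
        // Wake everyone: drain the queue and run each stored continuation.
        for callback in std::mem::take(&mut self.waiters) {
            callback(self);
        }
    }
}

fn main() {
    let mut m = Machine { log: Vec::new(), waiters: VecDeque::new() };
    m.enqueue_and_block(Box::new(|m| m.log.push("waiter woken".into())));
    m.complete();
    assert_eq!(m.log, ["waiter woken"]);
}
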

View File

@@ -0,0 +1,138 @@
//@only-target-windows: Uses Win32 API functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0
use std::ffi::c_void;
use std::ptr::null_mut;
use std::thread;
#[derive(Copy, Clone)]
struct SendPtr<T>(*mut T);
unsafe impl<T> Send for SendPtr<T> {}
extern "system" {
fn InitOnceBeginInitialize(
init: *mut *mut c_void,
flags: u32,
pending: *mut i32,
context: *mut c_void,
) -> i32;
fn InitOnceComplete(init: *mut *mut c_void, flags: u32, context: *mut c_void) -> i32;
}
const TRUE: i32 = 1;
const FALSE: i32 = 0;
const INIT_ONCE_INIT_FAILED: u32 = 4;
fn single_thread() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
assert_eq!(InitOnceComplete(&mut init_once, 0, null_mut()), TRUE);
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, FALSE);
}
let mut init_once = null_mut();
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
assert_eq!(InitOnceComplete(&mut init_once, INIT_ONCE_INIT_FAILED, null_mut()), TRUE);
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
}
fn block_until_complete() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
let init_once_ptr = SendPtr(&mut init_once);
let waiter = move || unsafe {
let mut pending = 0;
assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, FALSE);
println!("finished waiting for initialization");
};
let waiter1 = thread::spawn(waiter);
let waiter2 = thread::spawn(waiter);
// this yield ensures `waiter1` & `waiter2` are blocked on the main thread
thread::yield_now();
println!("completing initialization");
unsafe {
assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE);
}
waiter1.join().unwrap();
waiter2.join().unwrap();
}
fn retry_on_fail() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
let init_once_ptr = SendPtr(&mut init_once);
let waiter = move || unsafe {
let mut pending = 0;
assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE);
if pending == TRUE {
println!("retrying initialization");
assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE);
} else {
println!("finished waiting for initialization");
}
};
let waiter1 = thread::spawn(waiter);
let waiter2 = thread::spawn(waiter);
// this yield ensures `waiter1` & `waiter2` are blocked on the main thread
thread::yield_now();
println!("failing initialization");
unsafe {
assert_eq!(InitOnceComplete(init_once_ptr.0, INIT_ONCE_INIT_FAILED, null_mut()), TRUE);
}
waiter1.join().unwrap();
waiter2.join().unwrap();
}
fn main() {
single_thread();
block_until_complete();
retry_on_fail();
}

View File

@@ -0,0 +1,6 @@
completing initialization
finished waiting for initialization
finished waiting for initialization
failing initialization
retrying initialization
finished waiting for initialization