revise Hermit's mutex interface to support the behaviour of StaticMutex

rust-lang/rust#77147 simplifies things by splitting this Mutex type
into two types matching the two use cases: StaticMutex and MovableMutex.
To support the behavior of StaticMutex, we move part of the mutex
implementation into libstd.
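For context, a rough sketch of the split described in rust-lang/rust#77147
(the two type names come from that PR; `SysMutex` and the field layout are
simplified placeholders for illustration, not the actual libstd code):

    struct SysMutex; // stands in for a per-OS mutex, e.g. the Hermit one below

    // Only ever lives behind a `static`, so it is never moved after its
    // first use and needs no heap allocation.
    struct StaticMutex(SysMutex);

    // Boxes the OS mutex so the wrapper can be moved freely while the
    // mutex itself keeps a stable address.
    struct MovableMutex(Box<SysMutex>);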
Stefan Lankes 2020-10-06 11:39:59 +02:00
parent c6bebc14a1
commit 16d65d0432
1 changed file with 182 additions and 8 deletions


@@ -1,9 +1,161 @@
use crate::cell::UnsafeCell;
use crate::collections::VecDeque;
use crate::ffi::c_void;
use crate::ops::{Deref, DerefMut, Drop};
use crate::ptr;
use crate::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint};
use crate::sys::hermit::abi;
/// This type provides a lock based on busy waiting to realize mutual exclusion.
///
/// # Description
///
/// This structure behaves a lot like a common mutex. There are some differences:
///
/// - By using busy waiting, it can be used outside the runtime.
/// - It is a so-called ticket lock (<https://en.wikipedia.org/wiki/Ticket_lock>)
///   and is completely fair.
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
struct Spinlock<T: ?Sized> {
queue: AtomicUsize,
dequeue: AtomicUsize,
data: UnsafeCell<T>,
}
unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
/// A guard through which the protected data can be accessed.
///
/// When the guard falls out of scope it will release the lock.
struct SpinlockGuard<'a, T: ?Sized + 'a> {
dequeue: &'a AtomicUsize,
data: &'a mut T,
}
impl<T> Spinlock<T> {
pub const fn new(user_data: T) -> Spinlock<T> {
Spinlock {
queue: AtomicUsize::new(0),
dequeue: AtomicUsize::new(1),
data: UnsafeCell::new(user_data),
}
}
#[inline]
fn obtain_lock(&self) {
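// Draw the next ticket: `queue` counts the tickets handed out, while
// `dequeue` (starting at 1) is the ticket currently being served.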
let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
while self.dequeue.load(Ordering::SeqCst) != ticket {
spin_loop_hint();
}
}
#[inline]
pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
self.obtain_lock();
SpinlockGuard {
dequeue: &self.dequeue,
data: &mut *self.data.get(),
}
}
}
impl<T: ?Sized + Default> Default for Spinlock<T> {
fn default() -> Spinlock<T> {
Spinlock::new(Default::default())
}
}
impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.data
}
}
impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
&mut *self.data
}
}
impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
/// Dropping the SpinlockGuard releases the lock it was created from.
fn drop(&mut self) {
self.dequeue.fetch_add(1, Ordering::SeqCst);
}
}
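// Worked example of the ticket discipline: tasks A and B both call
// obtain_lock(); A draws ticket 1, B draws ticket 2. Since `dequeue`
// starts at 1, A proceeds immediately while B spins. Dropping A's guard
// bumps `dequeue` to 2, admitting B. Grants are strictly FIFO, which is
// what makes the lock completely fair.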
/// A priority queue of task handles, indexed by task priority.
struct PriorityQueue {
queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
prio_bitmap: u64,
}
impl PriorityQueue {
pub const fn new() -> PriorityQueue {
PriorityQueue {
queues: [
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None,
],
prio_bitmap: 0,
}
}
/// Add a task handle to the queue for the given priority.
pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
let i: usize = prio.into().into();
self.prio_bitmap |= (1 << i) as u64;
if let Some(queue) = &mut self.queues[i] {
queue.push_back(id);
} else {
let mut queue = VecDeque::new();
queue.push_back(id);
self.queues[i] = Some(queue);
}
}
fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
if let Some(queue) = &mut self.queues[queue_index] {
let id = queue.pop_front();
if queue.is_empty() {
self.prio_bitmap &= !(1 << queue_index as u64);
}
id
} else {
None
}
}
/// Pop the task handle with the highest priority from the queue
pub fn pop(&mut self) -> Option<abi::Tid> {
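// Scan the bitmap from bit 0 upward and pop from the first non-empty
// queue; the lowest set index is treated as the highest priority.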
for i in 0..abi::NO_PRIORITIES {
if self.prio_bitmap & (1 << i) != 0 {
return self.pop_from_queue(i);
}
}
None
}
}
struct MutexInner {
locked: bool,
blocked_task: PriorityQueue,
}
impl MutexInner {
pub const fn new() -> MutexInner {
MutexInner {
locked: false,
blocked_task: PriorityQueue::new(),
}
}
}
pub struct Mutex {
-inner: *const c_void,
+inner: Spinlock<MutexInner>,
}
unsafe impl Send for Mutex {}
@@ -11,33 +163,55 @@ unsafe impl Sync for Mutex {}
impl Mutex {
pub const fn new() -> Mutex {
-Mutex { inner: ptr::null() }
+Mutex {
+inner: Spinlock::new(MutexInner::new()),
+}
}
#[inline]
pub unsafe fn init(&mut self) {
-let _ = abi::sem_init(&mut self.inner as *mut *const c_void, 1);
+self.inner = Spinlock::new(MutexInner::new());
}
#[inline]
pub unsafe fn lock(&self) {
-let _ = abi::sem_timedwait(self.inner, 0);
+loop {
+let mut guard = self.inner.lock();
+if !guard.locked {
+guard.locked = true;
+return;
+} else {
+let prio = abi::get_priority();
+let id = abi::getpid();
+guard.blocked_task.push(prio, id);
+abi::block_current_task();
+drop(guard);
+abi::yield_now();
+}
+}
}
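// Note on the ordering in lock(): the task enqueues itself and calls
// block_current_task() while still holding the inner spinlock, and only
// yields after the guard is dropped. unlock() must take the same spinlock
// before popping a waiter, so a wakeup cannot be lost in between.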
#[inline]
pub unsafe fn unlock(&self) {
-let _ = abi::sem_post(self.inner);
+let mut guard = self.inner.lock();
+guard.locked = false;
+if let Some(tid) = guard.blocked_task.pop() {
+abi::wakeup_task(tid);
+}
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
-let result = abi::sem_trywait(self.inner);
-result == 0
+let mut guard = self.inner.lock();
+if !guard.locked {
+guard.locked = true;
+true
+} else {
+false
+}
}
#[inline]
pub unsafe fn destroy(&self) {
-let _ = abi::sem_destroy(self.inner);
}
}
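For illustration, a hypothetical caller in libstd's sys_common layer could
drive this raw interface as sketched below (`with_lock` is an invented
helper, not part of the commit); because `Mutex::new()` is `const` and
`Mutex` is `Sync`, a StaticMutex-style wrapper can place it directly in a
`static`:

    // Sketch only: exercising the raw Mutex defined above.
    static LOCK: Mutex = Mutex::new();

    unsafe fn with_lock(f: impl FnOnce()) {
        LOCK.lock();   // may block the current Hermit task until the mutex is free
        f();           // critical section, protected by LOCK
        LOCK.unlock(); // wakes the highest-priority blocked task, if any
    }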