2020-08-27 13:45:01 +00:00
|
|
|
|
// Unit tests live in a sibling `tests` module. They are compiled out on
// Emscripten — presumably because the tests rely on spawning threads, which
// that target does not support; TODO confirm against the tests module.
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
|
|
|
|
|
|
2022-09-03 12:05:28 +00:00
|
|
|
|
use super::mutex as sys;
|
2022-04-14 09:11:41 +00:00
|
|
|
|
use crate::cell::UnsafeCell;
|
2019-02-10 19:23:21 +00:00
|
|
|
|
use crate::ops::Deref;
|
|
|
|
|
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
2022-04-14 09:11:41 +00:00
|
|
|
|
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
|
2015-04-03 21:46:54 +00:00
|
|
|
|
|
|
|
|
|
/// A re-entrant mutual exclusion
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by stdout().lock() and friends.
///
/// ## Implementation details
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
    /// The underlying non-recursive mutex that provides the actual blocking.
    mutex: sys::MovableMutex,
    /// Identifier of the thread currently holding the lock, or 0 when
    /// unlocked. Identifiers are thread-local addresses (see
    /// `current_thread_unique_ptr`), so a real owner is never 0.
    owner: AtomicUsize,
    /// How many times the owning thread has locked this mutex. Only read or
    /// written while `mutex` is held, so it needs no synchronization of its own.
    lock_count: UnsafeCell<u32>,
    /// The data protected by this mutex.
    data: T,
}
|
|
|
|
|
|
|
|
|
|
// SAFETY: moving a `ReentrantMutex<T>` to another thread just moves the owned
// `T`, so `T: Send` suffices.
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
// SAFETY: only the single owning thread can reach `data` at any moment (the
// guard is `!Send` and only hands out `&T` via `Deref`), so shared references
// to `T` never exist on two threads at once and `T: Send` is sufficient.
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
|
|
|
|
|
|
2018-07-01 21:30:16 +00:00
|
|
|
|
// Unconditionally unwind-safe: this mutex has no poisoning, and the guard
// never hands out `&mut T` (no `DerefMut`), so a panic while locked cannot be
// observed as a half-mutated `T` through this API alone.
impl<T> UnwindSafe for ReentrantMutex<T> {}
impl<T> RefUnwindSafe for ReentrantMutex<T> {}
|
|
|
|
|
|
2015-04-03 21:46:54 +00:00
|
|
|
|
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// Deref implementation.
///
/// # Mutability
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementation of the trait would violate Rust’s reference aliasing
/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// guarded data.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
    /// The mutex this guard decrements (and possibly unlocks) on drop.
    lock: &'a ReentrantMutex<T>,
}
|
|
|
|
|
|
2020-10-10 18:15:55 +00:00
|
|
|
|
// Dropping the guard decrements `lock_count` and releases the lock, which must
// happen on the thread recorded in `owner` — so the guard must never leave the
// thread that created it.
impl<T> !Send for ReentrantMutexGuard<'_, T> {}
|
2015-04-03 21:46:54 +00:00
|
|
|
|
|
|
|
|
|
impl<T> ReentrantMutex<T> {
    /// Creates a new reentrant mutex in an unlocked state.
    pub const fn new(t: T) -> ReentrantMutex<T> {
        ReentrantMutex {
            mutex: sys::MovableMutex::new(),
            // 0 means "no owner"; real thread ids are non-null addresses.
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Acquires a mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the caller until it is available to acquire the mutex.
    /// Upon returning, the thread is the only thread with the mutex held. When the thread
    /// calling this method already holds the lock, the call shall succeed without
    /// blocking.
    ///
    /// # Panics
    ///
    /// Panics if the per-thread lock count overflows `u32`
    /// (see `increment_lock_count`).
    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                // Re-entrant acquisition: we already hold the OS mutex,
                // so just bump the count.
                self.increment_lock_count();
            } else {
                // First acquisition on this thread: take the OS mutex,
                // then record ourselves as the owner.
                self.mutex.raw_lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantMutexGuard { lock: self }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    ///
    /// # Panics
    ///
    /// Panics if the per-thread lock count overflows `u32`
    /// (see `increment_lock_count`).
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                // Already held by this thread — reentrant path always succeeds.
                self.increment_lock_count();
                Some(ReentrantMutexGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantMutexGuard { lock: self })
            } else {
                // Held by another thread.
                None
            }
        }
    }

    /// Bumps `lock_count`, panicking instead of wrapping on overflow.
    ///
    /// # Safety
    ///
    /// The caller must currently hold the lock: `lock_count` is synchronized
    /// only by `mutex` itself.
    unsafe fn increment_lock_count(&self) {
        *self.lock_count.get() = (*self.lock_count.get())
            .checked_add(1)
            .expect("lock count overflow in reentrant mutex");
    }
}
|
|
|
|
|
|
2019-02-18 03:42:36 +00:00
|
|
|
|
impl<T> Deref for ReentrantMutexGuard<'_, T> {
|
2015-04-03 21:46:54 +00:00
|
|
|
|
type Target = T;
|
|
|
|
|
|
2015-09-07 22:36:29 +00:00
|
|
|
|
fn deref(&self) -> &T {
|
2020-09-24 18:50:09 +00:00
|
|
|
|
&self.lock.data
|
2015-04-03 21:46:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-18 03:42:36 +00:00
|
|
|
|
impl<T> Drop for ReentrantMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            // Undo one level of re-entrant locking.
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                // Last guard on this thread: clear `owner` *before* releasing
                // the OS mutex. If we unlocked first, another thread could
                // acquire the mutex and set `owner`, and our store of 0 would
                // then clobber the new owner's id.
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.raw_unlock();
            }
        }
    }
}
|
2022-04-14 09:11:41 +00:00
|
|
|
|
|
|
|
|
|
/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub fn current_thread_unique_ptr() -> usize {
    // A `const`-initialized, non-`Drop` thread local stays usable even while
    // the thread is being torn down, which is why a plain `u8` is used here.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|slot| slot as *const u8 as usize)
}
|