//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `parallel!`, `par_for_each_in`, and `par_map`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
//! |                         |                     |                                 |
//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
//! |                         |                     |                                 |
//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
//! |                         |                     | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
//!
//! [^2]: `MTLockRef` is a typedef.
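//!
//! A minimal usage sketch (illustrative, not part of the original docs): code
//! written against these abstractions reads the same under either mode, so
//! callers never branch on the compiler flavor themselves.
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{join, Lock};
//!
//! let counter = Lock::new(0);
//! // Under the parallel compiler (with `-Z threads`) the two closures may run
//! // concurrently; under the serial compiler they run one after the other.
//! join(|| *counter.lock() += 1, || *counter.lock() += 1);
//! assert_eq!(counter.into_inner(), 2);
//! ```
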
pub use crate::marker::*;

use parking_lot::Mutex;
use std::any::Any;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

mod lock;
pub use lock::{Lock, LockGuard};

mod worker_local;
pub use worker_local::{Registry, WorkerLocal};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};

mod vec;

mod mode {
    use super::Ordering;
    use std::sync::atomic::AtomicU8;

    const UNINITIALIZED: u8 = 0;
    const DYN_NOT_THREAD_SAFE: u8 = 1;
    const DYN_THREAD_SAFE: u8 = 2;

    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);

    // Whether thread safety is enabled (due to running under multiple threads).
    #[inline]
    pub fn is_dyn_thread_safe() -> bool {
        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
            DYN_NOT_THREAD_SAFE => false,
            DYN_THREAD_SAFE => true,
            _ => panic!("uninitialized dyn_thread_safe mode!"),
        }
    }

    // Whether thread safety might be enabled.
    #[inline]
    #[cfg(parallel_compiler)]
    pub fn might_be_dyn_thread_safe() -> bool {
        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
    }

    // Only set by the `-Z threads` compile option
    pub fn set_dyn_thread_safe_mode(mode: bool) {
        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
            UNINITIALIZED,
            set,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );

        // Check that the mode was either uninitialized or was already set to the requested mode.
        assert!(previous.is_ok() || previous == Err(set));
    }
}
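
// Illustrative sketch (not from the original source): the driver is expected
// to pick the mode exactly once, before anything queries it, e.g. when
// handling `-Z threads`:
//
//     sync::set_dyn_thread_safe_mode(threads > 1);
//     // ... later, primitives branch on the chosen mode:
//     if sync::is_dyn_thread_safe() { /* take the thread-safe path */ }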

pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};

/// A guard used to hold panics that occur during a parallel section to later be unwound.
/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
/// hiding errors by ensuring that everything in the section has completed executing before
/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
/// output matches the parallel compiler for testing purposes.
pub struct ParallelGuard {
    panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>,
}

impl ParallelGuard {
    pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
        catch_unwind(AssertUnwindSafe(f))
            .map_err(|err| {
                *self.panic.lock() = Some(err);
            })
            .ok()
    }
}

/// This gives access to a fresh parallel guard in the closure and will unwind any panics
/// caught in it after the closure returns.
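///
/// A sketch of the observable behavior (illustrative, not from the original
/// docs): panics inside `run` are captured rather than propagated immediately,
/// and a captured panic is resumed once the closure finishes.
///
/// ```ignore (illustrative)
/// parallel_guard(|guard| {
///     assert_eq!(guard.run(|| 40 + 2), Some(42));
///     assert_eq!(guard.run(|| -> u32 { panic!("deferred") }), None);
///     // ...the rest of the section still executes...
/// }); // the captured panic is resumed here
/// ```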
#[inline]
pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
    let guard = ParallelGuard { panic: Mutex::new(None) };
    let ret = f(&guard);
    if let Some(panic) = guard.panic.into_inner() {
        resume_unwind(panic);
    }
    ret
}

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        use std::ops::Add;
        use std::cell::Cell;

        pub unsafe auto trait Send {}
        pub unsafe auto trait Sync {}

        unsafe impl<T> Send for T {}
        unsafe impl<T> Sync for T {}

        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc., type aliases
        /// as it's not intended to be used separately.
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }

            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl Atomic<bool> {
            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
                let old = self.0.get();
                self.0.set(val | old);
                old
            }

            pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
                let old = self.0.get();
                self.0.set(val & old);
                old
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(
                &self,
                current: T,
                new: T,
                _: Ordering,
                _: Ordering,
            ) -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output = T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
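
        // Illustrative sketch (not from the original source): the serial
        // `Atomic` mirrors the std atomic API while ignoring the orderings.
        //
        //     let n = AtomicUsize::new(0);
        //     n.store(1, Ordering::Relaxed);            // plain `Cell` write
        //     assert_eq!(n.fetch_add(2, SeqCst), 1);    // no real synchronization
        //     assert_eq!(n.load(Ordering::Relaxed), 3);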

        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
        where
            A: FnOnce() -> RA,
            B: FnOnce() -> RB,
        {
            let (a, b) = parallel_guard(|guard| {
                let a = guard.run(oper_a);
                let b = guard.run(oper_b);
                (a, b)
            });
            (a.unwrap(), b.unwrap())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:block),*) => {{
                $crate::sync::parallel_guard(|guard| {
                    $(guard.run(|| $blocks);)*
                });
            }}
        }

        pub fn par_for_each_in<T: IntoIterator>(
            t: T,
            mut for_each: impl FnMut(T::Item) + Sync + Send,
        ) {
            parallel_guard(|guard| {
                t.into_iter().for_each(|i| {
                    guard.run(|| for_each(i));
                });
            })
        }

        pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
            t: T,
            mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
        ) -> C {
            parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
        }
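
        // Illustrative sketch (not from the original source): these serial
        // definitions expose the same call shape as the parallel ones in the
        // other branch, so callers are written once:
        //
        //     let squares: Vec<u32> = par_map(0..4u32, |i| i * i);
        //     par_for_each_in(0..4u32, |_i| { /* side effects */ });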

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;

        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTLockRef<'a, T> = &'a MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::RwLock as InnerRwLock;

        use std::thread;

        #[inline]
        pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
        where
            A: FnOnce() -> RA + DynSend,
            B: FnOnce() -> RB + DynSend,
        {
            if mode::is_dyn_thread_safe() {
                let oper_a = FromDyn::from(oper_a);
                let oper_b = FromDyn::from(oper_b);
                let (a, b) = rayon::join(
                    move || FromDyn::from(oper_a.into_inner()()),
                    move || FromDyn::from(oper_b.into_inner()()),
                );
                (a.into_inner(), b.into_inner())
            } else {
                let (a, b) = parallel_guard(|guard| {
                    let a = guard.run(oper_a);
                    let b = guard.run(oper_b);
                    (a, b)
                });
                (a.unwrap(), b.unwrap())
            }
        }
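
        // Explanatory note (not from the original source): in the
        // `rayon::join` path above, `FromDyn` wraps the closures and their
        // results so values that are only `DynSend` can cross Rayon's real
        // `Send` bounds.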

        // This function only works when `mode::is_dyn_thread_safe()` is true.
        pub fn scope<'scope, OP, R>(op: OP) -> R
        where
            OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
            R: DynSend,
        {
            let op = FromDyn::from(op);
            rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
        }

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:block [$($blocks:expr,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
                    s.spawn(move |_| block.into_inner()());)*
                    (|| $fblock)();
                });
            };
            ($fblock:block, $($blocks:block),*) => {
                if rustc_data_structures::sync::is_dyn_thread_safe() {
                    // Reverse the order of the later blocks since Rayon executes them in reverse order
                    // when using a single thread. This ensures the execution order matches that
                    // of a single threaded rustc.
                    parallel!(impl $fblock [] [$($blocks),*]);
                } else {
                    $crate::sync::parallel_guard(|guard| {
                        guard.run(|| $fblock);
                        $(guard.run(|| $blocks);)*
                    });
                }
            };
        }
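
        // Illustrative usage sketch (not from the original source):
        //
        //     parallel!(
        //         { expensive_pass() },  // first block: runs on this thread
        //         { cheap_pass_a() },
        //         { cheap_pass_b() }
        //     );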

        use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};

        pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
            t: T,
            for_each: impl Fn(I) + DynSync + DynSend,
        ) {
            parallel_guard(|guard| {
                if mode::is_dyn_thread_safe() {
                    let for_each = FromDyn::from(for_each);
                    t.into_par_iter().for_each(|i| {
                        guard.run(|| for_each(i));
                    });
                } else {
                    t.into_iter().for_each(|i| {
                        guard.run(|| for_each(i));
                    });
                }
            });
        }

        pub fn par_map<
            I,
            T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
            R: std::marker::Send,
            C: FromIterator<R> + FromParallelIterator<R>,
        >(
            t: T,
            map: impl Fn(I) -> R + DynSync + DynSend,
        ) -> C {
            parallel_guard(|guard| {
                if mode::is_dyn_thread_safe() {
                    let map = FromDyn::from(map);
                    t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
                } else {
                    t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
                }
            })
        }

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;
    }
}

#[derive(Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
pub struct CacheAligned<T>(pub T);

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
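
// Illustrative sketch (not from the original source): `insert_same` enforces
// that repeated insertions for a key agree on the value.
//
//     let mut map = HashMap::new();
//     map.insert_same("key", 1); // inserts
//     map.insert_same("key", 1); // no-op: same value
//     // map.insert_same("key", 2); // would panic: conflicting value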

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        // Extend the guard's borrow to `&self`'s lifetime by forgetting the
        // guard, so the read lock is never released and the reference stays
        // valid.
        let ret = unsafe { &*(&*guard as *const T) };
        std::mem::forget(guard);
        ret
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}
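
// Illustrative sketch (not from the original source): one `RwLock` API serves
// both modes; only the blocking and error-checking behavior differs.
//
//     let cache: RwLock<Vec<u32>> = RwLock::new(Vec::new());
//     cache.with_write_lock(|v| v.push(1));
//     assert_eq!(cache.with_read_lock(|v| v.len()), 1);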

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

// SAFETY (descriptive note, not from the original): access to `inner` goes
// through `check`, which panics when called from a thread other than the one
// recorded at construction, so cross-thread use is caught at runtime.
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}
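
// Illustrative sketch (not from the original source): `OneThread` pins its
// value to the creating thread and asserts on every access.
//
//     let data = OneThread::new(vec![1, 2, 3]);
//     assert_eq!(data.len(), 3); // OK: same thread as `new`
//     // Accessing `data` from another thread would panic in `check`
//     // (when the parallel compiler's check is compiled in).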