Auto merge of #132282 - Noratrieb:it-is-the-end-of-serial, r=cjgillot

Delete the `cfg(not(parallel))` serial compiler

Since its inception a long time ago, the parallel compiler and its cfgs have been a maintenance burden. This was a necessary evil to allow iteration while not degrading performance because of synchronization overhead.

But this time is over. Thanks to the amazing work by the parallel working group (and the dyn sync crimes), the parallel compiler has been fast enough to be shipped by default in nightly for quite a while now.
Stable and beta have still been on the serial compiler, because they can't use `-Zthreads` anyway.
But this is quite suboptimal:
- the maintenance burden still sucks
- we're not testing the serial compiler in nightly

Because of these reasons, it's time to end it. The serial compiler has served us well in the years since it was split from the parallel one, but it's over now.

Let the knight slay one head of the two-headed dragon!

#113349

Note that the default is still 1 thread, as more than 1 thread is still fairly broken.

cc `@onur-ozkan` to see if i did the bootstrap field removal correctly, `@SparrowLii` on the sync parts
This commit is contained in:
bors 2024-11-12 15:14:56 +00:00
commit 6503543d11
42 changed files with 487 additions and 1087 deletions

View File

@ -31,5 +31,4 @@ jemalloc = ['dep:jemalloc-sys']
llvm = ['rustc_driver_impl/llvm'] llvm = ['rustc_driver_impl/llvm']
max_level_info = ['rustc_driver_impl/max_level_info'] max_level_info = ['rustc_driver_impl/max_level_info']
rustc_randomized_layouts = ['rustc_driver_impl/rustc_randomized_layouts'] rustc_randomized_layouts = ['rustc_driver_impl/rustc_randomized_layouts']
rustc_use_parallel_compiler = ['rustc_driver_impl/rustc_use_parallel_compiler']
# tidy-alphabetical-end # tidy-alphabetical-end

View File

@ -38,7 +38,6 @@ pub enum TokenTree {
} }
// Ensure all fields of `TokenTree` are `DynSend` and `DynSync`. // Ensure all fields of `TokenTree` are `DynSend` and `DynSync`.
#[cfg(parallel_compiler)]
fn _dummy() fn _dummy()
where where
Token: sync::DynSend + sync::DynSync, Token: sync::DynSend + sync::DynSync,

View File

@ -8,11 +8,6 @@ edition = "2021"
icu_list = "1.2" icu_list = "1.2"
icu_locid = "1.2" icu_locid = "1.2"
icu_locid_transform = "1.3.2" icu_locid_transform = "1.3.2"
icu_provider = "1.2" icu_provider = { version = "1.2", features = ["sync"] }
zerovec = "0.10.0" zerovec = "0.10.0"
# tidy-alphabetical-end # tidy-alphabetical-end
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ['icu_provider/sync']
# tidy-alphabetical-end

View File

@ -10,11 +10,11 @@ bitflags = "2.4.1"
either = "1.0" either = "1.0"
elsa = "=1.7.1" elsa = "=1.7.1"
ena = "0.14.3" ena = "0.14.3"
indexmap = { version = "2.4.0" } indexmap = { version = "2.4.0", features = ["rustc-rayon"] }
jobserver_crate = { version = "0.1.28", package = "jobserver" } jobserver_crate = { version = "0.1.28", package = "jobserver" }
measureme = "11" measureme = "11"
rustc-hash = "2.0.0" rustc-hash = "2.0.0"
rustc-rayon = { version = "0.5.0", optional = true } rustc-rayon = "0.5.0"
rustc-stable-hash = { version = "0.1.0", features = ["nightly"] } rustc-stable-hash = { version = "0.1.0", features = ["nightly"] }
rustc_arena = { path = "../rustc_arena" } rustc_arena = { path = "../rustc_arena" }
rustc_graphviz = { path = "../rustc_graphviz" } rustc_graphviz = { path = "../rustc_graphviz" }
@ -53,8 +53,3 @@ memmap2 = "0.2.1"
[target.'cfg(not(target_has_atomic = "64"))'.dependencies] [target.'cfg(not(target_has_atomic = "64"))'.dependencies]
portable-atomic = "1.5.1" portable-atomic = "1.5.1"
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "dep:rustc-rayon"]
# tidy-alphabetical-end

View File

@ -10,7 +10,6 @@
#![allow(internal_features)] #![allow(internal_features)]
#![allow(rustc::default_hash_types)] #![allow(rustc::default_hash_types)]
#![allow(rustc::potential_query_instability)] #![allow(rustc::potential_query_instability)]
#![cfg_attr(not(parallel_compiler), feature(cell_leak))]
#![deny(unsafe_op_in_unsafe_fn)] #![deny(unsafe_op_in_unsafe_fn)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)] #![doc(rust_logo)]

View File

@ -1,194 +1,162 @@
cfg_match! { #[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. \
cfg(not(parallel_compiler)) => { Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")]
pub auto trait DynSend {} // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
pub auto trait DynSync {} // is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a
// `Send` type in `IntoDynSyncSend` will create a `DynSend` type.
pub unsafe auto trait DynSend {}
impl<T> DynSend for T {} #[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSync`. \
impl<T> DynSync for T {} Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`")]
} // This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()`
_ => { // is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a
#[rustc_on_unimplemented( // `Sync` type in `IntoDynSyncSend` will create a `DynSync` type.
message = "`{Self}` doesn't implement `DynSend`. \ pub unsafe auto trait DynSync {}
Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`"
)]
// This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
// is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a
// `Send` type in `IntoDynSyncSend` will create a `DynSend` type.
pub unsafe auto trait DynSend {}
#[rustc_on_unimplemented( // Same with `Sync` and `Send`.
message = "`{Self}` doesn't implement `DynSync`. \ unsafe impl<T: DynSync + ?Sized> DynSend for &T {}
Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`"
)]
// This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()`
// is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a
// `Sync` type in `IntoDynSyncSend` will create a `DynSync` type.
pub unsafe auto trait DynSync {}
// Same with `Sync` and `Send`. macro_rules! impls_dyn_send_neg {
unsafe impl<T: DynSync + ?Sized> DynSend for &T {} ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
$(impl$(<$($generics1)*>)? !DynSend for $t1 {})*
macro_rules! impls_dyn_send_neg { };
($([$t1: ty $(where $($generics1: tt)*)?])*) => {
$(impl$(<$($generics1)*>)? !DynSend for $t1 {})*
};
}
// Consistent with `std`
impls_dyn_send_neg!(
[std::env::Args]
[std::env::ArgsOs]
[*const T where T: ?Sized]
[*mut T where T: ?Sized]
[std::ptr::NonNull<T> where T: ?Sized]
[std::rc::Rc<T> where T: ?Sized]
[std::rc::Weak<T> where T: ?Sized]
[std::sync::MutexGuard<'_, T> where T: ?Sized]
[std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
[std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
[std::io::StdoutLock<'_>]
[std::io::StderrLock<'_>]
);
#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
impl !DynSend for std::env::VarsOs {}
macro_rules! already_send {
($([$ty: ty])*) => {
$(unsafe impl DynSend for $ty where $ty: Send {})*
};
}
// These structures are already `Send`.
already_send!(
[std::backtrace::Backtrace]
[std::io::Stdout]
[std::io::Stderr]
[std::io::Error]
[std::fs::File]
[rustc_arena::DroplessArena]
[crate::memmap::Mmap]
[crate::profiling::SelfProfiler]
[crate::owned_slice::OwnedSlice]
);
macro_rules! impl_dyn_send {
($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
$(unsafe impl<$($generics2)*> DynSend for $ty {})*
};
}
impl_dyn_send!(
[std::sync::atomic::AtomicPtr<T> where T]
[std::sync::Mutex<T> where T: ?Sized+ DynSend]
[std::sync::mpsc::Sender<T> where T: DynSend]
[std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
[std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
[std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
[std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
[std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
[Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
[Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
[crate::sync::RwLock<T> where T: DynSend]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
[rustc_arena::TypedArena<T> where T: DynSend]
[indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
[indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
[thin_vec::ThinVec<T> where T: DynSend]
[smallvec::SmallVec<A> where A: smallvec::Array + DynSend]
);
macro_rules! impls_dyn_sync_neg {
($([$t1: ty $(where $($generics1: tt)*)?])*) => {
$(impl$(<$($generics1)*>)? !DynSync for $t1 {})*
};
}
// Consistent with `std`
impls_dyn_sync_neg!(
[std::env::Args]
[std::env::ArgsOs]
[*const T where T: ?Sized]
[*mut T where T: ?Sized]
[std::cell::Cell<T> where T: ?Sized]
[std::cell::RefCell<T> where T: ?Sized]
[std::cell::UnsafeCell<T> where T: ?Sized]
[std::ptr::NonNull<T> where T: ?Sized]
[std::rc::Rc<T> where T: ?Sized]
[std::rc::Weak<T> where T: ?Sized]
[std::cell::OnceCell<T> where T]
[std::sync::mpsc::Receiver<T> where T]
[std::sync::mpsc::Sender<T> where T]
);
#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
impl !DynSync for std::env::VarsOs {}
macro_rules! already_sync {
($([$ty: ty])*) => {
$(unsafe impl DynSync for $ty where $ty: Sync {})*
};
}
// These structures are already `Sync`.
already_sync!(
[std::sync::atomic::AtomicBool]
[std::sync::atomic::AtomicUsize]
[std::sync::atomic::AtomicU8]
[std::sync::atomic::AtomicU32]
[std::backtrace::Backtrace]
[std::io::Error]
[std::fs::File]
[jobserver_crate::Client]
[crate::memmap::Mmap]
[crate::profiling::SelfProfiler]
[crate::owned_slice::OwnedSlice]
);
// Use portable AtomicU64 for targets without native 64-bit atomics
#[cfg(target_has_atomic = "64")]
already_sync!(
[std::sync::atomic::AtomicU64]
);
#[cfg(not(target_has_atomic = "64"))]
already_sync!(
[portable_atomic::AtomicU64]
);
macro_rules! impl_dyn_sync {
($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
$(unsafe impl<$($generics2)*> DynSync for $ty {})*
};
}
impl_dyn_sync!(
[std::sync::atomic::AtomicPtr<T> where T]
[std::sync::OnceLock<T> where T: DynSend + DynSync]
[std::sync::Mutex<T> where T: ?Sized + DynSend]
[std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
[std::sync::LazyLock<T, F> where T: DynSend + DynSync, F: DynSend]
[std::collections::HashSet<K, S> where K: DynSync, S: DynSync]
[std::collections::HashMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
[std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
[Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
[Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
[crate::sync::RwLock<T> where T: DynSend + DynSync]
[crate::sync::WorkerLocal<T> where T: DynSend]
[crate::intern::Interned<'a, T> where 'a, T: DynSync]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool]
[parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
[parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
[indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
[indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
[smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
[thin_vec::ThinVec<T> where T: DynSync]
);
}
} }
// Consistent with `std`
impls_dyn_send_neg!(
[std::env::Args]
[std::env::ArgsOs]
[*const T where T: ?Sized]
[*mut T where T: ?Sized]
[std::ptr::NonNull<T> where T: ?Sized]
[std::rc::Rc<T> where T: ?Sized]
[std::rc::Weak<T> where T: ?Sized]
[std::sync::MutexGuard<'_, T> where T: ?Sized]
[std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
[std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
[std::io::StdoutLock<'_>]
[std::io::StderrLock<'_>]
);
#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
impl !DynSend for std::env::VarsOs {}
macro_rules! already_send {
($([$ty: ty])*) => {
$(unsafe impl DynSend for $ty where $ty: Send {})*
};
}
// These structures are already `Send`.
already_send!(
[std::backtrace::Backtrace][std::io::Stdout][std::io::Stderr][std::io::Error][std::fs::File]
[rustc_arena::DroplessArena][crate::memmap::Mmap][crate::profiling::SelfProfiler]
[crate::owned_slice::OwnedSlice]
);
macro_rules! impl_dyn_send {
($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
$(unsafe impl<$($generics2)*> DynSend for $ty {})*
};
}
impl_dyn_send!(
[std::sync::atomic::AtomicPtr<T> where T]
[std::sync::Mutex<T> where T: ?Sized+ DynSend]
[std::sync::mpsc::Sender<T> where T: DynSend]
[std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
[std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
[std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
[std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
[std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
[Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
[Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
[crate::sync::RwLock<T> where T: DynSend]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
[rustc_arena::TypedArena<T> where T: DynSend]
[indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
[indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
[thin_vec::ThinVec<T> where T: DynSend]
[smallvec::SmallVec<A> where A: smallvec::Array + DynSend]
);
macro_rules! impls_dyn_sync_neg {
($([$t1: ty $(where $($generics1: tt)*)?])*) => {
$(impl$(<$($generics1)*>)? !DynSync for $t1 {})*
};
}
// Consistent with `std`
impls_dyn_sync_neg!(
[std::env::Args]
[std::env::ArgsOs]
[*const T where T: ?Sized]
[*mut T where T: ?Sized]
[std::cell::Cell<T> where T: ?Sized]
[std::cell::RefCell<T> where T: ?Sized]
[std::cell::UnsafeCell<T> where T: ?Sized]
[std::ptr::NonNull<T> where T: ?Sized]
[std::rc::Rc<T> where T: ?Sized]
[std::rc::Weak<T> where T: ?Sized]
[std::cell::OnceCell<T> where T]
[std::sync::mpsc::Receiver<T> where T]
[std::sync::mpsc::Sender<T> where T]
);
#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
impl !DynSync for std::env::VarsOs {}
macro_rules! already_sync {
($([$ty: ty])*) => {
$(unsafe impl DynSync for $ty where $ty: Sync {})*
};
}
// These structures are already `Sync`.
already_sync!(
[std::sync::atomic::AtomicBool][std::sync::atomic::AtomicUsize][std::sync::atomic::AtomicU8]
[std::sync::atomic::AtomicU32][std::backtrace::Backtrace][std::io::Error][std::fs::File]
[jobserver_crate::Client][crate::memmap::Mmap][crate::profiling::SelfProfiler]
[crate::owned_slice::OwnedSlice]
);
// Use portable AtomicU64 for targets without native 64-bit atomics
#[cfg(target_has_atomic = "64")]
already_sync!([std::sync::atomic::AtomicU64]);
#[cfg(not(target_has_atomic = "64"))]
already_sync!([portable_atomic::AtomicU64]);
macro_rules! impl_dyn_sync {
($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
$(unsafe impl<$($generics2)*> DynSync for $ty {})*
};
}
impl_dyn_sync!(
[std::sync::atomic::AtomicPtr<T> where T]
[std::sync::OnceLock<T> where T: DynSend + DynSync]
[std::sync::Mutex<T> where T: ?Sized + DynSend]
[std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
[std::sync::LazyLock<T, F> where T: DynSend + DynSync, F: DynSend]
[std::collections::HashSet<K, S> where K: DynSync, S: DynSync]
[std::collections::HashMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
[std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
[Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
[Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
[crate::sync::RwLock<T> where T: DynSend + DynSync]
[crate::sync::WorkerLocal<T> where T: DynSend]
[crate::intern::Interned<'a, T> where 'a, T: DynSync]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool]
[parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
[parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
[indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
[indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
[smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
[thin_vec::ThinVec<T> where T: DynSync]
);
pub fn assert_dyn_sync<T: ?Sized + DynSync>() {} pub fn assert_dyn_sync<T: ?Sized + DynSync>() {}
pub fn assert_dyn_send<T: ?Sized + DynSend>() {} pub fn assert_dyn_send<T: ?Sized + DynSend>() {}
pub fn assert_dyn_send_val<T: ?Sized + DynSend>(_t: &T) {} pub fn assert_dyn_send_val<T: ?Sized + DynSend>(_t: &T) {}
@ -203,7 +171,6 @@ impl<T> FromDyn<T> {
// Check that `sync::is_dyn_thread_safe()` is true on creation so we can // Check that `sync::is_dyn_thread_safe()` is true on creation so we can
// implement `Send` and `Sync` for this structure when `T` // implement `Send` and `Sync` for this structure when `T`
// implements `DynSend` and `DynSync` respectively. // implements `DynSend` and `DynSync` respectively.
#[cfg(parallel_compiler)]
assert!(crate::sync::is_dyn_thread_safe()); assert!(crate::sync::is_dyn_thread_safe());
FromDyn(val) FromDyn(val)
} }
@ -215,11 +182,9 @@ impl<T> FromDyn<T> {
} }
// `FromDyn` is `Send` if `T` is `DynSend`, since it ensures that sync::is_dyn_thread_safe() is true. // `FromDyn` is `Send` if `T` is `DynSend`, since it ensures that sync::is_dyn_thread_safe() is true.
#[cfg(parallel_compiler)]
unsafe impl<T: DynSend> Send for FromDyn<T> {} unsafe impl<T: DynSend> Send for FromDyn<T> {}
// `FromDyn` is `Sync` if `T` is `DynSync`, since it ensures that sync::is_dyn_thread_safe() is true. // `FromDyn` is `Sync` if `T` is `DynSync`, since it ensures that sync::is_dyn_thread_safe() is true.
#[cfg(parallel_compiler)]
unsafe impl<T: DynSync> Sync for FromDyn<T> {} unsafe impl<T: DynSync> Sync for FromDyn<T> {}
impl<T> std::ops::Deref for FromDyn<T> { impl<T> std::ops::Deref for FromDyn<T> {
@ -237,9 +202,7 @@ impl<T> std::ops::Deref for FromDyn<T> {
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub struct IntoDynSyncSend<T: ?Sized>(pub T); pub struct IntoDynSyncSend<T: ?Sized>(pub T);
#[cfg(parallel_compiler)]
unsafe impl<T: ?Sized + Send> DynSend for IntoDynSyncSend<T> {} unsafe impl<T: ?Sized + Send> DynSend for IntoDynSyncSend<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T: ?Sized + Sync> DynSync for IntoDynSyncSend<T> {} unsafe impl<T: ?Sized + Sync> DynSync for IntoDynSyncSend<T> {}
impl<T> std::ops::Deref for IntoDynSyncSend<T> { impl<T> std::ops::Deref for IntoDynSyncSend<T> {

View File

@ -139,11 +139,9 @@ impl Borrow<[u8]> for OwnedSlice {
} }
// Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Send` // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Send`
#[cfg(parallel_compiler)]
unsafe impl sync::Send for OwnedSlice {} unsafe impl sync::Send for OwnedSlice {}
// Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Sync` // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Sync`
#[cfg(parallel_compiler)]
unsafe impl sync::Sync for OwnedSlice {} unsafe impl sync::Sync for OwnedSlice {}
#[cfg(test)] #[cfg(test)]

View File

@ -3,27 +3,22 @@ use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::{iter, mem}; use std::{iter, mem};
#[cfg(parallel_compiler)]
use either::Either; use either::Either;
use crate::fx::{FxHashMap, FxHasher}; use crate::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)] use crate::sync::{CacheAligned, Lock, LockGuard, Mode, is_dyn_thread_safe};
use crate::sync::{CacheAligned, is_dyn_thread_safe};
use crate::sync::{Lock, LockGuard, Mode};
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700, // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards. // may also affect the ideal number of shards.
const SHARD_BITS: usize = 5; const SHARD_BITS: usize = 5;
#[cfg(parallel_compiler)]
const SHARDS: usize = 1 << SHARD_BITS; const SHARDS: usize = 1 << SHARD_BITS;
/// An array of cache-line aligned inner locked structures with convenience methods. /// An array of cache-line aligned inner locked structures with convenience methods.
/// A single field is used when the compiler uses only one thread. /// A single field is used when the compiler uses only one thread.
pub enum Sharded<T> { pub enum Sharded<T> {
Single(Lock<T>), Single(Lock<T>),
#[cfg(parallel_compiler)]
Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>), Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>),
} }
@ -37,7 +32,6 @@ impl<T: Default> Default for Sharded<T> {
impl<T> Sharded<T> { impl<T> Sharded<T> {
#[inline] #[inline]
pub fn new(mut value: impl FnMut() -> T) -> Self { pub fn new(mut value: impl FnMut() -> T) -> Self {
#[cfg(parallel_compiler)]
if is_dyn_thread_safe() { if is_dyn_thread_safe() {
return Sharded::Shards(Box::new( return Sharded::Shards(Box::new(
[(); SHARDS].map(|()| CacheAligned(Lock::new(value()))), [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
@ -52,7 +46,6 @@ impl<T> Sharded<T> {
pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> { pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
match self { match self {
Self::Single(single) => single, Self::Single(single) => single,
#[cfg(parallel_compiler)]
Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)), Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
} }
} }
@ -66,7 +59,6 @@ impl<T> Sharded<T> {
pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> { pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
match self { match self {
Self::Single(single) => single, Self::Single(single) => single,
#[cfg(parallel_compiler)]
Self::Shards(shards) => { Self::Shards(shards) => {
// SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds. // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 } unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
@ -87,7 +79,6 @@ impl<T> Sharded<T> {
// `might_be_dyn_thread_safe` was also false. // `might_be_dyn_thread_safe` was also false.
unsafe { single.lock_assume(Mode::NoSync) } unsafe { single.lock_assume(Mode::NoSync) }
} }
#[cfg(parallel_compiler)]
Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)), Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
} }
} }
@ -110,7 +101,6 @@ impl<T> Sharded<T> {
// `might_be_dyn_thread_safe` was also false. // `might_be_dyn_thread_safe` was also false.
unsafe { single.lock_assume(Mode::NoSync) } unsafe { single.lock_assume(Mode::NoSync) }
} }
#[cfg(parallel_compiler)]
Self::Shards(shards) => { Self::Shards(shards) => {
// Synchronization is enabled so use the `lock_assume_sync` method optimized // Synchronization is enabled so use the `lock_assume_sync` method optimized
// for that case. // for that case.
@ -127,11 +117,7 @@ impl<T> Sharded<T> {
#[inline] #[inline]
pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> { pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
match self { match self {
#[cfg(not(parallel_compiler))]
Self::Single(single) => iter::once(single.lock()),
#[cfg(parallel_compiler)]
Self::Single(single) => Either::Left(iter::once(single.lock())), Self::Single(single) => Either::Left(iter::once(single.lock())),
#[cfg(parallel_compiler)]
Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())), Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
} }
} }
@ -139,11 +125,7 @@ impl<T> Sharded<T> {
#[inline] #[inline]
pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> { pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
match self { match self {
#[cfg(not(parallel_compiler))]
Self::Single(single) => iter::once(single.try_lock()),
#[cfg(parallel_compiler)]
Self::Single(single) => Either::Left(iter::once(single.try_lock())), Self::Single(single) => Either::Left(iter::once(single.try_lock())),
#[cfg(parallel_compiler)]
Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())), Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
} }
} }
@ -151,7 +133,6 @@ impl<T> Sharded<T> {
#[inline] #[inline]
pub fn shards() -> usize { pub fn shards() -> usize {
#[cfg(parallel_compiler)]
if is_dyn_thread_safe() { if is_dyn_thread_safe() {
return SHARDS; return SHARDS;
} }

View File

@ -54,9 +54,7 @@ mod worker_local;
pub use worker_local::{Registry, WorkerLocal}; pub use worker_local::{Registry, WorkerLocal};
mod parallel; mod parallel;
#[cfg(parallel_compiler)] pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
pub use parallel::scope;
pub use parallel::{join, par_for_each_in, par_map, parallel_guard, try_par_for_each_in};
pub use vec::{AppendOnlyIndexVec, AppendOnlyVec}; pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
mod vec; mod vec;
@ -104,226 +102,66 @@ mod mode {
} }
} }
// FIXME(parallel_compiler): Get rid of these aliases across the compiler.
pub use std::marker::{Send, Sync};
// Use portable AtomicU64 for targets without native 64-bit atomics
#[cfg(target_has_atomic = "64")]
pub use std::sync::atomic::AtomicU64;
pub use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize};
pub use std::sync::{Arc as Lrc, OnceLock, Weak};
pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
pub use parking_lot::{
MappedMutexGuard as MappedLockGuard, MappedRwLockReadGuard as MappedReadGuard,
MappedRwLockWriteGuard as MappedWriteGuard, RwLockReadGuard as ReadGuard,
RwLockWriteGuard as WriteGuard,
};
#[cfg(not(target_has_atomic = "64"))]
pub use portable_atomic::AtomicU64;
cfg_match! { pub type LRef<'a, T> = &'a T;
cfg(not(parallel_compiler)) => {
use std::ops::Add;
use std::cell::Cell;
use std::sync::atomic::Ordering;
pub unsafe auto trait Send {} #[derive(Debug, Default)]
pub unsafe auto trait Sync {} pub struct MTLock<T>(Lock<T>);
unsafe impl<T> Send for T {} impl<T> MTLock<T> {
unsafe impl<T> Sync for T {} #[inline(always)]
pub fn new(inner: T) -> Self {
/// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc. MTLock(Lock::new(inner))
/// It has explicit ordering arguments and is only intended for use with
/// the native atomic types.
/// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
/// as it's not intended to be used separately.
#[derive(Debug, Default)]
pub struct Atomic<T: Copy>(Cell<T>);
impl<T: Copy> Atomic<T> {
#[inline]
pub fn new(v: T) -> Self {
Atomic(Cell::new(v))
}
#[inline]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
#[inline]
pub fn load(&self, _: Ordering) -> T {
self.0.get()
}
#[inline]
pub fn store(&self, val: T, _: Ordering) {
self.0.set(val)
}
#[inline]
pub fn swap(&self, val: T, _: Ordering) -> T {
self.0.replace(val)
}
}
impl Atomic<bool> {
pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
let old = self.0.get();
self.0.set(val | old);
old
}
pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
let old = self.0.get();
self.0.set(val & old);
old
}
}
impl<T: Copy + PartialEq> Atomic<T> {
#[inline]
pub fn compare_exchange(&self,
current: T,
new: T,
_: Ordering,
_: Ordering)
-> Result<T, T> {
let read = self.0.get();
if read == current {
self.0.set(new);
Ok(read)
} else {
Err(read)
}
}
}
impl<T: Add<Output=T> + Copy> Atomic<T> {
#[inline]
pub fn fetch_add(&self, val: T, _: Ordering) -> T {
let old = self.0.get();
self.0.set(old + val);
old
}
}
pub type AtomicUsize = Atomic<usize>;
pub type AtomicBool = Atomic<bool>;
pub type AtomicU32 = Atomic<u32>;
pub type AtomicU64 = Atomic<u64>;
pub use std::rc::Rc as Lrc;
pub use std::rc::Weak as Weak;
#[doc(no_inline)]
pub use std::cell::Ref as ReadGuard;
#[doc(no_inline)]
pub use std::cell::Ref as MappedReadGuard;
#[doc(no_inline)]
pub use std::cell::RefMut as WriteGuard;
#[doc(no_inline)]
pub use std::cell::RefMut as MappedWriteGuard;
#[doc(no_inline)]
pub use std::cell::RefMut as MappedLockGuard;
pub use std::cell::OnceCell as OnceLock;
use std::cell::RefCell as InnerRwLock;
pub type LRef<'a, T> = &'a mut T;
#[derive(Debug, Default)]
pub struct MTLock<T>(T);
impl<T> MTLock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
MTLock(inner)
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.0
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
&mut self.0
}
#[inline(always)]
pub fn lock(&self) -> &T {
&self.0
}
#[inline(always)]
pub fn lock_mut(&mut self) -> &mut T {
&mut self.0
}
}
// FIXME: Probably a bad idea (in the threaded case)
impl<T: Clone> Clone for MTLock<T> {
#[inline]
fn clone(&self) -> Self {
MTLock(self.0.clone())
}
}
} }
_ => {
pub use std::marker::Send as Send;
pub use std::marker::Sync as Sync;
pub use parking_lot::RwLockReadGuard as ReadGuard; #[inline(always)]
pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard; pub fn into_inner(self) -> T {
pub use parking_lot::RwLockWriteGuard as WriteGuard; self.0.into_inner()
pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard; }
pub use parking_lot::MappedMutexGuard as MappedLockGuard; #[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
pub use std::sync::OnceLock; #[inline(always)]
pub fn lock(&self) -> LockGuard<'_, T> {
self.0.lock()
}
pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32}; #[inline(always)]
pub fn lock_mut(&self) -> LockGuard<'_, T> {
// Use portable AtomicU64 for targets without native 64-bit atomics self.lock()
#[cfg(target_has_atomic = "64")]
pub use std::sync::atomic::AtomicU64;
#[cfg(not(target_has_atomic = "64"))]
pub use portable_atomic::AtomicU64;
pub use std::sync::Arc as Lrc;
pub use std::sync::Weak as Weak;
pub type LRef<'a, T> = &'a T;
#[derive(Debug, Default)]
pub struct MTLock<T>(Lock<T>);
impl<T> MTLock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
MTLock(Lock::new(inner))
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[inline(always)]
pub fn lock(&self) -> LockGuard<'_, T> {
self.0.lock()
}
#[inline(always)]
pub fn lock_mut(&self) -> LockGuard<'_, T> {
self.lock()
}
}
use parking_lot::RwLock as InnerRwLock;
/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread
const ERROR_CHECKING: bool = false;
} }
} }
use parking_lot::RwLock as InnerRwLock;
/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread
const ERROR_CHECKING: bool = false;
pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>; pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
#[derive(Default)] #[derive(Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))] #[repr(align(64))]
pub struct CacheAligned<T>(pub T); pub struct CacheAligned<T>(pub T);
pub trait HashMapExt<K, V> { pub trait HashMapExt<K, V> {
@ -357,14 +195,6 @@ impl<T> RwLock<T> {
self.0.get_mut() self.0.get_mut()
} }
#[cfg(not(parallel_compiler))]
#[inline(always)]
#[track_caller]
pub fn read(&self) -> ReadGuard<'_, T> {
self.0.borrow()
}
#[cfg(parallel_compiler)]
#[inline(always)] #[inline(always)]
pub fn read(&self) -> ReadGuard<'_, T> { pub fn read(&self) -> ReadGuard<'_, T> {
if ERROR_CHECKING { if ERROR_CHECKING {
@ -380,26 +210,11 @@ impl<T> RwLock<T> {
f(&*self.read()) f(&*self.read())
} }
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
self.0.try_borrow_mut().map_err(|_| ())
}
#[cfg(parallel_compiler)]
#[inline(always)] #[inline(always)]
pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> { pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
self.0.try_write().ok_or(()) self.0.try_write().ok_or(())
} }
#[cfg(not(parallel_compiler))]
#[inline(always)]
#[track_caller]
pub fn write(&self) -> WriteGuard<'_, T> {
self.0.borrow_mut()
}
#[cfg(parallel_compiler)]
#[inline(always)] #[inline(always)]
pub fn write(&self) -> WriteGuard<'_, T> { pub fn write(&self) -> WriteGuard<'_, T> {
if ERROR_CHECKING { if ERROR_CHECKING {
@ -427,13 +242,6 @@ impl<T> RwLock<T> {
self.write() self.write()
} }
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn leak(&self) -> &T {
ReadGuard::leak(self.read())
}
#[cfg(parallel_compiler)]
#[inline(always)] #[inline(always)]
pub fn leak(&self) -> &T { pub fn leak(&self) -> &T {
let guard = self.read(); let guard = self.read();

View File

@ -5,9 +5,7 @@ use std::ops::{Deref, DerefMut};
use std::ptr::NonNull; use std::ptr::NonNull;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard}; use crate::sync::{AtomicBool, DynSend, DynSync, ReadGuard, RwLock, WriteGuard};
#[cfg(parallel_compiler)]
use crate::sync::{DynSend, DynSync};
/// A type which allows mutation using a lock until /// A type which allows mutation using a lock until
/// the value is frozen and can be accessed lock-free. /// the value is frozen and can be accessed lock-free.
@ -22,7 +20,6 @@ pub struct FreezeLock<T> {
lock: RwLock<()>, lock: RwLock<()>,
} }
#[cfg(parallel_compiler)]
unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {} unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {}
impl<T> FreezeLock<T> { impl<T> FreezeLock<T> {

View File

@ -1,236 +1,177 @@
//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true. //! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits. //! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
//!
//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
#![allow(dead_code)] #![allow(dead_code)]
use std::fmt; use std::fmt;
#[cfg(parallel_compiler)]
pub use maybe_sync::*;
#[cfg(not(parallel_compiler))]
pub use no_sync::*;
#[derive(Clone, Copy, PartialEq)] #[derive(Clone, Copy, PartialEq)]
pub enum Mode { pub enum Mode {
NoSync, NoSync,
Sync, Sync,
} }
mod maybe_sync { use std::cell::{Cell, UnsafeCell};
use std::cell::{Cell, UnsafeCell}; use std::intrinsics::unlikely;
use std::intrinsics::unlikely; use std::marker::PhantomData;
use std::marker::PhantomData; use std::mem::ManuallyDrop;
use std::mem::ManuallyDrop; use std::ops::{Deref, DerefMut};
use std::ops::{Deref, DerefMut};
use parking_lot::RawMutex; use parking_lot::RawMutex;
use parking_lot::lock_api::RawMutex as _; use parking_lot::lock_api::RawMutex as _;
use super::Mode; use crate::sync::{DynSend, DynSync, mode};
use crate::sync::mode;
#[cfg(parallel_compiler)]
use crate::sync::{DynSend, DynSync};
/// A guard holding mutable access to a `Lock` which is in a locked state. /// A guard holding mutable access to a `Lock` which is in a locked state.
#[must_use = "if unused the Lock will immediately unlock"] #[must_use = "if unused the Lock will immediately unlock"]
pub struct LockGuard<'a, T> { pub struct LockGuard<'a, T> {
lock: &'a Lock<T>, lock: &'a Lock<T>,
marker: PhantomData<&'a mut T>, marker: PhantomData<&'a mut T>,
/// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it
/// to the original lock operation. /// to the original lock operation.
mode: Mode, mode: Mode,
}
impl<'a, T: 'a> Deref for LockGuard<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
// SAFETY: We have shared access to the mutable access owned by this type,
// so we can give out a shared reference.
unsafe { &*self.lock.data.get() }
} }
}
impl<'a, T: 'a> Deref for LockGuard<'a, T> { impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
type Target = T; #[inline]
#[inline] fn deref_mut(&mut self) -> &mut T {
fn deref(&self) -> &T { // SAFETY: We have mutable access to the data so we can give out a mutable reference.
// SAFETY: We have shared access to the mutable access owned by this type, unsafe { &mut *self.lock.data.get() }
// so we can give out a shared reference.
unsafe { &*self.lock.data.get() }
}
} }
}
impl<'a, T: 'a> DerefMut for LockGuard<'a, T> { impl<'a, T: 'a> Drop for LockGuard<'a, T> {
#[inline] #[inline]
fn deref_mut(&mut self) -> &mut T { fn drop(&mut self) {
// SAFETY: We have mutable access to the data so we can give out a mutable reference. // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
unsafe { &mut *self.lock.data.get() } // with the `lock.mode` state. This means we access the right union fields.
} match self.mode {
} Mode::NoSync => {
let cell = unsafe { &self.lock.mode_union.no_sync };
impl<'a, T: 'a> Drop for LockGuard<'a, T> { debug_assert!(cell.get());
#[inline] cell.set(false);
fn drop(&mut self) {
// SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
// with the `lock.mode` state. This means we access the right union fields.
match self.mode {
Mode::NoSync => {
let cell = unsafe { &self.lock.mode_union.no_sync };
debug_assert!(cell.get());
cell.set(false);
}
// SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
} }
// SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
} }
} }
}
union ModeUnion { union ModeUnion {
/// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`. /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
no_sync: ManuallyDrop<Cell<bool>>, no_sync: ManuallyDrop<Cell<bool>>,
/// A lock implementation that's only used if `Lock.mode` is `Sync`. /// A lock implementation that's only used if `Lock.mode` is `Sync`.
sync: ManuallyDrop<RawMutex>, sync: ManuallyDrop<RawMutex>,
}
/// The value representing a locked state for the `Cell`.
const LOCKED: bool = true;
/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
pub struct Lock<T> {
/// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
/// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
/// This is set on initialization and never changed.
mode: Mode,
mode_union: ModeUnion,
data: UnsafeCell<T>,
}
impl<T> Lock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
// Create the lock with synchronization enabled using the `RawMutex` type.
(Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
} else {
// Create the lock with synchronization disabled.
(Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
};
Lock { mode, mode_union, data: UnsafeCell::new(inner) }
} }
/// The value representing a locked state for the `Cell`. #[inline(always)]
const LOCKED: bool = true; pub fn into_inner(self) -> T {
self.data.into_inner()
/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
pub struct Lock<T> {
/// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
/// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
/// This is set on initialization and never changed.
mode: Mode,
mode_union: ModeUnion,
data: UnsafeCell<T>,
} }
impl<T> Lock<T> { #[inline(always)]
#[inline(always)] pub fn get_mut(&mut self) -> &mut T {
pub fn new(inner: T) -> Self { self.data.get_mut()
let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) { }
// Create the lock with synchronization enabled using the `RawMutex` type.
(Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) }) #[inline(always)]
} else { pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
// Create the lock with synchronization disabled. let mode = self.mode;
(Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) }) // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
}; match mode {
Lock { mode, mode_union, data: UnsafeCell::new(inner) } Mode::NoSync => {
let cell = unsafe { &self.mode_union.no_sync };
let was_unlocked = cell.get() != LOCKED;
if was_unlocked {
cell.set(LOCKED);
}
was_unlocked
}
Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
}
.then(|| LockGuard { lock: self, marker: PhantomData, mode })
}
/// This acquires the lock assuming synchronization is in a specific mode.
///
/// Safety
/// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
/// true on lock creation.
#[inline(always)]
#[track_caller]
pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
#[inline(never)]
#[track_caller]
#[cold]
fn lock_held() -> ! {
panic!("lock was already held")
} }
#[inline(always)] // SAFETY: This is safe since the union fields are used in accordance with `mode`
pub fn into_inner(self) -> T { // which also must match `self.mode` due to the safety precondition.
self.data.into_inner() unsafe {
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.data.get_mut()
}
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
let mode = self.mode;
// SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
match mode { match mode {
Mode::NoSync => { Mode::NoSync => {
let cell = unsafe { &self.mode_union.no_sync }; if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
let was_unlocked = cell.get() != LOCKED; lock_held()
if was_unlocked {
cell.set(LOCKED);
} }
was_unlocked
} }
Mode::Sync => unsafe { self.mode_union.sync.try_lock() }, Mode::Sync => self.mode_union.sync.lock(),
} }
.then(|| LockGuard { lock: self, marker: PhantomData, mode })
}
/// This acquires the lock assuming synchronization is in a specific mode.
///
/// Safety
/// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
/// true on lock creation.
#[inline(always)]
#[track_caller]
pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
#[inline(never)]
#[track_caller]
#[cold]
fn lock_held() -> ! {
panic!("lock was already held")
}
// SAFETY: This is safe since the union fields are used in accordance with `mode`
// which also must match `self.mode` due to the safety precondition.
unsafe {
match mode {
Mode::NoSync => {
if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
lock_held()
}
}
Mode::Sync => self.mode_union.sync.lock(),
}
}
LockGuard { lock: self, marker: PhantomData, mode }
}
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
unsafe { self.lock_assume(self.mode) }
} }
LockGuard { lock: self, marker: PhantomData, mode }
} }
#[cfg(parallel_compiler)] #[inline(always)]
unsafe impl<T: DynSend> DynSend for Lock<T> {} #[track_caller]
#[cfg(parallel_compiler)] pub fn lock(&self) -> LockGuard<'_, T> {
unsafe impl<T: DynSend> DynSync for Lock<T> {} unsafe { self.lock_assume(self.mode) }
}
mod no_sync {
use std::cell::RefCell;
#[doc(no_inline)]
pub use std::cell::RefMut as LockGuard;
use super::Mode;
pub struct Lock<T>(RefCell<T>);
impl<T> Lock<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
Lock(RefCell::new(inner))
}
#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
self.0.try_borrow_mut().ok()
}
#[inline(always)]
#[track_caller]
// This is unsafe to match the API for the `parallel_compiler` case.
pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
} }
} }
unsafe impl<T: DynSend> DynSend for Lock<T> {}
unsafe impl<T: DynSend> DynSync for Lock<T> {}
impl<T> Lock<T> { impl<T> Lock<T> {
#[inline(always)] #[inline(always)]
#[track_caller] #[track_caller]

View File

@ -6,14 +6,11 @@
use std::any::Any; use std::any::Any;
use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind}; use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind};
#[cfg(not(parallel_compiler))]
pub use disabled::*;
#[cfg(parallel_compiler)]
pub use enabled::*;
use parking_lot::Mutex; use parking_lot::Mutex;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
use crate::FatalErrorMarker; use crate::FatalErrorMarker;
use crate::sync::IntoDynSyncSend; use crate::sync::{DynSend, DynSync, FromDyn, IntoDynSyncSend, mode};
/// A guard used to hold panics that occur during a parallel section to later by unwound. /// A guard used to hold panics that occur during a parallel section to later by unwound.
/// This is used for the parallel compiler to prevent fatal errors from non-deterministically /// This is used for the parallel compiler to prevent fatal errors from non-deterministically
@ -49,65 +46,23 @@ pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
ret ret
} }
mod disabled { pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
use crate::sync::parallel_guard; where
A: FnOnce() -> RA,
#[macro_export] B: FnOnce() -> RB,
#[cfg(not(parallel_compiler))] {
macro_rules! parallel { let (a, b) = parallel_guard(|guard| {
($($blocks:block),*) => {{ let a = guard.run(oper_a);
$crate::sync::parallel_guard(|guard| { let b = guard.run(oper_b);
$(guard.run(|| $blocks);)* (a, b)
}); });
}} (a.unwrap(), b.unwrap())
}
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA,
B: FnOnce() -> RB,
{
let (a, b) = parallel_guard(|guard| {
let a = guard.run(oper_a);
let b = guard.run(oper_b);
(a, b)
});
(a.unwrap(), b.unwrap())
}
pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item)) {
parallel_guard(|guard| {
t.into_iter().for_each(|i| {
guard.run(|| for_each(i));
});
})
}
pub fn try_par_for_each_in<T: IntoIterator, E>(
t: T,
mut for_each: impl FnMut(T::Item) -> Result<(), E>,
) -> Result<(), E> {
parallel_guard(|guard| {
t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
})
}
pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
t: T,
mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
) -> C {
parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
}
} }
#[cfg(parallel_compiler)] /// Runs a list of blocks in parallel. The first block is executed immediately on
mod enabled { /// the current thread. Use that for the longest running block.
use crate::sync::{DynSend, DynSync, FromDyn, mode, parallel_guard}; #[macro_export]
macro_rules! parallel {
/// Runs a list of blocks in parallel. The first block is executed immediately on
/// the current thread. Use that for the longest running block.
#[macro_export]
macro_rules! parallel {
(impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => { (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
parallel!(impl $fblock [$block, $($c,)*] [$($rest),*]) parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
}; };
@ -139,92 +94,89 @@ mod enabled {
}; };
} }
// This function only works when `mode::is_dyn_thread_safe()`. // This function only works when `mode::is_dyn_thread_safe()`.
pub fn scope<'scope, OP, R>(op: OP) -> R pub fn scope<'scope, OP, R>(op: OP) -> R
where where
OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend, OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
R: DynSend, R: DynSend,
{ {
let op = FromDyn::from(op); let op = FromDyn::from(op);
rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
} }
#[inline] #[inline]
pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB) pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
where where
A: FnOnce() -> RA + DynSend, A: FnOnce() -> RA + DynSend,
B: FnOnce() -> RB + DynSend, B: FnOnce() -> RB + DynSend,
{ {
if mode::is_dyn_thread_safe() { if mode::is_dyn_thread_safe() {
let oper_a = FromDyn::from(oper_a); let oper_a = FromDyn::from(oper_a);
let oper_b = FromDyn::from(oper_b); let oper_b = FromDyn::from(oper_b);
let (a, b) = parallel_guard(|guard| { let (a, b) = parallel_guard(|guard| {
rayon::join( rayon::join(
move || guard.run(move || FromDyn::from(oper_a.into_inner()())), move || guard.run(move || FromDyn::from(oper_a.into_inner()())),
move || guard.run(move || FromDyn::from(oper_b.into_inner()())), move || guard.run(move || FromDyn::from(oper_b.into_inner()())),
) )
});
(a.unwrap().into_inner(), b.unwrap().into_inner())
} else {
super::disabled::join(oper_a, oper_b)
}
}
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
t: T,
for_each: impl Fn(I) + DynSync + DynSend,
) {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let for_each = FromDyn::from(for_each);
t.into_par_iter().for_each(|i| {
guard.run(|| for_each(i));
});
} else {
t.into_iter().for_each(|i| {
guard.run(|| for_each(i));
});
}
}); });
} (a.unwrap().into_inner(), b.unwrap().into_inner())
} else {
pub fn try_par_for_each_in< serial_join(oper_a, oper_b)
T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
E: Send,
>(
t: T,
for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
) -> Result<(), E> {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let for_each = FromDyn::from(for_each);
t.into_par_iter()
.filter_map(|i| guard.run(|| for_each(i)))
.reduce(|| Ok(()), Result::and)
} else {
t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
}
})
}
pub fn par_map<
I,
T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
R: std::marker::Send,
C: FromIterator<R> + FromParallelIterator<R>,
>(
t: T,
map: impl Fn(I) -> R + DynSync + DynSend,
) -> C {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let map = FromDyn::from(map);
t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
} else {
t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
}
})
} }
} }
pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
t: T,
for_each: impl Fn(I) + DynSync + DynSend,
) {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let for_each = FromDyn::from(for_each);
t.into_par_iter().for_each(|i| {
guard.run(|| for_each(i));
});
} else {
t.into_iter().for_each(|i| {
guard.run(|| for_each(i));
});
}
});
}
pub fn try_par_for_each_in<
T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
E: Send,
>(
t: T,
for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
) -> Result<(), E> {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let for_each = FromDyn::from(for_each);
t.into_par_iter()
.filter_map(|i| guard.run(|| for_each(i)))
.reduce(|| Ok(()), Result::and)
} else {
t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
}
})
}
pub fn par_map<
I,
T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
R: std::marker::Send,
C: FromIterator<R> + FromParallelIterator<R>,
>(
t: T,
map: impl Fn(I) -> R + DynSync + DynSend,
) -> C {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let map = FromDyn::from(map);
t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
} else {
t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
}
})
}

View File

@ -4,40 +4,23 @@ use rustc_index::Idx;
#[derive(Default)] #[derive(Default)]
pub struct AppendOnlyIndexVec<I: Idx, T: Copy> { pub struct AppendOnlyIndexVec<I: Idx, T: Copy> {
#[cfg(not(parallel_compiler))]
vec: elsa::vec::FrozenVec<T>,
#[cfg(parallel_compiler)]
vec: elsa::sync::LockFreeFrozenVec<T>, vec: elsa::sync::LockFreeFrozenVec<T>,
_marker: PhantomData<fn(&I)>, _marker: PhantomData<fn(&I)>,
} }
impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> { impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self { vec: elsa::sync::LockFreeFrozenVec::new(), _marker: PhantomData }
#[cfg(not(parallel_compiler))]
vec: elsa::vec::FrozenVec::new(),
#[cfg(parallel_compiler)]
vec: elsa::sync::LockFreeFrozenVec::new(),
_marker: PhantomData,
}
} }
pub fn push(&self, val: T) -> I { pub fn push(&self, val: T) -> I {
#[cfg(not(parallel_compiler))]
let i = self.vec.len();
#[cfg(not(parallel_compiler))]
self.vec.push(val);
#[cfg(parallel_compiler)]
let i = self.vec.push(val); let i = self.vec.push(val);
I::new(i) I::new(i)
} }
pub fn get(&self, i: I) -> Option<T> { pub fn get(&self, i: I) -> Option<T> {
let i = i.index(); let i = i.index();
#[cfg(not(parallel_compiler))] self.vec.get(i)
return self.vec.get_copy(i);
#[cfg(parallel_compiler)]
return self.vec.get(i);
} }
} }

View File

@ -5,8 +5,9 @@ use std::ptr;
use std::sync::Arc; use std::sync::Arc;
use parking_lot::Mutex; use parking_lot::Mutex;
#[cfg(parallel_compiler)]
use {crate::outline, crate::sync::CacheAligned}; use crate::outline;
use crate::sync::CacheAligned;
/// A pointer to the `RegistryData` which uniquely identifies a registry. /// A pointer to the `RegistryData` which uniquely identifies a registry.
/// This identifier can be reused if the registry gets freed. /// This identifier can be reused if the registry gets freed.
@ -21,7 +22,6 @@ impl RegistryId {
/// ///
/// Note that there's a race possible where the identifier in `THREAD_DATA` could be reused /// Note that there's a race possible where the identifier in `THREAD_DATA` could be reused
/// so this can succeed from a different registry. /// so this can succeed from a different registry.
#[cfg(parallel_compiler)]
fn verify(self) -> usize { fn verify(self) -> usize {
let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get())); let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
@ -102,11 +102,7 @@ impl Registry {
/// worker local value through the `Deref` impl on the registry associated with the thread it was /// worker local value through the `Deref` impl on the registry associated with the thread it was
/// created on. It will panic otherwise. /// created on. It will panic otherwise.
pub struct WorkerLocal<T> { pub struct WorkerLocal<T> {
#[cfg(not(parallel_compiler))]
local: T,
#[cfg(parallel_compiler)]
locals: Box<[CacheAligned<T>]>, locals: Box<[CacheAligned<T>]>,
#[cfg(parallel_compiler)]
registry: Registry, registry: Registry,
} }
@ -114,7 +110,6 @@ pub struct WorkerLocal<T> {
// or it will panic for threads without an associated local. So there isn't a need for `T` to do // or it will panic for threads without an associated local. So there isn't a need for `T` to do
// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id // it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse. // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
#[cfg(parallel_compiler)]
unsafe impl<T: Send> Sync for WorkerLocal<T> {} unsafe impl<T: Send> Sync for WorkerLocal<T> {}
impl<T> WorkerLocal<T> { impl<T> WorkerLocal<T> {
@ -122,33 +117,17 @@ impl<T> WorkerLocal<T> {
/// value this worker local should take for each thread in the registry. /// value this worker local should take for each thread in the registry.
#[inline] #[inline]
pub fn new<F: FnMut(usize) -> T>(mut initial: F) -> WorkerLocal<T> { pub fn new<F: FnMut(usize) -> T>(mut initial: F) -> WorkerLocal<T> {
#[cfg(parallel_compiler)] let registry = Registry::current();
{ WorkerLocal {
let registry = Registry::current(); locals: (0..registry.0.thread_limit.get()).map(|i| CacheAligned(initial(i))).collect(),
WorkerLocal { registry,
locals: (0..registry.0.thread_limit.get())
.map(|i| CacheAligned(initial(i)))
.collect(),
registry,
}
}
#[cfg(not(parallel_compiler))]
{
WorkerLocal { local: initial(0) }
} }
} }
/// Returns the worker-local values for each thread /// Returns the worker-local values for each thread
#[inline] #[inline]
pub fn into_inner(self) -> impl Iterator<Item = T> { pub fn into_inner(self) -> impl Iterator<Item = T> {
#[cfg(parallel_compiler)] self.locals.into_vec().into_iter().map(|local| local.0)
{
self.locals.into_vec().into_iter().map(|local| local.0)
}
#[cfg(not(parallel_compiler))]
{
std::iter::once(self.local)
}
} }
} }
@ -156,13 +135,6 @@ impl<T> Deref for WorkerLocal<T> {
type Target = T; type Target = T;
#[inline(always)] #[inline(always)]
#[cfg(not(parallel_compiler))]
fn deref(&self) -> &T {
&self.local
}
#[inline(always)]
#[cfg(parallel_compiler)]
fn deref(&self) -> &T { fn deref(&self) -> &T {
// This is safe because `verify` will only return values less than // This is safe because `verify` will only return values less than
// `self.registry.thread_limit` which is the size of the `self.locals` array. // `self.registry.thread_limit` which is the size of the `self.locals` array.

View File

@ -77,9 +77,4 @@ rustc_randomized_layouts = [
'rustc_index/rustc_randomized_layouts', 'rustc_index/rustc_randomized_layouts',
'rustc_middle/rustc_randomized_layouts' 'rustc_middle/rustc_randomized_layouts'
] ]
rustc_use_parallel_compiler = [
'rustc_data_structures/rustc_use_parallel_compiler',
'rustc_interface/rustc_use_parallel_compiler',
'rustc_middle/rustc_use_parallel_compiler'
]
# tidy-alphabetical-end # tidy-alphabetical-end

View File

@ -19,8 +19,3 @@ rustc_span = { path = "../rustc_span" }
tracing = "0.1" tracing = "0.1"
unic-langid = { version = "0.9.0", features = ["macros"] } unic-langid = { version = "0.9.0", features = ["macros"] }
# tidy-alphabetical-end # tidy-alphabetical-end
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ['rustc_baked_icu_data/rustc_use_parallel_compiler']
# tidy-alphabetical-end

View File

@ -8,12 +8,9 @@
// tidy-alphabetical-end // tidy-alphabetical-end
use std::borrow::Cow; use std::borrow::Cow;
#[cfg(not(parallel_compiler))]
use std::cell::LazyCell as Lazy;
use std::error::Error; use std::error::Error;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
#[cfg(parallel_compiler)] use std::sync::LazyLock;
use std::sync::LazyLock as Lazy;
use std::{fmt, fs, io}; use std::{fmt, fs, io};
use fluent_bundle::FluentResource; use fluent_bundle::FluentResource;
@ -21,9 +18,6 @@ pub use fluent_bundle::types::FluentType;
pub use fluent_bundle::{self, FluentArgs, FluentError, FluentValue}; pub use fluent_bundle::{self, FluentArgs, FluentError, FluentValue};
use fluent_syntax::parser::ParserError; use fluent_syntax::parser::ParserError;
use icu_provider_adapters::fallback::{LocaleFallbackProvider, LocaleFallbacker}; use icu_provider_adapters::fallback::{LocaleFallbackProvider, LocaleFallbacker};
#[cfg(not(parallel_compiler))]
use intl_memoizer::IntlLangMemoizer;
#[cfg(parallel_compiler)]
use intl_memoizer::concurrent::IntlLangMemoizer; use intl_memoizer::concurrent::IntlLangMemoizer;
use rustc_data_structures::sync::{IntoDynSyncSend, Lrc}; use rustc_data_structures::sync::{IntoDynSyncSend, Lrc};
use rustc_macros::{Decodable, Encodable}; use rustc_macros::{Decodable, Encodable};
@ -34,12 +28,6 @@ pub use unic_langid::{LanguageIdentifier, langid};
pub type FluentBundle = pub type FluentBundle =
IntoDynSyncSend<fluent_bundle::bundle::FluentBundle<FluentResource, IntlLangMemoizer>>; IntoDynSyncSend<fluent_bundle::bundle::FluentBundle<FluentResource, IntlLangMemoizer>>;
#[cfg(not(parallel_compiler))]
fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
IntoDynSyncSend(fluent_bundle::bundle::FluentBundle::new(locales))
}
#[cfg(parallel_compiler)]
fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle { fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
IntoDynSyncSend(fluent_bundle::bundle::FluentBundle::new_concurrent(locales)) IntoDynSyncSend(fluent_bundle::bundle::FluentBundle::new_concurrent(locales))
} }
@ -217,7 +205,7 @@ fn register_functions(bundle: &mut FluentBundle) {
/// Type alias for the result of `fallback_fluent_bundle` - a reference-counted pointer to a lazily /// Type alias for the result of `fallback_fluent_bundle` - a reference-counted pointer to a lazily
/// evaluated fluent bundle. /// evaluated fluent bundle.
pub type LazyFallbackBundle = Lrc<Lazy<FluentBundle, impl FnOnce() -> FluentBundle>>; pub type LazyFallbackBundle = Lrc<LazyLock<FluentBundle, impl FnOnce() -> FluentBundle>>;
/// Return the default `FluentBundle` with standard "en-US" diagnostic messages. /// Return the default `FluentBundle` with standard "en-US" diagnostic messages.
#[instrument(level = "trace", skip(resources))] #[instrument(level = "trace", skip(resources))]
@ -225,7 +213,7 @@ pub fn fallback_fluent_bundle(
resources: Vec<&'static str>, resources: Vec<&'static str>,
with_directionality_markers: bool, with_directionality_markers: bool,
) -> LazyFallbackBundle { ) -> LazyFallbackBundle {
Lrc::new(Lazy::new(move || { Lrc::new(LazyLock::new(move || {
let mut fallback_bundle = new_bundle(vec![langid!("en-US")]); let mut fallback_bundle = new_bundle(vec![langid!("en-US")]);
register_functions(&mut fallback_bundle); register_functions(&mut fallback_bundle);
@ -548,15 +536,6 @@ pub fn fluent_value_from_str_list_sep_by_and(l: Vec<Cow<'_, str>>) -> FluentValu
Cow::Owned(result) Cow::Owned(result)
} }
#[cfg(not(parallel_compiler))]
fn as_string_threadsafe(
&self,
_intls: &intl_memoizer::concurrent::IntlLangMemoizer,
) -> Cow<'static, str> {
unreachable!("`as_string_threadsafe` is not used in non-parallel rustc")
}
#[cfg(parallel_compiler)]
fn as_string_threadsafe( fn as_string_threadsafe(
&self, &self,
intls: &intl_memoizer::concurrent::IntlLangMemoizer, intls: &intl_memoizer::concurrent::IntlLangMemoizer,

View File

@ -36,8 +36,3 @@ features = [
"Win32_Security", "Win32_Security",
"Win32_System_Threading", "Win32_System_Threading",
] ]
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ['rustc_error_messages/rustc_use_parallel_compiler']
# tidy-alphabetical-end

View File

@ -26,16 +26,11 @@ fn make_dummy(ftl: &'static str) -> Dummy {
let langid_en = langid!("en-US"); let langid_en = langid!("en-US");
#[cfg(parallel_compiler)]
let mut bundle: FluentBundle = let mut bundle: FluentBundle =
IntoDynSyncSend(crate::fluent_bundle::bundle::FluentBundle::new_concurrent(vec![ IntoDynSyncSend(crate::fluent_bundle::bundle::FluentBundle::new_concurrent(vec![
langid_en, langid_en,
])); ]));
#[cfg(not(parallel_compiler))]
let mut bundle: FluentBundle =
IntoDynSyncSend(crate::fluent_bundle::bundle::FluentBundle::new(vec![langid_en]));
bundle.add_resource(resource).expect("Failed to add FTL resources to the bundle."); bundle.add_resource(resource).expect("Failed to add FTL resources to the bundle.");
Dummy { bundle } Dummy { bundle }

View File

@ -5,8 +5,8 @@ edition = "2021"
[dependencies] [dependencies]
# tidy-alphabetical-start # tidy-alphabetical-start
rustc-rayon = { version = "0.5.0", optional = true } rustc-rayon = { version = "0.5.0" }
rustc-rayon-core = { version = "0.5.0", optional = true } rustc-rayon-core = { version = "0.5.0" }
rustc_ast = { path = "../rustc_ast" } rustc_ast = { path = "../rustc_ast" }
rustc_ast_lowering = { path = "../rustc_ast_lowering" } rustc_ast_lowering = { path = "../rustc_ast_lowering" }
rustc_ast_passes = { path = "../rustc_ast_passes" } rustc_ast_passes = { path = "../rustc_ast_passes" }
@ -54,10 +54,4 @@ tracing = "0.1"
[features] [features]
# tidy-alphabetical-start # tidy-alphabetical-start
llvm = ['dep:rustc_codegen_llvm'] llvm = ['dep:rustc_codegen_llvm']
rustc_use_parallel_compiler = [
'dep:rustc-rayon',
'dep:rustc-rayon-core',
'rustc_query_impl/rustc_use_parallel_compiler',
'rustc_errors/rustc_use_parallel_compiler'
]
# tidy-alphabetical-end # tidy-alphabetical-end

View File

@ -6,7 +6,6 @@ use std::{env, iter, thread};
use rustc_ast as ast; use rustc_ast as ast;
use rustc_codegen_ssa::traits::CodegenBackend; use rustc_codegen_ssa::traits::CodegenBackend;
#[cfg(parallel_compiler)]
use rustc_data_structures::sync; use rustc_data_structures::sync;
use rustc_metadata::{DylibError, load_symbol_from_dylib}; use rustc_metadata::{DylibError, load_symbol_from_dylib};
use rustc_middle::ty::CurrentGcx; use rustc_middle::ty::CurrentGcx;
@ -117,19 +116,6 @@ fn run_in_thread_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
}) })
} }
#[cfg(not(parallel_compiler))]
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
thread_builder_diag: &EarlyDiagCtxt,
edition: Edition,
_threads: usize,
sm_inputs: SourceMapInputs,
f: F,
) -> R {
let thread_stack_size = init_stack_size(thread_builder_diag);
run_in_thread_with_globals(thread_stack_size, edition, sm_inputs, f)
}
#[cfg(parallel_compiler)]
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>( pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
thread_builder_diag: &EarlyDiagCtxt, thread_builder_diag: &EarlyDiagCtxt,
edition: Edition, edition: Edition,

View File

@ -11,7 +11,7 @@ either = "1.5.0"
field-offset = "0.3.5" field-offset = "0.3.5"
gsgdt = "0.1.2" gsgdt = "0.1.2"
polonius-engine = "0.13.0" polonius-engine = "0.13.0"
rustc-rayon-core = { version = "0.5.0", optional = true } rustc-rayon-core = { version = "0.5.0" }
rustc_abi = { path = "../rustc_abi" } rustc_abi = { path = "../rustc_abi" }
rustc_apfloat = "0.2.0" rustc_apfloat = "0.2.0"
rustc_arena = { path = "../rustc_arena" } rustc_arena = { path = "../rustc_arena" }
@ -43,5 +43,4 @@ tracing = "0.1"
[features] [features]
# tidy-alphabetical-start # tidy-alphabetical-start
rustc_randomized_layouts = [] rustc_randomized_layouts = []
rustc_use_parallel_compiler = ["dep:rustc-rayon-core"]
# tidy-alphabetical-end # tidy-alphabetical-end

View File

@ -22,9 +22,9 @@ use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap}; use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal; use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{self, FreezeReadGuard, Lock, Lrc, RwLock, WorkerLocal}; use rustc_data_structures::sync::{
#[cfg(parallel_compiler)] self, DynSend, DynSync, FreezeReadGuard, Lock, Lrc, RwLock, WorkerLocal,
use rustc_data_structures::sync::{DynSend, DynSync}; };
use rustc_data_structures::unord::UnordSet; use rustc_data_structures::unord::UnordSet;
use rustc_errors::{ use rustc_errors::{
Applicability, Diag, DiagCtxtHandle, ErrorGuaranteed, LintDiagnostic, MultiSpan, Applicability, Diag, DiagCtxtHandle, ErrorGuaranteed, LintDiagnostic, MultiSpan,
@ -1259,9 +1259,7 @@ pub struct TyCtxt<'tcx> {
} }
// Explicitly implement `DynSync` and `DynSend` for `TyCtxt` to short circuit trait resolution. // Explicitly implement `DynSync` and `DynSend` for `TyCtxt` to short circuit trait resolution.
#[cfg(parallel_compiler)]
unsafe impl DynSend for TyCtxt<'_> {} unsafe impl DynSend for TyCtxt<'_> {}
#[cfg(parallel_compiler)]
unsafe impl DynSync for TyCtxt<'_> {} unsafe impl DynSync for TyCtxt<'_> {}
fn _assert_tcx_fields() { fn _assert_tcx_fields() {
sync::assert_dyn_sync::<&'_ GlobalCtxt<'_>>(); sync::assert_dyn_sync::<&'_ GlobalCtxt<'_>>();
@ -1383,9 +1381,7 @@ pub struct CurrentGcx {
value: Lrc<RwLock<Option<*const ()>>>, value: Lrc<RwLock<Option<*const ()>>>,
} }
#[cfg(parallel_compiler)]
unsafe impl DynSend for CurrentGcx {} unsafe impl DynSend for CurrentGcx {}
#[cfg(parallel_compiler)]
unsafe impl DynSync for CurrentGcx {} unsafe impl DynSync for CurrentGcx {}
impl CurrentGcx { impl CurrentGcx {

View File

@ -1,5 +1,3 @@
#[cfg(not(parallel_compiler))]
use std::cell::Cell;
use std::{mem, ptr}; use std::{mem, ptr};
use rustc_data_structures::sync::{self, Lock}; use rustc_data_structures::sync::{self, Lock};
@ -50,16 +48,8 @@ impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
} }
// Import the thread-local variable from Rayon, which is preserved for Rayon jobs. // Import the thread-local variable from Rayon, which is preserved for Rayon jobs.
#[cfg(parallel_compiler)]
use rayon_core::tlv::TLV; use rayon_core::tlv::TLV;
// Otherwise define our own
#[cfg(not(parallel_compiler))]
thread_local! {
/// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
static TLV: Cell<*const ()> = const { Cell::new(ptr::null()) };
}
#[inline] #[inline]
fn erase(context: &ImplicitCtxt<'_, '_>) -> *const () { fn erase(context: &ImplicitCtxt<'_, '_>) -> *const () {
context as *const _ as *const () context as *const _ as *const ()

View File

@ -143,12 +143,10 @@ impl<'tcx> rustc_type_ir::inherent::IntoKind for GenericArg<'tcx> {
} }
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSend for GenericArg<'tcx> where unsafe impl<'tcx> rustc_data_structures::sync::DynSend for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSend &'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSend
{ {
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSync for GenericArg<'tcx> where unsafe impl<'tcx> rustc_data_structures::sync::DynSync for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSync &'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSync
{ {

View File

@ -5,7 +5,6 @@ use std::ops::Deref;
use std::{fmt, iter, mem, ptr, slice}; use std::{fmt, iter, mem, ptr, slice};
use rustc_data_structures::aligned::{Aligned, align_of}; use rustc_data_structures::aligned::{Aligned, align_of};
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::DynSync; use rustc_data_structures::sync::DynSync;
use rustc_serialize::{Encodable, Encoder}; use rustc_serialize::{Encodable, Encoder};
@ -259,7 +258,6 @@ impl<'a, H, T: Copy> IntoIterator for &'a RawList<H, T> {
unsafe impl<H: Sync, T: Sync> Sync for RawList<H, T> {} unsafe impl<H: Sync, T: Sync> Sync for RawList<H, T> {}
// We need this since `List` uses extern type `OpaqueListContents`. // We need this since `List` uses extern type `OpaqueListContents`.
#[cfg(parallel_compiler)]
unsafe impl<H: DynSync, T: DynSync> DynSync for RawList<H, T> {} unsafe impl<H: DynSync, T: DynSync> DynSync for RawList<H, T> {}
// Safety: // Safety:

View File

@ -487,12 +487,10 @@ impl<'tcx> rustc_type_ir::inherent::IntoKind for Term<'tcx> {
} }
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSend for Term<'tcx> where unsafe impl<'tcx> rustc_data_structures::sync::DynSend for Term<'tcx> where
&'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSend &'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSend
{ {
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSync for Term<'tcx> where unsafe impl<'tcx> rustc_data_structures::sync::DynSync for Term<'tcx> where
&'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSync &'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSync
{ {

View File

@ -19,8 +19,3 @@ rustc_span = { path = "../rustc_span" }
thin-vec = "0.2.12" thin-vec = "0.2.12"
tracing = "0.1" tracing = "0.1"
# tidy-alphabetical-end # tidy-alphabetical-end
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ["rustc_query_system/rustc_use_parallel_compiler"]
# tidy-alphabetical-end

View File

@ -6,7 +6,7 @@ edition = "2021"
[dependencies] [dependencies]
# tidy-alphabetical-start # tidy-alphabetical-start
parking_lot = "0.12" parking_lot = "0.12"
rustc-rayon-core = { version = "0.5.0", optional = true } rustc-rayon-core = { version = "0.5.0" }
rustc_ast = { path = "../rustc_ast" } rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" } rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" } rustc_errors = { path = "../rustc_errors" }
@ -23,8 +23,3 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
thin-vec = "0.2.12" thin-vec = "0.2.12"
tracing = "0.1" tracing = "0.1"
# tidy-alphabetical-end # tidy-alphabetical-end
[features]
# tidy-alphabetical-start
rustc_use_parallel_compiler = ["dep:rustc-rayon-core"]
# tidy-alphabetical-end

View File

@ -837,12 +837,6 @@ impl<D: Deps> DepGraphData<D> {
) -> Option<DepNodeIndex> { ) -> Option<DepNodeIndex> {
let frame = MarkFrame { index: prev_dep_node_index, parent: frame }; let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
#[cfg(not(parallel_compiler))]
{
debug_assert!(!self.dep_node_exists(dep_node));
debug_assert!(self.colors.get(prev_dep_node_index).is_none());
}
// We never try to mark eval_always nodes as green // We never try to mark eval_always nodes as green
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind)); debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@ -871,13 +865,6 @@ impl<D: Deps> DepGraphData<D> {
// Maybe store a list on disk and encode this fact in the DepNodeState // Maybe store a list on disk and encode this fact in the DepNodeState
let side_effects = qcx.load_side_effects(prev_dep_node_index); let side_effects = qcx.load_side_effects(prev_dep_node_index);
#[cfg(not(parallel_compiler))]
debug_assert!(
self.colors.get(prev_dep_node_index).is_none(),
"DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
insertion for {dep_node:?}"
);
if side_effects.maybe_any() { if side_effects.maybe_any() {
qcx.dep_context().dep_graph().with_query_deserialization(|| { qcx.dep_context().dep_graph().with_query_deserialization(|| {
self.emit_side_effects(qcx, dep_node_index, side_effects) self.emit_side_effects(qcx, dep_node_index, side_effects)

View File

@ -1,21 +1,16 @@
use std::hash::Hash; use std::hash::Hash;
use std::io::Write; use std::io::Write;
use std::iter;
use std::num::NonZero; use std::num::NonZero;
use std::sync::Arc;
use rustc_data_structures::fx::FxHashMap; use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::jobserver;
use rustc_errors::{Diag, DiagCtxtHandle}; use rustc_errors::{Diag, DiagCtxtHandle};
use rustc_hir::def::DefKind; use rustc_hir::def::DefKind;
use rustc_session::Session; use rustc_session::Session;
use rustc_span::Span; use rustc_span::{DUMMY_SP, Span};
#[cfg(parallel_compiler)]
use {
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::jobserver,
rustc_span::DUMMY_SP,
std::iter,
std::sync::Arc,
};
use crate::dep_graph::DepContext; use crate::dep_graph::DepContext;
use crate::error::CycleStack; use crate::error::CycleStack;
@ -41,17 +36,14 @@ impl QueryJobId {
map.get(&self).unwrap().query.clone() map.get(&self).unwrap().query.clone()
} }
#[cfg(parallel_compiler)]
fn span(self, map: &QueryMap) -> Span { fn span(self, map: &QueryMap) -> Span {
map.get(&self).unwrap().job.span map.get(&self).unwrap().job.span
} }
#[cfg(parallel_compiler)]
fn parent(self, map: &QueryMap) -> Option<QueryJobId> { fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent map.get(&self).unwrap().job.parent
} }
#[cfg(parallel_compiler)]
fn latch(self, map: &QueryMap) -> Option<&QueryLatch> { fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
map.get(&self).unwrap().job.latch.as_ref() map.get(&self).unwrap().job.latch.as_ref()
} }
@ -75,7 +67,6 @@ pub struct QueryJob {
pub parent: Option<QueryJobId>, pub parent: Option<QueryJobId>,
/// The latch that is used to wait on this job. /// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
latch: Option<QueryLatch>, latch: Option<QueryLatch>,
} }
@ -83,16 +74,9 @@ impl QueryJob {
/// Creates a new query job. /// Creates a new query job.
#[inline] #[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self { pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
QueryJob { QueryJob { id, span, parent, latch: None }
id,
span,
parent,
#[cfg(parallel_compiler)]
latch: None,
}
} }
#[cfg(parallel_compiler)]
pub(super) fn latch(&mut self) -> QueryLatch { pub(super) fn latch(&mut self) -> QueryLatch {
if self.latch.is_none() { if self.latch.is_none() {
self.latch = Some(QueryLatch::new()); self.latch = Some(QueryLatch::new());
@ -106,11 +90,8 @@ impl QueryJob {
/// as there are no concurrent jobs which could be waiting on us /// as there are no concurrent jobs which could be waiting on us
#[inline] #[inline]
pub fn signal_complete(self) { pub fn signal_complete(self) {
#[cfg(parallel_compiler)] if let Some(latch) = self.latch {
{ latch.set();
if let Some(latch) = self.latch {
latch.set();
}
} }
} }
} }
@ -176,7 +157,6 @@ impl QueryJobId {
} }
} }
#[cfg(parallel_compiler)]
#[derive(Debug)] #[derive(Debug)]
struct QueryWaiter { struct QueryWaiter {
query: Option<QueryJobId>, query: Option<QueryJobId>,
@ -185,7 +165,6 @@ struct QueryWaiter {
cycle: Mutex<Option<CycleError>>, cycle: Mutex<Option<CycleError>>,
} }
#[cfg(parallel_compiler)]
impl QueryWaiter { impl QueryWaiter {
fn notify(&self, registry: &rayon_core::Registry) { fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry); rayon_core::mark_unblocked(registry);
@ -193,20 +172,17 @@ impl QueryWaiter {
} }
} }
#[cfg(parallel_compiler)]
#[derive(Debug)] #[derive(Debug)]
struct QueryLatchInfo { struct QueryLatchInfo {
complete: bool, complete: bool,
waiters: Vec<Arc<QueryWaiter>>, waiters: Vec<Arc<QueryWaiter>>,
} }
#[cfg(parallel_compiler)]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub(super) struct QueryLatch { pub(super) struct QueryLatch {
info: Arc<Mutex<QueryLatchInfo>>, info: Arc<Mutex<QueryLatchInfo>>,
} }
#[cfg(parallel_compiler)]
impl QueryLatch { impl QueryLatch {
fn new() -> Self { fn new() -> Self {
QueryLatch { QueryLatch {
@ -273,7 +249,6 @@ impl QueryLatch {
} }
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize); type Waiter = (QueryJobId, usize);
/// Visits all the non-resumable and resumable waiters of a query. /// Visits all the non-resumable and resumable waiters of a query.
@ -285,7 +260,6 @@ type Waiter = (QueryJobId, usize);
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter. /// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None. /// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>> fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>, F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
@ -316,7 +290,6 @@ where
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP. /// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing /// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle. /// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check( fn cycle_check(
query_map: &QueryMap, query_map: &QueryMap,
query: QueryJobId, query: QueryJobId,
@ -357,7 +330,6 @@ fn cycle_check(
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query) /// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`. /// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search. /// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root( fn connected_to_root(
query_map: &QueryMap, query_map: &QueryMap,
query: QueryJobId, query: QueryJobId,
@ -380,7 +352,6 @@ fn connected_to_root(
} }
// Deterministically pick an query from a list // Deterministically pick an query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where where
F: Fn(&T) -> (Span, QueryJobId), F: Fn(&T) -> (Span, QueryJobId),
@ -406,7 +377,6 @@ where
/// the function return true. /// the function return true.
/// If a cycle was not found, the starting query is removed from `jobs` and /// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false. /// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle( fn remove_cycle(
query_map: &QueryMap, query_map: &QueryMap,
jobs: &mut Vec<QueryJobId>, jobs: &mut Vec<QueryJobId>,
@ -511,7 +481,6 @@ fn remove_cycle(
/// uses a query latch and then resuming that waiter. /// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches /// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once. /// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) { pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) {
let mut wakelist = Vec::new(); let mut wakelist = Vec::new();
let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect(); let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

View File

@ -2,10 +2,9 @@ mod plumbing;
pub use self::plumbing::*; pub use self::plumbing::*;
mod job; mod job;
#[cfg(parallel_compiler)]
pub use self::job::break_query_cycles;
pub use self::job::{ pub use self::job::{
QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, print_query_stack, report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
report_cycle,
}; };
mod caches; mod caches;
@ -38,7 +37,6 @@ pub struct QueryStackFrame {
pub dep_kind: DepKind, pub dep_kind: DepKind,
/// This hash is used to deterministically pick /// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler. /// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
hash: Hash64, hash: Hash64,
} }
@ -51,18 +49,9 @@ impl QueryStackFrame {
def_kind: Option<DefKind>, def_kind: Option<DefKind>,
dep_kind: DepKind, dep_kind: DepKind,
ty_def_id: Option<DefId>, ty_def_id: Option<DefId>,
_hash: impl FnOnce() -> Hash64, hash: impl FnOnce() -> Hash64,
) -> Self { ) -> Self {
Self { Self { description, span, def_id, def_kind, ty_def_id, dep_kind, hash: hash() }
description,
span,
def_id,
def_kind,
ty_def_id,
dep_kind,
#[cfg(parallel_compiler)]
hash: _hash(),
}
} }
// FIXME(eddyb) Get more valid `Span`s on queries. // FIXME(eddyb) Get more valid `Span`s on queries.

View File

@ -13,7 +13,6 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)]
use rustc_data_structures::{outline, sync}; use rustc_data_structures::{outline, sync};
use rustc_errors::{Diag, FatalError, StashKey}; use rustc_errors::{Diag, FatalError, StashKey};
use rustc_span::{DUMMY_SP, Span}; use rustc_span::{DUMMY_SP, Span};
@ -25,9 +24,7 @@ use crate::HandleCycleError;
use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams}; use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext; use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache; use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)] use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
use crate::query::job::QueryLatch;
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, report_cycle};
use crate::query::{ use crate::query::{
QueryContext, QueryMap, QuerySideEffects, QueryStackFrame, SerializedDepNodeIndex, QueryContext, QueryMap, QuerySideEffects, QueryStackFrame, SerializedDepNodeIndex,
}; };
@ -263,7 +260,6 @@ where
} }
#[inline(always)] #[inline(always)]
#[cfg(parallel_compiler)]
fn wait_for_query<Q, Qcx>( fn wait_for_query<Q, Qcx>(
query: Q, query: Q,
qcx: Qcx, qcx: Qcx,
@ -334,7 +330,7 @@ where
// re-executing the query since `try_start` only checks that the query is not currently // re-executing the query since `try_start` only checks that the query is not currently
// executing, but another thread may have already completed the query and stores it result // executing, but another thread may have already completed the query and stores it result
// in the query cache. // in the query cache.
if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 { if qcx.dep_context().sess().threads() > 1 {
if let Some((value, index)) = query.query_cache(qcx).lookup(&key) { if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
qcx.dep_context().profiler().query_cache_hit(index.into()); qcx.dep_context().profiler().query_cache_hit(index.into());
return (value, Some(index)); return (value, Some(index));
@ -359,7 +355,6 @@ where
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
match entry.get_mut() { match entry.get_mut() {
QueryResult::Started(job) => { QueryResult::Started(job) => {
#[cfg(parallel_compiler)]
if sync::is_dyn_thread_safe() { if sync::is_dyn_thread_safe() {
// Get the latch out // Get the latch out
let latch = job.latch(); let latch = job.latch();

View File

@ -1300,7 +1300,6 @@ pub fn register_expn_id(
let expn_id = ExpnId { krate, local_id }; let expn_id = ExpnId { krate, local_id };
HygieneData::with(|hygiene_data| { HygieneData::with(|hygiene_data| {
let _old_data = hygiene_data.foreign_expn_data.insert(expn_id, data); let _old_data = hygiene_data.foreign_expn_data.insert(expn_id, data);
debug_assert!(_old_data.is_none() || cfg!(parallel_compiler));
let _old_hash = hygiene_data.foreign_expn_hashes.insert(expn_id, hash); let _old_hash = hygiene_data.foreign_expn_hashes.insert(expn_id, hash);
debug_assert!(_old_hash.is_none() || _old_hash == Some(hash)); debug_assert!(_old_hash.is_none() || _old_hash == Some(hash));
let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id); let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id);
@ -1423,18 +1422,7 @@ pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContext
ctxt_data = old.clone(); ctxt_data = old.clone();
} }
let dummy = std::mem::replace( hygiene_data.syntax_context_data[ctxt.as_u32() as usize] = ctxt_data;
&mut hygiene_data.syntax_context_data[ctxt.as_u32() as usize],
ctxt_data,
);
if cfg!(not(parallel_compiler)) {
// Make sure nothing weird happened while `decode_data` was running.
// We used `kw::Empty` for the dummy value and we expect nothing to be
// modifying the dummy entry.
// This does not hold for the parallel compiler as another thread may
// have inserted the fully decoded data.
assert_eq!(dummy.dollar_crate_name, kw::Empty);
}
}); });
// Mark the context as completed // Mark the context as completed

View File

@ -521,14 +521,6 @@ impl SpanData {
} }
} }
// The interner is pointed to by a thread local value which is only set on the main thread
// with parallelization is disabled. So we don't allow `Span` to transfer between threads
// to avoid panics and other errors, even though it would be memory safe to do so.
#[cfg(not(parallel_compiler))]
impl !Send for Span {}
#[cfg(not(parallel_compiler))]
impl !Sync for Span {}
impl PartialOrd for Span { impl PartialOrd for Span {
fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
PartialOrd::partial_cmp(&self.data(), &rhs.data()) PartialOrd::partial_cmp(&self.data(), &rhs.data())

View File

@ -594,8 +594,7 @@
# Build a multi-threaded rustc. This allows users to use parallel rustc # Build a multi-threaded rustc. This allows users to use parallel rustc
# via the unstable option `-Z threads=n`. # via the unstable option `-Z threads=n`.
# Since stable/beta channels only allow using stable features, # This option is deprecated and always true.
# `parallel-compiler = false` should be set for these channels.
#parallel-compiler = true #parallel-compiler = true
# The default linker that will be hard-coded into the generated # The default linker that will be hard-coded into the generated

View File

@ -1198,15 +1198,6 @@ impl Builder<'_> {
rustflags.arg("-Zinline-mir-preserve-debug"); rustflags.arg("-Zinline-mir-preserve-debug");
} }
if self.config.rustc_parallel
&& matches!(mode, Mode::ToolRustc | Mode::Rustc | Mode::Codegen)
{
// keep in sync with `bootstrap/lib.rs:Build::rustc_features`
// `cfg` option for rustc, `features` option for cargo, for conditional compilation
rustflags.arg("--cfg=parallel_compiler");
rustdocflags.arg("--cfg=parallel_compiler");
}
Cargo { Cargo {
command: cargo, command: cargo,
compiler, compiler,

View File

@ -278,7 +278,6 @@ pub struct Config {
pub rust_strip: bool, pub rust_strip: bool,
pub rust_frame_pointers: bool, pub rust_frame_pointers: bool,
pub rust_stack_protector: Option<String>, pub rust_stack_protector: Option<String>,
pub rustc_parallel: bool,
pub rustc_default_linker: Option<String>, pub rustc_default_linker: Option<String>,
pub rust_optimize_tests: bool, pub rust_optimize_tests: bool,
pub rust_dist_src: bool, pub rust_dist_src: bool,
@ -1224,7 +1223,6 @@ impl Config {
bindir: "bin".into(), bindir: "bin".into(),
dist_include_mingw_linker: true, dist_include_mingw_linker: true,
dist_compression_profile: "fast".into(), dist_compression_profile: "fast".into(),
rustc_parallel: true,
stdout_is_tty: std::io::stdout().is_terminal(), stdout_is_tty: std::io::stdout().is_terminal(),
stderr_is_tty: std::io::stderr().is_terminal(), stderr_is_tty: std::io::stderr().is_terminal(),
@ -1773,8 +1771,14 @@ impl Config {
config.rust_randomize_layout = randomize_layout.unwrap_or_default(); config.rust_randomize_layout = randomize_layout.unwrap_or_default();
config.llvm_tools_enabled = llvm_tools.unwrap_or(true); config.llvm_tools_enabled = llvm_tools.unwrap_or(true);
config.rustc_parallel =
parallel_compiler.unwrap_or(config.channel == "dev" || config.channel == "nightly"); // FIXME: Remove this option at the end of 2024.
if parallel_compiler.is_some() {
println!(
"WARNING: The `rust.parallel-compiler` option is deprecated and does nothing. The parallel compiler (with one thread) is now the default"
);
}
config.llvm_enzyme = config.llvm_enzyme =
llvm_enzyme.unwrap_or(config.channel == "dev" || config.channel == "nightly"); llvm_enzyme.unwrap_or(config.channel == "dev" || config.channel == "nightly");
config.rustc_default_linker = default_linker; config.rustc_default_linker = default_linker;

View File

@ -80,11 +80,8 @@ const EXTRA_CHECK_CFGS: &[(Option<Mode>, &str, Option<&[&'static str]>)] = &[
(Some(Mode::Rustc), "llvm_enzyme", None), (Some(Mode::Rustc), "llvm_enzyme", None),
(Some(Mode::Codegen), "llvm_enzyme", None), (Some(Mode::Codegen), "llvm_enzyme", None),
(Some(Mode::ToolRustc), "llvm_enzyme", None), (Some(Mode::ToolRustc), "llvm_enzyme", None),
(Some(Mode::Rustc), "parallel_compiler", None),
(Some(Mode::ToolRustc), "parallel_compiler", None),
(Some(Mode::ToolRustc), "rust_analyzer", None), (Some(Mode::ToolRustc), "rust_analyzer", None),
(Some(Mode::ToolStd), "rust_analyzer", None), (Some(Mode::ToolStd), "rust_analyzer", None),
(Some(Mode::Codegen), "parallel_compiler", None),
// Any library specific cfgs like `target_os`, `target_arch` should be put in // Any library specific cfgs like `target_os`, `target_arch` should be put in
// priority the `[lints.rust.unexpected_cfgs.check-cfg]` table // priority the `[lints.rust.unexpected_cfgs.check-cfg]` table
// in the appropriate `library/{std,alloc,core}/Cargo.toml` // in the appropriate `library/{std,alloc,core}/Cargo.toml`
@ -695,9 +692,6 @@ impl Build {
features.push("llvm"); features.push("llvm");
} }
// keep in sync with `bootstrap/compile.rs:rustc_cargo_env` // keep in sync with `bootstrap/compile.rs:rustc_cargo_env`
if self.config.rustc_parallel && check("rustc_use_parallel_compiler") {
features.push("rustc_use_parallel_compiler");
}
if self.config.rust_randomize_layout { if self.config.rust_randomize_layout {
features.push("rustc_randomized_layouts"); features.push("rustc_randomized_layouts");
} }

View File

@ -290,6 +290,11 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
severity: ChangeSeverity::Info, severity: ChangeSeverity::Info,
summary: "New option `llvm.offload` to control whether the llvm offload runtime for GPU support is built. Implicitly enables the openmp runtime as dependency.", summary: "New option `llvm.offload` to control whether the llvm offload runtime for GPU support is built. Implicitly enables the openmp runtime as dependency.",
}, },
ChangeInfo {
change_id: 132282,
severity: ChangeSeverity::Warning,
summary: "Deprecated `rust.parallel_compiler` as the compiler now always defaults to being parallel (with 1 thread)",
},
ChangeInfo { ChangeInfo {
change_id: 132494, change_id: 132494,
severity: ChangeSeverity::Info, severity: ChangeSeverity::Info,

View File

@ -120,9 +120,6 @@ if [ "$DEPLOY$DEPLOY_ALT" = "1" ]; then
if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions"
elif [ "$DEPLOY_ALT" != "" ]; then elif [ "$DEPLOY_ALT" != "" ]; then
if [ "$ALT_PARALLEL_COMPILER" = "" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.parallel-compiler=false"
fi
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-assertions" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-assertions"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.verify-llvm-ir" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.verify-llvm-ir"
fi fi
@ -263,7 +260,7 @@ fi
if [ "$RUN_CHECK_WITH_PARALLEL_QUERIES" != "" ]; then if [ "$RUN_CHECK_WITH_PARALLEL_QUERIES" != "" ]; then
rm -f config.toml rm -f config.toml
$SRC/configure --set change-id=99999999 --set rust.parallel-compiler $SRC/configure --set change-id=99999999
# Save the build metrics before we wipe the directory # Save the build metrics before we wipe the directory
if [ "$HAS_METRICS" = 1 ]; then if [ "$HAS_METRICS" = 1 ]; then

View File

@ -206,7 +206,7 @@ fn init_logging(early_dcx: &EarlyDiagCtxt) {
.with_verbose_exit(true) .with_verbose_exit(true)
.with_verbose_entry(true) .with_verbose_entry(true)
.with_indent_amount(2); .with_indent_amount(2);
#[cfg(all(parallel_compiler, debug_assertions))] #[cfg(debug_assertions)]
let layer = layer.with_thread_ids(true).with_thread_names(true); let layer = layer.with_thread_ids(true).with_thread_names(true);
use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::layer::SubscriberExt;