diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml index a2fc9d5c408..f85c30ac7ec 100644 --- a/compiler/rustc/Cargo.toml +++ b/compiler/rustc/Cargo.toml @@ -31,5 +31,4 @@ jemalloc = ['dep:jemalloc-sys'] llvm = ['rustc_driver_impl/llvm'] max_level_info = ['rustc_driver_impl/max_level_info'] rustc_randomized_layouts = ['rustc_driver_impl/rustc_randomized_layouts'] -rustc_use_parallel_compiler = ['rustc_driver_impl/rustc_use_parallel_compiler'] # tidy-alphabetical-end diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs index d5c2bc1c7f6..0b4bfc0b36a 100644 --- a/compiler/rustc_ast/src/tokenstream.rs +++ b/compiler/rustc_ast/src/tokenstream.rs @@ -38,7 +38,6 @@ pub enum TokenTree { } // Ensure all fields of `TokenTree` are `DynSend` and `DynSync`. -#[cfg(parallel_compiler)] fn _dummy() where Token: sync::DynSend + sync::DynSync, diff --git a/compiler/rustc_baked_icu_data/Cargo.toml b/compiler/rustc_baked_icu_data/Cargo.toml index e6cfb4887c9..c35556dcf5b 100644 --- a/compiler/rustc_baked_icu_data/Cargo.toml +++ b/compiler/rustc_baked_icu_data/Cargo.toml @@ -8,11 +8,6 @@ edition = "2021" icu_list = "1.2" icu_locid = "1.2" icu_locid_transform = "1.3.2" -icu_provider = "1.2" +icu_provider = { version = "1.2", features = ["sync"] } zerovec = "0.10.0" # tidy-alphabetical-end - -[features] -# tidy-alphabetical-start -rustc_use_parallel_compiler = ['icu_provider/sync'] -# tidy-alphabetical-end diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml index 5a477143a62..c8ecddb046c 100644 --- a/compiler/rustc_data_structures/Cargo.toml +++ b/compiler/rustc_data_structures/Cargo.toml @@ -10,11 +10,11 @@ bitflags = "2.4.1" either = "1.0" elsa = "=1.7.1" ena = "0.14.3" -indexmap = { version = "2.4.0" } +indexmap = { version = "2.4.0", features = ["rustc-rayon"] } jobserver_crate = { version = "0.1.28", package = "jobserver" } measureme = "11" rustc-hash = "2.0.0" -rustc-rayon = { version = "0.5.0", optional = true } +rustc-rayon = "0.5.0" rustc-stable-hash = { version = "0.1.0", features = ["nightly"] } rustc_arena = { path = "../rustc_arena" } rustc_graphviz = { path = "../rustc_graphviz" } @@ -53,8 +53,3 @@ memmap2 = "0.2.1" [target.'cfg(not(target_has_atomic = "64"))'.dependencies] portable-atomic = "1.5.1" - -[features] -# tidy-alphabetical-start -rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "dep:rustc-rayon"] -# tidy-alphabetical-end diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs index afac08ae6f8..bede4c49703 100644 --- a/compiler/rustc_data_structures/src/lib.rs +++ b/compiler/rustc_data_structures/src/lib.rs @@ -10,7 +10,6 @@ #![allow(internal_features)] #![allow(rustc::default_hash_types)] #![allow(rustc::potential_query_instability)] -#![cfg_attr(not(parallel_compiler), feature(cell_leak))] #![deny(unsafe_op_in_unsafe_fn)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs index 83fdaff515b..2b629024bfe 100644 --- a/compiler/rustc_data_structures/src/marker.rs +++ b/compiler/rustc_data_structures/src/marker.rs @@ -1,194 +1,162 @@ -cfg_match! { - cfg(not(parallel_compiler)) => { - pub auto trait DynSend {} - pub auto trait DynSync {} +#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. 
\ + Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")] +// This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()` +// is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a +// `Send` type in `IntoDynSyncSend` will create a `DynSend` type. +pub unsafe auto trait DynSend {} - impl DynSend for T {} - impl DynSync for T {} - } - _ => { - #[rustc_on_unimplemented( - message = "`{Self}` doesn't implement `DynSend`. \ - Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`" - )] - // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()` - // is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a - // `Send` type in `IntoDynSyncSend` will create a `DynSend` type. - pub unsafe auto trait DynSend {} +#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSync`. \ + Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`")] +// This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()` +// is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a +// `Sync` type in `IntoDynSyncSend` will create a `DynSync` type. +pub unsafe auto trait DynSync {} - #[rustc_on_unimplemented( - message = "`{Self}` doesn't implement `DynSync`. \ - Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`" - )] - // This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()` - // is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a - // `Sync` type in `IntoDynSyncSend` will create a `DynSync` type. - pub unsafe auto trait DynSync {} +// Same with `Sync` and `Send`. +unsafe impl DynSend for &T {} - // Same with `Sync` and `Send`. - unsafe impl DynSend for &T {} - - macro_rules! impls_dyn_send_neg { - ($([$t1: ty $(where $($generics1: tt)*)?])*) => { - $(impl$(<$($generics1)*>)? !DynSend for $t1 {})* - }; - } - - // Consistent with `std` - impls_dyn_send_neg!( - [std::env::Args] - [std::env::ArgsOs] - [*const T where T: ?Sized] - [*mut T where T: ?Sized] - [std::ptr::NonNull where T: ?Sized] - [std::rc::Rc where T: ?Sized] - [std::rc::Weak where T: ?Sized] - [std::sync::MutexGuard<'_, T> where T: ?Sized] - [std::sync::RwLockReadGuard<'_, T> where T: ?Sized] - [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized] - [std::io::StdoutLock<'_>] - [std::io::StderrLock<'_>] - ); - - #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] - // Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms - impl !DynSend for std::env::VarsOs {} - - macro_rules! already_send { - ($([$ty: ty])*) => { - $(unsafe impl DynSend for $ty where $ty: Send {})* - }; - } - - // These structures are already `Send`. - already_send!( - [std::backtrace::Backtrace] - [std::io::Stdout] - [std::io::Stderr] - [std::io::Error] - [std::fs::File] - [rustc_arena::DroplessArena] - [crate::memmap::Mmap] - [crate::profiling::SelfProfiler] - [crate::owned_slice::OwnedSlice] - ); - - macro_rules! 
impl_dyn_send { - ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => { - $(unsafe impl<$($generics2)*> DynSend for $ty {})* - }; - } - - impl_dyn_send!( - [std::sync::atomic::AtomicPtr where T] - [std::sync::Mutex where T: ?Sized+ DynSend] - [std::sync::mpsc::Sender where T: DynSend] - [std::sync::Arc where T: ?Sized + DynSync + DynSend] - [std::sync::LazyLock where T: DynSend, F: DynSend] - [std::collections::HashSet where K: DynSend, S: DynSend] - [std::collections::HashMap where K: DynSend, V: DynSend, S: DynSend] - [std::collections::BTreeMap where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend] - [Vec where T: DynSend, A: std::alloc::Allocator + DynSend] - [Box where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend] - [crate::sync::RwLock where T: DynSend] - [crate::tagged_ptr::CopyTaggedPtr where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool] - [rustc_arena::TypedArena where T: DynSend] - [indexmap::IndexSet where V: DynSend, S: DynSend] - [indexmap::IndexMap where K: DynSend, V: DynSend, S: DynSend] - [thin_vec::ThinVec where T: DynSend] - [smallvec::SmallVec where A: smallvec::Array + DynSend] - ); - - macro_rules! impls_dyn_sync_neg { - ($([$t1: ty $(where $($generics1: tt)*)?])*) => { - $(impl$(<$($generics1)*>)? !DynSync for $t1 {})* - }; - } - - // Consistent with `std` - impls_dyn_sync_neg!( - [std::env::Args] - [std::env::ArgsOs] - [*const T where T: ?Sized] - [*mut T where T: ?Sized] - [std::cell::Cell where T: ?Sized] - [std::cell::RefCell where T: ?Sized] - [std::cell::UnsafeCell where T: ?Sized] - [std::ptr::NonNull where T: ?Sized] - [std::rc::Rc where T: ?Sized] - [std::rc::Weak where T: ?Sized] - [std::cell::OnceCell where T] - [std::sync::mpsc::Receiver where T] - [std::sync::mpsc::Sender where T] - ); - - #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] - // Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms - impl !DynSync for std::env::VarsOs {} - - macro_rules! already_sync { - ($([$ty: ty])*) => { - $(unsafe impl DynSync for $ty where $ty: Sync {})* - }; - } - - // These structures are already `Sync`. - already_sync!( - [std::sync::atomic::AtomicBool] - [std::sync::atomic::AtomicUsize] - [std::sync::atomic::AtomicU8] - [std::sync::atomic::AtomicU32] - [std::backtrace::Backtrace] - [std::io::Error] - [std::fs::File] - [jobserver_crate::Client] - [crate::memmap::Mmap] - [crate::profiling::SelfProfiler] - [crate::owned_slice::OwnedSlice] - ); - - // Use portable AtomicU64 for targets without native 64-bit atomics - #[cfg(target_has_atomic = "64")] - already_sync!( - [std::sync::atomic::AtomicU64] - ); - - #[cfg(not(target_has_atomic = "64"))] - already_sync!( - [portable_atomic::AtomicU64] - ); - - macro_rules! 
impl_dyn_sync { - ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => { - $(unsafe impl<$($generics2)*> DynSync for $ty {})* - }; - } - - impl_dyn_sync!( - [std::sync::atomic::AtomicPtr where T] - [std::sync::OnceLock where T: DynSend + DynSync] - [std::sync::Mutex where T: ?Sized + DynSend] - [std::sync::Arc where T: ?Sized + DynSync + DynSend] - [std::sync::LazyLock where T: DynSend + DynSync, F: DynSend] - [std::collections::HashSet where K: DynSync, S: DynSync] - [std::collections::HashMap where K: DynSync, V: DynSync, S: DynSync] - [std::collections::BTreeMap where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync] - [Vec where T: DynSync, A: std::alloc::Allocator + DynSync] - [Box where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync] - [crate::sync::RwLock where T: DynSend + DynSync] - [crate::sync::WorkerLocal where T: DynSend] - [crate::intern::Interned<'a, T> where 'a, T: DynSync] - [crate::tagged_ptr::CopyTaggedPtr where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool] - [parking_lot::lock_api::Mutex where R: DynSync, T: ?Sized + DynSend] - [parking_lot::lock_api::RwLock where R: DynSync, T: ?Sized + DynSend + DynSync] - [indexmap::IndexSet where V: DynSync, S: DynSync] - [indexmap::IndexMap where K: DynSync, V: DynSync, S: DynSync] - [smallvec::SmallVec where A: smallvec::Array + DynSync] - [thin_vec::ThinVec where T: DynSync] - ); - } +macro_rules! impls_dyn_send_neg { + ($([$t1: ty $(where $($generics1: tt)*)?])*) => { + $(impl$(<$($generics1)*>)? !DynSend for $t1 {})* + }; } +// Consistent with `std` +impls_dyn_send_neg!( + [std::env::Args] + [std::env::ArgsOs] + [*const T where T: ?Sized] + [*mut T where T: ?Sized] + [std::ptr::NonNull where T: ?Sized] + [std::rc::Rc where T: ?Sized] + [std::rc::Weak where T: ?Sized] + [std::sync::MutexGuard<'_, T> where T: ?Sized] + [std::sync::RwLockReadGuard<'_, T> where T: ?Sized] + [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized] + [std::io::StdoutLock<'_>] + [std::io::StderrLock<'_>] +); + +#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] +// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms +impl !DynSend for std::env::VarsOs {} + +macro_rules! already_send { + ($([$ty: ty])*) => { + $(unsafe impl DynSend for $ty where $ty: Send {})* + }; +} + +// These structures are already `Send`. +already_send!( + [std::backtrace::Backtrace][std::io::Stdout][std::io::Stderr][std::io::Error][std::fs::File] + [rustc_arena::DroplessArena][crate::memmap::Mmap][crate::profiling::SelfProfiler] + [crate::owned_slice::OwnedSlice] +); + +macro_rules! 
impl_dyn_send { + ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => { + $(unsafe impl<$($generics2)*> DynSend for $ty {})* + }; +} + +impl_dyn_send!( + [std::sync::atomic::AtomicPtr where T] + [std::sync::Mutex where T: ?Sized+ DynSend] + [std::sync::mpsc::Sender where T: DynSend] + [std::sync::Arc where T: ?Sized + DynSync + DynSend] + [std::sync::LazyLock where T: DynSend, F: DynSend] + [std::collections::HashSet where K: DynSend, S: DynSend] + [std::collections::HashMap where K: DynSend, V: DynSend, S: DynSend] + [std::collections::BTreeMap where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend] + [Vec where T: DynSend, A: std::alloc::Allocator + DynSend] + [Box where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend] + [crate::sync::RwLock where T: DynSend] + [crate::tagged_ptr::CopyTaggedPtr where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool] + [rustc_arena::TypedArena where T: DynSend] + [indexmap::IndexSet where V: DynSend, S: DynSend] + [indexmap::IndexMap where K: DynSend, V: DynSend, S: DynSend] + [thin_vec::ThinVec where T: DynSend] + [smallvec::SmallVec where A: smallvec::Array + DynSend] +); + +macro_rules! impls_dyn_sync_neg { + ($([$t1: ty $(where $($generics1: tt)*)?])*) => { + $(impl$(<$($generics1)*>)? !DynSync for $t1 {})* + }; +} + +// Consistent with `std` +impls_dyn_sync_neg!( + [std::env::Args] + [std::env::ArgsOs] + [*const T where T: ?Sized] + [*mut T where T: ?Sized] + [std::cell::Cell where T: ?Sized] + [std::cell::RefCell where T: ?Sized] + [std::cell::UnsafeCell where T: ?Sized] + [std::ptr::NonNull where T: ?Sized] + [std::rc::Rc where T: ?Sized] + [std::rc::Weak where T: ?Sized] + [std::cell::OnceCell where T] + [std::sync::mpsc::Receiver where T] + [std::sync::mpsc::Sender where T] +); + +#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] +// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms +impl !DynSync for std::env::VarsOs {} + +macro_rules! already_sync { + ($([$ty: ty])*) => { + $(unsafe impl DynSync for $ty where $ty: Sync {})* + }; +} + +// These structures are already `Sync`. +already_sync!( + [std::sync::atomic::AtomicBool][std::sync::atomic::AtomicUsize][std::sync::atomic::AtomicU8] + [std::sync::atomic::AtomicU32][std::backtrace::Backtrace][std::io::Error][std::fs::File] + [jobserver_crate::Client][crate::memmap::Mmap][crate::profiling::SelfProfiler] + [crate::owned_slice::OwnedSlice] +); + +// Use portable AtomicU64 for targets without native 64-bit atomics +#[cfg(target_has_atomic = "64")] +already_sync!([std::sync::atomic::AtomicU64]); + +#[cfg(not(target_has_atomic = "64"))] +already_sync!([portable_atomic::AtomicU64]); + +macro_rules! 
impl_dyn_sync { + ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => { + $(unsafe impl<$($generics2)*> DynSync for $ty {})* + }; +} + +impl_dyn_sync!( + [std::sync::atomic::AtomicPtr where T] + [std::sync::OnceLock where T: DynSend + DynSync] + [std::sync::Mutex where T: ?Sized + DynSend] + [std::sync::Arc where T: ?Sized + DynSync + DynSend] + [std::sync::LazyLock where T: DynSend + DynSync, F: DynSend] + [std::collections::HashSet where K: DynSync, S: DynSync] + [std::collections::HashMap where K: DynSync, V: DynSync, S: DynSync] + [std::collections::BTreeMap where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync] + [Vec where T: DynSync, A: std::alloc::Allocator + DynSync] + [Box where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync] + [crate::sync::RwLock where T: DynSend + DynSync] + [crate::sync::WorkerLocal where T: DynSend] + [crate::intern::Interned<'a, T> where 'a, T: DynSync] + [crate::tagged_ptr::CopyTaggedPtr where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool] + [parking_lot::lock_api::Mutex where R: DynSync, T: ?Sized + DynSend] + [parking_lot::lock_api::RwLock where R: DynSync, T: ?Sized + DynSend + DynSync] + [indexmap::IndexSet where V: DynSync, S: DynSync] + [indexmap::IndexMap where K: DynSync, V: DynSync, S: DynSync] + [smallvec::SmallVec where A: smallvec::Array + DynSync] + [thin_vec::ThinVec where T: DynSync] +); + pub fn assert_dyn_sync() {} pub fn assert_dyn_send() {} pub fn assert_dyn_send_val(_t: &T) {} @@ -203,7 +171,6 @@ impl FromDyn { // Check that `sync::is_dyn_thread_safe()` is true on creation so we can // implement `Send` and `Sync` for this structure when `T` // implements `DynSend` and `DynSync` respectively. - #[cfg(parallel_compiler)] assert!(crate::sync::is_dyn_thread_safe()); FromDyn(val) } @@ -215,11 +182,9 @@ impl FromDyn { } // `FromDyn` is `Send` if `T` is `DynSend`, since it ensures that sync::is_dyn_thread_safe() is true. -#[cfg(parallel_compiler)] unsafe impl Send for FromDyn {} // `FromDyn` is `Sync` if `T` is `DynSync`, since it ensures that sync::is_dyn_thread_safe() is true. 
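// A sketch of the intended use (names hypothetical, not part of this patch):
// because `FromDyn<F>` is `Send` whenever `F: DynSend`, a closure that is only
// `DynSend` can be handed to rayon, which demands real `Send`:
//
//     let f = FromDyn::from(closure);          // asserts `is_dyn_thread_safe()`
//     rayon::join(move || f.into_inner()(), || ());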
-#[cfg(parallel_compiler)] unsafe impl Sync for FromDyn {} impl std::ops::Deref for FromDyn { @@ -237,9 +202,7 @@ impl std::ops::Deref for FromDyn { #[derive(Copy, Clone)] pub struct IntoDynSyncSend(pub T); -#[cfg(parallel_compiler)] unsafe impl DynSend for IntoDynSyncSend {} -#[cfg(parallel_compiler)] unsafe impl DynSync for IntoDynSyncSend {} impl std::ops::Deref for IntoDynSyncSend { diff --git a/compiler/rustc_data_structures/src/owned_slice.rs b/compiler/rustc_data_structures/src/owned_slice.rs index bbe6691e548..c8be0ab52e9 100644 --- a/compiler/rustc_data_structures/src/owned_slice.rs +++ b/compiler/rustc_data_structures/src/owned_slice.rs @@ -139,11 +139,9 @@ impl Borrow<[u8]> for OwnedSlice { } // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc)`, which is `Send` -#[cfg(parallel_compiler)] unsafe impl sync::Send for OwnedSlice {} // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc)`, which is `Sync` -#[cfg(parallel_compiler)] unsafe impl sync::Sync for OwnedSlice {} #[cfg(test)] diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs index d0b6fe2bc6f..65488c73d3c 100644 --- a/compiler/rustc_data_structures/src/sharded.rs +++ b/compiler/rustc_data_structures/src/sharded.rs @@ -3,27 +3,22 @@ use std::collections::hash_map::RawEntryMut; use std::hash::{Hash, Hasher}; use std::{iter, mem}; -#[cfg(parallel_compiler)] use either::Either; use crate::fx::{FxHashMap, FxHasher}; -#[cfg(parallel_compiler)] -use crate::sync::{CacheAligned, is_dyn_thread_safe}; -use crate::sync::{Lock, LockGuard, Mode}; +use crate::sync::{CacheAligned, Lock, LockGuard, Mode, is_dyn_thread_safe}; // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700, // but this should be tested on higher core count CPUs. How the `Sharded` type gets used // may also affect the ideal number of shards. const SHARD_BITS: usize = 5; -#[cfg(parallel_compiler)] const SHARDS: usize = 1 << SHARD_BITS; /// An array of cache-line aligned inner locked structures with convenience methods. /// A single field is used when the compiler uses only one thread. pub enum Sharded { Single(Lock), - #[cfg(parallel_compiler)] Shards(Box<[CacheAligned>; SHARDS]>), } @@ -37,7 +32,6 @@ impl Default for Sharded { impl Sharded { #[inline] pub fn new(mut value: impl FnMut() -> T) -> Self { - #[cfg(parallel_compiler)] if is_dyn_thread_safe() { return Sharded::Shards(Box::new( [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))), @@ -52,7 +46,6 @@ impl Sharded { pub fn get_shard_by_value(&self, _val: &K) -> &Lock { match self { Self::Single(single) => single, - #[cfg(parallel_compiler)] Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)), } } @@ -66,7 +59,6 @@ impl Sharded { pub fn get_shard_by_index(&self, _i: usize) -> &Lock { match self { Self::Single(single) => single, - #[cfg(parallel_compiler)] Self::Shards(shards) => { // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds. unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 } @@ -87,7 +79,6 @@ impl Sharded { // `might_be_dyn_thread_safe` was also false. unsafe { single.lock_assume(Mode::NoSync) } } - #[cfg(parallel_compiler)] Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)), } } @@ -110,7 +101,6 @@ impl Sharded { // `might_be_dyn_thread_safe` was also false. unsafe { single.lock_assume(Mode::NoSync) } } - #[cfg(parallel_compiler)] Self::Shards(shards) => { // Synchronization is enabled so use the `lock_assume_sync` method optimized // for that case. 
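// Usage sketch for the now always-compiled sharded maps (the key/value names
// are hypothetical): in dyn-thread-safe mode `Sharded::new` allocates
// `SHARDS == 32` cache-aligned locks and `lock_shard_by_value` hashes the key
// to pick one, so writers hitting different shards do not contend; in
// single-thread mode there is only the one `Lock` in `Sharded::Single`.
//
//     let cache: Sharded<FxHashMap<Key, Value>> = Sharded::new(FxHashMap::default);
//     cache.lock_shard_by_value(&key).insert(key, value);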
@@ -127,11 +117,7 @@ impl Sharded { #[inline] pub fn lock_shards(&self) -> impl Iterator> { match self { - #[cfg(not(parallel_compiler))] - Self::Single(single) => iter::once(single.lock()), - #[cfg(parallel_compiler)] Self::Single(single) => Either::Left(iter::once(single.lock())), - #[cfg(parallel_compiler)] Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())), } } @@ -139,11 +125,7 @@ impl Sharded { #[inline] pub fn try_lock_shards(&self) -> impl Iterator>> { match self { - #[cfg(not(parallel_compiler))] - Self::Single(single) => iter::once(single.try_lock()), - #[cfg(parallel_compiler)] Self::Single(single) => Either::Left(iter::once(single.try_lock())), - #[cfg(parallel_compiler)] Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())), } } @@ -151,7 +133,6 @@ impl Sharded { #[inline] pub fn shards() -> usize { - #[cfg(parallel_compiler)] if is_dyn_thread_safe() { return SHARDS; } diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs index a3491dbfec7..7a9533031f4 100644 --- a/compiler/rustc_data_structures/src/sync.rs +++ b/compiler/rustc_data_structures/src/sync.rs @@ -54,9 +54,7 @@ mod worker_local; pub use worker_local::{Registry, WorkerLocal}; mod parallel; -#[cfg(parallel_compiler)] -pub use parallel::scope; -pub use parallel::{join, par_for_each_in, par_map, parallel_guard, try_par_for_each_in}; +pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in}; pub use vec::{AppendOnlyIndexVec, AppendOnlyVec}; mod vec; @@ -104,226 +102,66 @@ mod mode { } } +// FIXME(parallel_compiler): Get rid of these aliases across the compiler. + +pub use std::marker::{Send, Sync}; +// Use portable AtomicU64 for targets without native 64-bit atomics +#[cfg(target_has_atomic = "64")] +pub use std::sync::atomic::AtomicU64; +pub use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize}; +pub use std::sync::{Arc as Lrc, OnceLock, Weak}; + pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; +pub use parking_lot::{ + MappedMutexGuard as MappedLockGuard, MappedRwLockReadGuard as MappedReadGuard, + MappedRwLockWriteGuard as MappedWriteGuard, RwLockReadGuard as ReadGuard, + RwLockWriteGuard as WriteGuard, +}; +#[cfg(not(target_has_atomic = "64"))] +pub use portable_atomic::AtomicU64; -cfg_match! { - cfg(not(parallel_compiler)) => { - use std::ops::Add; - use std::cell::Cell; - use std::sync::atomic::Ordering; +pub type LRef<'a, T> = &'a T; - pub unsafe auto trait Send {} - pub unsafe auto trait Sync {} +#[derive(Debug, Default)] +pub struct MTLock(Lock); - unsafe impl Send for T {} - unsafe impl Sync for T {} - - /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc. - /// It has explicit ordering arguments and is only intended for use with - /// the native atomic types. - /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases - /// as it's not intended to be used separately. 
- #[derive(Debug, Default)] - pub struct Atomic(Cell); - - impl Atomic { - #[inline] - pub fn new(v: T) -> Self { - Atomic(Cell::new(v)) - } - - #[inline] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline] - pub fn load(&self, _: Ordering) -> T { - self.0.get() - } - - #[inline] - pub fn store(&self, val: T, _: Ordering) { - self.0.set(val) - } - - #[inline] - pub fn swap(&self, val: T, _: Ordering) -> T { - self.0.replace(val) - } - } - - impl Atomic { - pub fn fetch_or(&self, val: bool, _: Ordering) -> bool { - let old = self.0.get(); - self.0.set(val | old); - old - } - pub fn fetch_and(&self, val: bool, _: Ordering) -> bool { - let old = self.0.get(); - self.0.set(val & old); - old - } - } - - impl Atomic { - #[inline] - pub fn compare_exchange(&self, - current: T, - new: T, - _: Ordering, - _: Ordering) - -> Result { - let read = self.0.get(); - if read == current { - self.0.set(new); - Ok(read) - } else { - Err(read) - } - } - } - - impl + Copy> Atomic { - #[inline] - pub fn fetch_add(&self, val: T, _: Ordering) -> T { - let old = self.0.get(); - self.0.set(old + val); - old - } - } - - pub type AtomicUsize = Atomic; - pub type AtomicBool = Atomic; - pub type AtomicU32 = Atomic; - pub type AtomicU64 = Atomic; - - pub use std::rc::Rc as Lrc; - pub use std::rc::Weak as Weak; - #[doc(no_inline)] - pub use std::cell::Ref as ReadGuard; - #[doc(no_inline)] - pub use std::cell::Ref as MappedReadGuard; - #[doc(no_inline)] - pub use std::cell::RefMut as WriteGuard; - #[doc(no_inline)] - pub use std::cell::RefMut as MappedWriteGuard; - #[doc(no_inline)] - pub use std::cell::RefMut as MappedLockGuard; - - pub use std::cell::OnceCell as OnceLock; - - use std::cell::RefCell as InnerRwLock; - - pub type LRef<'a, T> = &'a mut T; - - #[derive(Debug, Default)] - pub struct MTLock(T); - - impl MTLock { - #[inline(always)] - pub fn new(inner: T) -> Self { - MTLock(inner) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0 - } - - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - &mut self.0 - } - - #[inline(always)] - pub fn lock(&self) -> &T { - &self.0 - } - - #[inline(always)] - pub fn lock_mut(&mut self) -> &mut T { - &mut self.0 - } - } - - // FIXME: Probably a bad idea (in the threaded case) - impl Clone for MTLock { - #[inline] - fn clone(&self) -> Self { - MTLock(self.0.clone()) - } - } +impl MTLock { + #[inline(always)] + pub fn new(inner: T) -> Self { + MTLock(Lock::new(inner)) } - _ => { - pub use std::marker::Send as Send; - pub use std::marker::Sync as Sync; - pub use parking_lot::RwLockReadGuard as ReadGuard; - pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard; - pub use parking_lot::RwLockWriteGuard as WriteGuard; - pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard; + #[inline(always)] + pub fn into_inner(self) -> T { + self.0.into_inner() + } - pub use parking_lot::MappedMutexGuard as MappedLockGuard; + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + self.0.get_mut() + } - pub use std::sync::OnceLock; + #[inline(always)] + pub fn lock(&self) -> LockGuard<'_, T> { + self.0.lock() + } - pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32}; - - // Use portable AtomicU64 for targets without native 64-bit atomics - #[cfg(target_has_atomic = "64")] - pub use std::sync::atomic::AtomicU64; - - #[cfg(not(target_has_atomic = "64"))] - pub use portable_atomic::AtomicU64; - - pub use std::sync::Arc as Lrc; - pub use std::sync::Weak as Weak; - - pub type LRef<'a, T> = &'a T; - - #[derive(Debug, Default)] 
- pub struct MTLock(Lock); - - impl MTLock { - #[inline(always)] - pub fn new(inner: T) -> Self { - MTLock(Lock::new(inner)) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - self.0.get_mut() - } - - #[inline(always)] - pub fn lock(&self) -> LockGuard<'_, T> { - self.0.lock() - } - - #[inline(always)] - pub fn lock_mut(&self) -> LockGuard<'_, T> { - self.lock() - } - } - - use parking_lot::RwLock as InnerRwLock; - - /// This makes locks panic if they are already held. - /// It is only useful when you are running in a single thread - const ERROR_CHECKING: bool = false; + #[inline(always)] + pub fn lock_mut(&self) -> LockGuard<'_, T> { + self.lock() } } +use parking_lot::RwLock as InnerRwLock; + +/// This makes locks panic if they are already held. +/// It is only useful when you are running in a single thread +const ERROR_CHECKING: bool = false; + pub type MTLockRef<'a, T> = LRef<'a, MTLock>; #[derive(Default)] -#[cfg_attr(parallel_compiler, repr(align(64)))] +#[repr(align(64))] pub struct CacheAligned(pub T); pub trait HashMapExt { @@ -357,14 +195,6 @@ impl RwLock { self.0.get_mut() } - #[cfg(not(parallel_compiler))] - #[inline(always)] - #[track_caller] - pub fn read(&self) -> ReadGuard<'_, T> { - self.0.borrow() - } - - #[cfg(parallel_compiler)] #[inline(always)] pub fn read(&self) -> ReadGuard<'_, T> { if ERROR_CHECKING { @@ -380,26 +210,11 @@ impl RwLock { f(&*self.read()) } - #[cfg(not(parallel_compiler))] - #[inline(always)] - pub fn try_write(&self) -> Result, ()> { - self.0.try_borrow_mut().map_err(|_| ()) - } - - #[cfg(parallel_compiler)] #[inline(always)] pub fn try_write(&self) -> Result, ()> { self.0.try_write().ok_or(()) } - #[cfg(not(parallel_compiler))] - #[inline(always)] - #[track_caller] - pub fn write(&self) -> WriteGuard<'_, T> { - self.0.borrow_mut() - } - - #[cfg(parallel_compiler)] #[inline(always)] pub fn write(&self) -> WriteGuard<'_, T> { if ERROR_CHECKING { @@ -427,13 +242,6 @@ impl RwLock { self.write() } - #[cfg(not(parallel_compiler))] - #[inline(always)] - pub fn leak(&self) -> &T { - ReadGuard::leak(self.read()) - } - - #[cfg(parallel_compiler)] #[inline(always)] pub fn leak(&self) -> &T { let guard = self.read(); diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs index fad5f583d1c..5236c9fe156 100644 --- a/compiler/rustc_data_structures/src/sync/freeze.rs +++ b/compiler/rustc_data_structures/src/sync/freeze.rs @@ -5,9 +5,7 @@ use std::ops::{Deref, DerefMut}; use std::ptr::NonNull; use std::sync::atomic::Ordering; -use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard}; -#[cfg(parallel_compiler)] -use crate::sync::{DynSend, DynSync}; +use crate::sync::{AtomicBool, DynSend, DynSync, ReadGuard, RwLock, WriteGuard}; /// A type which allows mutation using a lock until /// the value is frozen and can be accessed lock-free. @@ -22,7 +20,6 @@ pub struct FreezeLock { lock: RwLock<()>, } -#[cfg(parallel_compiler)] unsafe impl DynSync for FreezeLock {} impl FreezeLock { diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs index 012ee7f900e..2ccf06ccd4f 100644 --- a/compiler/rustc_data_structures/src/sync/lock.rs +++ b/compiler/rustc_data_structures/src/sync/lock.rs @@ -1,236 +1,177 @@ //! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true. //! 
It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits. -//! -//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`. #![allow(dead_code)] use std::fmt; -#[cfg(parallel_compiler)] -pub use maybe_sync::*; -#[cfg(not(parallel_compiler))] -pub use no_sync::*; - #[derive(Clone, Copy, PartialEq)] pub enum Mode { NoSync, Sync, } -mod maybe_sync { - use std::cell::{Cell, UnsafeCell}; - use std::intrinsics::unlikely; - use std::marker::PhantomData; - use std::mem::ManuallyDrop; - use std::ops::{Deref, DerefMut}; +use std::cell::{Cell, UnsafeCell}; +use std::intrinsics::unlikely; +use std::marker::PhantomData; +use std::mem::ManuallyDrop; +use std::ops::{Deref, DerefMut}; - use parking_lot::RawMutex; - use parking_lot::lock_api::RawMutex as _; +use parking_lot::RawMutex; +use parking_lot::lock_api::RawMutex as _; - use super::Mode; - use crate::sync::mode; - #[cfg(parallel_compiler)] - use crate::sync::{DynSend, DynSync}; +use crate::sync::{DynSend, DynSync, mode}; - /// A guard holding mutable access to a `Lock` which is in a locked state. - #[must_use = "if unused the Lock will immediately unlock"] - pub struct LockGuard<'a, T> { - lock: &'a Lock, - marker: PhantomData<&'a mut T>, +/// A guard holding mutable access to a `Lock` which is in a locked state. +#[must_use = "if unused the Lock will immediately unlock"] +pub struct LockGuard<'a, T> { + lock: &'a Lock, + marker: PhantomData<&'a mut T>, - /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it - /// to the original lock operation. - mode: Mode, + /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it + /// to the original lock operation. + mode: Mode, +} + +impl<'a, T: 'a> Deref for LockGuard<'a, T> { + type Target = T; + #[inline] + fn deref(&self) -> &T { + // SAFETY: We have shared access to the mutable access owned by this type, + // so we can give out a shared reference. + unsafe { &*self.lock.data.get() } } +} - impl<'a, T: 'a> Deref for LockGuard<'a, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - // SAFETY: We have shared access to the mutable access owned by this type, - // so we can give out a shared reference. - unsafe { &*self.lock.data.get() } - } +impl<'a, T: 'a> DerefMut for LockGuard<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: We have mutable access to the data so we can give out a mutable reference. + unsafe { &mut *self.lock.data.get() } } +} - impl<'a, T: 'a> DerefMut for LockGuard<'a, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - // SAFETY: We have mutable access to the data so we can give out a mutable reference. - unsafe { &mut *self.lock.data.get() } - } - } - - impl<'a, T: 'a> Drop for LockGuard<'a, T> { - #[inline] - fn drop(&mut self) { - // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent - // with the `lock.mode` state. This means we access the right union fields. - match self.mode { - Mode::NoSync => { - let cell = unsafe { &self.lock.mode_union.no_sync }; - debug_assert!(cell.get()); - cell.set(false); - } - // SAFETY (unlock): We know that the lock is locked as this type is a proof of that. - Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() }, +impl<'a, T: 'a> Drop for LockGuard<'a, T> { + #[inline] + fn drop(&mut self) { + // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent + // with the `lock.mode` state. 
This means we access the right union fields. + match self.mode { + Mode::NoSync => { + let cell = unsafe { &self.lock.mode_union.no_sync }; + debug_assert!(cell.get()); + cell.set(false); } + // SAFETY (unlock): We know that the lock is locked as this type is a proof of that. + Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() }, } } +} - union ModeUnion { - /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`. - no_sync: ManuallyDrop>, +union ModeUnion { + /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`. + no_sync: ManuallyDrop>, - /// A lock implementation that's only used if `Lock.mode` is `Sync`. - sync: ManuallyDrop, + /// A lock implementation that's only used if `Lock.mode` is `Sync`. + sync: ManuallyDrop, +} + +/// The value representing a locked state for the `Cell`. +const LOCKED: bool = true; + +/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true. +/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`. +pub struct Lock { + /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a + /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`. + /// This is set on initialization and never changed. + mode: Mode, + + mode_union: ModeUnion, + data: UnsafeCell, +} + +impl Lock { + #[inline(always)] + pub fn new(inner: T) -> Self { + let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) { + // Create the lock with synchronization enabled using the `RawMutex` type. + (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) }) + } else { + // Create the lock with synchronization disabled. + (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) }) + }; + Lock { mode, mode_union, data: UnsafeCell::new(inner) } } - /// The value representing a locked state for the `Cell`. - const LOCKED: bool = true; - - /// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true. - /// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`. - pub struct Lock { - /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a - /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`. - /// This is set on initialization and never changed. - mode: Mode, - - mode_union: ModeUnion, - data: UnsafeCell, + #[inline(always)] + pub fn into_inner(self) -> T { + self.data.into_inner() } - impl Lock { - #[inline(always)] - pub fn new(inner: T) -> Self { - let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) { - // Create the lock with synchronization enabled using the `RawMutex` type. - (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) }) - } else { - // Create the lock with synchronization disabled. - (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) }) - }; - Lock { mode, mode_union, data: UnsafeCell::new(inner) } + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + self.data.get_mut() + } + + #[inline(always)] + pub fn try_lock(&self) -> Option> { + let mode = self.mode; + // SAFETY: This is safe since the union fields are used in accordance with `self.mode`. 
+ match mode { + Mode::NoSync => { + let cell = unsafe { &self.mode_union.no_sync }; + let was_unlocked = cell.get() != LOCKED; + if was_unlocked { + cell.set(LOCKED); + } + was_unlocked + } + Mode::Sync => unsafe { self.mode_union.sync.try_lock() }, + } + .then(|| LockGuard { lock: self, marker: PhantomData, mode }) + } + + /// This acquires the lock assuming synchronization is in a specific mode. + /// + /// Safety + /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was + /// true on lock creation. + #[inline(always)] + #[track_caller] + pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> { + #[inline(never)] + #[track_caller] + #[cold] + fn lock_held() -> ! { + panic!("lock was already held") } - #[inline(always)] - pub fn into_inner(self) -> T { - self.data.into_inner() - } - - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - self.data.get_mut() - } - - #[inline(always)] - pub fn try_lock(&self) -> Option> { - let mode = self.mode; - // SAFETY: This is safe since the union fields are used in accordance with `self.mode`. + // SAFETY: This is safe since the union fields are used in accordance with `mode` + // which also must match `self.mode` due to the safety precondition. + unsafe { match mode { Mode::NoSync => { - let cell = unsafe { &self.mode_union.no_sync }; - let was_unlocked = cell.get() != LOCKED; - if was_unlocked { - cell.set(LOCKED); + if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) { + lock_held() } - was_unlocked } - Mode::Sync => unsafe { self.mode_union.sync.try_lock() }, + Mode::Sync => self.mode_union.sync.lock(), } - .then(|| LockGuard { lock: self, marker: PhantomData, mode }) - } - - /// This acquires the lock assuming synchronization is in a specific mode. - /// - /// Safety - /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was - /// true on lock creation. - #[inline(always)] - #[track_caller] - pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> { - #[inline(never)] - #[track_caller] - #[cold] - fn lock_held() -> ! { - panic!("lock was already held") - } - - // SAFETY: This is safe since the union fields are used in accordance with `mode` - // which also must match `self.mode` due to the safety precondition. - unsafe { - match mode { - Mode::NoSync => { - if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) { - lock_held() - } - } - Mode::Sync => self.mode_union.sync.lock(), - } - } - LockGuard { lock: self, marker: PhantomData, mode } - } - - #[inline(always)] - #[track_caller] - pub fn lock(&self) -> LockGuard<'_, T> { - unsafe { self.lock_assume(self.mode) } } + LockGuard { lock: self, marker: PhantomData, mode } } - #[cfg(parallel_compiler)] - unsafe impl DynSend for Lock {} - #[cfg(parallel_compiler)] - unsafe impl DynSync for Lock {} -} - -mod no_sync { - use std::cell::RefCell; - #[doc(no_inline)] - pub use std::cell::RefMut as LockGuard; - - use super::Mode; - - pub struct Lock(RefCell); - - impl Lock { - #[inline(always)] - pub fn new(inner: T) -> Self { - Lock(RefCell::new(inner)) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - self.0.get_mut() - } - - #[inline(always)] - pub fn try_lock(&self) -> Option> { - self.0.try_borrow_mut().ok() - } - - #[inline(always)] - #[track_caller] - // This is unsafe to match the API for the `parallel_compiler` case. 
- pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> { - self.0.borrow_mut() - } - - #[inline(always)] - #[track_caller] - pub fn lock(&self) -> LockGuard<'_, T> { - self.0.borrow_mut() - } + #[inline(always)] + #[track_caller] + pub fn lock(&self) -> LockGuard<'_, T> { + unsafe { self.lock_assume(self.mode) } } } +unsafe impl DynSend for Lock {} +unsafe impl DynSync for Lock {} + impl Lock { #[inline(always)] #[track_caller] diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs index c7df19842d6..1ba631b8623 100644 --- a/compiler/rustc_data_structures/src/sync/parallel.rs +++ b/compiler/rustc_data_structures/src/sync/parallel.rs @@ -6,14 +6,11 @@ use std::any::Any; use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind}; -#[cfg(not(parallel_compiler))] -pub use disabled::*; -#[cfg(parallel_compiler)] -pub use enabled::*; use parking_lot::Mutex; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator}; use crate::FatalErrorMarker; -use crate::sync::IntoDynSyncSend; +use crate::sync::{DynSend, DynSync, FromDyn, IntoDynSyncSend, mode}; /// A guard used to hold panics that occur during a parallel section to later by unwound. /// This is used for the parallel compiler to prevent fatal errors from non-deterministically @@ -49,65 +46,23 @@ pub fn parallel_guard(f: impl FnOnce(&ParallelGuard) -> R) -> R { ret } -mod disabled { - use crate::sync::parallel_guard; - - #[macro_export] - #[cfg(not(parallel_compiler))] - macro_rules! parallel { - ($($blocks:block),*) => {{ - $crate::sync::parallel_guard(|guard| { - $(guard.run(|| $blocks);)* - }); - }} - } - - pub fn join(oper_a: A, oper_b: B) -> (RA, RB) - where - A: FnOnce() -> RA, - B: FnOnce() -> RB, - { - let (a, b) = parallel_guard(|guard| { - let a = guard.run(oper_a); - let b = guard.run(oper_b); - (a, b) - }); - (a.unwrap(), b.unwrap()) - } - - pub fn par_for_each_in(t: T, mut for_each: impl FnMut(T::Item)) { - parallel_guard(|guard| { - t.into_iter().for_each(|i| { - guard.run(|| for_each(i)); - }); - }) - } - - pub fn try_par_for_each_in( - t: T, - mut for_each: impl FnMut(T::Item) -> Result<(), E>, - ) -> Result<(), E> { - parallel_guard(|guard| { - t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and) - }) - } - - pub fn par_map>( - t: T, - mut map: impl FnMut(<::IntoIter as Iterator>::Item) -> R, - ) -> C { - parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()) - } +pub fn serial_join(oper_a: A, oper_b: B) -> (RA, RB) +where + A: FnOnce() -> RA, + B: FnOnce() -> RB, +{ + let (a, b) = parallel_guard(|guard| { + let a = guard.run(oper_a); + let b = guard.run(oper_b); + (a, b) + }); + (a.unwrap(), b.unwrap()) } -#[cfg(parallel_compiler)] -mod enabled { - use crate::sync::{DynSend, DynSync, FromDyn, mode, parallel_guard}; - - /// Runs a list of blocks in parallel. The first block is executed immediately on - /// the current thread. Use that for the longest running block. - #[macro_export] - macro_rules! parallel { +/// Runs a list of blocks in parallel. The first block is executed immediately on +/// the current thread. Use that for the longest running block. +#[macro_export] +macro_rules! parallel { (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => { parallel!(impl $fblock [$block, $($c,)*] [$($rest),*]) }; @@ -139,92 +94,89 @@ mod enabled { }; } - // This function only works when `mode::is_dyn_thread_safe()`. 
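// Why the `FromDyn` dance in `scope` and `join`: `rayon::scope` and
// `rayon::join` require genuinely `Send` closures and results, while callers of
// these wrappers only guarantee `DynSend`. `FromDyn::from` asserts
// `is_dyn_thread_safe()` at runtime and yields a `Send`/`Sync` wrapper, so the
// closure and its result may cross the rayon thread boundary; `into_inner`
// unwraps the value on the other side.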
- pub fn scope<'scope, OP, R>(op: OP) -> R - where - OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend, - R: DynSend, - { - let op = FromDyn::from(op); - rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() - } +// This function only works when `mode::is_dyn_thread_safe()`. +pub fn scope<'scope, OP, R>(op: OP) -> R +where + OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend, + R: DynSend, +{ + let op = FromDyn::from(op); + rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() +} - #[inline] - pub fn join(oper_a: A, oper_b: B) -> (RA, RB) - where - A: FnOnce() -> RA + DynSend, - B: FnOnce() -> RB + DynSend, - { - if mode::is_dyn_thread_safe() { - let oper_a = FromDyn::from(oper_a); - let oper_b = FromDyn::from(oper_b); - let (a, b) = parallel_guard(|guard| { - rayon::join( - move || guard.run(move || FromDyn::from(oper_a.into_inner()())), - move || guard.run(move || FromDyn::from(oper_b.into_inner()())), - ) - }); - (a.unwrap().into_inner(), b.unwrap().into_inner()) - } else { - super::disabled::join(oper_a, oper_b) - } - } - - use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator}; - - pub fn par_for_each_in + IntoParallelIterator>( - t: T, - for_each: impl Fn(I) + DynSync + DynSend, - ) { - parallel_guard(|guard| { - if mode::is_dyn_thread_safe() { - let for_each = FromDyn::from(for_each); - t.into_par_iter().for_each(|i| { - guard.run(|| for_each(i)); - }); - } else { - t.into_iter().for_each(|i| { - guard.run(|| for_each(i)); - }); - } +#[inline] +pub fn join(oper_a: A, oper_b: B) -> (RA, RB) +where + A: FnOnce() -> RA + DynSend, + B: FnOnce() -> RB + DynSend, +{ + if mode::is_dyn_thread_safe() { + let oper_a = FromDyn::from(oper_a); + let oper_b = FromDyn::from(oper_b); + let (a, b) = parallel_guard(|guard| { + rayon::join( + move || guard.run(move || FromDyn::from(oper_a.into_inner()())), + move || guard.run(move || FromDyn::from(oper_b.into_inner()())), + ) }); - } - - pub fn try_par_for_each_in< - T: IntoIterator + IntoParallelIterator::Item>, - E: Send, - >( - t: T, - for_each: impl Fn(::Item) -> Result<(), E> + DynSync + DynSend, - ) -> Result<(), E> { - parallel_guard(|guard| { - if mode::is_dyn_thread_safe() { - let for_each = FromDyn::from(for_each); - t.into_par_iter() - .filter_map(|i| guard.run(|| for_each(i))) - .reduce(|| Ok(()), Result::and) - } else { - t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and) - } - }) - } - - pub fn par_map< - I, - T: IntoIterator + IntoParallelIterator, - R: std::marker::Send, - C: FromIterator + FromParallelIterator, - >( - t: T, - map: impl Fn(I) -> R + DynSync + DynSend, - ) -> C { - parallel_guard(|guard| { - if mode::is_dyn_thread_safe() { - let map = FromDyn::from(map); - t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect() - } else { - t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() - } - }) + (a.unwrap().into_inner(), b.unwrap().into_inner()) + } else { + serial_join(oper_a, oper_b) } } + +pub fn par_for_each_in + IntoParallelIterator>( + t: T, + for_each: impl Fn(I) + DynSync + DynSend, +) { + parallel_guard(|guard| { + if mode::is_dyn_thread_safe() { + let for_each = FromDyn::from(for_each); + t.into_par_iter().for_each(|i| { + guard.run(|| for_each(i)); + }); + } else { + t.into_iter().for_each(|i| { + guard.run(|| for_each(i)); + }); + } + }); +} + +pub fn try_par_for_each_in< + T: IntoIterator + IntoParallelIterator::Item>, + E: Send, +>( + t: T, + for_each: impl Fn(::Item) -> Result<(), E> + DynSync + DynSend, +) -> Result<(), E> 
{ + parallel_guard(|guard| { + if mode::is_dyn_thread_safe() { + let for_each = FromDyn::from(for_each); + t.into_par_iter() + .filter_map(|i| guard.run(|| for_each(i))) + .reduce(|| Ok(()), Result::and) + } else { + t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and) + } + }) +} + +pub fn par_map< + I, + T: IntoIterator + IntoParallelIterator, + R: std::marker::Send, + C: FromIterator + FromParallelIterator, +>( + t: T, + map: impl Fn(I) -> R + DynSync + DynSend, +) -> C { + parallel_guard(|guard| { + if mode::is_dyn_thread_safe() { + let map = FromDyn::from(map); + t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect() + } else { + t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() + } + }) +} diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs index 314496ce9f0..21ec5cf6c13 100644 --- a/compiler/rustc_data_structures/src/sync/vec.rs +++ b/compiler/rustc_data_structures/src/sync/vec.rs @@ -4,40 +4,23 @@ use rustc_index::Idx; #[derive(Default)] pub struct AppendOnlyIndexVec { - #[cfg(not(parallel_compiler))] - vec: elsa::vec::FrozenVec, - #[cfg(parallel_compiler)] vec: elsa::sync::LockFreeFrozenVec, _marker: PhantomData, } impl AppendOnlyIndexVec { pub fn new() -> Self { - Self { - #[cfg(not(parallel_compiler))] - vec: elsa::vec::FrozenVec::new(), - #[cfg(parallel_compiler)] - vec: elsa::sync::LockFreeFrozenVec::new(), - _marker: PhantomData, - } + Self { vec: elsa::sync::LockFreeFrozenVec::new(), _marker: PhantomData } } pub fn push(&self, val: T) -> I { - #[cfg(not(parallel_compiler))] - let i = self.vec.len(); - #[cfg(not(parallel_compiler))] - self.vec.push(val); - #[cfg(parallel_compiler)] let i = self.vec.push(val); I::new(i) } pub fn get(&self, i: I) -> Option { let i = i.index(); - #[cfg(not(parallel_compiler))] - return self.vec.get_copy(i); - #[cfg(parallel_compiler)] - return self.vec.get(i); + self.vec.get(i) } } diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs index b6efcada10b..d75af009850 100644 --- a/compiler/rustc_data_structures/src/sync/worker_local.rs +++ b/compiler/rustc_data_structures/src/sync/worker_local.rs @@ -5,8 +5,9 @@ use std::ptr; use std::sync::Arc; use parking_lot::Mutex; -#[cfg(parallel_compiler)] -use {crate::outline, crate::sync::CacheAligned}; + +use crate::outline; +use crate::sync::CacheAligned; /// A pointer to the `RegistryData` which uniquely identifies a registry. /// This identifier can be reused if the registry gets freed. @@ -21,7 +22,6 @@ impl RegistryId { /// /// Note that there's a race possible where the identifier in `THREAD_DATA` could be reused /// so this can succeed from a different registry. - #[cfg(parallel_compiler)] fn verify(self) -> usize { let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get())); @@ -102,11 +102,7 @@ impl Registry { /// worker local value through the `Deref` impl on the registry associated with the thread it was /// created on. It will panic otherwise. pub struct WorkerLocal { - #[cfg(not(parallel_compiler))] - local: T, - #[cfg(parallel_compiler)] locals: Box<[CacheAligned]>, - #[cfg(parallel_compiler)] registry: Registry, } @@ -114,7 +110,6 @@ pub struct WorkerLocal { // or it will panic for threads without an associated local. So there isn't a need for `T` to do // it's own synchronization. 
The `verify` method on `RegistryId` has an issue where the id // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse. -#[cfg(parallel_compiler)] unsafe impl Sync for WorkerLocal {} impl WorkerLocal { @@ -122,33 +117,17 @@ impl WorkerLocal { /// value this worker local should take for each thread in the registry. #[inline] pub fn new T>(mut initial: F) -> WorkerLocal { - #[cfg(parallel_compiler)] - { - let registry = Registry::current(); - WorkerLocal { - locals: (0..registry.0.thread_limit.get()) - .map(|i| CacheAligned(initial(i))) - .collect(), - registry, - } - } - #[cfg(not(parallel_compiler))] - { - WorkerLocal { local: initial(0) } + let registry = Registry::current(); + WorkerLocal { + locals: (0..registry.0.thread_limit.get()).map(|i| CacheAligned(initial(i))).collect(), + registry, } } /// Returns the worker-local values for each thread #[inline] pub fn into_inner(self) -> impl Iterator { - #[cfg(parallel_compiler)] - { - self.locals.into_vec().into_iter().map(|local| local.0) - } - #[cfg(not(parallel_compiler))] - { - std::iter::once(self.local) - } + self.locals.into_vec().into_iter().map(|local| local.0) } } @@ -156,13 +135,6 @@ impl Deref for WorkerLocal { type Target = T; #[inline(always)] - #[cfg(not(parallel_compiler))] - fn deref(&self) -> &T { - &self.local - } - - #[inline(always)] - #[cfg(parallel_compiler)] fn deref(&self) -> &T { // This is safe because `verify` will only return values less than // `self.registry.thread_limit` which is the size of the `self.locals` array. diff --git a/compiler/rustc_driver_impl/Cargo.toml b/compiler/rustc_driver_impl/Cargo.toml index ef577c03218..ccc84123bea 100644 --- a/compiler/rustc_driver_impl/Cargo.toml +++ b/compiler/rustc_driver_impl/Cargo.toml @@ -77,9 +77,4 @@ rustc_randomized_layouts = [ 'rustc_index/rustc_randomized_layouts', 'rustc_middle/rustc_randomized_layouts' ] -rustc_use_parallel_compiler = [ - 'rustc_data_structures/rustc_use_parallel_compiler', - 'rustc_interface/rustc_use_parallel_compiler', - 'rustc_middle/rustc_use_parallel_compiler' -] # tidy-alphabetical-end diff --git a/compiler/rustc_error_messages/Cargo.toml b/compiler/rustc_error_messages/Cargo.toml index 5b6b8b3f183..6974c12f994 100644 --- a/compiler/rustc_error_messages/Cargo.toml +++ b/compiler/rustc_error_messages/Cargo.toml @@ -19,8 +19,3 @@ rustc_span = { path = "../rustc_span" } tracing = "0.1" unic-langid = { version = "0.9.0", features = ["macros"] } # tidy-alphabetical-end - -[features] -# tidy-alphabetical-start -rustc_use_parallel_compiler = ['rustc_baked_icu_data/rustc_use_parallel_compiler'] -# tidy-alphabetical-end diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs index 2ede7d805fa..74b6d63365a 100644 --- a/compiler/rustc_error_messages/src/lib.rs +++ b/compiler/rustc_error_messages/src/lib.rs @@ -8,12 +8,9 @@ // tidy-alphabetical-end use std::borrow::Cow; -#[cfg(not(parallel_compiler))] -use std::cell::LazyCell as Lazy; use std::error::Error; use std::path::{Path, PathBuf}; -#[cfg(parallel_compiler)] -use std::sync::LazyLock as Lazy; +use std::sync::LazyLock; use std::{fmt, fs, io}; use fluent_bundle::FluentResource; @@ -21,9 +18,6 @@ pub use fluent_bundle::types::FluentType; pub use fluent_bundle::{self, FluentArgs, FluentError, FluentValue}; use fluent_syntax::parser::ParserError; use icu_provider_adapters::fallback::{LocaleFallbackProvider, LocaleFallbacker}; -#[cfg(not(parallel_compiler))] -use intl_memoizer::IntlLangMemoizer; 
-#[cfg(parallel_compiler)]
 use intl_memoizer::concurrent::IntlLangMemoizer;
 use rustc_data_structures::sync::{IntoDynSyncSend, Lrc};
 use rustc_macros::{Decodable, Encodable};
@@ -34,12 +28,6 @@ pub use unic_langid::{LanguageIdentifier, langid};
 
 pub type FluentBundle =
     IntoDynSyncSend<fluent_bundle::bundle::FluentBundle<FluentResource, IntlLangMemoizer>>;
 
-#[cfg(not(parallel_compiler))]
-fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
-    IntoDynSyncSend(fluent_bundle::bundle::FluentBundle::new(locales))
-}
-
-#[cfg(parallel_compiler)]
 fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
     IntoDynSyncSend(fluent_bundle::bundle::FluentBundle::new_concurrent(locales))
 }
@@ -217,7 +205,7 @@ fn register_functions(bundle: &mut FluentBundle) {
 
 /// Type alias for the result of `fallback_fluent_bundle` - a reference-counted pointer to a lazily
 /// evaluated fluent bundle.
-pub type LazyFallbackBundle = Lrc<Lazy<FluentBundle, impl Fn() -> FluentBundle>>;
+pub type LazyFallbackBundle = Lrc<LazyLock<FluentBundle, impl Fn() -> FluentBundle>>;
 
 /// Return the default `FluentBundle` with standard "en-US" diagnostic messages.
 #[instrument(level = "trace", skip(resources))]
@@ -225,7 +213,7 @@ pub fn fallback_fluent_bundle(
     resources: Vec<&'static str>,
     with_directionality_markers: bool,
 ) -> LazyFallbackBundle {
-    Lrc::new(Lazy::new(move || {
+    Lrc::new(LazyLock::new(move || {
         let mut fallback_bundle = new_bundle(vec![langid!("en-US")]);
 
         register_functions(&mut fallback_bundle);
@@ -548,15 +536,6 @@ pub fn fluent_value_from_str_list_sep_by_and(l: Vec<Cow<'static, str>>) -> FluentValue<'static> {
         Cow::Owned(result)
     }
 
-    #[cfg(not(parallel_compiler))]
-    fn as_string_threadsafe(
-        &self,
-        _intls: &intl_memoizer::concurrent::IntlLangMemoizer,
-    ) -> Cow<'static, str> {
-        unreachable!("`as_string_threadsafe` is not used in non-parallel rustc")
-    }
-
-    #[cfg(parallel_compiler)]
     fn as_string_threadsafe(
         &self,
         intls: &intl_memoizer::concurrent::IntlLangMemoizer,
diff --git a/compiler/rustc_errors/Cargo.toml b/compiler/rustc_errors/Cargo.toml
index 41ebe4ae267..06bae57638f 100644
--- a/compiler/rustc_errors/Cargo.toml
+++ b/compiler/rustc_errors/Cargo.toml
@@ -36,8 +36,3 @@ features = [
     "Win32_Security",
     "Win32_System_Threading",
 ]
-
-[features]
-# tidy-alphabetical-start
-rustc_use_parallel_compiler = ['rustc_error_messages/rustc_use_parallel_compiler']
-# tidy-alphabetical-end
diff --git a/compiler/rustc_errors/src/tests.rs b/compiler/rustc_errors/src/tests.rs
index 70179237e5d..376fd24d57b 100644
--- a/compiler/rustc_errors/src/tests.rs
+++ b/compiler/rustc_errors/src/tests.rs
@@ -26,16 +26,11 @@ fn make_dummy(ftl: &'static str) -> Dummy {
 
     let langid_en = langid!("en-US");
 
-    #[cfg(parallel_compiler)]
     let mut bundle: FluentBundle =
         IntoDynSyncSend(crate::fluent_bundle::bundle::FluentBundle::new_concurrent(vec![
             langid_en,
         ]));
 
-    #[cfg(not(parallel_compiler))]
-    let mut bundle: FluentBundle =
-        IntoDynSyncSend(crate::fluent_bundle::bundle::FluentBundle::new(vec![langid_en]));
-
     bundle.add_resource(resource).expect("Failed to add FTL resources to the bundle.");
 
     Dummy { bundle }
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
index b5abf145d6b..7a2ba07ce87 100644
--- a/compiler/rustc_interface/Cargo.toml
+++ b/compiler/rustc_interface/Cargo.toml
@@ -5,8 +5,8 @@ edition = "2021"
 
 [dependencies]
 # tidy-alphabetical-start
-rustc-rayon = { version = "0.5.0", optional = true }
-rustc-rayon-core = { version = "0.5.0", optional = true }
+rustc-rayon = { version = "0.5.0" }
+rustc-rayon-core = { version = "0.5.0" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_ast_lowering = { path = "../rustc_ast_lowering" }
 rustc_ast_passes = { path = "../rustc_ast_passes" }
@@ -54,10 +54,4 @@ tracing = "0.1"
 
 [features]
 # tidy-alphabetical-start
 llvm = ['dep:rustc_codegen_llvm']
-rustc_use_parallel_compiler = [
-    'dep:rustc-rayon',
-    'dep:rustc-rayon-core',
-    'rustc_query_impl/rustc_use_parallel_compiler',
-    'rustc_errors/rustc_use_parallel_compiler'
-]
 # tidy-alphabetical-end
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index 71e8accf5a3..266d2e5e8bc 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -6,7 +6,6 @@ use std::{env, iter, thread};
 
 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
-#[cfg(parallel_compiler)]
 use rustc_data_structures::sync;
 use rustc_metadata::{DylibError, load_symbol_from_dylib};
 use rustc_middle::ty::CurrentGcx;
@@ -117,19 +116,6 @@ fn run_in_thread_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
     })
 }
 
-#[cfg(not(parallel_compiler))]
-pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
-    thread_builder_diag: &EarlyDiagCtxt,
-    edition: Edition,
-    _threads: usize,
-    sm_inputs: SourceMapInputs,
-    f: F,
-) -> R {
-    let thread_stack_size = init_stack_size(thread_builder_diag);
-    run_in_thread_with_globals(thread_stack_size, edition, sm_inputs, f)
-}
-
-#[cfg(parallel_compiler)]
 pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>(
     thread_builder_diag: &EarlyDiagCtxt,
     edition: Edition,
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index 485d1c14df3..3bda3a4aa63 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -11,7 +11,7 @@ either = "1.5.0"
 field-offset = "0.3.5"
 gsgdt = "0.1.2"
 polonius-engine = "0.13.0"
-rustc-rayon-core = { version = "0.5.0", optional = true }
+rustc-rayon-core = { version = "0.5.0" }
 rustc_abi = { path = "../rustc_abi" }
 rustc_apfloat = "0.2.0"
 rustc_arena = { path = "../rustc_arena" }
@@ -43,5 +43,4 @@ tracing = "0.1"
 [features]
 # tidy-alphabetical-start
 rustc_randomized_layouts = []
-rustc_use_parallel_compiler = ["dep:rustc-rayon-core"]
 # tidy-alphabetical-end
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 9abad6d1a68..fcca240af7c 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -22,9 +22,9 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::steal::Steal;
-use rustc_data_structures::sync::{self, FreezeReadGuard, Lock, Lrc, RwLock, WorkerLocal};
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sync::{DynSend, DynSync};
+use rustc_data_structures::sync::{
+    self, DynSend, DynSync, FreezeReadGuard, Lock, Lrc, RwLock, WorkerLocal,
+};
 use rustc_data_structures::unord::UnordSet;
 use rustc_errors::{
     Applicability, Diag, DiagCtxtHandle, ErrorGuaranteed, LintDiagnostic, MultiSpan,
@@ -1260,9 +1260,7 @@ pub struct TyCtxt<'tcx> {
 }
 
 // Explicitly implement `DynSync` and `DynSend` for `TyCtxt` to short circuit trait resolution.
-#[cfg(parallel_compiler)]
 unsafe impl DynSend for TyCtxt<'_> {}
-#[cfg(parallel_compiler)]
 unsafe impl DynSync for TyCtxt<'_> {}
 
 fn _assert_tcx_fields() {
     sync::assert_dyn_sync::<&'_ GlobalCtxt<'_>>();
@@ -1384,9 +1382,7 @@ pub struct CurrentGcx {
     value: Lrc<RwLock<Option<*const ()>>>,
 }
 
-#[cfg(parallel_compiler)]
 unsafe impl DynSend for CurrentGcx {}
-#[cfg(parallel_compiler)]
 unsafe impl DynSync for CurrentGcx {}
 
 impl CurrentGcx {
diff --git a/compiler/rustc_middle/src/ty/context/tls.rs b/compiler/rustc_middle/src/ty/context/tls.rs
index 6a5d3030646..eaab8474dd2 100644
--- a/compiler/rustc_middle/src/ty/context/tls.rs
+++ b/compiler/rustc_middle/src/ty/context/tls.rs
@@ -1,5 +1,3 @@
-#[cfg(not(parallel_compiler))]
-use std::cell::Cell;
 use std::{mem, ptr};
 
 use rustc_data_structures::sync::{self, Lock};
@@ -50,16 +48,8 @@ impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
 }
 
 // Import the thread-local variable from Rayon, which is preserved for Rayon jobs.
-#[cfg(parallel_compiler)]
 use rayon_core::tlv::TLV;
 
-// Otherwise define our own
-#[cfg(not(parallel_compiler))]
-thread_local! {
-    /// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
-    static TLV: Cell<*const ()> = const { Cell::new(ptr::null()) };
-}
-
 #[inline]
 fn erase(context: &ImplicitCtxt<'_, '_>) -> *const () {
     context as *const _ as *const ()
diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index 737f1362b34..fd84d75b53f 100644
--- a/compiler/rustc_middle/src/ty/generic_args.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -143,12 +143,10 @@ impl<'tcx> rustc_type_ir::inherent::IntoKind for GenericArg<'tcx> {
     }
 }
 
-#[cfg(parallel_compiler)]
 unsafe impl<'tcx> rustc_data_structures::sync::DynSend for GenericArg<'tcx> where
     &'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSend
 {
 }
-#[cfg(parallel_compiler)]
 unsafe impl<'tcx> rustc_data_structures::sync::DynSync for GenericArg<'tcx> where
     &'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSync
 {
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
index ea07cd3a1b9..30a5586f59c 100644
--- a/compiler/rustc_middle/src/ty/list.rs
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -5,7 +5,6 @@ use std::ops::Deref;
 use std::{fmt, iter, mem, ptr, slice};
 
 use rustc_data_structures::aligned::{Aligned, align_of};
-#[cfg(parallel_compiler)]
 use rustc_data_structures::sync::DynSync;
 use rustc_serialize::{Encodable, Encoder};
 
@@ -259,7 +258,6 @@ impl<'a, H, T: Copy> IntoIterator for &'a RawList<H, T> {
 unsafe impl<H: Sync, T: Sync> Sync for RawList<H, T> {}
 
 // We need this since `List` uses extern type `OpaqueListContents`.
-#[cfg(parallel_compiler)]
 unsafe impl<H: DynSync, T: DynSync> DynSync for RawList<H, T> {}
 
 // Safety:
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index dac81a6dfbb..39e295ca521 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -496,12 +496,10 @@ impl<'tcx> rustc_type_ir::inherent::IntoKind for Term<'tcx> {
     }
 }
 
-#[cfg(parallel_compiler)]
 unsafe impl<'tcx> rustc_data_structures::sync::DynSend for Term<'tcx> where
     &'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSend
 {
 }
-#[cfg(parallel_compiler)]
 unsafe impl<'tcx> rustc_data_structures::sync::DynSync for Term<'tcx> where
     &'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSync
 {
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index 2bb1be22b98..6e8fd32610b 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -19,8 +19,3 @@ rustc_span = { path = "../rustc_span" }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
-
-[features]
-# tidy-alphabetical-start
-rustc_use_parallel_compiler = ["rustc_query_system/rustc_use_parallel_compiler"]
-# tidy-alphabetical-end
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index 2f42fa47728..96b210accdb 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 [dependencies]
 # tidy-alphabetical-start
 parking_lot = "0.12"
-rustc-rayon-core = { version = "0.5.0", optional = true }
+rustc-rayon-core = { version = "0.5.0" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
@@ -23,8 +23,3 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
-
-[features]
-# tidy-alphabetical-start
-rustc_use_parallel_compiler = ["dep:rustc-rayon-core"]
-# tidy-alphabetical-end
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 5e30f17d626..d806e995d1b 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -837,12 +837,6 @@ impl<D: Deps> DepGraphData<D> {
     ) -> Option<DepNodeIndex> {
         let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
 
-        #[cfg(not(parallel_compiler))]
-        {
-            debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
-        }
-
         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
@@ -871,13 +865,6 @@ impl<D: Deps> DepGraphData<D> {
         // Maybe store a list on disk and encode this fact in the DepNodeState
         let side_effects = qcx.load_side_effects(prev_dep_node_index);
 
-        #[cfg(not(parallel_compiler))]
-        debug_assert!(
-            self.colors.get(prev_dep_node_index).is_none(),
-            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
-            insertion for {dep_node:?}"
-        );
-
         if side_effects.maybe_any() {
             qcx.dep_context().dep_graph().with_query_deserialization(|| {
                 self.emit_side_effects(qcx, dep_node_index, side_effects)
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 5af41b9e687..2a7d759ab35 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,21 +1,16 @@
 use std::hash::Hash;
 use std::io::Write;
+use std::iter;
 use std::num::NonZero;
+use std::sync::Arc;
 
-use rustc_data_structures::fx::FxHashMap;
+use parking_lot::{Condvar, Mutex};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::jobserver;
 use rustc_errors::{Diag, DiagCtxtHandle};
 use rustc_hir::def::DefKind;
 use rustc_session::Session;
-use rustc_span::Span;
-#[cfg(parallel_compiler)]
-use {
-    parking_lot::{Condvar, Mutex},
-    rustc_data_structures::fx::FxHashSet,
-    rustc_data_structures::jobserver,
-    rustc_span::DUMMY_SP,
-    std::iter,
-    std::sync::Arc,
-};
+use rustc_span::{DUMMY_SP, Span};
 
 use crate::dep_graph::DepContext;
 use crate::error::CycleStack;
@@ -41,17 +36,14 @@ impl QueryJobId {
         map.get(&self).unwrap().query.clone()
     }
 
-    #[cfg(parallel_compiler)]
     fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
-    #[cfg(parallel_compiler)]
     fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
-    #[cfg(parallel_compiler)]
     fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
@@ -75,7 +67,6 @@ pub struct QueryJob {
     pub parent: Option<QueryJobId>,
 
     /// The latch that is used to wait on this job.
-    #[cfg(parallel_compiler)]
     latch: Option<QueryLatch>,
 }
 
@@ -83,16 +74,9 @@ impl QueryJob {
     /// Creates a new query job.
     #[inline]
     pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
-        QueryJob {
-            id,
-            span,
-            parent,
-            #[cfg(parallel_compiler)]
-            latch: None,
-        }
+        QueryJob { id, span, parent, latch: None }
     }
 
-    #[cfg(parallel_compiler)]
     pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
@@ -106,11 +90,8 @@ impl QueryJob {
     /// as there are no concurrent jobs which could be waiting on us
     #[inline]
     pub fn signal_complete(self) {
-        #[cfg(parallel_compiler)]
-        {
-            if let Some(latch) = self.latch {
-                latch.set();
-            }
+        if let Some(latch) = self.latch {
+            latch.set();
         }
     }
 }
@@ -176,7 +157,6 @@ impl QueryJobId {
     }
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Debug)]
 struct QueryWaiter {
     query: Option<QueryJobId>,
@@ -185,7 +165,6 @@ struct QueryWaiter {
     condvar: Condvar,
     cycle: Mutex<Option<CycleError>>,
 }
 
-#[cfg(parallel_compiler)]
 impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
@@ -193,20 +172,17 @@ impl QueryWaiter {
     }
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Debug)]
 struct QueryLatchInfo {
     complete: bool,
     waiters: Vec<Arc<QueryWaiter>>,
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Clone, Debug)]
 pub(super) struct QueryLatch {
     info: Arc<Mutex<QueryLatchInfo>>,
 }
 
-#[cfg(parallel_compiler)]
 impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
@@ -273,7 +249,6 @@ impl QueryLatch {
 }
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
-#[cfg(parallel_compiler)]
 type Waiter = (QueryJobId, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
@@ -285,7 +260,6 @@ type Waiter = (QueryJobId, usize);
 /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
-#[cfg(parallel_compiler)]
 fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
     F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
@@ -316,7 +290,6 @@ where
 /// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
-#[cfg(parallel_compiler)]
 fn cycle_check(
     query_map: &QueryMap,
     query: QueryJobId,
@@ -357,7 +330,6 @@ fn cycle_check(
 /// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
-#[cfg(parallel_compiler)]
 fn connected_to_root(
     query_map: &QueryMap,
     query: QueryJobId,
@@ -380,7 +352,6 @@ fn connected_to_root(
 }
 
 // Deterministically pick an query from a list
-#[cfg(parallel_compiler)]
 fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
     F: Fn(&T) -> (Span, QueryJobId),
@@ -406,7 +377,6 @@ where
 /// the function return true.
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
-#[cfg(parallel_compiler)]
 fn remove_cycle(
     query_map: &QueryMap,
     jobs: &mut Vec<QueryJobId>,
@@ -511,7 +481,6 @@ fn remove_cycle(
 /// uses a query latch and then resuming that waiter.
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
-#[cfg(parallel_compiler)]
 pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) {
     let mut wakelist = Vec::new();
     let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 3e35fdb77b3..b81386f06ec 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -2,10 +2,9 @@ mod plumbing;
 pub use self::plumbing::*;
 
 mod job;
-#[cfg(parallel_compiler)]
-pub use self::job::break_query_cycles;
 pub use self::job::{
-    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, print_query_stack, report_cycle,
+    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
+    report_cycle,
 };
 
 mod caches;
@@ -38,7 +37,6 @@ pub struct QueryStackFrame {
     pub dep_kind: DepKind,
     /// This hash is used to deterministically pick
     /// a query to remove cycles in the parallel compiler.
-    #[cfg(parallel_compiler)]
     hash: Hash64,
 }
 
@@ -51,18 +49,9 @@ impl QueryStackFrame {
         def_kind: Option<DefKind>,
         dep_kind: DepKind,
         ty_def_id: Option<DefId>,
-        _hash: impl FnOnce() -> Hash64,
+        hash: impl FnOnce() -> Hash64,
     ) -> Self {
-        Self {
-            description,
-            span,
-            def_id,
-            def_kind,
-            ty_def_id,
-            dep_kind,
-            #[cfg(parallel_compiler)]
-            hash: _hash(),
-        }
+        Self { description, span, def_id, def_kind, ty_def_id, dep_kind, hash: hash() }
     }
 
     // FIXME(eddyb) Get more valid `Span`s on queries.
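// ---------------------------------------------------------------------------
// Illustration (outside the patch): the query-system hunks above replace the
// compile-time `cfg(parallel_compiler)` split with a runtime check, so the
// latch machinery is always compiled in and only exercised when more than one
// thread runs. A minimal, self-contained sketch of that pattern; it uses
// std's Mutex/Condvar in place of parking_lot's, and `DYN_THREAD_SAFE` and
// `Latch` are illustrative stand-ins for `sync::is_dyn_thread_safe()` and
// `QueryLatch`, not the compiler's own definitions.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Condvar, Mutex};

static DYN_THREAD_SAFE: AtomicBool = AtomicBool::new(false);

fn is_dyn_thread_safe() -> bool {
    DYN_THREAD_SAFE.load(Ordering::Relaxed)
}

// Always present, like `QueryJob::latch` after this change; there is no
// longer a second `#[cfg(not(parallel_compiler))]` copy of each function.
struct Latch {
    state: Mutex<bool>,
    cond: Condvar,
}

impl Latch {
    fn new() -> Arc<Self> {
        Arc::new(Latch { state: Mutex::new(false), cond: Condvar::new() })
    }

    // Block until another thread calls `set` (cf. waiting on a `QueryLatch`).
    fn wait(&self) {
        let mut done = self.state.lock().unwrap();
        while !*done {
            done = self.cond.wait(done).unwrap();
        }
    }

    // Wake all waiters (cf. `QueryLatch::set` in `signal_complete`).
    fn set(&self) {
        *self.state.lock().unwrap() = true;
        self.cond.notify_all();
    }
}

fn main() {
    // With `-Z threads=1` this flag stays false and the latch is never waited
    // on, which is how the single-threaded default keeps its old behavior.
    DYN_THREAD_SAFE.store(true, Ordering::Relaxed);

    let latch = Latch::new();
    let waiter = {
        let latch = Arc::clone(&latch);
        std::thread::spawn(move || {
            if is_dyn_thread_safe() {
                latch.wait();
            }
        })
    };
    latch.set();
    waiter.join().unwrap();
}
// ---------------------------------------------------------------------------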
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 17486be04dc..aac8ab87c64 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -13,7 +13,6 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
 use rustc_data_structures::{outline, sync};
 use rustc_errors::{Diag, FatalError, StashKey};
 use rustc_span::{DUMMY_SP, Span};
@@ -25,9 +24,7 @@ use crate::HandleCycleError;
 use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
-#[cfg(parallel_compiler)]
-use crate::query::job::QueryLatch;
-use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, report_cycle};
+use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
 use crate::query::{
     QueryContext, QueryMap, QuerySideEffects, QueryStackFrame, SerializedDepNodeIndex,
 };
@@ -263,7 +260,6 @@ where
 }
 
 #[inline(always)]
-#[cfg(parallel_compiler)]
 fn wait_for_query<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -334,7 +330,7 @@ where
     // re-executing the query since `try_start` only checks that the query is not currently
     // executing, but another thread may have already completed the query and stores it result
     // in the query cache.
-    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
+    if qcx.dep_context().sess().threads() > 1 {
         if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
@@ -359,7 +355,6 @@ where
         Entry::Occupied(mut entry) => {
             match entry.get_mut() {
                 QueryResult::Started(job) => {
-                    #[cfg(parallel_compiler)]
                     if sync::is_dyn_thread_safe() {
                         // Get the latch out
                         let latch = job.latch();
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index c57d516fcae..a9ca5dbe509 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -1300,7 +1300,6 @@ pub fn register_expn_id(
     let expn_id = ExpnId { krate, local_id };
     HygieneData::with(|hygiene_data| {
         let _old_data = hygiene_data.foreign_expn_data.insert(expn_id, data);
-        debug_assert!(_old_data.is_none() || cfg!(parallel_compiler));
         let _old_hash = hygiene_data.foreign_expn_hashes.insert(expn_id, hash);
         debug_assert!(_old_hash.is_none() || _old_hash == Some(hash));
         let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id);
@@ -1423,18 +1422,7 @@ pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContext
             ctxt_data = old.clone();
         }
 
-        let dummy = std::mem::replace(
-            &mut hygiene_data.syntax_context_data[ctxt.as_u32() as usize],
-            ctxt_data,
-        );
-        if cfg!(not(parallel_compiler)) {
-            // Make sure nothing weird happened while `decode_data` was running.
-            // We used `kw::Empty` for the dummy value and we expect nothing to be
-            // modifying the dummy entry.
-            // This does not hold for the parallel compiler as another thread may
-            // have inserted the fully decoded data.
-            assert_eq!(dummy.dollar_crate_name, kw::Empty);
-        }
+        hygiene_data.syntax_context_data[ctxt.as_u32() as usize] = ctxt_data;
     });
 
     // Mark the context as completed
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index 5b1be5bca05..0274a757966 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -521,14 +521,6 @@ impl SpanData {
     }
 }
 
-// The interner is pointed to by a thread local value which is only set on the main thread
-// with parallelization is disabled. So we don't allow `Span` to transfer between threads
-// to avoid panics and other errors, even though it would be memory safe to do so.
-#[cfg(not(parallel_compiler))]
-impl !Send for Span {}
-#[cfg(not(parallel_compiler))]
-impl !Sync for Span {}
-
 impl PartialOrd for Span {
     fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
         PartialOrd::partial_cmp(&self.data(), &rhs.data())
diff --git a/config.example.toml b/config.example.toml
index cd7ec6a05bc..5e4e0156909 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -594,8 +594,7 @@
 
 # Build a multi-threaded rustc. This allows users to use parallel rustc
 # via the unstable option `-Z threads=n`.
-# Since stable/beta channels only allow using stable features,
-# `parallel-compiler = false` should be set for these channels.
+# This option is deprecated and always true.
 #parallel-compiler = true
 
 # The default linker that will be hard-coded into the generated
diff --git a/src/bootstrap/src/core/builder/cargo.rs b/src/bootstrap/src/core/builder/cargo.rs
index c3d0994f164..7b93a68dedc 100644
--- a/src/bootstrap/src/core/builder/cargo.rs
+++ b/src/bootstrap/src/core/builder/cargo.rs
@@ -1198,15 +1198,6 @@ impl Builder<'_> {
             rustflags.arg("-Zinline-mir-preserve-debug");
         }
 
-        if self.config.rustc_parallel
-            && matches!(mode, Mode::ToolRustc | Mode::Rustc | Mode::Codegen)
-        {
-            // keep in sync with `bootstrap/lib.rs:Build::rustc_features`
-            // `cfg` option for rustc, `features` option for cargo, for conditional compilation
-            rustflags.arg("--cfg=parallel_compiler");
-            rustdocflags.arg("--cfg=parallel_compiler");
-        }
-
         Cargo {
             command: cargo,
             compiler,
diff --git a/src/bootstrap/src/core/config/config.rs b/src/bootstrap/src/core/config/config.rs
index 8115aea033d..13ee664302c 100644
--- a/src/bootstrap/src/core/config/config.rs
+++ b/src/bootstrap/src/core/config/config.rs
@@ -276,7 +276,6 @@ pub struct Config {
     pub rust_strip: bool,
     pub rust_frame_pointers: bool,
     pub rust_stack_protector: Option<String>,
-    pub rustc_parallel: bool,
     pub rustc_default_linker: Option<String>,
     pub rust_optimize_tests: bool,
     pub rust_dist_src: bool,
@@ -1222,7 +1221,6 @@ impl Config {
             bindir: "bin".into(),
             dist_include_mingw_linker: true,
             dist_compression_profile: "fast".into(),
-            rustc_parallel: true,
 
             stdout_is_tty: std::io::stdout().is_terminal(),
             stderr_is_tty: std::io::stderr().is_terminal(),
@@ -1771,8 +1769,14 @@ impl Config {
         config.rust_randomize_layout = randomize_layout.unwrap_or_default();
         config.llvm_tools_enabled = llvm_tools.unwrap_or(true);
 
-        config.rustc_parallel =
-            parallel_compiler.unwrap_or(config.channel == "dev" || config.channel == "nightly");
+
+        // FIXME: Remove this option at the end of 2024.
+        if parallel_compiler.is_some() {
+            println!(
+                "WARNING: The `rust.parallel-compiler` option is deprecated and does nothing. The parallel compiler (with one thread) is now the default"
+            );
+        }
+
         config.llvm_enzyme =
             llvm_enzyme.unwrap_or(config.channel == "dev" || config.channel == "nightly");
         config.rustc_default_linker = default_linker;
diff --git a/src/bootstrap/src/lib.rs b/src/bootstrap/src/lib.rs
index ba74cabcd30..c384fd6bf43 100644
--- a/src/bootstrap/src/lib.rs
+++ b/src/bootstrap/src/lib.rs
@@ -80,11 +80,8 @@ const EXTRA_CHECK_CFGS: &[(Option<Mode>, &str, Option<&[&'static str]>)] = &[
     (Some(Mode::Rustc), "llvm_enzyme", None),
     (Some(Mode::Codegen), "llvm_enzyme", None),
     (Some(Mode::ToolRustc), "llvm_enzyme", None),
-    (Some(Mode::Rustc), "parallel_compiler", None),
-    (Some(Mode::ToolRustc), "parallel_compiler", None),
     (Some(Mode::ToolRustc), "rust_analyzer", None),
     (Some(Mode::ToolStd), "rust_analyzer", None),
-    (Some(Mode::Codegen), "parallel_compiler", None),
     // Any library specific cfgs like `target_os`, `target_arch` should be put in
     // priority the `[lints.rust.unexpected_cfgs.check-cfg]` table
     // in the appropriate `library/{std,alloc,core}/Cargo.toml`
@@ -695,9 +692,6 @@ impl Build {
             features.push("llvm");
         }
         // keep in sync with `bootstrap/compile.rs:rustc_cargo_env`
-        if self.config.rustc_parallel && check("rustc_use_parallel_compiler") {
-            features.push("rustc_use_parallel_compiler");
-        }
         if self.config.rust_randomize_layout {
             features.push("rustc_randomized_layouts");
         }
diff --git a/src/bootstrap/src/utils/change_tracker.rs b/src/bootstrap/src/utils/change_tracker.rs
index 0915ec15a2f..1d05f94e3be 100644
--- a/src/bootstrap/src/utils/change_tracker.rs
+++ b/src/bootstrap/src/utils/change_tracker.rs
@@ -290,6 +290,11 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
         severity: ChangeSeverity::Info,
         summary: "New option `llvm.offload` to control whether the llvm offload runtime for GPU support is built. Implicitly enables the openmp runtime as dependency.",
     },
+    ChangeInfo {
+        change_id: 132282,
+        severity: ChangeSeverity::Warning,
+        summary: "Deprecated `rust.parallel_compiler` as the compiler now always defaults to being parallel (with 1 thread)",
+    },
     ChangeInfo {
         change_id: 132494,
         severity: ChangeSeverity::Info,
diff --git a/src/ci/run.sh b/src/ci/run.sh
index 8e2f525db68..d9c58d9a02a 100755
--- a/src/ci/run.sh
+++ b/src/ci/run.sh
@@ -120,9 +120,6 @@ if [ "$DEPLOY$DEPLOY_ALT" = "1" ]; then
   if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
     RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions"
   elif [ "$DEPLOY_ALT" != "" ]; then
-    if [ "$ALT_PARALLEL_COMPILER" = "" ]; then
-      RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.parallel-compiler=false"
-    fi
     RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-assertions"
     RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.verify-llvm-ir"
   fi
@@ -261,7 +258,7 @@ fi
 
 if [ "$RUN_CHECK_WITH_PARALLEL_QUERIES" != "" ]; then
   rm -f config.toml
-  $SRC/configure --set change-id=99999999 --set rust.parallel-compiler
+  $SRC/configure --set change-id=99999999
 
   # Save the build metrics before we wipe the directory
   if [ "$HAS_METRICS" = 1 ]; then
diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs
index 7c9dcd41e6a..a8f8da1fc89 100644
--- a/src/librustdoc/lib.rs
+++ b/src/librustdoc/lib.rs
@@ -206,7 +206,7 @@ fn init_logging(early_dcx: &EarlyDiagCtxt) {
         .with_verbose_exit(true)
         .with_verbose_entry(true)
         .with_indent_amount(2);
-    #[cfg(all(parallel_compiler, debug_assertions))]
+    #[cfg(debug_assertions)]
     let layer = layer.with_thread_ids(true).with_thread_names(true);
 
     use tracing_subscriber::layer::SubscriberExt;
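// ---------------------------------------------------------------------------
// Illustration (outside the patch): why `rustc_error_messages` moved from
// `LazyCell` to `LazyLock` earlier in this series. `LazyCell` is `!Sync`, so
// an `Lrc<LazyCell<..>>` could not be handed to worker threads; `LazyLock`
// synchronizes first use. A sketch using only std; `make_bundle` is a
// hypothetical stand-in for the closure in `fallback_fluent_bundle`.
use std::sync::{Arc, LazyLock};

fn make_bundle() -> String {
    // Imagine expensive Fluent resource parsing here; it runs at most once.
    String::from("en-US fallback bundle")
}

fn main() {
    let bundle = Arc::new(LazyLock::new(make_bundle));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let bundle = Arc::clone(&bundle);
            std::thread::spawn(move || {
                // The first dereference on any thread initializes the value;
                // every other thread blocks until it is ready.
                assert_eq!(bundle.as_str(), "en-US fallback bundle");
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
// ---------------------------------------------------------------------------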