Rollup merge of #97062 - bjorn3:cg_ssa_driver_refactor, r=compiler-errors

Couple of refactorings to cg_ssa::base::codegen_crate

This makes the code simpler and easier to read.
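The heart of the refactor is structural: the pair of #[cfg(parallel_compiler)] / #[cfg(not(parallel_compiler))] closures and the "setup work in the first iteration" block inside the CGU loop are replaced by a single `if cfg!(parallel_compiler)` expression evaluated once, before the loop. Below is a minimal, self-contained sketch of that pattern; the types are toys and cfg!(debug_assertions) merely stands in for cfg!(parallel_compiler), so this is an illustration of the shape of the change, not the real rustc code.

use std::collections::HashMap;

fn compile_unit(i: usize) -> String {
    // Stand-in for backend.compile_codegen_unit(tcx, cgu.name()).
    format!("module {i}")
}

fn main() {
    let unit_count: usize = 4;

    // One expression instead of two #[cfg]-gated definitions: both branches are
    // type-checked, and the whole map exists before the loop starts.
    let mut pre_compiled: HashMap<usize, String> = if cfg!(debug_assertions) {
        (0..unit_count).map(|i| (i, compile_unit(i))).collect()
    } else {
        HashMap::default()
    };

    for i in 0..unit_count {
        // Use a pre-built module when there is one, otherwise build it on demand.
        let module = pre_compiled.remove(&i).unwrap_or_else(|| compile_unit(i));
        println!("{module}");
    }
}

One practical benefit of `if cfg!(...)` over #[cfg] items is that both branches are always type-checked, so a change to the parallel-only path can no longer compile on one configuration while silently breaking the other.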
Yuki Okushi 2022-05-19 08:22:42 +09:00 committed by GitHub
commit 330e03f682


@@ -15,8 +15,9 @@ use rustc_attr as attr;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+use rustc_data_structures::sync::par_iter;
 #[cfg(parallel_compiler)]
-use rustc_data_structures::sync::{par_iter, ParallelIterator};
+use rustc_data_structures::sync::ParallelIterator;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_hir::lang_items::LangItem;
@@ -607,6 +608,14 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
         second_half.iter().rev().interleave(first_half).copied().collect()
     };
 
+    // Calculate the CGU reuse
+    let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+        codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
+    });
+
+    let mut total_codegen_time = Duration::new(0, 0);
+    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
+
     // The non-parallel compiler can only translate codegen units to LLVM IR
     // on a single thread, leading to a staircase effect where the N LLVM
     // threads have to wait on the single codegen threads to generate work
@@ -617,8 +626,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
     // This likely is a temporary measure. Once we don't have to support the
     // non-parallel compiler anymore, we can compile CGUs end-to-end in
     // parallel and get rid of the complicated scheduling logic.
-    #[cfg(parallel_compiler)]
-    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
+    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
         tcx.sess.time("compile_first_CGU_batch", || {
             // Try to find one CGU to compile per thread.
             let cgus: Vec<_> = cgu_reuse
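The comment above gives the rationale for the pre-compilation batch: under the parallel compiler, up to one not-yet-reused CGU per available thread is compiled eagerly so the LLVM worker threads have work immediately, instead of waiting behind a single codegen thread. Here is a hedged, self-contained sketch of that idea using rayon directly (the compiler itself uses its internal rustc_data_structures::sync::par_iter wrapper); `compile`, the CGU names, and the map layout are illustrative stand-ins, not rustc APIs.

use rayon::prelude::*;
use std::collections::HashMap;

fn compile(name: &str) -> String {
    // Stand-in for backend.compile_codegen_unit(tcx, cgu.name()).
    format!("LLVM module for {name}")
}

fn main() {
    let cgus = ["crate.0", "crate.1", "crate.2", "crate.3", "crate.4"];
    let threads = rayon::current_num_threads();

    // Take at most one unit of work per thread and compile that first batch in
    // parallel, keyed by each CGU's index so the scheduling loop can later pick
    // the pre-built result out of the map with remove(&i).
    let mut pre_compiled: HashMap<usize, String> = cgus
        .iter()
        .enumerate()
        .take(threads)
        .collect::<Vec<_>>()
        .into_par_iter()
        .map(|(i, name)| (i, compile(name)))
        .collect();

    // Remaining units are compiled on demand, as in the main loop of codegen_crate.
    for (i, name) in cgus.iter().enumerate() {
        let module = pre_compiled.remove(&i).unwrap_or_else(|| compile(name));
        println!("cgu {i}: {module}");
    }
}

In the actual change only CGUs whose reuse decision is CguReuse::No are candidates for this first batch, which is why the cgu_reuse vector is now computed up front (see the earlier hunk) rather than lazily in the loop's first iteration.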
@@ -638,48 +646,31 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
                 })
                 .collect();
 
-            (pre_compiled_cgus, start_time.elapsed())
+            total_codegen_time += start_time.elapsed();
+
+            pre_compiled_cgus
         })
+    } else {
+        FxHashMap::default()
     };
-
-    #[cfg(not(parallel_compiler))]
-    let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));
-
-    let mut cgu_reuse = Vec::new();
-    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
-    let mut total_codegen_time = Duration::new(0, 0);
-    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
 
     for (i, cgu) in codegen_units.iter().enumerate() {
         ongoing_codegen.wait_for_signal_to_codegen_item();
         ongoing_codegen.check_for_errors(tcx.sess);
 
-        // Do some setup work in the first iteration
-        if pre_compiled_cgus.is_none() {
-            // Calculate the CGU reuse
-            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
-                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
-            });
-            // Pre compile some CGUs
-            let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
-            pre_compiled_cgus = Some(compiled_cgus);
-            total_codegen_time += codegen_time;
-        }
-
         let cgu_reuse = cgu_reuse[i];
         tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
 
         match cgu_reuse {
             CguReuse::No => {
-                let (module, cost) =
-                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
-                        cgu
-                    } else {
-                        let start_time = Instant::now();
-                        let module = backend.compile_codegen_unit(tcx, cgu.name());
-                        total_codegen_time += start_time.elapsed();
-                        module
-                    };
+                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+                    cgu
+                } else {
+                    let start_time = Instant::now();
+                    let module = backend.compile_codegen_unit(tcx, cgu.name());
+                    total_codegen_time += start_time.elapsed();
+                    module
+                };
                 // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                 // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                 // compilation hang on post-monomorphization errors.