diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs
index 2da72bfc292..572a4fc6971 100644
--- a/compiler/rustc_incremental/src/persist/file_format.rs
+++ b/compiler/rustc_incremental/src/persist/file_format.rs
@@ -52,7 +52,10 @@ where
     // Delete the old file, if any.
     // Note: It's important that we actually delete the old file and not just
     // truncate and overwrite it, since it might be a shared hard-link, the
-    // underlying data of which we don't want to modify
+    // underlying data of which we don't want to modify.
+    //
+    // We have to ensure we have dropped the memory maps to this file
+    // before performing this removal.
     match fs::remove_file(&path_buf) {
         Ok(()) => {
             debug!("save: remove old file");
@@ -114,6 +117,12 @@ pub fn read_file(
         Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
         Err(err) => return Err(err),
     };
+    // SAFETY: This process must not modify nor remove the backing file while the memory map lives.
+    // For the dep-graph and the work product index, it is as soon as the decoding is done.
+    // For the query result cache, the memory map is dropped in save_dep_graph before calling
+    // save_in and trying to remove the backing file.
+    //
+    // There is no way to prevent another process from modifying this file.
     let mmap = unsafe { Mmap::map(file) }?;
 
     let mut file = io::Cursor::new(&*mmap);
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index 2feba71e010..6c683058b12 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -42,6 +42,11 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
     join(
         move || {
             sess.time("incr_comp_persist_result_cache", || {
+                // Drop the memory map so that we can remove the file and write to it.
+                if let Some(odc) = &tcx.on_disk_cache {
+                    odc.drop_serialized_data(tcx);
+                }
+
                 file_format::save_in(sess, query_cache_path, "query cache", |e| {
                     encode_query_cache(tcx, e)
                 });
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 08990cb7baf..dae82d2438a 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -101,6 +101,8 @@ pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
     fn register_reused_dep_node(&self, tcx: TyCtxt<'tcx>, dep_node: &DepNode);
     fn store_foreign_def_id_hash(&self, def_id: DefId, hash: DefPathHash);
 
+    fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);
+
     fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: &mut FileEncoder) -> FileEncodeResult;
 }
 
diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs
index e696df85b88..c197962fabb 100644
--- a/compiler/rustc_query_impl/src/on_disk_cache.rs
+++ b/compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -1,7 +1,7 @@
 use crate::QueryCtxt;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
 use rustc_data_structures::memmap::Mmap;
-use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, OnceCell};
+use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, OnceCell, RwLock};
 use rustc_data_structures::unhash::UnhashMap;
 use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE};
 use rustc_hir::definitions::DefPathHash;
@@ -43,7 +43,7 @@ const TAG_EXPN_DATA: u8 = 1;
 /// any side effects that have been emitted during a query.
 pub struct OnDiskCache<'sess> {
     // The complete cache data in serialized form.
-    serialized_data: Option<Mmap>,
+    serialized_data: RwLock<Option<Mmap>>,
 
     // Collects all `QuerySideEffects` created during the current compilation
     // session.
@@ -206,7 +206,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
         };
 
         Self {
-            serialized_data: Some(data),
+            serialized_data: RwLock::new(Some(data)),
             file_index_to_stable_id: footer.file_index_to_stable_id,
             file_index_to_file: Default::default(),
             cnum_map: OnceCell::new(),
@@ -227,7 +227,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
 
     fn new_empty(source_map: &'sess SourceMap) -> Self {
         Self {
-            serialized_data: None,
+            serialized_data: RwLock::new(None),
             file_index_to_stable_id: Default::default(),
             file_index_to_file: Default::default(),
             cnum_map: OnceCell::new(),
@@ -246,7 +246,26 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
         }
     }
 
-    fn serialize(&self, tcx: TyCtxt<'sess>, encoder: &mut FileEncoder) -> FileEncodeResult {
+    fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>) {
+        // Register any dep nodes that we reused from the previous session,
+        // but didn't `DepNode::construct` in this session. This ensures
+        // that their `DefPathHash` to `RawDefId` mappings are registered
+        // in 'latest_foreign_def_path_hashes' if necessary, since that
+        // normally happens in `DepNode::construct`.
+        tcx.dep_graph.register_reused_dep_nodes(tcx);
+
+        // Load everything into memory so we can write it out to the on-disk
+        // cache. The vast majority of cacheable query results should already
+        // be in memory, so this should be a cheap operation.
+        // Do this *before* we clone 'latest_foreign_def_path_hashes', since
+        // loading existing queries may cause us to create new DepNodes, which
+        // may in turn end up invoking `store_foreign_def_id_hash`
+        tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
+
+        *self.serialized_data.write() = None;
+    }
+
+    fn serialize<'tcx>(&self, tcx: TyCtxt<'tcx>, encoder: &mut FileEncoder) -> FileEncodeResult {
         // Serializing the `DepGraph` should not modify it.
         tcx.dep_graph.with_ignore(|| {
             // Allocate `SourceFileIndex`es.
@@ -268,21 +287,6 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
                 (file_to_file_index, file_index_to_stable_id)
             };
 
-            // Register any dep nodes that we reused from the previous session,
-            // but didn't `DepNode::construct` in this session. This ensures
-            // that their `DefPathHash` to `RawDefId` mappings are registered
-            // in 'latest_foreign_def_path_hashes' if necessary, since that
-            // normally happens in `DepNode::construct`.
-            tcx.dep_graph.register_reused_dep_nodes(tcx);
-
-            // Load everything into memory so we can write it out to the on-disk
-            // cache. The vast majority of cacheable query results should already
-            // be in memory, so this should be a cheap operation.
-            // Do this *before* we clone 'latest_foreign_def_path_hashes', since
-            // loading existing queries may cause us to create new DepNodes, which
-            // may in turn end up invoking `store_foreign_def_id_hash`
-            tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
-
             let latest_foreign_def_path_hashes = self.latest_foreign_def_path_hashes.lock().clone();
 
             let hygiene_encode_context = HygieneEncodeContext::default();
@@ -566,7 +570,7 @@ impl<'sess> OnDiskCache<'sess> {
         })
     }
 
-    fn with_decoder<'a, 'tcx, T, F: FnOnce(&mut CacheDecoder<'sess, 'tcx>) -> T>(
+    fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>(
         &'sess self,
         tcx: TyCtxt<'tcx>,
         pos: AbsoluteBytePos,
@@ -577,12 +581,10 @@ impl<'sess> OnDiskCache<'sess> {
     {
        let cnum_map = self.cnum_map.get_or_init(|| Self::compute_cnum_map(tcx));
 
+        let serialized_data = self.serialized_data.read();
         let mut decoder = CacheDecoder {
             tcx,
-            opaque: opaque::Decoder::new(
-                self.serialized_data.as_deref().unwrap_or(&[]),
-                pos.to_usize(),
-            ),
+            opaque: opaque::Decoder::new(serialized_data.as_deref().unwrap_or(&[]), pos.to_usize()),
             source_map: self.source_map,
             cnum_map,
             file_index_to_file: &self.file_index_to_file,
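
Not part of the patch above, but as a rough, std-only sketch of the locking pattern the last two files introduce: the serialized cache bytes live behind `RwLock<Option<…>>`, readers decode while holding the read guard, and `drop_serialized_data` clears the slot under the write lock so that `save_in` can afterwards delete and rewrite the backing file (removing a file that is still memory-mapped generally fails on Windows, and modifying it while mapped is exactly what the SAFETY comment in `read_file` forbids). Here `Vec<u8>` stands in for `Mmap`, `std::sync::RwLock` replaces `rustc_data_structures::sync::RwLock` (hence the `.unwrap()` calls the real code does not need), and the byte-offset handling is invented for illustration.

```rust
use std::sync::RwLock;

/// Stand-in for rustc's `OnDiskCache`: the serialized bytes sit behind an
/// `RwLock<Option<_>>` so they can be dropped independently of `&self`.
struct OnDiskCache {
    // In the patch this is `RwLock<Option<Mmap>>`; a Vec keeps the sketch
    // dependency-free.
    serialized_data: RwLock<Option<Vec<u8>>>,
}

impl OnDiskCache {
    /// Mirrors `with_decoder`: the read guard is held for the whole decode,
    /// so the bytes cannot be dropped out from under the closure.
    fn with_decoder<T>(&self, pos: usize, f: impl FnOnce(&[u8]) -> T) -> T {
        let guard = self.serialized_data.read().unwrap();
        let bytes = guard.as_deref().unwrap_or(&[]);
        f(&bytes[pos..])
    }

    /// Mirrors `drop_serialized_data`: once this returns, nothing references
    /// the old cache bytes, so the backing file can be removed and rewritten.
    fn drop_serialized_data(&self) {
        *self.serialized_data.write().unwrap() = None;
    }
}

fn main() {
    let cache =
        OnDiskCache { serialized_data: RwLock::new(Some(b"header:query results".to_vec())) };

    // A "query" decodes from some absolute byte position in the cache.
    let decoded = cache.with_decoder(7, |bytes| String::from_utf8_lossy(bytes).into_owned());
    println!("decoded: {decoded}");

    // Before the cache file is deleted and re-encoded, the data is dropped.
    cache.drop_serialized_data();
    assert!(cache.serialized_data.read().unwrap().is_none());
}
```

Holding the read guard for the entire decode is what the ordering in the patch relies on: the mapped bytes cannot disappear while a `CacheDecoder` is still borrowing them, and `save_dep_graph` calls `drop_serialized_data` before `save_in` removes the file, so this process never deletes or overwrites a file it still has mapped.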