Actually cache goals

Michael Goulet 2023-02-24 02:35:15 +00:00
parent 67698aa6ad
commit d21e4d8411
3 changed files with 73 additions and 78 deletions

View File

@@ -17,6 +17,7 @@ use crate::mir::{
 };
 use crate::thir::Thir;
 use crate::traits;
+use crate::traits::solve;
 use crate::traits::solve::{ExternalConstraints, ExternalConstraintsData};
 use crate::ty::query::{self, TyCtxtAt};
 use crate::ty::{
@@ -537,6 +538,9 @@ pub struct GlobalCtxt<'tcx> {
     /// Merge this with `selection_cache`?
     pub evaluation_cache: traits::EvaluationCache<'tcx>,
 
+    /// Caches the results of goal evaluation in the new solver.
+    pub new_solver_evaluation_cache: solve::EvaluationCache<'tcx>,
+
     /// Data layout specification for the current target.
     pub data_layout: TargetDataLayout,
 
@@ -712,6 +716,7 @@ impl<'tcx> TyCtxt<'tcx> {
             pred_rcache: Default::default(),
             selection_cache: Default::default(),
             evaluation_cache: Default::default(),
+            new_solver_evaluation_cache: Default::default(),
             data_layout,
             alloc_map: Lock::new(interpret::AllocMap::new()),
         }
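
The new `GlobalCtxt` field above gives the next-generation trait solver its own global goal cache, alongside the old solver's `evaluation_cache`. As a rough mental model only (the real `solve::EvaluationCache` is shared across the whole compilation session and, as the last file in this commit shows, also stores a dep node with each entry), it acts like a map from canonical goals to their final query results:

```rust
// Illustrative sketch only, not the real `solve::EvaluationCache` API: this
// just shows the shape of the mapping the new `GlobalCtxt` field provides.
use std::collections::HashMap;
use std::hash::Hash;

struct EvaluationCache<Goal, Result> {
    map: HashMap<Goal, Result>,
}

impl<Goal: Hash + Eq, Result: Clone> EvaluationCache<Goal, Result> {
    fn new() -> Self {
        Self { map: HashMap::new() }
    }

    /// Return the cached result for an already evaluated goal, if any.
    fn get(&self, goal: &Goal) -> Option<Result> {
        self.map.get(goal).cloned()
    }

    /// Record the final result of a fully evaluated goal.
    fn insert(&mut self, goal: Goal, result: Result) {
        self.map.insert(goal, result);
    }
}

fn main() {
    // Hypothetical goal/result types standing in for `CanonicalGoal` and `QueryResult`.
    let mut cache: EvaluationCache<&str, bool> = EvaluationCache::new();
    cache.insert("Vec<u32>: Clone", true);
    assert_eq!(cache.get(&"Vec<u32>: Clone"), Some(true));
}
```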

View File

@@ -8,12 +8,10 @@
 //!
 //! FIXME(@lcnr): Write that section, feel free to ping me if you need help here
 //! before then or if I still haven't done that before January 2023.
-use super::overflow::OverflowData;
 use super::StackDepth;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_index::vec::IndexVec;
 use rustc_middle::traits::solve::{CanonicalGoal, QueryResult};
-use rustc_middle::ty::TyCtxt;
 
 rustc_index::newtype_index! {
     pub struct EntryIndex {}
@@ -98,26 +96,3 @@ impl<'tcx> ProvisionalCache<'tcx> {
         self.entries[entry_index].response
     }
 }
-
-pub(super) fn try_move_finished_goal_to_global_cache<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    overflow_data: &mut OverflowData,
-    stack: &IndexVec<super::StackDepth, super::StackElem<'tcx>>,
-    goal: CanonicalGoal<'tcx>,
-    response: QueryResult<'tcx>,
-) {
-    // We move goals to the global cache if we either did not hit an overflow or if it's
-    // the root goal as that will now always hit the same overflow limit.
-    //
-    // NOTE: We cannot move any non-root goals to the global cache even if their final result
-    // isn't impacted by the overflow as that goal still has unstable query dependencies
-    // because it didn't go its full depth.
-    //
-    // FIXME(@lcnr): We could still cache subtrees which are not impacted by overflow though.
-    // Tracking that info correctly isn't trivial, so I haven't implemented it for now.
-    let should_cache_globally = !overflow_data.did_overflow() || stack.is_empty();
-    if should_cache_globally {
-        // FIXME: move the provisional entry to the global cache.
-        let _ = (tcx, goal, response);
-    }
-}
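
The helper removed here was still a stub (its body discarded its arguments behind a FIXME), but the policy in its comments is what `with_new_goal` now implements for real: a result may only be moved to the global cache if evaluation never hit the recursion limit, or if the goal is the root goal, which will always be re-evaluated against the same overflow budget. A minimal sketch of that predicate, with `did_overflow` and `stack_is_empty` standing in for `self.overflow_data.did_overflow()` and `self.stack.is_empty()`:

```rust
// Sketch of the global-caching policy described above; the names are
// simplified stand-ins for the fields used in `SearchGraph::with_new_goal`.
fn should_cache_globally(did_overflow: bool, stack_is_empty: bool) -> bool {
    // Cache if overflow never happened, or if this is the root goal
    // (empty stack), which always runs against the same overflow limit.
    !did_overflow || stack_is_empty
}

fn main() {
    assert!(should_cache_globally(false, false)); // no overflow: always cacheable
    assert!(should_cache_globally(true, true));   // overflow, but this is the root goal
    assert!(!should_cache_globally(true, false)); // overflow inside a nested goal: skip
}
```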

View File

@@ -6,6 +6,7 @@ pub(super) use crate::solve::search_graph::overflow::OverflowHandler;
 use cache::ProvisionalCache;
 use overflow::OverflowData;
 use rustc_index::vec::IndexVec;
+use rustc_middle::dep_graph::DepKind;
 use rustc_middle::traits::solve::{CanonicalGoal, Certainty, MaybeCause, QueryResult};
 use rustc_middle::ty::TyCtxt;
 use std::{collections::hash_map::Entry, mem};
@@ -139,10 +140,9 @@ impl<'tcx> SearchGraph<'tcx> {
     /// updated the provisional cache and we have to recompute the current goal.
     ///
     /// FIXME: Refer to the rustc-dev-guide entry once it exists.
-    #[instrument(level = "debug", skip(self, tcx, actual_goal), ret)]
+    #[instrument(level = "debug", skip(self, actual_goal), ret)]
     fn try_finalize_goal(
         &mut self,
-        tcx: TyCtxt<'tcx>,
         actual_goal: CanonicalGoal<'tcx>,
         response: QueryResult<'tcx>,
     ) -> bool {
@@ -176,72 +176,87 @@ impl<'tcx> SearchGraph<'tcx> {
             self.stack.push(StackElem { goal, has_been_used: false });
             false
         } else {
-            self.try_move_finished_goal_to_global_cache(tcx, stack_elem);
             true
         }
     }
 
-    fn try_move_finished_goal_to_global_cache(
-        &mut self,
-        tcx: TyCtxt<'tcx>,
-        stack_elem: StackElem<'tcx>,
-    ) {
-        let StackElem { goal, .. } = stack_elem;
-        let cache = &mut self.provisional_cache;
-        let provisional_entry_index = *cache.lookup_table.get(&goal).unwrap();
-        let provisional_entry = &mut cache.entries[provisional_entry_index];
-        let depth = provisional_entry.depth;
-        // If not, we're done with this goal.
-        //
-        // Check whether that this goal doesn't depend on a goal deeper on the stack
-        // and if so, move it and all nested goals to the global cache.
-        //
-        // Note that if any nested goal were to depend on something deeper on the stack,
-        // this would have also updated the depth of the current goal.
-        if depth == self.stack.next_index() {
-            for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
-                let actual_index = cache.lookup_table.remove(&entry.goal);
-                debug_assert_eq!(Some(i), actual_index);
-                debug_assert!(entry.depth == depth);
-                cache::try_move_finished_goal_to_global_cache(
-                    tcx,
-                    &mut self.overflow_data,
-                    &self.stack,
-                    entry.goal,
-                    entry.response,
-                );
-            }
-        }
-    }
-
     pub(super) fn with_new_goal(
         &mut self,
         tcx: TyCtxt<'tcx>,
         canonical_goal: CanonicalGoal<'tcx>,
         mut loop_body: impl FnMut(&mut Self) -> QueryResult<'tcx>,
     ) -> QueryResult<'tcx> {
+        if let Some(result) = tcx.new_solver_evaluation_cache.get(&canonical_goal, tcx) {
+            return result;
+        }
+
         match self.try_push_stack(tcx, canonical_goal) {
             Ok(()) => {}
             // Our goal is already on the stack, eager return.
             Err(response) => return response,
         }
 
-        self.repeat_while_none(
-            |this| {
-                let result = this.deal_with_overflow(tcx, canonical_goal);
-                let stack_elem = this.stack.pop().unwrap();
-                this.try_move_finished_goal_to_global_cache(tcx, stack_elem);
-                result
-            },
-            |this| {
-                let result = loop_body(this);
-                if this.try_finalize_goal(tcx, canonical_goal, result) {
-                    Some(result)
-                } else {
-                    None
-                }
-            },
-        )
+        // This is for global caching, so we properly track query dependencies.
+        // Everything that affects the `Result` should be performed within this
+        // `with_anon_task` closure.
+        let (result, dep_node) = tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
+            self.repeat_while_none(
+                |this| {
+                    let result = this.deal_with_overflow(tcx, canonical_goal);
+                    let _ = this.stack.pop().unwrap();
+                    result
+                },
+                |this| {
+                    let result = loop_body(this);
+                    this.try_finalize_goal(canonical_goal, result).then(|| result)
+                },
+            )
+        });
+
+        let cache = &mut self.provisional_cache;
+        let provisional_entry_index = *cache.lookup_table.get(&canonical_goal).unwrap();
+        let provisional_entry = &mut cache.entries[provisional_entry_index];
+        let depth = provisional_entry.depth;
+        // If not, we're done with this goal.
+        //
+        // Check whether that this goal doesn't depend on a goal deeper on the stack
+        // and if so, move it to the global cache.
+        //
+        // Note that if any nested goal were to depend on something deeper on the stack,
+        // this would have also updated the depth of the current goal.
+        if depth == self.stack.next_index() {
+            // If the current goal is the head of a cycle, we drop all other
+            // cycle participants without moving them to the global cache.
+            let other_cycle_participants = provisional_entry_index.index() + 1;
+            for (i, entry) in cache.entries.drain_enumerated(other_cycle_participants..) {
+                let actual_index = cache.lookup_table.remove(&entry.goal);
+                debug_assert_eq!(Some(i), actual_index);
+                debug_assert!(entry.depth == depth);
+            }
+
+            let current_goal = cache.entries.pop().unwrap();
+            let actual_index = cache.lookup_table.remove(&current_goal.goal);
+            debug_assert_eq!(Some(provisional_entry_index), actual_index);
+            debug_assert!(current_goal.depth == depth);
+
+            // We move the root goal to the global cache if we either did not hit an overflow or if it's
+            // the root goal as that will now always hit the same overflow limit.
+            //
+            // NOTE: We cannot move any non-root goals to the global cache. When replaying the root goal's
+            // dependencies, our non-root goal may no longer appear as child of the root goal.
+            //
+            // See https://github.com/rust-lang/rust/pull/108071 for some additional context.
+            let should_cache_globally = !self.overflow_data.did_overflow() || self.stack.is_empty();
+            if should_cache_globally {
+                tcx.new_solver_evaluation_cache.insert(
+                    current_goal.goal,
+                    dep_node,
+                    current_goal.response,
+                );
+            }
+        }
+
+        result
     }
 }
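
Taken together, `with_new_goal` now (1) checks the global cache up front, (2) evaluates the goal inside an anonymous dep-graph task so that every query dependency is recorded under one dep node, and (3) writes only the finished root goal, together with that dep node, back into the cache. The following toy model (plain Rust, not rustc's `DepGraph` or `EvaluationCache` APIs) sketches that read-through/write-back pattern under those assumptions:

```rust
// Toy model of the caching pattern introduced above; none of these types are
// rustc's. `DepRecord` stands in for the `DepNodeIndex` returned by
// `with_anon_task`, and `bool` stands in for `QueryResult`.
use std::collections::HashMap;

type Goal = &'static str;
type DepRecord = Vec<&'static str>;

struct GlobalCache {
    map: HashMap<Goal, (DepRecord, bool)>,
}

impl GlobalCache {
    /// On a hit, rustc would also mark the stored dep node as read by the
    /// current query; here we just print the recorded dependencies.
    fn get(&self, goal: &Goal) -> Option<bool> {
        self.map.get(goal).map(|(deps, result)| {
            println!("cache hit for `{goal}`, replaying deps {deps:?}");
            *result
        })
    }

    fn insert(&mut self, goal: Goal, deps: DepRecord, result: bool) {
        self.map.insert(goal, (deps, result));
    }
}

/// Run `body` while collecting the "dependencies" it declares, mirroring the
/// role of `tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, ..)`.
fn with_anon_task<R>(body: impl FnOnce(&mut DepRecord) -> R) -> (R, DepRecord) {
    let mut deps = Vec::new();
    let result = body(&mut deps);
    (result, deps)
}

fn main() {
    let mut cache = GlobalCache { map: HashMap::new() };
    let goal: Goal = "u32: Copy";

    if cache.get(&goal).is_none() {
        // Everything that affects the result happens inside the tracked task.
        let (result, dep_node) = with_anon_task(|deps| {
            deps.push("impl Copy for u32");
            true
        });
        // Only the finished root goal is written back, matching the commit.
        cache.insert(goal, dep_node, result);
    }

    assert_eq!(cache.get(&goal), Some(true));
}
```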