Auto merge of #96458 - Aaron1011:no-cycle-caching, r=jackh726,cjgillot

Don't cache results of coinductive cycle
Commit: 4799baa70d
Author: bors
Date:   2022-05-06 22:41:16 +00:00

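For context, the cycles this change is concerned with arise from coinductive goals such as auto-trait obligations. The following is a minimal standalone sketch (the type names are illustrative and not taken from this PR) of a `Send` obligation whose evaluation cycles back on itself, which is the kind of situation that populates the provisional cache modified below:

```rust
// Evaluating `Foo: Send` requires `Bar: Send` (via the `Box<Bar>` field),
// which in turn requires `Foo: Send` again. Auto-trait obligations are
// coinductive, so the trait solver treats the cycle as success; the
// intermediate results are tracked in the provisional evaluation cache.
struct Foo(Box<Bar>);
struct Bar(Box<Foo>);

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<Foo>();
}
```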

@@ -741,39 +741,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         if reached_depth >= stack.depth {
             debug!(?result, "CACHE MISS");
             self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
-            stack.cache().on_completion(
-                stack.dfn,
-                |fresh_trait_pred, provisional_result, provisional_dep_node| {
-                    // Create a new `DepNode` that has dependencies on:
-                    // * The `DepNode` for the original evaluation that resulted in a provisional cache
-                    //   entry being created
-                    // * The `DepNode` for the *current* evaluation, which resulted in us completing
-                    //   provisional cache entries and inserting them into the evaluation cache
-                    //
-                    // This ensures that when a query reads this entry from the evaluation cache,
-                    // it will end up (transitively) depending on all of the incr-comp dependencies
-                    // created during the evaluation of this trait. For example, evaluating a trait
-                    // will usually require us to invoke `type_of(field_def_id)` to determine the
-                    // constituent types, and we want any queries reading from this evaluation
-                    // cache entry to end up with a transitive `type_of(field_def_id)` dependency.
-                    //
-                    // By using `in_task`, we're also creating an edge from the *current* query
-                    // to the newly-created `combined_dep_node`. This is probably redundant,
-                    // but it's better to add too many dep graph edges than to add too few
-                    // dep graph edges.
-                    let ((), combined_dep_node) = self.in_task(|this| {
-                        this.tcx().dep_graph.read_index(provisional_dep_node);
-                        this.tcx().dep_graph.read_index(dep_node);
-                    });
-                    self.insert_evaluation_cache(
-                        param_env,
-                        fresh_trait_pred,
-                        combined_dep_node,
-                        provisional_result.max(result),
-                    );
-                },
-            );
+            stack.cache().on_completion(stack.dfn);
         } else {
             debug!(?result, "PROVISIONAL");
             debug!(
@@ -782,13 +750,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                 fresh_trait_pred, stack.depth, reached_depth,
             );
-            stack.cache().insert_provisional(
-                stack.dfn,
-                reached_depth,
-                fresh_trait_pred,
-                result,
-                dep_node,
-            );
+            stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result);
         }
         Ok(result)
@@ -2531,11 +2493,6 @@ struct ProvisionalEvaluation {
     from_dfn: usize,
     reached_depth: usize,
     result: EvaluationResult,
-    /// The `DepNodeIndex` created for the `evaluate_stack` call for this provisional
-    /// evaluation. When we create an entry in the evaluation cache using this provisional
-    /// cache entry (see `on_completion`), we use this `dep_node` to ensure that future reads from
-    /// the cache will have all of the necessary incr comp dependencies tracked.
-    dep_node: DepNodeIndex,
 }
 impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
@@ -2578,7 +2535,6 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> {
         reached_depth: usize,
         fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
         result: EvaluationResult,
-        dep_node: DepNodeIndex,
     ) {
         debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
@@ -2604,10 +2560,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> {
             }
         }
-        map.insert(
-            fresh_trait_pred,
-            ProvisionalEvaluation { from_dfn, reached_depth, result, dep_node },
-        );
+        map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result });
     }
     /// Invoked when the node with dfn `dfn` does not get a successful
@@ -2633,8 +2586,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> {
     /// Invoked when the node at depth `depth` completed without
     /// depending on anything higher in the stack (if that completion
     /// was a failure, then `on_failure` should have been invoked
-    /// already). The callback `op` will be invoked for each
-    /// provisional entry that we can now confirm.
+    /// already).
     ///
     /// Note that we may still have provisional cache items remaining
     /// in the cache when this is done. For example, if there is a
@@ -2655,19 +2607,26 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> {
     /// would be 2, representing the C node, and hence we would
     /// remove the result for D, which has DFN 3, but not the results for
     /// A and B, which have DFNs 0 and 1 respectively).
-    fn on_completion(
-        &self,
-        dfn: usize,
-        mut op: impl FnMut(ty::PolyTraitPredicate<'tcx>, EvaluationResult, DepNodeIndex),
-    ) {
+    ///
+    /// Note that we *do not* attempt to cache these cycle participants
+    /// in the evaluation cache. Doing so would require carefully computing
+    /// the correct `DepNode` to store in the cache entry:
+    /// cycle participants may implicitly depend on query results
+    /// related to other participants in the cycle, due to our logic
+    /// which examines the evaluation stack.
+    ///
+    /// We used to try to perform this caching,
+    /// but it led to multiple incremental compilation ICEs
+    /// (see #92987 and #96319), and was very hard to understand.
+    /// Fortunately, removing the caching didn't seem to
+    /// have a performance impact in practice.
+    fn on_completion(&self, dfn: usize) {
         debug!(?dfn, "on_completion");
         for (fresh_trait_pred, eval) in
             self.map.borrow_mut().drain_filter(|_k, eval| eval.from_dfn >= dfn)
         {
             debug!(?fresh_trait_pred, ?eval, "on_completion");
-            op(fresh_trait_pred, eval.result, eval.dep_node);
         }
     }
 }
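As a rough illustration of the post-change `on_completion` behaviour, here is a self-contained sketch. It is not rustc's code: it uses a plain `HashMap` and `retain` in place of the compiler's internal map type and the unstable `drain_filter` seen in the diff, and the predicate keys are just strings. The point it demonstrates is that completing the cycle root now simply drains the provisional entries recorded at or after its DFN, without promoting them into any longer-lived evaluation cache:

```rust
use std::collections::HashMap;

// Simplified stand-in for the real `ProvisionalEvaluation` (no `dep_node`
// field anymore, per this commit).
#[derive(Debug)]
struct ProvisionalEvaluation {
    from_dfn: usize,
    result: bool, // stand-in for `EvaluationResult`
}

struct ProvisionalCache {
    map: HashMap<&'static str, ProvisionalEvaluation>,
}

impl ProvisionalCache {
    // Mirrors the drain condition `eval.from_dfn >= dfn` from the diff:
    // entries recorded at or after the completing node's DFN are dropped.
    fn on_completion(&mut self, dfn: usize) {
        self.map.retain(|pred, eval| {
            let keep = eval.from_dfn < dfn;
            if !keep {
                println!("dropping provisional entry {pred:?}: {eval:?}");
            }
            keep
        });
    }
}

fn main() {
    let mut cache = ProvisionalCache { map: HashMap::new() };
    cache.map.insert("Foo: Send", ProvisionalEvaluation { from_dfn: 1, result: true });
    cache.map.insert("Bar: Send", ProvisionalEvaluation { from_dfn: 2, result: true });

    // Completing the node with DFN 1 clears every entry with `from_dfn >= 1`.
    cache.on_completion(1);
    assert!(cache.map.is_empty());
}
```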