Auto merge of #93741 - Mark-Simulacrum:global-job-id, r=cjgillot
Refactor query system to maintain a global job id counter

This replaces the per-shard counters with a single global counter, simplifying the `QueryJobId` struct down to just a `u64` and removing the need to pipe a `DepKind` generic through a bunch of code. The performance implications on non-parallel compilers are likely minimal (this switches to `Cell<u64>` as the backing storage instead of a plain `u64`, but the latter was already inside a `RefCell`, so it is not a significant divergence).

On parallel compilers, the cost of a single global `u64` counter may be more significant: in theory it adds a serialization point. On the other hand, the counter could be given a thread-local component (or some similar structure) if that ever becomes worrisome. The new design is sufficiently simpler that it warrants the potential for slight changes down the line if/when parallel compilation becomes more of a default.

A `u64` counter, instead of a `u32` (the old per-shard width), is chosen to rule out overflow: even a compiler allocating a billion job ids per second would take roughly 585 years to exhaust a `u64`, so overflow is effectively impossible in this context.
This commit is contained in: commit e7aca89598
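For illustration, here is a minimal, self-contained sketch of the allocation pattern this change adopts. The names `JobId` and `JOBS` are placeholders, and the counter lives in a `static` only for the sake of the sketch; in the actual change the `AtomicU64` is a field of the `Queries` struct, exposed through `QueryContext::next_job_id` (see the diff below).

use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering};

/// A globally unique id for an active query job (stands in for `QueryJobId`).
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
struct JobId(NonZeroU64);

// Start the counter at 1 so `fetch_add` always returns a nonzero previous
// value, letting the id live in a `NonZeroU64` (which keeps `Option<JobId>`
// the same size as a `u64`).
static JOBS: AtomicU64 = AtomicU64::new(1);

fn next_job_id() -> JobId {
    // `Relaxed` suffices here: the ids only need to be unique; no ordering
    // with respect to other memory operations is required.
    JobId(NonZeroU64::new(JOBS.fetch_add(1, Ordering::Relaxed)).unwrap())
}

fn main() {
    let a = next_job_id();
    let b = next_job_id();
    assert_ne!(a, b); // every call yields a fresh id
}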
@@ -1673,7 +1673,7 @@ CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } }
 pub mod tls {
     use super::{ptr_eq, GlobalCtxt, TyCtxt};
 
-    use crate::dep_graph::{DepKind, TaskDepsRef};
+    use crate::dep_graph::TaskDepsRef;
     use crate::ty::query;
     use rustc_data_structures::sync::{self, Lock};
     use rustc_data_structures::thin_vec::ThinVec;
@@ -1698,7 +1698,7 @@ pub mod tls {
 
     /// The current query job, if any. This is updated by `JobOwner::start` in
     /// `ty::query::plumbing` when executing a query.
-    pub query: Option<query::QueryJobId<DepKind>>,
+    pub query: Option<query::QueryJobId>,
 
     /// Where to store diagnostics for the current query job, if any.
     /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
@@ -15,6 +15,7 @@ extern crate rustc_macros;
 extern crate rustc_middle;
 
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::AtomicU64;
 use rustc_middle::arena::Arena;
 use rustc_middle::dep_graph::{self, DepKindStruct, SerializedDepNodeIndex};
 use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.
 
 use crate::{on_disk_cache, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::TyCtxt;
 use rustc_query_system::dep_graph::HasDepContext;
@@ -15,6 +15,7 @@ use rustc_errors::{Diagnostic, Handler};
 use rustc_serialize::opaque;
 
 use std::any::Any;
+use std::num::NonZeroU64;
 
 #[derive(Copy, Clone)]
 pub struct QueryCtxt<'tcx> {
@@ -42,11 +43,20 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
 }
 
 impl QueryContext for QueryCtxt<'_> {
-    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
+    fn next_job_id(&self) -> QueryJobId {
+        QueryJobId(
+            NonZeroU64::new(
+                self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
+            )
+            .unwrap(),
+        )
+    }
+
+    fn current_query_job(&self) -> Option<QueryJobId> {
         tls::with_related_context(**self, |icx| icx.query)
     }
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
+    fn try_collect_active_jobs(&self) -> Option<QueryMap> {
         self.queries.try_collect_active_jobs(**self)
     }
 
@@ -81,7 +91,7 @@ impl QueryContext for QueryCtxt<'_> {
     #[inline(always)]
     fn start_query<R>(
         &self,
-        token: QueryJobId<Self::DepKind>,
+        token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R {
@@ -152,7 +162,7 @@ impl<'tcx> QueryCtxt<'tcx> {
 
     pub fn try_print_query_stack(
         self,
-        query: Option<QueryJobId<DepKind>>,
+        query: Option<QueryJobId>,
         handler: &Handler,
         num_frames: Option<usize>,
     ) -> usize {
@@ -320,7 +330,7 @@ macro_rules! define_queries {
             type Cache = query_storage::$name<$tcx>;
 
             #[inline(always)]
-            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
+            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<Self::Key>
                 where QueryCtxt<$tcx>: 'a
             {
                 &tcx.queries.$name
@@ -471,10 +481,9 @@ macro_rules! define_queries_struct {
 
             pub on_disk_cache: Option<OnDiskCache<$tcx>>,
 
-            $($(#[$attr])* $name: QueryState<
-                crate::dep_graph::DepKind,
-                query_keys::$name<$tcx>,
-            >,)*
+            jobs: AtomicU64,
+
+            $($(#[$attr])* $name: QueryState<query_keys::$name<$tcx>>,)*
         }
 
         impl<$tcx> Queries<$tcx> {
@@ -487,6 +496,7 @@ macro_rules! define_queries_struct {
                 local_providers: Box::new(local_providers),
                 extern_providers: Box::new(extern_providers),
                 on_disk_cache,
+                jobs: AtomicU64::new(1),
                 $($name: Default::default()),*
             }
         }
@@ -494,14 +504,13 @@ macro_rules! define_queries_struct {
         pub(crate) fn try_collect_active_jobs(
             &$tcx self,
             tcx: TyCtxt<$tcx>,
-        ) -> Option<QueryMap<crate::dep_graph::DepKind>> {
+        ) -> Option<QueryMap> {
             let tcx = QueryCtxt { tcx, queries: self };
             let mut jobs = QueryMap::default();
 
             $(
                 self.$name.try_collect_active_jobs(
                     tcx,
-                    dep_graph::DepKind::$name,
                     make_query::$name,
                     &mut jobs,
                 )?;
@@ -59,7 +59,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
     fn describe(tcx: CTX, key: Self::Key) -> String;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
     where
         CTX: 'a;
 
@@ -7,13 +7,11 @@ use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Leve
 use rustc_session::Session;
 use rustc_span::Span;
 
-use std::convert::TryFrom;
 use std::hash::Hash;
-use std::num::NonZeroU32;
+use std::num::NonZeroU64;
 
 #[cfg(parallel_compiler)]
 use {
-    crate::dep_graph::DepKind,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::sync::Lock,
@@ -33,80 +31,57 @@ pub struct QueryInfo {
     pub query: QueryStackFrame,
 }
 
-pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
 
-/// A value uniquely identifying an active query job within a shard in the query cache.
-#[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryShardJobId(pub NonZeroU32);
-
 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryJobId<D> {
-    /// Which job within a shard is this
-    pub job: QueryShardJobId,
+pub struct QueryJobId(pub NonZeroU64);
 
-    /// In which shard is this job
-    pub shard: u16,
-
-    /// What kind of query this job is.
-    pub kind: D,
-}
-
-impl<D> QueryJobId<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
-    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
-        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
-    }
-
-    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
+impl QueryJobId {
+    fn query(self, map: &QueryMap) -> QueryStackFrame {
         map.get(&self).unwrap().query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span(self, map: &QueryMap<D>) -> Span {
+    fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
+    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
+    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
-pub struct QueryJobInfo<D> {
+pub struct QueryJobInfo {
     pub query: QueryStackFrame,
-    pub job: QueryJob<D>,
+    pub job: QueryJob,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D> {
-    pub id: QueryShardJobId,
+pub struct QueryJob {
+    pub id: QueryJobId,
 
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
 
     /// The parent query job which created this job and is implicitly waiting on it.
-    pub parent: Option<QueryJobId<D>>,
+    pub parent: Option<QueryJobId>,
 
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D>>,
+    latch: Option<QueryLatch>,
 }
 
-impl<D> QueryJob<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+impl QueryJob {
     /// Creates a new query job.
-    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
+    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
         QueryJob {
             id,
             span,
@@ -117,7 +92,7 @@ where
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -139,16 +114,13 @@ where
 }
 
 #[cfg(not(parallel_compiler))]
-impl<D> QueryJobId<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+impl QueryJobId {
     #[cold]
     #[inline(never)]
     pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D>,
-        current_job: &Option<QueryJobId<D>>,
+        query_map: QueryMap,
+        current_job: &Option<QueryJobId>,
         span: Span,
     ) -> CycleError {
         // Find the waitee amongst `current_job` parents
@@ -184,15 +156,15 @@ where
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D> {
-    query: Option<QueryJobId<D>>,
+struct QueryWaiter {
+    query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
     cycle: Lock<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D> QueryWaiter<D> {
+impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -200,34 +172,27 @@ impl<D> QueryWaiter<D> {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D> {
+struct QueryLatchInfo {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D>>>,
+    waiters: Vec<Lrc<QueryWaiter>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D> {
-    info: Lrc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+    info: Lrc<Mutex<QueryLatchInfo>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: Eq + Hash> QueryLatch<D> {
+impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
             info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
         }
     }
-}
 
-#[cfg(parallel_compiler)]
-impl<D> QueryLatch<D> {
     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        query: Option<QueryJobId<D>>,
-        span: Span,
-    ) -> Result<(), CycleError> {
+    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
         let waiter =
             Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -242,7 +207,7 @@ impl<D> QueryLatch<D> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -276,7 +241,7 @@ impl<D> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -286,7 +251,7 @@ impl<D> QueryLatch<D> {
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
 #[cfg(parallel_compiler)]
-type Waiter<D> = (QueryJobId<D>, usize);
+type Waiter = (QueryJobId, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
 /// Only waiters in a query are visited.
@@ -298,14 +263,9 @@ type Waiter<D> = (QueryJobId<D>, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<D, F>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
-    mut visit: F,
-) -> Option<Option<Waiter<D>>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
-    D: Copy + Clone + Eq + Hash,
-    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
+    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -334,16 +294,13 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
+fn cycle_check(
+    query_map: &QueryMap,
+    query: QueryJobId,
     span: Span,
-    stack: &mut Vec<(Span, QueryJobId<D>)>,
-    visited: &mut FxHashSet<QueryJobId<D>>,
-) -> Option<Option<Waiter<D>>>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+    stack: &mut Vec<(Span, QueryJobId)>,
+    visited: &mut FxHashSet<QueryJobId>,
+) -> Option<Option<Waiter>> {
     if !visited.insert(query) {
         return if let Some(p) = stack.iter().position(|q| q.1 == query) {
             // We detected a query cycle, fix up the initial span and return Some
@@ -378,14 +335,11 @@ where
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
-    visited: &mut FxHashSet<QueryJobId<D>>,
-) -> bool
-where
-    D: Copy + Clone + Eq + Hash,
-{
+fn connected_to_root(
+    query_map: &QueryMap,
+    query: QueryJobId,
+    visited: &mut FxHashSet<QueryJobId>,
+) -> bool {
     // We already visited this or we're deliberately ignoring it
     if !visited.insert(query) {
         return false;
@@ -404,10 +358,9 @@ where
 
 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
-    D: Copy + Clone + Eq + Hash,
-    F: Fn(&T) -> (Span, QueryJobId<D>),
+    F: Fn(&T) -> (Span, QueryJobId),
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
@@ -431,10 +384,10 @@ where
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
-    query_map: &QueryMap<D>,
-    jobs: &mut Vec<QueryJobId<D>>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
+fn remove_cycle(
+    query_map: &QueryMap,
+    jobs: &mut Vec<QueryJobId>,
+    wakelist: &mut Vec<Lrc<QueryWaiter>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -489,7 +442,7 @@ fn remove_cycle<D: DepKind>(
             }
         }
     })
-    .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
+    .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
 
     // Deterministically pick an entry point
     let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
@@ -544,7 +497,7 @@ pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
 
     let mut wakelist = Vec::new();
     let query_map = tcx.try_collect_active_jobs().unwrap();
-    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();
+    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
 
     let mut found_cycle = false;
 
@@ -630,7 +583,7 @@ pub(crate) fn report_cycle<'a>(
 
 pub fn print_query_stack<CTX: QueryContext>(
     tcx: CTX,
-    mut current_query: Option<QueryJobId<CTX::DepKind>>,
+    mut current_query: Option<QueryJobId>,
     handler: &Handler,
     num_frames: Option<usize>,
 ) -> usize {
@@ -117,10 +117,12 @@ impl QuerySideEffects {
 }
 
 pub trait QueryContext: HasDepContext {
+    fn next_job_id(&self) -> QueryJobId;
+
     /// Get the query information from the TLS context.
-    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
+    fn current_query_job(&self) -> Option<QueryJobId>;
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
+    fn try_collect_active_jobs(&self) -> Option<QueryMap>;
 
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
@@ -140,7 +142,7 @@ pub trait QueryContext: HasDepContext {
     /// captured during execution and the actual result.
     fn start_query<R>(
         &self,
-        token: QueryJobId<Self::DepKind>,
+        token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R;
@@ -5,9 +5,7 @@
 use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable};
-use crate::query::job::{
-    report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
-};
+use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
@@ -24,7 +22,6 @@ use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::{Hash, Hasher};
 use std::mem;
-use std::num::NonZeroU32;
 use std::ptr;
 
 pub struct QueryCacheStore<C: QueryCache> {
@@ -69,36 +66,32 @@ impl<C: QueryCache> QueryCacheStore<C> {
     }
 }
 
-struct QueryStateShard<D, K> {
-    active: FxHashMap<K, QueryResult<D>>,
-
-    /// Used to generate unique ids for active jobs.
-    jobs: u32,
+struct QueryStateShard<K> {
+    active: FxHashMap<K, QueryResult>,
 }
 
-impl<D, K> Default for QueryStateShard<D, K> {
-    fn default() -> QueryStateShard<D, K> {
-        QueryStateShard { active: Default::default(), jobs: 0 }
+impl<K> Default for QueryStateShard<K> {
+    fn default() -> QueryStateShard<K> {
+        QueryStateShard { active: Default::default() }
     }
 }
 
-pub struct QueryState<D, K> {
-    shards: Sharded<QueryStateShard<D, K>>,
+pub struct QueryState<K> {
+    shards: Sharded<QueryStateShard<K>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D> {
+enum QueryResult {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D>),
+    Started(QueryJob),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<D, K> QueryState<D, K>
+impl<K> QueryState<K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone + Debug,
 {
     pub fn all_inactive(&self) -> bool {
|
|||||||
pub fn try_collect_active_jobs<CTX: Copy>(
|
pub fn try_collect_active_jobs<CTX: Copy>(
|
||||||
&self,
|
&self,
|
||||||
tcx: CTX,
|
tcx: CTX,
|
||||||
kind: D,
|
|
||||||
make_query: fn(CTX, K) -> QueryStackFrame,
|
make_query: fn(CTX, K) -> QueryStackFrame,
|
||||||
jobs: &mut QueryMap<D>,
|
jobs: &mut QueryMap,
|
||||||
) -> Option<()> {
|
) -> Option<()> {
|
||||||
// We use try_lock_shards here since we are called from the
|
// We use try_lock_shards here since we are called from the
|
||||||
// deadlock handler, and this shouldn't be locked.
|
// deadlock handler, and this shouldn't be locked.
|
||||||
let shards = self.shards.try_lock_shards()?;
|
let shards = self.shards.try_lock_shards()?;
|
||||||
for (shard_id, shard) in shards.iter().enumerate() {
|
for shard in shards.iter() {
|
||||||
for (k, v) in shard.active.iter() {
|
for (k, v) in shard.active.iter() {
|
||||||
if let QueryResult::Started(ref job) = *v {
|
if let QueryResult::Started(ref job) = *v {
|
||||||
let id = QueryJobId::new(job.id, shard_id, kind);
|
|
||||||
let query = make_query(tcx, k.clone());
|
let query = make_query(tcx, k.clone());
|
||||||
jobs.insert(id, QueryJobInfo { query, job: job.clone() });
|
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -130,22 +121,21 @@ where
     }
 }
 
-impl<D, K> Default for QueryState<D, K> {
-    fn default() -> QueryState<D, K> {
+impl<K> Default for QueryState<K> {
+    fn default() -> QueryState<K> {
         QueryState { shards: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, D, K>
+struct JobOwner<'tcx, K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone,
 {
-    state: &'tcx QueryState<D, K>,
+    state: &'tcx QueryState<K>,
     key: K,
-    id: QueryJobId<D>,
+    id: QueryJobId,
 }
 
 #[cold]
|
|||||||
cache.store_nocache(value)
|
cache.store_nocache(value)
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx, D, K> JobOwner<'tcx, D, K>
|
impl<'tcx, K> JobOwner<'tcx, K>
|
||||||
where
|
where
|
||||||
D: Copy + Clone + Eq + Hash,
|
|
||||||
K: Eq + Hash + Clone,
|
K: Eq + Hash + Clone,
|
||||||
{
|
{
|
||||||
/// Either gets a `JobOwner` corresponding the query, allowing us to
|
/// Either gets a `JobOwner` corresponding the query, allowing us to
|
||||||
@ -182,12 +171,11 @@ where
|
|||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn try_start<'b, CTX>(
|
fn try_start<'b, CTX>(
|
||||||
tcx: &'b CTX,
|
tcx: &'b CTX,
|
||||||
state: &'b QueryState<CTX::DepKind, K>,
|
state: &'b QueryState<K>,
|
||||||
span: Span,
|
span: Span,
|
||||||
key: K,
|
key: K,
|
||||||
lookup: QueryLookup,
|
lookup: QueryLookup,
|
||||||
dep_kind: CTX::DepKind,
|
) -> TryGetJob<'b, K>
|
||||||
) -> TryGetJob<'b, CTX::DepKind, K>
|
|
||||||
where
|
where
|
||||||
CTX: QueryContext,
|
CTX: QueryContext,
|
||||||
{
|
{
|
||||||
@ -197,27 +185,21 @@ where
|
|||||||
|
|
||||||
match lock.active.entry(key) {
|
match lock.active.entry(key) {
|
||||||
Entry::Vacant(entry) => {
|
Entry::Vacant(entry) => {
|
||||||
// Generate an id unique within this shard.
|
let id = tcx.next_job_id();
|
||||||
let id = lock.jobs.checked_add(1).unwrap();
|
|
||||||
lock.jobs = id;
|
|
||||||
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
|
|
||||||
|
|
||||||
let job = tcx.current_query_job();
|
let job = tcx.current_query_job();
|
||||||
let job = QueryJob::new(id, span, job);
|
let job = QueryJob::new(id, span, job);
|
||||||
|
|
||||||
let key = entry.key().clone();
|
let key = entry.key().clone();
|
||||||
entry.insert(QueryResult::Started(job));
|
entry.insert(QueryResult::Started(job));
|
||||||
|
|
||||||
let global_id = QueryJobId::new(id, shard, dep_kind);
|
let owner = JobOwner { state, id, key };
|
||||||
let owner = JobOwner { state, id: global_id, key };
|
|
||||||
return TryGetJob::NotYetStarted(owner);
|
return TryGetJob::NotYetStarted(owner);
|
||||||
}
|
}
|
||||||
Entry::Occupied(mut entry) => {
|
Entry::Occupied(mut entry) => {
|
||||||
match entry.get_mut() {
|
match entry.get_mut() {
|
||||||
#[cfg(not(parallel_compiler))]
|
#[cfg(not(parallel_compiler))]
|
||||||
QueryResult::Started(job) => {
|
QueryResult::Started(job) => {
|
||||||
let id = QueryJobId::new(job.id, shard, dep_kind);
|
let id = job.id;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
// If we are single-threaded we know that we have cycle error,
|
// If we are single-threaded we know that we have cycle error,
|
||||||
@ -295,9 +277,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
|
impl<'tcx, K> Drop for JobOwner<'tcx, K>
|
||||||
where
|
where
|
||||||
D: Copy + Clone + Eq + Hash,
|
|
||||||
K: Eq + Hash + Clone,
|
K: Eq + Hash + Clone,
|
||||||
{
|
{
|
||||||
#[inline(never)]
|
#[inline(never)]
|
||||||
@ -329,13 +310,12 @@ pub(crate) struct CycleError {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The result of `try_start`.
|
/// The result of `try_start`.
|
||||||
enum TryGetJob<'tcx, D, K>
|
enum TryGetJob<'tcx, K>
|
||||||
where
|
where
|
||||||
D: Copy + Clone + Eq + Hash,
|
|
||||||
K: Eq + Hash + Clone,
|
K: Eq + Hash + Clone,
|
||||||
{
|
{
|
||||||
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
|
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
|
||||||
NotYetStarted(JobOwner<'tcx, D, K>),
|
NotYetStarted(JobOwner<'tcx, K>),
|
||||||
|
|
||||||
/// The query was already completed.
|
/// The query was already completed.
|
||||||
/// Returns the result of the query and its dep-node index
|
/// Returns the result of the query and its dep-node index
|
||||||
@ -375,7 +355,7 @@ where
|
|||||||
|
|
||||||
fn try_execute_query<CTX, C>(
|
fn try_execute_query<CTX, C>(
|
||||||
tcx: CTX,
|
tcx: CTX,
|
||||||
state: &QueryState<CTX::DepKind, C::Key>,
|
state: &QueryState<C::Key>,
|
||||||
cache: &QueryCacheStore<C>,
|
cache: &QueryCacheStore<C>,
|
||||||
span: Span,
|
span: Span,
|
||||||
key: C::Key,
|
key: C::Key,
|
||||||
@ -388,14 +368,7 @@ where
|
|||||||
C::Key: Clone + DepNodeParams<CTX::DepContext>,
|
C::Key: Clone + DepNodeParams<CTX::DepContext>,
|
||||||
CTX: QueryContext,
|
CTX: QueryContext,
|
||||||
{
|
{
|
||||||
match JobOwner::<'_, CTX::DepKind, C::Key>::try_start(
|
match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone(), lookup) {
|
||||||
&tcx,
|
|
||||||
state,
|
|
||||||
span,
|
|
||||||
key.clone(),
|
|
||||||
lookup,
|
|
||||||
query.dep_kind,
|
|
||||||
) {
|
|
||||||
TryGetJob::NotYetStarted(job) => {
|
TryGetJob::NotYetStarted(job) => {
|
||||||
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
|
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
|
||||||
let result = job.complete(cache, result, dep_node_index);
|
let result = job.complete(cache, result, dep_node_index);
|
||||||
@@ -427,7 +400,7 @@ fn execute_job<CTX, K, V>(
     key: K,
     mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
     query: &QueryVtable<CTX, K, V>,
-    job_id: QueryJobId<CTX::DepKind>,
+    job_id: QueryJobId,
 ) -> (V, DepNodeIndex)
 where
     K: Clone + DepNodeParams<CTX::DepContext>,