use crate::dep_graph::DepNodeIndex;

use rustc_arena::TypedArena;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
#[cfg(not(parallel_compiler))]
use rustc_data_structures::sync::Lock;
use rustc_data_structures::sync::WorkerLocal;
use rustc_index::vec::{Idx, IndexVec};
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;

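/// Selects the caches to use for a query, based on its key type: `Cache`
/// stores values directly, while `ArenaCache` arena-allocates them and
/// hands out `&'tcx` references.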
pub trait CacheSelector<'tcx, V> {
    type Cache
    where
        V: Clone;
    type ArenaCache;
}

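/// The backing storage for a query cache. `Value` is the type the query
/// computes; `Stored` is what callers receive (the value itself for
/// value-based caches, a `&'tcx` reference for arena-based caches).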
pub trait QueryStorage {
    type Value: Debug;
    type Stored: Clone;

    /// Store a value without putting it in the cache.
    /// This is meant to be used with cycle errors.
    fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}

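/// The cache side of query execution: `lookup` is tried first, and on a
/// miss the caller computes the value and records it with `complete`.
/// A minimal sketch of that protocol (the real driver lives in
/// `plumbing.rs`; `get_or_compute` and `compute_query` are hypothetical
/// stand-ins, not functions in this crate):
///
/// ```ignore (illustrative sketch, not a doctest)
/// fn get_or_compute<C: QueryCache>(cache: &C, key: C::Key) -> C::Stored {
///     match cache.lookup(&key, |stored, _dep_node_index| stored.clone()) {
///         Ok(stored) => stored,
///         Err(()) => {
///             // Hypothetical: run the query and get its dep node index.
///             let (value, dep_node_index) = compute_query(&key);
///             cache.complete(key, value, dep_node_index)
///         }
///     }
/// }
/// ```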
pub trait QueryCache: QueryStorage + Sized {
    type Key: Hash + Eq + Clone + Debug;

    /// Checks if the query is already computed and in the cache.
    /// On a hit, `on_hit` is called with the cached value and its
    /// `DepNodeIndex` and its result is returned; on a miss, `Err(())` is
    /// returned and the caller is expected to compute the value and record
    /// it with `complete`.
    fn lookup<R, OnHit>(
        &self,
        key: &Self::Key,
        // `on_hit` can be called while holding a lock to the query state shard.
        on_hit: OnHit,
    ) -> Result<R, ()>
    where
        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;

    fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;

    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
}

pub struct DefaultCacheSelector<K>(PhantomData<K>);

impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
    type Cache = DefaultCache<K, V>
    where
        V: Clone;
    type ArenaCache = ArenaCache<'tcx, K, V>;
}

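/// The default query cache: a hash map from key to `(value, dep_node_index)`.
/// Under `parallel_compiler` the map is sharded to reduce lock contention.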
pub struct DefaultCache<K, V> {
    #[cfg(parallel_compiler)]
    cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
}

impl<K, V> Default for DefaultCache<K, V> {
    fn default() -> Self {
        DefaultCache { cache: Default::default() }
    }
}

impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
    type Value = V;
    type Stored = V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        // We have no dedicated storage
        value
    }
}

impl<K, V> QueryCache for DefaultCache<K, V>
where
    K: Eq + Hash + Clone + Debug,
    V: Clone + Debug,
{
    type Key = K;

    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&V, DepNodeIndex) -> R,
    {
        // Hash the key once and reuse the hash both to pick the shard and
        // for the raw-entry lookup within it.
        let key_hash = sharded::make_hash(key);
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key_hash).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);

        if let Some((_, value)) = result {
            let hit_result = on_hit(&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        // We may be overwriting another value. This is all right, since the dep-graph
        // will check that the fingerprint matches.
        lock.insert(key, (value.clone(), index));
        value
    }

    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    f(k, &v.0, v.1);
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter() {
                f(k, &v.0, v.1);
            }
        }
    }
}

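/// A query cache that allocates `(value, dep_node_index)` entries in a
/// per-worker arena and stores `&'tcx` references to them, so values can
/// be handed out without cloning.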
pub struct ArenaCache<'tcx, K, V> {
    arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
    #[cfg(parallel_compiler)]
    cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
}

impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
    fn default() -> Self {
        ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
    }
}

impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
    type Value = V;
    type Stored = &'tcx V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        let value = self.arena.alloc((value, DepNodeIndex::INVALID));
        // SAFETY: the arena is owned by this cache, which lives for `'tcx`,
        // and arena entries are never dropped or moved, so the borrow can
        // be extended to `'tcx`.
        let value = unsafe { &*(&value.0 as *const _) };
        &value
    }
}

impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
where
    K: Eq + Hash + Clone + Debug,
    V: Debug,
{
    type Key = K;

    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
    {
        let key_hash = sharded::make_hash(key);
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key_hash).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);

        if let Some((_, value)) = result {
            let hit_result = on_hit(&&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        let value = self.arena.alloc((value, index));
        // SAFETY: as in `store_nocache`, arena entries live for `'tcx`.
        let value = unsafe { &*(value as *const _) };
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        // We may be overwriting another value. This is all right, since the dep-graph
        // will check that the fingerprint matches.
        lock.insert(key, value);
        &value.0
    }

    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    f(k, &v.0, v.1);
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter() {
                f(k, &v.0, v.1);
            }
        }
    }
}

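/// Selects the `IndexVec`-based caches for queries whose keys are dense
/// indices (`K: Idx`), so entries are found by direct indexing rather
/// than hashing.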
pub struct VecCacheSelector<K>(PhantomData<K>);

impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
    type Cache = VecCache<K, V>
    where
        V: Clone;
    type ArenaCache = VecArenaCache<'tcx, K, V>;
}

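/// Caches query results in an `IndexVec` indexed directly by the key;
/// slots for keys that have not been computed yet hold `None`.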
pub struct VecCache<K: Idx, V> {
    #[cfg(parallel_compiler)]
    cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
}

impl<K: Idx, V> Default for VecCache<K, V> {
    fn default() -> Self {
        VecCache { cache: Default::default() }
    }
}

impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
    type Value = V;
    type Stored = V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        // We have no dedicated storage
        value
    }
}

impl<K, V> QueryCache for VecCache<K, V>
where
    K: Eq + Idx + Clone + Debug,
    V: Clone + Debug,
{
    type Key = K;

    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&V, DepNodeIndex) -> R,
    {
        // The key is already an index, so it is used directly as the "hash"
        // when picking a shard.
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        if let Some(Some(value)) = lock.get(*key) {
            let hit_result = on_hit(&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        lock.insert(key, (value.clone(), index));
        value
    }

    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter_enumerated() {
                    if let Some(v) = v {
                        f(&k, &v.0, v.1);
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter_enumerated() {
                if let Some(v) = v {
                    f(&k, &v.0, v.1);
                }
            }
        }
    }
}

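/// The arena variant of `VecCache`: values are arena-allocated and the
/// `IndexVec` stores `&'tcx` references to them.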
pub struct VecArenaCache<'tcx, K: Idx, V> {
    arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
    #[cfg(parallel_compiler)]
    cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
}

impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
    fn default() -> Self {
        VecArenaCache {
            arena: WorkerLocal::new(|_| TypedArena::default()),
            cache: Default::default(),
        }
    }
}

impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
    type Value = V;
    type Stored = &'tcx V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        let value = self.arena.alloc((value, DepNodeIndex::INVALID));
        // SAFETY: the arena is owned by this cache, which lives for `'tcx`,
        // and arena entries are never dropped or moved, so the borrow can
        // be extended to `'tcx`.
        let value = unsafe { &*(&value.0 as *const _) };
        &value
    }
}

impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
where
    K: Eq + Idx + Clone + Debug,
    V: Debug,
{
    type Key = K;

    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
    {
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        if let Some(Some(value)) = lock.get(*key) {
            let hit_result = on_hit(&&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        let value = self.arena.alloc((value, index));
        // SAFETY: as in `store_nocache`, arena entries live for `'tcx`.
        let value = unsafe { &*(value as *const _) };
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        lock.insert(key, value);
        &value.0
    }

    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter_enumerated() {
                    if let Some(v) = v {
                        f(&k, &v.0, v.1);
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter_enumerated() {
                if let Some(v) = v {
                    f(&k, &v.0, v.1);
                }
            }
        }
    }
}