[core] Enforce a deadlock-free locking order for Mutexes.

If `debug_assertions` or the `"validate-locks"` feature are enabled,
change `wgpu-core` to use a wrapper around `parking_lot::Mutex` that
checks for potential deadlocks.

At the moment, `wgpu-core` does contain deadlocks, so the ranking in
the `lock::rank` module is incomplete, in the interests of keeping it
acyclic. #5572 tracks the work needed to complete the ranking.
This commit is contained in:
Jim Blandy 2024-04-14 16:04:01 -07:00
parent 7840e75bf7
commit b3c5a6fb84
19 changed files with 613 additions and 82 deletions

View File

@ -2,7 +2,7 @@ use crate::hal_api::HalApi;
use crate::resource_log;
use hal::Device as _;
use parking_lot::Mutex;
use crate::lock::{rank, Mutex};
/// A pool of free [`wgpu_hal::CommandEncoder`]s, owned by a `Device`.
///
@ -21,7 +21,7 @@ pub(crate) struct CommandAllocator<A: HalApi> {
impl<A: HalApi> CommandAllocator<A> {
pub(crate) fn new() -> Self {
Self {
free_encoders: Mutex::new(Vec::new()),
free_encoders: Mutex::new(rank::COMMAND_ALLOCATOR_FREE_ENCODERS, Vec::new()),
}
}

View File

@ -23,6 +23,7 @@ use crate::device::{Device, DeviceError};
use crate::error::{ErrorFormatter, PrettyError};
use crate::hub::Hub;
use crate::id::CommandBufferId;
use crate::lock::{rank, Mutex};
use crate::snatch::SnatchGuard;
use crate::init_tracker::BufferInitTrackerAction;
@ -31,7 +32,6 @@ use crate::track::{Tracker, UsageScope};
use crate::{api_log, global::Global, hal_api::HalApi, id, resource_log, Label};
use hal::CommandEncoder as _;
use parking_lot::Mutex;
use thiserror::Error;
#[cfg(feature = "trace")]
@ -338,25 +338,28 @@ impl<A: HalApi> CommandBuffer<A> {
.as_str(),
None,
),
data: Mutex::new(Some(CommandBufferMutable {
encoder: CommandEncoder {
raw: encoder,
is_open: false,
list: Vec::new(),
label,
},
status: CommandEncoderStatus::Recording,
trackers: Tracker::new(),
buffer_memory_init_actions: Default::default(),
texture_memory_actions: Default::default(),
pending_query_resets: QueryResetMap::new(),
#[cfg(feature = "trace")]
commands: if enable_tracing {
Some(Vec::new())
} else {
None
},
})),
data: Mutex::new(
rank::COMMAND_BUFFER_DATA,
Some(CommandBufferMutable {
encoder: CommandEncoder {
raw: encoder,
is_open: false,
list: Vec::new(),
label,
},
status: CommandEncoderStatus::Recording,
trackers: Tracker::new(),
buffer_memory_init_actions: Default::default(),
texture_memory_actions: Default::default(),
pending_query_resets: QueryResetMap::new(),
#[cfg(feature = "trace")]
commands: if enable_tracing {
Some(Vec::new())
} else {
None
},
}),
),
}
}

View File

@ -7,6 +7,7 @@ use crate::{
},
hal_api::HalApi,
id,
lock::Mutex,
pipeline::{ComputePipeline, RenderPipeline},
resource::{
self, Buffer, DestroyedBuffer, DestroyedTexture, QuerySet, Resource, Sampler,
@ -18,7 +19,6 @@ use crate::{
};
use smallvec::SmallVec;
use parking_lot::Mutex;
use std::sync::Arc;
use thiserror::Error;

View File

@ -14,6 +14,7 @@ use crate::{
hal_label,
id::{self, DeviceId, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
lock::{rank, Mutex},
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
ResourceInfo, ResourceType, StagingBuffer, Texture, TextureInner,
@ -22,7 +23,6 @@ use crate::{
};
use hal::{CommandEncoder as _, Device as _, Queue as _};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{
@ -317,7 +317,7 @@ fn prepare_staging_buffer<A: HalApi>(
let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size) }?;
let staging_buffer = StagingBuffer {
raw: Mutex::new(Some(buffer)),
raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
device: device.clone(),
size,
info: ResourceInfo::new(

View File

@ -18,6 +18,7 @@ use crate::{
TextureInitTracker, TextureInitTrackerAction,
},
instance::Adapter,
lock::{rank, Mutex, MutexGuard},
pipeline,
pool::ResourcePool,
registry::Registry,
@ -41,7 +42,7 @@ use crate::{
use arrayvec::ArrayVec;
use hal::{CommandEncoder as _, Device as _};
use once_cell::sync::OnceCell;
use parking_lot::{Mutex, MutexGuard, RwLock};
use parking_lot::RwLock;
use smallvec::SmallVec;
use thiserror::Error;
@ -274,33 +275,39 @@ impl<A: HalApi> Device<A> {
fence: RwLock::new(Some(fence)),
snatchable_lock: unsafe { SnatchLock::new() },
valid: AtomicBool::new(true),
trackers: Mutex::new(Tracker::new()),
trackers: Mutex::new(rank::DEVICE_TRACKERS, Tracker::new()),
tracker_indices: TrackerIndexAllocators::new(),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: Mutex::new(Some(life::ResourceMaps::new())),
life_tracker: Mutex::new(rank::DEVICE_LIFE_TRACKER, life::LifetimeTracker::new()),
temp_suspected: Mutex::new(
rank::DEVICE_TEMP_SUSPECTED,
Some(life::ResourceMaps::new()),
),
bgl_pool: ResourcePool::new(),
#[cfg(feature = "trace")]
trace: Mutex::new(trace_path.and_then(|path| match trace::Trace::new(path) {
Ok(mut trace) => {
trace.add(trace::Action::Init {
desc: desc.clone(),
backend: A::VARIANT,
});
Some(trace)
}
Err(e) => {
log::error!("Unable to start a trace in '{path:?}': {e}");
None
}
})),
trace: Mutex::new(
rank::DEVICE_TRACE,
trace_path.and_then(|path| match trace::Trace::new(path) {
Ok(mut trace) => {
trace.add(trace::Action::Init {
desc: desc.clone(),
backend: A::VARIANT,
});
Some(trace)
}
Err(e) => {
log::error!("Unable to start a trace in '{path:?}': {e}");
None
}
}),
),
alignments,
limits: desc.required_limits.clone(),
features: desc.required_features,
downlevel,
instance_flags,
pending_writes: Mutex::new(Some(pending_writes)),
deferred_destroy: Mutex::new(Vec::new()),
usage_scopes: Default::default(),
pending_writes: Mutex::new(rank::DEVICE_PENDING_WRITES, Some(pending_writes)),
deferred_destroy: Mutex::new(rank::DEVICE_DEFERRED_DESTROY, Vec::new()),
usage_scopes: Mutex::new(rank::DEVICE_USAGE_SCOPES, Default::default()),
})
}
@ -650,13 +657,13 @@ impl<A: HalApi> Device<A> {
usage: desc.usage,
size: desc.size,
initialization_status: RwLock::new(BufferInitTracker::new(aligned_size)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
sync_mapped_writes: Mutex::new(rank::BUFFER_SYNC_MAPPED_WRITES, None),
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
bind_groups: Mutex::new(Vec::new()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
})
}
@ -689,8 +696,8 @@ impl<A: HalApi> Device<A> {
Some(self.tracker_indices.textures.clone()),
),
clear_mode: RwLock::new(clear_mode),
views: Mutex::new(Vec::new()),
bind_groups: Mutex::new(Vec::new()),
views: Mutex::new(rank::TEXTURE_VIEWS, Vec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, Vec::new()),
}
}
@ -707,13 +714,13 @@ impl<A: HalApi> Device<A> {
usage: desc.usage,
size: desc.size,
initialization_status: RwLock::new(BufferInitTracker::new(0)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
sync_mapped_writes: Mutex::new(rank::BUFFER_SYNC_MAPPED_WRITES, None),
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
bind_groups: Mutex::new(Vec::new()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
}
}

View File

@ -1,8 +1,8 @@
use parking_lot::Mutex;
use wgt::Backend;
use crate::{
id::{Id, Marker},
lock::{rank, Mutex},
Epoch, Index,
};
use std::{fmt::Debug, marker::PhantomData};
@ -117,12 +117,15 @@ impl<T: Marker> IdentityManager<T> {
impl<T: Marker> IdentityManager<T> {
pub fn new() -> Self {
Self {
values: Mutex::new(IdentityValues {
free: Vec::new(),
next_index: 0,
count: 0,
id_source: IdSource::None,
}),
values: Mutex::new(
rank::IDENTITY_MANAGER_VALUES,
IdentityValues {
free: Vec::new(),
next_index: 0,
count: 0,
id_source: IdSource::None,
},
),
_phantom: PhantomData,
}
}

View File

@ -6,13 +6,14 @@ use crate::{
device::{queue::Queue, resource::Device, DeviceDescriptor},
global::Global,
hal_api::HalApi,
id::{markers, AdapterId, DeviceId, Id, Marker, QueueId, SurfaceId},
id::markers,
id::{AdapterId, DeviceId, Id, Marker, QueueId, SurfaceId},
lock::{rank, Mutex},
present::Presentation,
resource::{Resource, ResourceInfo, ResourceType},
resource_log, LabelHelpers, DOWNLEVEL_WARNING_MESSAGE,
};
use parking_lot::Mutex;
use wgt::{Backend, Backends, PowerPreference};
use hal::{Adapter as _, Instance as _, OpenDevice};
@ -530,7 +531,7 @@ impl Global {
let mut any_created = false;
let surface = Surface {
presentation: Mutex::new(None),
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
#[cfg(vulkan)]
@ -594,7 +595,7 @@ impl Global {
profiling::scope!("Instance::create_surface_metal");
let surface = Surface {
presentation: Mutex::new(None),
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
metal: Some(self.instance.metal.as_ref().map_or(
Err(CreateSurfaceError::BackendNotEnabled(Backend::Metal)),
@ -623,7 +624,7 @@ impl Global {
create_surface_func: impl FnOnce(&HalInstance<hal::api::Dx12>) -> HalSurface<hal::api::Dx12>,
) -> Result<SurfaceId, CreateSurfaceError> {
let surface = Surface {
presentation: Mutex::new(None),
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
dx12: Some(create_surface_func(
self.instance

View File

@ -63,6 +63,7 @@ pub mod id;
pub mod identity;
mod init_tracker;
pub mod instance;
mod lock;
pub mod pipeline;
mod pool;
pub mod present;

41
wgpu-core/src/lock/mod.rs Normal file
View File

@ -0,0 +1,41 @@
//! Instrumented lock types.
//!
//! This module defines a set of instrumented wrappers for the lock
//! types used in `wgpu-core` ([`Mutex`], [`RwLock`], and
//! [`SnatchLock`]) that help us understand and validate `wgpu-core`
//! synchronization.
//!
//! - The [`ranked`] module defines lock types that perform run-time
//! checks to ensure that each thread acquires locks only in a
//! specific order, to prevent deadlocks.
//!
//! - The [`vanilla`] module defines lock types that are
//! uninstrumented, no-overhead wrappers around the standard lock
//! types.
//!
//! (We plan to add more wrappers in the future.)
//!
//! If the `wgpu_validate_locks` config is set (for example, with
//! `RUSTFLAGS='--cfg wgpu_validate_locks'`), `wgpu-core` uses the
//! [`ranked`] module's locks. We hope to make this the default for
//! debug builds soon.
//!
//! Otherwise, `wgpu-core` uses the [`vanilla`] module's locks.
//!
//! [`Mutex`]: parking_lot::Mutex
//! [`RwLock`]: parking_lot::RwLock
//! [`SnatchLock`]: crate::snatch::SnatchLock
pub mod rank;

// The ranked implementation is compiled even when unused so that it
// stays buildable; `allow(dead_code)` silences the warnings in that case.
#[cfg_attr(not(wgpu_validate_locks), allow(dead_code))]
mod ranked;

// Likewise, the vanilla implementation is kept buildable when the
// ranked locks are selected.
#[cfg_attr(wgpu_validate_locks, allow(dead_code))]
mod vanilla;

// Select which implementation backs `crate::lock::Mutex`: the
// deadlock-checking locks under `--cfg wgpu_validate_locks`, the
// zero-overhead wrappers otherwise. Both export the same interface.
#[cfg(wgpu_validate_locks)]
pub use ranked::{Mutex, MutexGuard};

#[cfg(not(wgpu_validate_locks))]
pub use vanilla::{Mutex, MutexGuard};

143
wgpu-core/src/lock/rank.rs Normal file
View File

@ -0,0 +1,143 @@
//! Ranks for `wgpu-core` locks, restricting acquisition order.
//!
//! See [`LockRank`].
/// The rank of a lock.
///
/// Each [`Mutex`], [`RwLock`], and [`SnatchLock`] in `wgpu-core` has been
/// assigned a *rank*: a node in the DAG defined at the bottom of
/// `wgpu-core/src/lock/rank.rs`. The rank of the most recently
/// acquired lock you are still holding determines which locks you may
/// attempt to acquire next.
///
/// When you create a lock in `wgpu-core`, you must specify its rank
/// by passing in a [`LockRank`] value. This module declares a
/// pre-defined set of ranks to cover everything in `wgpu-core`, named
/// after the type in which they occur, and the name of the type's
/// field that is a lock. For example, [`CommandBuffer::data`] is a
/// `Mutex`, and its rank here is the constant
/// [`COMMAND_BUFFER_DATA`].
///
/// [`Mutex`]: parking_lot::Mutex
/// [`RwLock`]: parking_lot::RwLock
/// [`SnatchLock`]: crate::snatch::SnatchLock
/// [`CommandBuffer::data`]: crate::command::CommandBuffer::data
#[derive(Debug, Copy, Clone)]
pub struct LockRank {
    /// The bit representing this lock.
    ///
    /// There should only be a single bit set in this value.
    /// (The `define_lock_ranks!` macro establishes this invariant by
    /// assigning each rank a distinct `LockRankSet` flag.)
    pub(super) bit: LockRankSet,

    /// A bitmask of permitted successor ranks.
    ///
    /// If `rank` is the rank of the most recently acquired lock we
    /// are still holding, then `rank.followers` is the mask of
    /// locks we are allowed to acquire next.
    ///
    /// The `define_lock_ranks!` macro ensures that there are no
    /// cycles in the graph of lock ranks and their followers.
    pub(super) followers: LockRankSet,
}
/// Define a set of lock ranks, and each rank's permitted successors.
///
/// Each entry has the form
/// `rank NAME "TypeName::field" followed by { FOLLOWER, ... };`
/// and expands to a `pub const NAME: LockRank` whose `followers` set
/// is the union of the named followers' bits.
macro_rules! define_lock_ranks {
    {
        $(
            $( #[ $attr:meta ] )*
            rank $name:ident $member:literal followed by { $( $follower:ident ),* $(,)? };
        )*
    } => {
        // An enum that assigns a unique number to each rank.
        #[allow(non_camel_case_types, clippy::upper_case_acronyms)]
        enum LockRankNumber { $( $name, )* }

        bitflags::bitflags! {
            #[derive(Debug, Copy, Clone, Eq, PartialEq)]
            /// A bitflags type representing a set of lock ranks.
            pub struct LockRankSet: u32 {
                $(
                    // One distinct bit per rank, numbered by `LockRankNumber`.
                    const $name = 1 << (LockRankNumber:: $name as u32);
                )*
            }
        }

        impl LockRankSet {
            // Map a single-bit set back to its human-readable member name,
            // used in deadlock-checker panic messages.
            pub fn name(self) -> &'static str {
                match self {
                    $(
                        LockRankSet:: $name => $member,
                    )*
                    _ => "<unrecognized LockRankSet bit>",
                }
            }
        }

        $(
            // If there is any cycle in the ranking, the initializers
            // for `followers` will be cyclic, and rustc will give us
            // an error message explaining the cycle.
            $( #[ $attr ] )*
            pub const $name: LockRank = LockRank {
                bit: LockRankSet:: $name,
                followers: LockRankSet::empty() $( .union($follower.bit) )*,
            };
        )*
    }
}
// The lock-order DAG for `wgpu-core`. An edge from a rank to one of
// its followers means: while holding the former, a thread may acquire
// the latter. Per the commit message, this ranking is still
// incomplete; #5572 tracks completing it while keeping it acyclic.
define_lock_ranks! {
    rank COMMAND_BUFFER_DATA "CommandBuffer::data" followed by {
        DEVICE_USAGE_SCOPES,
        SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
        BUFFER_BIND_GROUP_STATE_BUFFERS,
        TEXTURE_BIND_GROUP_STATE_TEXTURES,
        BUFFER_MAP_STATE,
        STATELESS_BIND_GROUP_STATE_RESOURCES,
    };
    rank STAGING_BUFFER_RAW "StagingBuffer::raw" followed by { };
    rank COMMAND_ALLOCATOR_FREE_ENCODERS "CommandAllocator::free_encoders" followed by {
        SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
    };
    rank DEVICE_TRACKERS "Device::trackers" followed by { };
    rank DEVICE_LIFE_TRACKER "Device::life_tracker" followed by {
        COMMAND_ALLOCATOR_FREE_ENCODERS,
        // Uncomment this to see an interesting cycle.
        // DEVICE_TEMP_SUSPECTED,
    };
    rank DEVICE_TEMP_SUSPECTED "Device::temp_suspected" followed by {
        SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
        COMMAND_BUFFER_DATA,
        DEVICE_TRACKERS,
    };
    rank DEVICE_PENDING_WRITES "Device::pending_writes" followed by {
        COMMAND_ALLOCATOR_FREE_ENCODERS,
        SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
        DEVICE_LIFE_TRACKER,
    };
    rank DEVICE_DEFERRED_DESTROY "Device::deferred_destroy" followed by { };
    #[allow(dead_code)]
    rank DEVICE_TRACE "Device::trace" followed by { };
    rank DEVICE_USAGE_SCOPES "Device::usage_scopes" followed by { };
    rank BUFFER_SYNC_MAPPED_WRITES "Buffer::sync_mapped_writes" followed by { };
    rank BUFFER_MAP_STATE "Buffer::map_state" followed by { DEVICE_PENDING_WRITES };
    rank BUFFER_BIND_GROUPS "Buffer::bind_groups" followed by { };
    rank TEXTURE_VIEWS "Texture::views" followed by { };
    rank TEXTURE_BIND_GROUPS "Texture::bind_groups" followed by { };
    rank IDENTITY_MANAGER_VALUES "IdentityManager::values" followed by { };
    rank RESOURCE_POOL_INNER "ResourcePool::inner" followed by { };
    rank BUFFER_BIND_GROUP_STATE_BUFFERS "BufferBindGroupState::buffers" followed by { };
    rank STATELESS_BIND_GROUP_STATE_RESOURCES "StatelessBindGroupState::resources" followed by { };
    rank TEXTURE_BIND_GROUP_STATE_TEXTURES "TextureBindGroupState::textures" followed by { };
    rank SHARED_TRACKER_INDEX_ALLOCATOR_INNER "SharedTrackerIndexAllocator::inner" followed by { };
    rank SURFACE_PRESENTATION "Surface::presentation" followed by { };

    // Chess-themed ranks used only by the tests in `lock::ranked`.
    #[cfg(test)]
    rank PAWN "pawn" followed by { ROOK, BISHOP };
    #[cfg(test)]
    rank ROOK "rook" followed by { KNIGHT };
    #[cfg(test)]
    rank KNIGHT "knight" followed by { };
    #[cfg(test)]
    rank BISHOP "bishop" followed by { };
}

View File

@ -0,0 +1,264 @@
//! Lock types that enforce well-ranked lock acquisition order.
//!
//! This module's [`Mutex`] type is instrumented to check that `wgpu-core`
//! acquires locks according to their rank, to prevent deadlocks. To use it,
//! put `--cfg wgpu_validate_locks` in `RUSTFLAGS`.
//!
//! The [`LockRank`] constants in the [`lock::rank`] module describe edges in a
//! directed graph of lock acquisitions: each lock's rank says, if this is the most
//! recently acquired lock that you are still holding, then these are the locks you
//! are allowed to acquire next.
//!
//! As long as this graph doesn't have cycles, any number of threads can acquire
//! locks along paths through the graph without deadlock:
//!
//! - Assume that if a thread is holding a lock, then it will either release it,
//! or block trying to acquire another one. No thread just sits on its locks
//! forever for unrelated reasons. If it did, then that would be a source of
//! deadlock "outside the system" that we can't do anything about.
//!
//! - This module asserts that threads acquire and release locks in a stack-like
//! order: a lock is dropped only when it is the *most recently acquired* lock
//! *still held* - call this the "youngest" lock. This stack-like ordering
//! isn't a Rust requirement; Rust lets you drop guards in any order you like.
//! This is a restriction we impose.
//!
//! - Consider the directed graph whose nodes are locks, and whose edges go from
//! each lock to its permitted followers, the locks in its [`LockRank::followers`]
//! set. The definition of the [`lock::rank`] module's [`LockRank`] constants
//! ensures that this graph has no cycles, including trivial cycles from a node to
//! itself.
//!
//! - This module then asserts that each thread attempts to acquire a lock only if
//! it is among its youngest lock's permitted followers. Thus, as a thread
//! acquires locks, it must be traversing a path through the graph along its
//! edges.
//!
//! - Because there are no cycles in the graph, whenever one thread is blocked
//! waiting to acquire a lock, that lock must be held by a different thread: if
//! you were allowed to acquire a lock you already hold, that would be a cycle in
//! the graph.
//!
//! - Furthermore, because the graph has no cycles, as we work our way from each
//! thread to the thread it is blocked waiting for, we must eventually reach an
//! end point: there must be some thread that is able to acquire its next lock, or
//! that is about to release a lock.
//!
//! Thus, the system as a whole is always able to make progress: it is free of
//! deadlocks.
//!
//! Note that this validation only monitors each thread's behavior in isolation:
//! there's only thread-local state, nothing communicated between threads. So we
//! don't detect deadlocks, per se, only the potential to cause deadlocks. This
//! means that the validation is conservative, but more reproducible, since it's not
//! dependent on any particular interleaving of execution.
//!
//! [`lock::rank`]: crate::lock::rank
use super::rank::LockRank;
use std::{cell::Cell, panic::Location};
/// A `Mutex` instrumented for deadlock prevention.
///
/// This is just a wrapper around a [`parking_lot::Mutex`], along with
/// its rank in the `wgpu_core` lock ordering.
///
/// For details, see [the module documentation][mod].
///
/// [mod]: crate::lock::ranked
pub struct Mutex<T> {
    /// The actual lock protecting the data.
    inner: parking_lot::Mutex<T>,
    /// This lock's position in the lock-order DAG; checked on every
    /// `lock` call against the thread's most recently acquired lock.
    rank: LockRank,
}
/// A guard produced by locking [`Mutex`].
///
/// This is just a wrapper around a [`parking_lot::MutexGuard`], along
/// with the state needed to track lock acquisition.
///
/// For details, see [the module documentation][mod].
///
/// [mod]: crate::lock::ranked
pub struct MutexGuard<'a, T> {
    /// The guard for the underlying `parking_lot` mutex.
    inner: parking_lot::MutexGuard<'a, T>,
    /// The thread's `LockState` as it was just before this lock was
    /// acquired; restored into `LOCK_STATE` when the guard is dropped.
    saved: LockState,
}
/// Per-thread state for the deadlock checker.
#[derive(Debug, Copy, Clone)]
struct LockState {
    /// The last lock we acquired, and where.
    last_acquired: Option<(LockRank, &'static Location<'static>)>,

    /// The number of locks currently held.
    ///
    /// This is used to enforce stack-like lock acquisition and release.
    depth: u32,
}

impl LockState {
    /// The state of a thread that holds no locks: used to initialize
    /// each thread's `LOCK_STATE` cell.
    const INITIAL: LockState = LockState {
        last_acquired: None,
        depth: 0,
    };
}
impl<T> Mutex<T> {
    /// Wrap `value` in a new deadlock-checked mutex occupying the
    /// position `rank` in `wgpu-core`'s lock ordering.
    #[inline]
    pub fn new(rank: LockRank, value: T) -> Mutex<T> {
        Mutex {
            inner: parking_lot::Mutex::new(value),
            rank,
        }
    }

    /// Acquire this lock, panicking if doing so would violate the
    /// lock-order DAG.
    ///
    /// `#[track_caller]` lets the panic message report the caller's
    /// source location rather than this function's.
    #[inline]
    #[track_caller]
    pub fn lock(&self) -> MutexGuard<T> {
        let saved = LOCK_STATE.get();
        let here = Location::caller();

        // A thread holding no locks may acquire anything, so the
        // follower check only applies once `last_acquired` is `Some`.
        match saved.last_acquired {
            None => {}
            Some((prior_rank, prior_location)) => assert!(
                prior_rank.followers.contains(self.rank.bit),
                "Attempt to acquire nested mutexes in wrong order:\n\
                 last locked {:<35} at {}\n\
                 now locking {:<35} at {}\n\
                 Locking {} after locking {} is not permitted.",
                prior_rank.bit.name(),
                prior_location,
                self.rank.bit.name(),
                here,
                self.rank.bit.name(),
                prior_rank.bit.name(),
            ),
        }

        // Record this acquisition as the thread's newest lock before
        // (possibly) blocking on the underlying mutex.
        LOCK_STATE.set(LockState {
            last_acquired: Some((self.rank, here)),
            depth: saved.depth + 1,
        });

        MutexGuard {
            inner: self.inner.lock(),
            saved,
        }
    }
}
impl<'a, T> Drop for MutexGuard<'a, T> {
    fn drop(&mut self) {
        // Put back the per-thread state captured when this lock was
        // acquired, and retrieve the state in force until now.
        let current = LOCK_STATE.replace(self.saved);

        // Although Rust allows mutex guards to be dropped in any
        // order, this analysis requires that locks be acquired and
        // released in stack order: the next lock to be released must
        // be the most recently acquired lock still held. If that
        // holds, the depth now must be exactly one more than the
        // depth we saved.
        let expected_depth = self.saved.depth + 1;
        assert_eq!(
            current.depth, expected_depth,
            "Lock not released in stacking order"
        );
    }
}
thread_local! {
    // Each thread's view of which locks it holds: the deadlock checker
    // is purely thread-local, so it detects deadlock *potential*
    // without any cross-thread communication.
    static LOCK_STATE: Cell<LockState> = const { Cell::new(LockState::INITIAL) };
}
impl<'a, T> std::ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // Reach through the parking_lot guard to the protected value.
        &*self.inner
    }
}

impl<'a, T> std::ops::DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.inner
    }
}

impl<T: std::fmt::Debug> std::fmt::Debug for Mutex<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to the inner mutex's `Debug` output.
        std::fmt::Debug::fmt(&self.inner, f)
    }
}
/// Locks can be acquired in the order indicated by their ranks.
#[test]
fn permitted() {
    use super::rank;

    // PAWN lists ROOK as a follower, so nesting in this order is legal.
    let outer = Mutex::new(rank::PAWN, ());
    let inner = Mutex::new(rank::ROOK, ());

    let _outer_guard = outer.lock();
    let _inner_guard = inner.lock();
}
/// Locks can only be acquired in the order indicated by their ranks.
#[test]
#[should_panic(expected = "Locking pawn after locking rook")]
fn forbidden_unrelated() {
    use super::rank;

    // ROOK does not list PAWN among its followers, so the second
    // acquisition below must panic.
    let outer = Mutex::new(rank::ROOK, ());
    let inner = Mutex::new(rank::PAWN, ());

    let _outer_guard = outer.lock();
    let _inner_guard = inner.lock();
}
/// Lock acquisitions can't skip ranks.
///
/// These two locks *could* be acquired in this order, but only if other locks
/// are acquired in between them. Skipping ranks isn't allowed.
#[test]
#[should_panic(expected = "Locking knight after locking pawn")]
fn forbidden_skip() {
    use super::rank;

    // KNIGHT follows ROOK, which follows PAWN — but KNIGHT is not a
    // *direct* follower of PAWN, so this nesting must panic.
    let outer = Mutex::new(rank::PAWN, ());
    let inner = Mutex::new(rank::KNIGHT, ());

    let _outer_guard = outer.lock();
    let _inner_guard = inner.lock();
}
/// Locks can be acquired and released in a stack-like order.
#[test]
fn stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());
    let bishop = Mutex::new(rank::BISHOP, ());

    // Hold PAWN throughout; take ROOK and BISHOP one after the other,
    // releasing each before PAWN — a well-nested, stack-like pattern.
    let pawn_guard = pawn.lock();
    let rook_guard = rook.lock();
    drop(rook_guard);
    let bishop_guard = bishop.lock();
    drop(bishop_guard);
    drop(pawn_guard);
}
/// Locks can only be acquired and released in a stack-like order.
#[test]
#[should_panic(expected = "Lock not released in stacking order")]
fn non_stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());

    let pawn_guard = pawn.lock();
    let rook_guard = rook.lock();

    // Leak the inner guard rather than dropping it during unwinding:
    // its own `Drop` would also panic, and a double panic would abort
    // instead of letting `should_panic` observe the one we expect.
    std::mem::forget(rook_guard);

    // Releasing the outer lock while the inner one is still "held"
    // violates stack order and must panic.
    drop(pawn_guard);
}

View File

@ -0,0 +1,53 @@
//! Plain, uninstrumented wrappers around [`parking_lot`] lock types.
//!
//! These definitions are used when no particular lock instrumentation
//! Cargo feature is selected.
/// A plain wrapper around [`parking_lot::Mutex`].
///
/// This is just like [`parking_lot::Mutex`], except that our [`new`]
/// method takes a rank, indicating where the new mutex should sit in
/// `wgpu-core`'s lock ordering. The rank is ignored.
///
/// See the [`lock`] module documentation for other wrappers.
///
/// [`new`]: Mutex::new
/// [`lock`]: crate::lock
pub struct Mutex<T>(parking_lot::Mutex<T>);

/// A guard produced by locking [`Mutex`].
///
/// This is just a wrapper around a [`parking_lot::MutexGuard`].
pub struct MutexGuard<'a, T>(parking_lot::MutexGuard<'a, T>);
impl<T> Mutex<T> {
#[inline]
pub fn new(_rank: super::rank::LockRank, value: T) -> Mutex<T> {
Mutex(parking_lot::Mutex::new(value))
}
#[inline]
pub fn lock(&self) -> MutexGuard<T> {
MutexGuard(self.0.lock())
}
}
impl<'a, T> std::ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // Reach through the parking_lot guard to the protected value.
        &*self.0
    }
}

impl<'a, T> std::ops::DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.0
    }
}

impl<T: std::fmt::Debug> std::fmt::Debug for Mutex<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to the inner mutex's `Debug` output.
        std::fmt::Debug::fmt(&self.0, f)
    }
}

View File

@ -5,8 +5,8 @@ use std::{
};
use once_cell::sync::OnceCell;
use parking_lot::Mutex;
use crate::lock::{rank, Mutex};
use crate::{PreHashedKey, PreHashedMap};
type SlotInner<V> = Weak<V>;
@ -22,7 +22,7 @@ pub struct ResourcePool<K, V> {
impl<K: Clone + Eq + Hash, V> ResourcePool<K, V> {
pub fn new() -> Self {
Self {
inner: Mutex::new(HashMap::default()),
inner: Mutex::new(rank::RESOURCE_POOL_INNER, HashMap::default()),
}
}

View File

@ -21,13 +21,14 @@ use crate::{
hal_api::HalApi,
hal_label, id,
init_tracker::TextureInitTracker,
lock::{rank, Mutex},
resource::{self, ResourceInfo},
snatch::Snatchable,
track,
};
use hal::{Queue as _, Surface as _};
use parking_lot::{Mutex, RwLock};
use parking_lot::RwLock;
use thiserror::Error;
use wgt::SurfaceStatus as Status;
@ -227,8 +228,8 @@ impl Global {
clear_mode: RwLock::new(resource::TextureClearMode::Surface {
clear_view: Some(clear_view),
}),
views: Mutex::new(Vec::new()),
bind_groups: Mutex::new(Vec::new()),
views: Mutex::new(rank::TEXTURE_VIEWS, Vec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, Vec::new()),
};
let (id, resource) = fid.assign(Arc::new(texture));

View File

@ -13,6 +13,7 @@ use crate::{
TextureViewId,
},
init_tracker::{BufferInitTracker, TextureInitTracker},
lock::Mutex,
resource, resource_log,
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
@ -21,7 +22,7 @@ use crate::{
};
use hal::CommandEncoder;
use parking_lot::{Mutex, RwLock};
use parking_lot::RwLock;
use smallvec::SmallVec;
use thiserror::Error;
use wgt::WasmNotSendSync;

View File

@ -11,6 +11,7 @@ use super::{PendingTransition, ResourceTracker, TrackerIndex};
use crate::{
hal_api::HalApi,
id::BufferId,
lock::{rank, Mutex},
resource::{Buffer, Resource},
snatch::SnatchGuard,
storage::Storage,
@ -20,7 +21,6 @@ use crate::{
},
};
use hal::{BufferBarrier, BufferUses};
use parking_lot::Mutex;
use wgt::{strict_assert, strict_assert_eq};
impl ResourceUses for BufferUses {
@ -51,7 +51,7 @@ pub(crate) struct BufferBindGroupState<A: HalApi> {
impl<A: HalApi> BufferBindGroupState<A> {
pub fn new() -> Self {
Self {
buffers: Mutex::new(Vec::new()),
buffers: Mutex::new(rank::BUFFER_BIND_GROUP_STATE_BUFFERS, Vec::new()),
_phantom: PhantomData,
}

View File

@ -102,10 +102,15 @@ mod stateless;
mod texture;
use crate::{
binding_model, command, conv, hal_api::HalApi, id, pipeline, resource, snatch::SnatchGuard,
binding_model, command, conv,
hal_api::HalApi,
id,
lock::{rank, Mutex},
pipeline, resource,
snatch::SnatchGuard,
};
use parking_lot::{Mutex, RwLock};
use parking_lot::RwLock;
use std::{fmt, ops, sync::Arc};
use thiserror::Error;
@ -191,7 +196,10 @@ pub(crate) struct SharedTrackerIndexAllocator {
impl SharedTrackerIndexAllocator {
pub fn new() -> Self {
SharedTrackerIndexAllocator {
inner: Mutex::new(TrackerIndexAllocator::new()),
inner: Mutex::new(
rank::SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
TrackerIndexAllocator::new(),
),
}
}

View File

@ -6,9 +6,14 @@
use std::sync::Arc;
use parking_lot::Mutex;
use crate::{id::Id, resource::Resource, resource_log, storage::Storage, track::ResourceMetadata};
use crate::{
id::Id,
lock::{rank, Mutex},
resource::Resource,
resource_log,
storage::Storage,
track::ResourceMetadata,
};
use super::{ResourceTracker, TrackerIndex};
@ -24,7 +29,7 @@ pub(crate) struct StatelessBindGroupSate<T: Resource> {
impl<T: Resource> StatelessBindGroupSate<T> {
pub fn new() -> Self {
Self {
resources: Mutex::new(Vec::new()),
resources: Mutex::new(rank::STATELESS_BIND_GROUP_STATE_RESOURCES, Vec::new()),
}
}

View File

@ -24,6 +24,7 @@ use super::{
};
use crate::{
hal_api::HalApi,
lock::{rank, Mutex},
resource::{Resource, Texture, TextureInner},
snatch::SnatchGuard,
track::{
@ -36,7 +37,6 @@ use hal::TextureUses;
use arrayvec::ArrayVec;
use naga::FastHashMap;
use parking_lot::Mutex;
use wgt::{strict_assert, strict_assert_eq};
use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain};
@ -164,7 +164,7 @@ pub(crate) struct TextureBindGroupState<A: HalApi> {
impl<A: HalApi> TextureBindGroupState<A> {
pub fn new() -> Self {
Self {
textures: Mutex::new(Vec::new()),
textures: Mutex::new(rank::TEXTURE_BIND_GROUP_STATE_TEXTURES, Vec::new()),
}
}