Mirror of https://github.com/gfx-rs/wgpu.git (synced 2025-02-16 08:53:20 +00:00)
[core] Ensure all lock guards are properly nested.
The lock analyzers in the `wgpu_core::lock` module can be a bit simpler if they can assume that locks are acquired and released in a stack-like order: that a guard is only dropped when it is the most recently acquired lock guard still held. So:

- Change `Device::maintain` to take a `RwLockReadGuard` for the device's hal fence, rather than just a reference to it.
- Adjust the order in which guards are dropped in `Device::maintain` and `Queue::submit`.

Fixes #5610.
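To see the discipline in miniature, here is a sketch using plain `std::sync::RwLock` rather than wgpu's instrumented `wgpu_core::lock` wrappers; the names are illustrative stand-ins, not wgpu's:

// Sketch only: std locks standing in for wgpu_core::lock's analyzed types.
use std::sync::RwLock;

fn main() {
    let snatchable = RwLock::new(()); // stands in for device.snatchable_lock
    let fence = RwLock::new(Some(0u64)); // stands in for device.fence

    // Acquire in one order...
    let snatch_guard = snatchable.read().unwrap();
    let fence_guard = fence.read().unwrap();

    // ...and release in exactly the reverse order, so the guard being
    // dropped is always the most recently acquired one still held.
    drop(fence_guard);
    drop(snatch_guard);
}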
Parent: 7c842a3b48
Commit: 4a07ab2b57
@@ -2033,7 +2033,6 @@ impl Global {
         // Wait for all work to finish before configuring the surface.
         let snatch_guard = device.snatchable_lock.read();
         let fence = device.fence.read();
-        let fence = fence.as_ref().unwrap();
         match device.maintain(fence, wgt::Maintain::Wait, snatch_guard) {
             Ok((closures, _)) => {
                 user_callbacks = closures;
@@ -2146,7 +2145,6 @@ impl Global {
     ) -> Result<DevicePoll, WaitIdleError> {
         let snatch_guard = device.snatchable_lock.read();
         let fence = device.fence.read();
-        let fence = fence.as_ref().unwrap();
         let (closures, queue_empty) = device.maintain(fence, maintain, snatch_guard)?;

         // Some deferred destroys are scheduled in maintain so run this right after
@@ -14,7 +14,7 @@ use crate::{
     hal_label,
     id::{self, DeviceId, QueueId},
     init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
-    lock::{rank, Mutex},
+    lock::{rank, Mutex, RwLockWriteGuard},
     resource::{
         Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
         ResourceInfo, ResourceType, StagingBuffer, Texture, TextureInner,
@@ -1162,8 +1162,8 @@ impl Global {
         let snatch_guard = device.snatchable_lock.read();

         // Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
-        let mut fence = device.fence.write();
-        let fence = fence.as_mut().unwrap();
+        let mut fence_guard = device.fence.write();
+        let fence = fence_guard.as_mut().unwrap();
         let submit_index = device
             .active_submission_index
             .fetch_add(1, Ordering::Relaxed)
@@ -1459,8 +1459,8 @@ impl Global {
             }
         }

-        let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
+        let mut pending_writes_guard = device.pending_writes.lock();
+        let pending_writes = pending_writes_guard.as_mut().unwrap();

         {
             used_surface_textures.set_size(hub.textures.read().len());
@@ -1550,18 +1550,22 @@ impl Global {
             active_executions,
         );

-        // This will schedule destruction of all resources that are no longer needed
-        // by the user but used in the command stream, among other things.
-        let (closures, _) = match device.maintain(fence, wgt::Maintain::Poll, snatch_guard) {
-            Ok(closures) => closures,
-            Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
-            Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
-            Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
-        };
-
         // pending_write_resources has been drained, so it's empty, but we
         // want to retain its heap allocation.
         pending_writes.temp_resources = pending_write_resources;
+        drop(pending_writes_guard);
+
+        // This will schedule destruction of all resources that are no longer needed
+        // by the user but used in the command stream, among other things.
+        let fence_guard = RwLockWriteGuard::downgrade(fence_guard);
+        let (closures, _) =
+            match device.maintain(fence_guard, wgt::Maintain::Poll, snatch_guard) {
+                Ok(closures) => closures,
+                Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
+                Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
+                Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
+            };

         device.lock_life().post_submit();

         (submit_index, closures)
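A note on the `downgrade` above: it converts the exclusive write guard into a read guard without ever releasing the lock, so the fence stays continuously held while `maintain` receives the read guard it now expects. A sketch of the same pattern using `parking_lot`, whose `RwLockWriteGuard::downgrade` API wgpu's lock wrapper mirrors; the `Fence` type here is a stand-in, not wgpu's hal fence:

// Sketch with parking_lot; `Fence` and `maintain` are stand-ins.
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};

struct Fence(u64);

fn maintain(fence_guard: RwLockReadGuard<'_, Option<Fence>>) {
    // The callee owns the guard and unwraps the fence itself...
    let fence = fence_guard.as_ref().unwrap();
    let _ = fence.0; // ...polls it...
    // ...and drops the guard at the point the lock analyzer expects.
}

fn main() {
    let lock = RwLock::new(Some(Fence(0)));
    let mut write_guard = lock.write();
    write_guard.as_mut().unwrap().0 += 1; // exclusive access during submission
    // Atomically turn the write guard into a read guard; the lock is never
    // released in between, and the guard nesting is preserved.
    let read_guard = RwLockWriteGuard::downgrade(write_guard);
    maintain(read_guard);
}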
@@ -397,11 +397,12 @@ impl<A: HalApi> Device<A> {
     /// return it to our callers.)
     pub(crate) fn maintain<'this>(
         &'this self,
-        fence: &A::Fence,
+        fence_guard: crate::lock::RwLockReadGuard<Option<A::Fence>>,
         maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
         snatch_guard: SnatchGuard,
     ) -> Result<(UserClosures, bool), WaitIdleError> {
         profiling::scope!("Device::maintain");
+        let fence = fence_guard.as_ref().unwrap();
         let last_done_index = if maintain.is_wait() {
             let index_to_wait_for = match maintain {
                 wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
@@ -481,6 +482,7 @@ impl<A: HalApi> Device<A> {

         // Don't hold the locks while calling release_gpu_resources.
         drop(life_tracker);
+        drop(fence_guard);
         drop(snatch_guard);

         if should_release_gpu_resource {
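Taking the guard by value is what makes the new `drop(fence_guard)` possible: `maintain` now owns the guard, so it can release it between `life_tracker` and `snatch_guard`, reversing the acquisition order before doing work that must hold no locks. A toy version of that shape, with stand-in names only:

// Toy illustration of a callee that owns its guards and releases them in
// reverse acquisition order before doing lock-free cleanup work.
use std::sync::{RwLock, RwLockReadGuard};

fn maintain(
    fence_guard: RwLockReadGuard<'_, Option<u64>>,
    snatch_guard: RwLockReadGuard<'_, ()>,
) {
    let _fence = fence_guard.as_ref().unwrap();
    // ... work that needs both locks ...
    drop(fence_guard); // most recently acquired, so dropped first
    drop(snatch_guard);
    // ... e.g. releasing GPU resources, with no locks held ...
}

fn main() {
    let snatchable = RwLock::new(());
    let fence = RwLock::new(Some(0u64));
    let snatch_guard = snatchable.read().unwrap();
    let fence_guard = fence.read().unwrap(); // acquired after the snatch lock
    maintain(fence_guard, snatch_guard);
}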