mirror of https://github.com/gfx-rs/wgpu.git (synced 2024-11-21 22:33:49 +00:00)
move LifetimeTracker into the Queue
The `Device` should not contain any `Arc`s to resources as that creates cycles (since all resources hold strong references to the `Device`). Note that `LifetimeTracker` internally has `Arc`s to resources.
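A minimal sketch of the ownership issue, using simplified hypothetical types rather than wgpu-core's real definitions: a tracker full of resource `Arc`s stored on the `Device` closes a strong cycle (resource → `Device` → tracker → resource), whereas storing it on the `Queue`, which the `Device` only reaches through a weak handle, leaves no strong cycle.

use std::sync::{Arc, Mutex, Weak};

// Hypothetical, simplified stand-ins for the wgpu-core types; field names and
// shapes are illustrative only.
struct Device {
    // Previously a `life_tracker: Mutex<LifetimeTracker>` lived here, so
    // Device -> LifetimeTracker -> Arc<Buffer> -> Arc<Device> formed a strong
    // reference cycle and none of it could ever be dropped.
    queue: Mutex<Option<Weak<Queue>>>, // the device only holds a weak handle to its queue
}

struct Queue {
    device: Arc<Device>,
    // After this change the tracker, and the resource Arcs inside it, live on
    // the queue instead, so there is no strong cycle back into the Device.
    life_tracker: Mutex<LifetimeTracker>,
}

struct Buffer {
    device: Arc<Device>, // every resource keeps its device alive
}

#[derive(Default)]
struct LifetimeTracker {
    active: Vec<Arc<Buffer>>, // resources still referenced by in-flight work
}

impl Device {
    // Mirrors the `device.get_queue()` lookups added throughout the diff:
    // upgrade the weak handle, returning None once the queue is gone.
    fn get_queue(&self) -> Option<Arc<Queue>> {
        self.queue.lock().unwrap().as_ref()?.upgrade()
    }
}

The diff below therefore routes every former `device.lock_life()` call through `device.get_queue()`, treating a missing queue as "nothing in flight".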
parent e934595bb2
commit 6cc53421bf
@@ -219,9 +219,11 @@ impl Global {
         device.check_is_valid()?;
         buffer.check_usage(wgt::BufferUsages::MAP_WRITE)?;
 
-        let last_submission = device
+        let last_submission = device.get_queue().and_then(|queue| {
+            queue
                 .lock_life()
-                .get_buffer_latest_submission_index(&buffer);
+                .get_buffer_latest_submission_index(&buffer)
+        });
 
         if let Some(last_submission) = last_submission {
             device.wait_for_submit(last_submission)?;
@@ -14,7 +14,7 @@ use crate::{
     hal_label,
     id::{self, QueueId},
     init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
-    lock::{rank, Mutex, RwLockWriteGuard},
+    lock::{rank, Mutex, MutexGuard, RwLockWriteGuard},
     resource::{
         Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
         DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
@@ -37,12 +37,13 @@ use std::{
 };
 use thiserror::Error;
 
-use super::Device;
+use super::{life::LifetimeTracker, Device};
 
 pub struct Queue {
     raw: ManuallyDrop<Box<dyn hal::DynQueue>>,
     pub(crate) device: Arc<Device>,
     pub(crate) pending_writes: Mutex<ManuallyDrop<PendingWrites>>,
+    life_tracker: Mutex<LifetimeTracker>,
 }
 
 impl Queue {
@@ -94,12 +95,18 @@ impl Queue {
             raw: ManuallyDrop::new(raw),
             device,
             pending_writes,
+            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
         })
     }
 
     pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
         self.raw.as_ref()
     }
+
+    #[track_caller]
+    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
+        self.life_tracker.lock()
+    }
 }
 
 crate::impl_resource_type!(Queue);
@@ -1292,7 +1299,7 @@ impl Queue {
             profiling::scope!("cleanup");
 
             // this will register the new submission to the life time tracker
-            self.device.lock_life().track_submission(
+            self.lock_life().track_submission(
                 submit_index,
                 pending_writes.temp_resources.drain(..),
                 active_executions,
@@ -1341,7 +1348,7 @@ impl Queue {
     ) -> Option<SubmissionIndex> {
         api_log!("Queue::on_submitted_work_done");
         //TODO: flush pending writes
-        self.device.lock_life().add_work_done_closure(closure)
+        self.lock_life().add_work_done_closure(closure)
     }
 }
 
@@ -4,10 +4,9 @@ use crate::{
     binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
     command, conv,
     device::{
-        bgl, create_validator,
-        life::{LifetimeTracker, WaitIdleError},
-        map_buffer, AttachmentData, DeviceLostInvocation, HostMap, MissingDownlevelFlags,
-        MissingFeatures, RenderPassContext, CLEANUP_WAIT_MS,
+        bgl, create_validator, life::WaitIdleError, map_buffer, AttachmentData,
+        DeviceLostInvocation, HostMap, MissingDownlevelFlags, MissingFeatures, RenderPassContext,
+        CLEANUP_WAIT_MS,
     },
     hal_label,
     init_tracker::{
@@ -15,7 +14,7 @@ use crate::{
         TextureInitTrackerAction,
     },
     instance::Adapter,
-    lock::{rank, Mutex, MutexGuard, RwLock},
+    lock::{rank, Mutex, RwLock},
     pipeline,
     pool::ResourcePool,
     resource::{
@@ -114,7 +113,6 @@ pub struct Device {
     /// Stores the state of buffers and textures.
     pub(crate) trackers: Mutex<DeviceTracker>,
     pub(crate) tracker_indices: TrackerIndexAllocators,
-    life_tracker: Mutex<LifetimeTracker>,
     /// Pool of bind group layouts, allowing deduplication.
     pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>,
     pub(crate) alignments: hal::Alignments,
@@ -266,7 +264,6 @@ impl Device {
             device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None),
             trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()),
             tracker_indices: TrackerIndexAllocators::new(),
-            life_tracker: Mutex::new(rank::DEVICE_LIFE_TRACKER, LifetimeTracker::new()),
             bgl_pool: ResourcePool::new(),
             #[cfg(feature = "trace")]
             trace: Mutex::new(
@@ -332,11 +329,6 @@ impl Device {
         assert!(self.queue_to_drop.set(queue).is_ok());
     }
 
-    #[track_caller]
-    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
-        self.life_tracker.lock()
-    }
-
     /// Run some destroy operations that were deferred.
     ///
     /// Destroying the resources requires taking a write lock on the device's snatch lock,
@@ -449,7 +441,9 @@ impl Device {
             .map_err(|e| self.handle_hal_error(e))?;
         }
 
-        let mut life_tracker = self.lock_life();
+        let (submission_closures, mapping_closures, queue_empty) =
+            if let Some(queue) = self.get_queue() {
+                let mut life_tracker = queue.lock_life();
                 let submission_closures =
                     life_tracker.triage_submissions(submission_index, &self.command_allocator);
 
@@ -457,6 +451,11 @@ impl Device {
 
                 let queue_empty = life_tracker.queue_empty();
 
+                (submission_closures, mapping_closures, queue_empty)
+            } else {
+                (SmallVec::new(), Vec::new(), true)
+            };
+
         // Detect if we have been destroyed and now need to lose the device.
         // If we are invalid (set at start of destroy) and our queue is empty,
         // and we have a DeviceLostClosure, return the closure to be called by
@@ -481,7 +480,6 @@ impl Device {
         }
 
         // Don't hold the locks while calling release_gpu_resources.
-        drop(life_tracker);
         drop(fence);
         drop(snatch_guard);
 
@@ -3562,7 +3560,8 @@ impl Device {
             unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) }
                 .map_err(|e| self.handle_hal_error(e))?;
             drop(fence);
-            let closures = self
+            if let Some(queue) = self.get_queue() {
+                let closures = queue
                     .lock_life()
                     .triage_submissions(submission_index, &self.command_allocator);
                 assert!(
@@ -3570,6 +3569,7 @@ impl Device {
                     "wait_for_submit is not expected to work with closures"
                 );
+            }
         }
         Ok(())
     }
 
@@ -118,9 +118,9 @@ define_lock_ranks! {
     rank QUEUE_PENDING_WRITES "Queue::pending_writes" followed by {
         COMMAND_ALLOCATOR_FREE_ENCODERS,
         SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
-        DEVICE_LIFE_TRACKER,
+        QUEUE_LIFE_TRACKER,
     }
-    rank DEVICE_LIFE_TRACKER "Device::life_tracker" followed by {
+    rank QUEUE_LIFE_TRACKER "Queue::life_tracker" followed by {
         COMMAND_ALLOCATOR_FREE_ENCODERS,
         DEVICE_TRACE,
     }
@@ -634,7 +634,12 @@ impl Buffer {
             .buffers
             .set_single(self, internal_use);
 
-        let submit_index = device.lock_life().map(self).unwrap_or(0); // '0' means no wait is necessary
+        let submit_index = if let Some(queue) = device.get_queue() {
+            queue.lock_life().map(self).unwrap_or(0) // '0' means no wait is necessary
+        } else {
+            // TODO: map immediately
+            0
+        };
 
         Ok(submit_index)
     }
@@ -783,15 +788,14 @@ impl Buffer {
             let mut pending_writes = queue.pending_writes.lock();
             if pending_writes.contains_buffer(self) {
                 pending_writes.consume_temp(temp);
-                return Ok(());
-            }
-        }
-
-        let mut life_lock = device.lock_life();
+            } else {
+                let mut life_lock = queue.lock_life();
                 let last_submit_index = life_lock.get_buffer_latest_submission_index(self);
                 if let Some(last_submit_index) = last_submit_index {
                     life_lock.schedule_resource_destruction(temp, last_submit_index);
                 }
+            }
+        }
 
         Ok(())
     }
@@ -1252,15 +1256,14 @@ impl Texture {
             let mut pending_writes = queue.pending_writes.lock();
             if pending_writes.contains_texture(self) {
                 pending_writes.consume_temp(temp);
-                return Ok(());
-            }
-        }
-
-        let mut life_lock = device.lock_life();
+            } else {
+                let mut life_lock = queue.lock_life();
                 let last_submit_index = life_lock.get_texture_latest_submission_index(self);
                 if let Some(last_submit_index) = last_submit_index {
                     life_lock.schedule_resource_destruction(temp, last_submit_index);
                 }
+            }
+        }
 
         Ok(())
     }
@@ -1971,15 +1974,14 @@ impl Blas {
             let mut pending_writes = queue.pending_writes.lock();
             if pending_writes.contains_blas(self) {
                 pending_writes.consume_temp(temp);
-                return Ok(());
-            }
-        }
-
-        let mut life_lock = device.lock_life();
+            } else {
+                let mut life_lock = queue.lock_life();
                 let last_submit_index = life_lock.get_blas_latest_submission_index(self);
                 if let Some(last_submit_index) = last_submit_index {
                     life_lock.schedule_resource_destruction(temp, last_submit_index);
                 }
+            }
+        }
 
         Ok(())
     }
@@ -2061,15 +2063,14 @@ impl Tlas {
             let mut pending_writes = queue.pending_writes.lock();
             if pending_writes.contains_tlas(self) {
                 pending_writes.consume_temp(temp);
-                return Ok(());
-            }
-        }
-
-        let mut life_lock = device.lock_life();
+            } else {
+                let mut life_lock = queue.lock_life();
                 let last_submit_index = life_lock.get_tlas_latest_submission_index(self);
                 if let Some(last_submit_index) = last_submit_index {
                     life_lock.schedule_resource_destruction(temp, last_submit_index);
                 }
+            }
+        }
 
         Ok(())
     }