Move `LifetimeTracker` into the `Queue`

The `Device` should not contain any `Arc`s to resources, as that would create reference cycles (since all resources hold strong references back to the `Device`).
Note that `LifetimeTracker` internally has `Arc`s to resources.
This commit is contained in:
teoxoy 2024-10-15 12:41:29 +02:00 committed by Teodor Tanasoaia
parent e934595bb2
commit 6cc53421bf
5 changed files with 73 additions and 63 deletions

View File

@ -219,9 +219,11 @@ impl Global {
device.check_is_valid()?; device.check_is_valid()?;
buffer.check_usage(wgt::BufferUsages::MAP_WRITE)?; buffer.check_usage(wgt::BufferUsages::MAP_WRITE)?;
let last_submission = device let last_submission = device.get_queue().and_then(|queue| {
.lock_life() queue
.get_buffer_latest_submission_index(&buffer); .lock_life()
.get_buffer_latest_submission_index(&buffer)
});
if let Some(last_submission) = last_submission { if let Some(last_submission) = last_submission {
device.wait_for_submit(last_submission)?; device.wait_for_submit(last_submission)?;

View File

@ -14,7 +14,7 @@ use crate::{
hal_label, hal_label,
id::{self, QueueId}, id::{self, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
lock::{rank, Mutex, RwLockWriteGuard}, lock::{rank, Mutex, MutexGuard, RwLockWriteGuard},
resource::{ resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError, Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled, DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
@ -37,12 +37,13 @@ use std::{
}; };
use thiserror::Error; use thiserror::Error;
use super::Device; use super::{life::LifetimeTracker, Device};
pub struct Queue { pub struct Queue {
raw: ManuallyDrop<Box<dyn hal::DynQueue>>, raw: ManuallyDrop<Box<dyn hal::DynQueue>>,
pub(crate) device: Arc<Device>, pub(crate) device: Arc<Device>,
pub(crate) pending_writes: Mutex<ManuallyDrop<PendingWrites>>, pub(crate) pending_writes: Mutex<ManuallyDrop<PendingWrites>>,
life_tracker: Mutex<LifetimeTracker>,
} }
impl Queue { impl Queue {
@ -94,12 +95,18 @@ impl Queue {
raw: ManuallyDrop::new(raw), raw: ManuallyDrop::new(raw),
device, device,
pending_writes, pending_writes,
life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
}) })
} }
pub(crate) fn raw(&self) -> &dyn hal::DynQueue { pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
self.raw.as_ref() self.raw.as_ref()
} }
#[track_caller]
pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
self.life_tracker.lock()
}
} }
crate::impl_resource_type!(Queue); crate::impl_resource_type!(Queue);
@ -1292,7 +1299,7 @@ impl Queue {
profiling::scope!("cleanup"); profiling::scope!("cleanup");
// this will register the new submission to the life time tracker // this will register the new submission to the life time tracker
self.device.lock_life().track_submission( self.lock_life().track_submission(
submit_index, submit_index,
pending_writes.temp_resources.drain(..), pending_writes.temp_resources.drain(..),
active_executions, active_executions,
@ -1341,7 +1348,7 @@ impl Queue {
) -> Option<SubmissionIndex> { ) -> Option<SubmissionIndex> {
api_log!("Queue::on_submitted_work_done"); api_log!("Queue::on_submitted_work_done");
//TODO: flush pending writes //TODO: flush pending writes
self.device.lock_life().add_work_done_closure(closure) self.lock_life().add_work_done_closure(closure)
} }
} }

View File

@ -4,10 +4,9 @@ use crate::{
binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError}, binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
command, conv, command, conv,
device::{ device::{
bgl, create_validator, bgl, create_validator, life::WaitIdleError, map_buffer, AttachmentData,
life::{LifetimeTracker, WaitIdleError}, DeviceLostInvocation, HostMap, MissingDownlevelFlags, MissingFeatures, RenderPassContext,
map_buffer, AttachmentData, DeviceLostInvocation, HostMap, MissingDownlevelFlags, CLEANUP_WAIT_MS,
MissingFeatures, RenderPassContext, CLEANUP_WAIT_MS,
}, },
hal_label, hal_label,
init_tracker::{ init_tracker::{
@ -15,7 +14,7 @@ use crate::{
TextureInitTrackerAction, TextureInitTrackerAction,
}, },
instance::Adapter, instance::Adapter,
lock::{rank, Mutex, MutexGuard, RwLock}, lock::{rank, Mutex, RwLock},
pipeline, pipeline,
pool::ResourcePool, pool::ResourcePool,
resource::{ resource::{
@ -114,7 +113,6 @@ pub struct Device {
/// Stores the state of buffers and textures. /// Stores the state of buffers and textures.
pub(crate) trackers: Mutex<DeviceTracker>, pub(crate) trackers: Mutex<DeviceTracker>,
pub(crate) tracker_indices: TrackerIndexAllocators, pub(crate) tracker_indices: TrackerIndexAllocators,
life_tracker: Mutex<LifetimeTracker>,
/// Pool of bind group layouts, allowing deduplication. /// Pool of bind group layouts, allowing deduplication.
pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>, pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>,
pub(crate) alignments: hal::Alignments, pub(crate) alignments: hal::Alignments,
@ -266,7 +264,6 @@ impl Device {
device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None), device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None),
trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()), trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()),
tracker_indices: TrackerIndexAllocators::new(), tracker_indices: TrackerIndexAllocators::new(),
life_tracker: Mutex::new(rank::DEVICE_LIFE_TRACKER, LifetimeTracker::new()),
bgl_pool: ResourcePool::new(), bgl_pool: ResourcePool::new(),
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
trace: Mutex::new( trace: Mutex::new(
@ -332,11 +329,6 @@ impl Device {
assert!(self.queue_to_drop.set(queue).is_ok()); assert!(self.queue_to_drop.set(queue).is_ok());
} }
#[track_caller]
pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
self.life_tracker.lock()
}
/// Run some destroy operations that were deferred. /// Run some destroy operations that were deferred.
/// ///
/// Destroying the resources requires taking a write lock on the device's snatch lock, /// Destroying the resources requires taking a write lock on the device's snatch lock,
@ -449,13 +441,20 @@ impl Device {
.map_err(|e| self.handle_hal_error(e))?; .map_err(|e| self.handle_hal_error(e))?;
} }
let mut life_tracker = self.lock_life(); let (submission_closures, mapping_closures, queue_empty) =
let submission_closures = if let Some(queue) = self.get_queue() {
life_tracker.triage_submissions(submission_index, &self.command_allocator); let mut life_tracker = queue.lock_life();
let submission_closures =
life_tracker.triage_submissions(submission_index, &self.command_allocator);
let mapping_closures = life_tracker.handle_mapping(self.raw(), &snatch_guard); let mapping_closures = life_tracker.handle_mapping(self.raw(), &snatch_guard);
let queue_empty = life_tracker.queue_empty(); let queue_empty = life_tracker.queue_empty();
(submission_closures, mapping_closures, queue_empty)
} else {
(SmallVec::new(), Vec::new(), true)
};
// Detect if we have been destroyed and now need to lose the device. // Detect if we have been destroyed and now need to lose the device.
// If we are invalid (set at start of destroy) and our queue is empty, // If we are invalid (set at start of destroy) and our queue is empty,
@ -481,7 +480,6 @@ impl Device {
} }
// Don't hold the locks while calling release_gpu_resources. // Don't hold the locks while calling release_gpu_resources.
drop(life_tracker);
drop(fence); drop(fence);
drop(snatch_guard); drop(snatch_guard);
@ -3562,13 +3560,15 @@ impl Device {
unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) } unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) }
.map_err(|e| self.handle_hal_error(e))?; .map_err(|e| self.handle_hal_error(e))?;
drop(fence); drop(fence);
let closures = self if let Some(queue) = self.get_queue() {
.lock_life() let closures = queue
.triage_submissions(submission_index, &self.command_allocator); .lock_life()
assert!( .triage_submissions(submission_index, &self.command_allocator);
closures.is_empty(), assert!(
"wait_for_submit is not expected to work with closures" closures.is_empty(),
); "wait_for_submit is not expected to work with closures"
);
}
} }
Ok(()) Ok(())
} }

View File

@ -118,9 +118,9 @@ define_lock_ranks! {
rank QUEUE_PENDING_WRITES "Queue::pending_writes" followed by { rank QUEUE_PENDING_WRITES "Queue::pending_writes" followed by {
COMMAND_ALLOCATOR_FREE_ENCODERS, COMMAND_ALLOCATOR_FREE_ENCODERS,
SHARED_TRACKER_INDEX_ALLOCATOR_INNER, SHARED_TRACKER_INDEX_ALLOCATOR_INNER,
DEVICE_LIFE_TRACKER, QUEUE_LIFE_TRACKER,
} }
rank DEVICE_LIFE_TRACKER "Device::life_tracker" followed by { rank QUEUE_LIFE_TRACKER "Queue::life_tracker" followed by {
COMMAND_ALLOCATOR_FREE_ENCODERS, COMMAND_ALLOCATOR_FREE_ENCODERS,
DEVICE_TRACE, DEVICE_TRACE,
} }

View File

@ -634,7 +634,12 @@ impl Buffer {
.buffers .buffers
.set_single(self, internal_use); .set_single(self, internal_use);
let submit_index = device.lock_life().map(self).unwrap_or(0); // '0' means no wait is necessary let submit_index = if let Some(queue) = device.get_queue() {
queue.lock_life().map(self).unwrap_or(0) // '0' means no wait is necessary
} else {
// TODO: map immediately
0
};
Ok(submit_index) Ok(submit_index)
} }
@ -783,16 +788,15 @@ impl Buffer {
let mut pending_writes = queue.pending_writes.lock(); let mut pending_writes = queue.pending_writes.lock();
if pending_writes.contains_buffer(self) { if pending_writes.contains_buffer(self) {
pending_writes.consume_temp(temp); pending_writes.consume_temp(temp);
return Ok(()); } else {
let mut life_lock = queue.lock_life();
let last_submit_index = life_lock.get_buffer_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
} }
} }
let mut life_lock = device.lock_life();
let last_submit_index = life_lock.get_buffer_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
Ok(()) Ok(())
} }
} }
@ -1252,16 +1256,15 @@ impl Texture {
let mut pending_writes = queue.pending_writes.lock(); let mut pending_writes = queue.pending_writes.lock();
if pending_writes.contains_texture(self) { if pending_writes.contains_texture(self) {
pending_writes.consume_temp(temp); pending_writes.consume_temp(temp);
return Ok(()); } else {
let mut life_lock = queue.lock_life();
let last_submit_index = life_lock.get_texture_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
} }
} }
let mut life_lock = device.lock_life();
let last_submit_index = life_lock.get_texture_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
Ok(()) Ok(())
} }
} }
@ -1971,16 +1974,15 @@ impl Blas {
let mut pending_writes = queue.pending_writes.lock(); let mut pending_writes = queue.pending_writes.lock();
if pending_writes.contains_blas(self) { if pending_writes.contains_blas(self) {
pending_writes.consume_temp(temp); pending_writes.consume_temp(temp);
return Ok(()); } else {
let mut life_lock = queue.lock_life();
let last_submit_index = life_lock.get_blas_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
} }
} }
let mut life_lock = device.lock_life();
let last_submit_index = life_lock.get_blas_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
Ok(()) Ok(())
} }
} }
@ -2061,16 +2063,15 @@ impl Tlas {
let mut pending_writes = queue.pending_writes.lock(); let mut pending_writes = queue.pending_writes.lock();
if pending_writes.contains_tlas(self) { if pending_writes.contains_tlas(self) {
pending_writes.consume_temp(temp); pending_writes.consume_temp(temp);
return Ok(()); } else {
let mut life_lock = queue.lock_life();
let last_submit_index = life_lock.get_tlas_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
} }
} }
let mut life_lock = device.lock_life();
let last_submit_index = life_lock.get_tlas_latest_submission_index(self);
if let Some(last_submit_index) = last_submit_index {
life_lock.schedule_resource_destruction(temp, last_submit_index);
}
Ok(()) Ok(())
} }
} }