use ManuallyDrop instead of Option for PendingWrites

Authored by teoxoy on 2024-07-18 14:53:17 +02:00; committed by Erich Gubler
parent 77e45d46df
commit 9a0adefe88
4 changed files with 16 additions and 27 deletions
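
For orientation: the change turns `Device::pending_writes` from `Mutex<Option<PendingWrites<A>>>` into `Mutex<ManuallyDrop<PendingWrites<A>>>`. Since `ManuallyDrop<T>` derefs to `T`, lock sites can call methods on the value directly instead of going through `.as_mut().unwrap()`, and the value is only ever moved out once, in `Drop for Device`, where it is disposed against the raw hal device. Below is a minimal sketch of the new shape, using a hypothetical `Writes` type and `std::sync::Mutex` in place of wgpu-core's ranked `Mutex`; it is an illustration, not wgpu-core's actual API.

    use std::mem::ManuallyDrop;
    use std::sync::Mutex;

    // Hypothetical stand-in for wgpu-core's PendingWrites.
    struct Writes;
    impl Writes {
        fn deactivate(&mut self) {}
    }

    struct Device {
        // Was Mutex<Option<Writes>>; every call site needed `.as_mut().unwrap()`.
        pending_writes: Mutex<ManuallyDrop<Writes>>,
    }

    impl Device {
        fn poll(&self) {
            // The guard auto-derefs through ManuallyDrop straight to Writes,
            // mirroring `device.pending_writes.lock().deactivate()` in the diff below.
            self.pending_writes.lock().unwrap().deactivate();
        }
    }

    fn main() {
        let device = Device {
            pending_writes: Mutex::new(ManuallyDrop::new(Writes)),
        };
        device.poll();
    }

The teardown half of the pattern (taking the value back out of `ManuallyDrop` in `Drop`) is sketched further down, after the `Device` hunks in the third file.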

View File

@@ -2347,11 +2347,7 @@ impl Global {
             // need to wait for submissions or triage them. We know we were
             // just polled, so `life_tracker.free_resources` is empty.
             debug_assert!(device.lock_life().queue_empty());
-            {
-                let mut pending_writes = device.pending_writes.lock();
-                let pending_writes = pending_writes.as_mut().unwrap();
-                pending_writes.deactivate();
-            }
+            device.pending_writes.lock().deactivate();
             drop(device);
         }

View File

@@ -407,7 +407,6 @@ impl Global {
         // `device.pending_writes.consume`.
         let mut staging_buffer = StagingBuffer::new(device, data_size)?;
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         let staging_buffer = {
             profiling::scope!("copy");
@@ -418,7 +417,7 @@
         let result = self.queue_write_staging_buffer_impl(
             &queue,
             device,
-            pending_writes,
+            &mut pending_writes,
             &staging_buffer,
             buffer_id,
             buffer_offset,
@@ -478,7 +477,6 @@ impl Global {
             .ok_or_else(|| QueueWriteError::Transfer(TransferError::InvalidBufferId(buffer_id)))?;
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         // At this point, we have taken ownership of the staging_buffer from the
         // user. Platform validation requires that the staging buffer always
@@ -489,7 +487,7 @@
         let result = self.queue_write_staging_buffer_impl(
             &queue,
             device,
-            pending_writes,
+            &mut pending_writes,
             &staging_buffer,
             buffer_id,
             buffer_offset,
@@ -713,7 +711,6 @@ impl Global {
             wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64).unwrap();
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         let encoder = pending_writes.activate();
         // If the copy does not fully cover the layers, we need to initialize to
@@ -967,7 +964,7 @@ impl Global {
             extract_texture_selector(&destination.to_untagged(), &size, &dst)?;
         let mut pending_writes = device.pending_writes.lock();
-        let encoder = pending_writes.as_mut().unwrap().activate();
+        let encoder = pending_writes.activate();
         // If the copy does not fully cover the layers, we need to initialize to
         // zero *first* as we don't keep track of partial texture layer inits.
@@ -1315,8 +1312,7 @@ impl Global {
                 }
             }
-            let mut pending_writes_guard = device.pending_writes.lock();
-            let pending_writes = pending_writes_guard.as_mut().unwrap();
+            let mut pending_writes = device.pending_writes.lock();
             {
                 used_surface_textures.set_size(hub.textures.read().len());
@@ -1402,17 +1398,12 @@
             profiling::scope!("cleanup");
             // this will register the new submission to the life time tracker
-            let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources);
             device.lock_life().track_submission(
                 submit_index,
-                pending_write_resources.drain(..),
+                pending_writes.temp_resources.drain(..),
                 active_executions,
             );
-            // pending_write_resources has been drained, so it's empty, but we
-            // want to retain its heap allocation.
-            pending_writes.temp_resources = pending_write_resources;
-            drop(pending_writes_guard);
+            drop(pending_writes);
             // This will schedule destruction of all resources that are no longer needed
             // by the user but used in the command stream, among other things.
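
A note on the cleanup hunk above: the removed lines `mem::take`d `temp_resources` into a local, drained that local into `track_submission`, and then stored the emptied `Vec` back purely to keep its heap allocation (as the deleted comment says). Draining `pending_writes.temp_resources` in place achieves the same thing, because `Vec::drain` empties the vector without giving up its allocation. A quick standalone check of that property (plain std, nothing wgpu-specific; the element count is arbitrary):

    fn main() {
        let mut temp_resources: Vec<u32> = (0..1024).collect();
        let cap_before = temp_resources.capacity();

        // Hand the elements to a consumer, much as `track_submission` is
        // handed the drain iterator.
        let drained: Vec<u32> = temp_resources.drain(..).collect();
        assert_eq!(drained.len(), 1024);

        // The vector is empty afterwards, but its allocation is retained,
        // so there is no need to take it out and put it back.
        assert!(temp_resources.is_empty());
        assert_eq!(temp_resources.capacity(), cap_before);
    }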

View File

@@ -46,6 +46,7 @@ use wgt::{DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimensi
 use std::{
     borrow::Cow,
     iter,
+    mem::ManuallyDrop,
     num::NonZeroU32,
     sync::{
         atomic::{AtomicBool, AtomicU64, Ordering},
@@ -142,7 +143,7 @@ pub struct Device<A: HalApi> {
     pub(crate) features: wgt::Features,
     pub(crate) downlevel: wgt::DownlevelCapabilities,
     pub(crate) instance_flags: wgt::InstanceFlags,
-    pub(crate) pending_writes: Mutex<Option<PendingWrites<A>>>,
+    pub(crate) pending_writes: Mutex<ManuallyDrop<PendingWrites<A>>>,
     pub(crate) deferred_destroy: Mutex<Vec<DeferredDestroy<A>>>,
     #[cfg(feature = "trace")]
     pub(crate) trace: Mutex<Option<trace::Trace>>,
@@ -169,7 +170,8 @@ impl<A: HalApi> Drop for Device<A> {
     fn drop(&mut self) {
         resource_log!("Drop {}", self.error_ident());
         let raw = self.raw.take().unwrap();
-        let pending_writes = self.pending_writes.lock().take().unwrap();
+        // SAFETY: We are in the Drop impl and we don't use self.pending_writes anymore after this point.
+        let pending_writes = unsafe { ManuallyDrop::take(&mut self.pending_writes.lock()) };
         pending_writes.dispose(&raw);
         self.command_allocator.dispose(&raw);
         unsafe {
@@ -307,7 +309,10 @@ impl<A: HalApi> Device<A> {
             features: desc.required_features,
             downlevel,
             instance_flags,
-            pending_writes: Mutex::new(rank::DEVICE_PENDING_WRITES, Some(pending_writes)),
+            pending_writes: Mutex::new(
+                rank::DEVICE_PENDING_WRITES,
+                ManuallyDrop::new(pending_writes),
+            ),
             deferred_destroy: Mutex::new(rank::DEVICE_DEFERRED_DESTROY, Vec::new()),
             usage_scopes: Mutex::new(rank::DEVICE_USAGE_SCOPES, Default::default()),
         })
@@ -3611,7 +3616,7 @@ impl<A: HalApi> Device<A> {
     /// Wait for idle and remove resources that we can, before we die.
     pub(crate) fn prepare_to_die(&self) {
-        self.pending_writes.lock().as_mut().unwrap().deactivate();
+        self.pending_writes.lock().deactivate();
         let current_index = self
             .last_successful_submission_index
             .load(Ordering::Acquire);
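
The hunk at `@@ -169,7 +170,8 @@` above is the one place the value now leaves its slot: `ManuallyDrop::take` moves `PendingWrites` out inside `Drop for Device` so it can be `dispose`d against the raw device, and `self.pending_writes` is never touched again, which is what the new SAFETY comment records. A minimal sketch of that teardown shape, again with hypothetical `Writes`/`RawDevice` types and `std::sync::Mutex` rather than wgpu-core's real API:

    use std::mem::ManuallyDrop;
    use std::sync::Mutex;

    struct RawDevice;

    struct Writes;
    impl Writes {
        // Must be torn down with the raw device; an implicit `drop` would be
        // wrong, which is the invariant ManuallyDrop makes explicit.
        fn dispose(self, _raw: &RawDevice) {}
    }

    struct Device {
        raw: RawDevice,
        pending_writes: Mutex<ManuallyDrop<Writes>>,
    }

    impl Drop for Device {
        fn drop(&mut self) {
            // SAFETY: we are in Drop and `self.pending_writes` is not used
            // after this point, so the value is taken out exactly once.
            let writes =
                unsafe { ManuallyDrop::take(&mut self.pending_writes.lock().unwrap()) };
            writes.dispose(&self.raw);
        }
    }

    fn main() {
        drop(Device {
            raw: RawDevice,
            pending_writes: Mutex::new(ManuallyDrop::new(Writes)),
        });
    }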

View File

@@ -657,7 +657,6 @@ impl<A: HalApi> Buffer<A> {
         }
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         let staging_buffer = staging_buffer.flush();
@@ -746,7 +745,6 @@ impl<A: HalApi> Buffer<A> {
         };
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         if pending_writes.contains_buffer(self) {
             pending_writes.consume_temp(temp);
         } else {
@@ -1210,7 +1208,6 @@ impl<A: HalApi> Texture<A> {
         };
         let mut pending_writes = device.pending_writes.lock();
-        let pending_writes = pending_writes.as_mut().unwrap();
         if pending_writes.contains_texture(self) {
             pending_writes.consume_temp(temp);
         } else {