remove the Mutex around StagingBuffer's internal buffer

This commit is contained in:
teoxoy 2024-07-11 13:40:29 +02:00 committed by Teodor Tanasoaia
parent a0c185a28c
commit 4d285d8b61
3 changed files with 27 additions and 36 deletions

View File

@@ -14,7 +14,7 @@ use crate::{
 hal_label,
 id::{self, QueueId},
 init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
-lock::{rank, Mutex, RwLockWriteGuard},
+lock::RwLockWriteGuard,
 resource::{
 Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
 DestroyedTexture, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
@@ -29,7 +29,8 @@ use hal::{CommandEncoder as _, Device as _, Queue as _};
 use smallvec::SmallVec;
 use std::{
-iter, mem,
+iter,
+mem::{self, ManuallyDrop},
 ptr::{self, NonNull},
 sync::{atomic::Ordering, Arc},
 };
@@ -329,7 +330,7 @@ pub(crate) fn prepare_staging_buffer<A: HalApi>(
 let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size.get()) }?;
 let staging_buffer = StagingBuffer {
-raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
+raw: ManuallyDrop::new(buffer),
 device: device.clone(),
 size,
 is_coherent: mapping.is_coherent,
@@ -341,14 +342,9 @@
 impl<A: HalApi> StagingBuffer<A> {
 unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> {
 if !self.is_coherent {
-unsafe {
-device.flush_mapped_ranges(
-self.raw.lock().as_ref().unwrap(),
-iter::once(0..self.size.get()),
-)
-};
+unsafe { device.flush_mapped_ranges(self.raw(), iter::once(0..self.size.get())) };
 }
-unsafe { device.unmap_buffer(self.raw.lock().as_ref().unwrap())? };
+unsafe { device.unmap_buffer(self.raw())? };
 Ok(())
 }
 }
@@ -630,20 +626,15 @@ impl Global {
 dst_offset: buffer_offset,
 size: staging_buffer.size,
 };
-let inner_buffer = staging_buffer.raw.lock();
 let barriers = iter::once(hal::BufferBarrier {
-buffer: inner_buffer.as_ref().unwrap(),
+buffer: staging_buffer.raw(),
 usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
 })
 .chain(transition.map(|pending| pending.into_hal(&dst, &snatch_guard)));
 let encoder = pending_writes.activate();
 unsafe {
 encoder.transition_buffers(barriers);
-encoder.copy_buffer_to_buffer(
-inner_buffer.as_ref().unwrap(),
-dst_raw,
-iter::once(region),
-);
+encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, iter::once(region));
 }
 pending_writes.insert_buffer(&dst);
@@ -890,9 +881,8 @@ impl Global {
 });
 {
-let inner_buffer = staging_buffer.raw.lock();
 let barrier = hal::BufferBarrier {
-buffer: inner_buffer.as_ref().unwrap(),
+buffer: staging_buffer.raw(),
 usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
 };
@@ -904,7 +894,7 @@
 unsafe {
 encoder.transition_textures(transition.map(|pending| pending.into_hal(dst_raw)));
 encoder.transition_buffers(iter::once(barrier));
-encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions);
+encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, regions);
 }
 }

View File

@@ -143,7 +143,6 @@ define_lock_ranks! {
 rank RENDER_BUNDLE_SCOPE_QUERY_SETS "RenderBundleScope::query_sets" followed by { }
 rank RESOURCE_POOL_INNER "ResourcePool::inner" followed by { }
 rank SHARED_TRACKER_INDEX_ALLOCATOR_INNER "SharedTrackerIndexAllocator::inner" followed by { }
-rank STAGING_BUFFER_RAW "StagingBuffer::raw" followed by { }
 rank STATELESS_BIND_GROUP_STATE_RESOURCES "StatelessBindGroupState::resources" followed by { }
 rank SURFACE_PRESENTATION "Surface::presentation" followed by { }
 rank TEXTURE_BIND_GROUPS "Texture::bind_groups" followed by { }

View File

@@ -24,7 +24,8 @@ use thiserror::Error;
 use std::{
 borrow::Borrow,
 fmt::Debug,
-iter, mem,
+iter,
+mem::{self, ManuallyDrop},
 ops::Range,
 ptr::NonNull,
 sync::{
@@ -668,13 +669,11 @@ impl<A: HalApi> Buffer<A> {
 }
 let _ = ptr;
-let raw_staging_buffer_guard = staging_buffer.raw.lock();
-let raw_staging_buffer = raw_staging_buffer_guard.as_ref().unwrap();
 if !staging_buffer.is_coherent {
 unsafe {
 device
 .raw()
-.flush_mapped_ranges(raw_staging_buffer, iter::once(0..self.size));
+.flush_mapped_ranges(staging_buffer.raw(), iter::once(0..self.size));
 }
 }
@@ -685,7 +684,7 @@ impl<A: HalApi> Buffer<A> {
 size,
 });
 let transition_src = hal::BufferBarrier {
-buffer: raw_staging_buffer,
+buffer: staging_buffer.raw(),
 usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
 };
 let transition_dst = hal::BufferBarrier {
@@ -701,13 +700,12 @@
 );
 if self.size > 0 {
 encoder.copy_buffer_to_buffer(
-raw_staging_buffer,
+staging_buffer.raw(),
 raw_buf,
 region.into_iter(),
 );
 }
 }
-drop(raw_staging_buffer_guard);
 pending_writes.consume_temp(queue::TempResource::StagingBuffer(staging_buffer));
 pending_writes.insert_buffer(self);
 }
@@ -865,21 +863,25 @@
 /// [`Device::pending_writes`]: crate::device::Device
 #[derive(Debug)]
 pub struct StagingBuffer<A: HalApi> {
-pub(crate) raw: Mutex<Option<A::Buffer>>,
+pub(crate) raw: ManuallyDrop<A::Buffer>,
 pub(crate) device: Arc<Device<A>>,
 pub(crate) size: wgt::BufferSize,
 pub(crate) is_coherent: bool,
 }
+impl<A: HalApi> StagingBuffer<A> {
+pub(crate) fn raw(&self) -> &A::Buffer {
+&self.raw
+}
+}
 impl<A: HalApi> Drop for StagingBuffer<A> {
 fn drop(&mut self) {
-if let Some(raw) = self.raw.lock().take() {
-resource_log!("Destroy raw {}", self.error_ident());
-unsafe {
-use hal::Device;
-self.device.raw().destroy_buffer(raw);
-}
-}
+use hal::Device;
+resource_log!("Destroy raw {}", self.error_ident());
+// SAFETY: We are in the Drop impl and we don't use self.raw anymore after this point.
+let raw = unsafe { ManuallyDrop::take(&mut self.raw) };
+unsafe { self.device.raw().destroy_buffer(raw) };
 }
 }