Use `StagingBuffer` instead of `Buffer` for `mapped_at_creation` buffers

This commit is contained in:
teoxoy 2024-07-04 12:47:54 +02:00 committed by Teodor Tanasoaia
parent 5266bd1f08
commit fabbca294a
3 changed files with 20 additions and 35 deletions

View File

@@ -317,7 +317,7 @@ impl<A: HalApi> PendingWrites<A> {
}
}
fn prepare_staging_buffer<A: HalApi>(
pub(crate) fn prepare_staging_buffer<A: HalApi>(
device: &Arc<Device<A>>,
size: wgt::BufferAddress,
instance_flags: wgt::InstanceFlags,

View File

@@ -587,33 +587,17 @@ impl<A: HalApi> Device<A> {
};
hal::BufferUses::MAP_WRITE
} else {
// buffer needs staging area for initialization only
let stage_desc = wgt::BufferDescriptor {
label: Some(Cow::Borrowed(
"(wgpu internal) initializing unmappable buffer",
)),
size: desc.size,
usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC,
mapped_at_creation: false,
};
let stage = self.create_buffer_impl(&stage_desc, true)?;
let (staging_buffer, staging_buffer_ptr) =
queue::prepare_staging_buffer(self, desc.size, self.instance_flags)?;
let snatch_guard = self.snatchable_lock.read();
let stage_raw = stage.raw(&snatch_guard).unwrap();
let mapping = unsafe { self.raw().map_buffer(stage_raw, 0..stage.size) }
.map_err(DeviceError::from)?;
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero initialize memory and then mark both staging and buffer as initialized
// Zero initialize memory and then mark the buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
unsafe { std::ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) };
unsafe { std::ptr::write_bytes(staging_buffer_ptr.as_ptr(), 0, buffer.size as usize) };
buffer.initialization_status.write().drain(0..buffer.size);
stage.initialization_status.write().drain(0..buffer.size);
*buffer.map_state.lock() = resource::BufferMapState::Init {
ptr: mapping.ptr,
needs_flush: !mapping.is_coherent,
stage_buffer: stage,
staging_buffer: Arc::new(staging_buffer),
ptr: staging_buffer_ptr,
};
hal::BufferUses::COPY_DST
};

View File

@@ -260,9 +260,8 @@ pub enum BufferMapAsyncStatus {
pub(crate) enum BufferMapState<A: HalApi> {
/// Mapped at creation.
Init {
staging_buffer: Arc<StagingBuffer<A>>,
ptr: NonNull<u8>,
stage_buffer: Arc<Buffer<A>>,
needs_flush: bool,
},
/// Waiting for GPU to be done before mapping
Waiting(BufferPendingMapping<A>),
@@ -657,9 +656,8 @@ impl<A: HalApi> Buffer<A> {
log::debug!("{} map state -> Idle", self.error_ident());
match mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle) {
BufferMapState::Init {
staging_buffer,
ptr,
stage_buffer,
needs_flush,
} => {
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@@ -674,12 +672,14 @@ impl<A: HalApi> Buffer<A> {
});
}
let _ = ptr;
if needs_flush {
let raw_staging_buffer_guard = staging_buffer.raw.lock();
let raw_staging_buffer = raw_staging_buffer_guard.as_ref().unwrap();
if !staging_buffer.is_coherent {
unsafe {
device.raw().flush_mapped_ranges(
stage_buffer.raw(&snatch_guard).unwrap(),
iter::once(0..self.size),
);
device
.raw()
.flush_mapped_ranges(raw_staging_buffer, iter::once(0..self.size));
}
}
@@ -690,7 +690,7 @@ impl<A: HalApi> Buffer<A> {
size,
});
let transition_src = hal::BufferBarrier {
buffer: stage_buffer.raw(&snatch_guard).unwrap(),
buffer: raw_staging_buffer,
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
};
let transition_dst = hal::BufferBarrier {
@@ -706,13 +706,14 @@ impl<A: HalApi> Buffer<A> {
);
if self.size > 0 {
encoder.copy_buffer_to_buffer(
stage_buffer.raw(&snatch_guard).unwrap(),
raw_staging_buffer,
raw_buf,
region.into_iter(),
);
}
}
pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer));
drop(raw_staging_buffer_guard);
pending_writes.consume_temp(queue::TempResource::StagingBuffer(staging_buffer));
pending_writes.insert_buffer(self);
}
BufferMapState::Idle => {