mirror of https://github.com/gfx-rs/wgpu.git

Handle cases where buffer contents are destroyed

parent 8059c03273
commit 0a7d81351c
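Every hunk below applies one pattern: the buffer's raw handle and its memory block move into an `Option` on the resource, explicit destruction takes the pair out early, and call sites that used to unwrap the handle now surface a "destroyed" error instead. A minimal, self-contained sketch of that pattern (the `RawBuffer`, `Memory`, and `AccessError` names are placeholders for illustration, not wgpu types):

// Stand-ins for the backend buffer handle and its memory block.
#[derive(Debug)]
struct RawBuffer;
#[derive(Debug)]
struct Memory;

#[derive(Debug, PartialEq)]
enum AccessError {
    Destroyed,
}

struct Buffer {
    // `None` once the contents have been explicitly destroyed.
    raw: Option<(RawBuffer, Memory)>,
}

impl Buffer {
    // Free the GPU-side contents early; the id and tracking data stay alive.
    fn destroy(&mut self) -> Option<(RawBuffer, Memory)> {
        self.raw.take()
    }

    // Call sites now return an error instead of unwrapping the raw handle.
    fn raw_handle(&self) -> Result<&RawBuffer, AccessError> {
        let &(ref raw, _) = self.raw.as_ref().ok_or(AccessError::Destroyed)?;
        Ok(raw)
    }
}

fn main() {
    let mut buffer = Buffer {
        raw: Some((RawBuffer, Memory)),
    };
    assert!(buffer.raw_handle().is_ok());

    let _freed = buffer.destroy(); // the (RawBuffer, Memory) pair can now be released
    assert_eq!(buffer.raw_handle().unwrap_err(), AccessError::Destroyed);
}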
@@ -139,6 +139,9 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
                 self.device_maintain_ids::<B>(device).unwrap();
                 self.device_create_buffer::<B>(device, &desc, id).unwrap();
             }
+            A::FreeBuffer(id) => {
+                self.buffer_destroy::<B>(id).unwrap();
+            }
             A::DestroyBuffer(id) => {
                 self.buffer_drop::<B>(id, true);
             }
@@ -45,7 +45,7 @@ pub enum CreateBindGroupError {
     Device(#[from] DeviceError),
     #[error("bind group layout is invalid")]
     InvalidLayout,
-    #[error("buffer {0:?} is invalid")]
+    #[error("buffer {0:?} is invalid or destroyed")]
     InvalidBuffer(BufferId),
     #[error("texture view {0:?} is invalid")]
     InvalidTextureView(TextureViewId),
@@ -125,6 +125,13 @@ pub enum CreateRenderBundleError {
     InvalidSampleCount(u32),
 }
 
+/// Error type returned from `RenderBundleEncoder::new` if the sample count is invalid.
+#[derive(Clone, Debug, Error)]
+pub enum ExecutionError {
+    #[error("buffer {0:?} is destroyed")]
+    DestroyedBuffer(id::BufferId),
+}
+
 pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
 
 //Note: here, `RenderBundle` is just wrapping a raw stream of render commands.
@@ -151,8 +158,9 @@ impl RenderBundle {
     /// However the point of this function is to be lighter, since we already had
     /// a chance to go through the commands in `render_bundle_encoder_finish`.
     ///
-    /// Note that the function isn't expected to fail.
+    /// Note that the function isn't expected to fail, generally.
     /// All the validation has already been done by this point.
+    /// The only failure condition is if some of the used buffers are destroyed.
     pub(crate) unsafe fn execute<B: GfxBackend>(
         &self,
         cmd_buf: &mut B::CommandBuffer,
@@ -163,7 +171,7 @@ impl RenderBundle {
         bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>,
         pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>,
         buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>,
-    ) {
+    ) -> Result<(), ExecutionError> {
         use hal::command::CommandBuffer as _;
 
         let mut offsets = self.base.dynamic_offsets.as_slice();
@@ -197,9 +205,14 @@ impl RenderBundle {
                     offset,
                     size,
                 } => {
-                    let buffer = buffer_guard.get(buffer_id).unwrap();
+                    let &(ref buffer, _) = buffer_guard
+                        .get(buffer_id)
+                        .unwrap()
+                        .raw
+                        .as_ref()
+                        .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
                     let view = hal::buffer::IndexBufferView {
-                        buffer: &buffer.raw,
+                        buffer,
                         range: hal::buffer::SubRange {
                             offset,
                             size: size.map(|s| s.get()),
@@ -215,12 +228,17 @@ impl RenderBundle {
                     offset,
                     size,
                 } => {
-                    let buffer = buffer_guard.get(buffer_id).unwrap();
+                    let &(ref buffer, _) = buffer_guard
+                        .get(buffer_id)
+                        .unwrap()
+                        .raw
+                        .as_ref()
+                        .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
                     let range = hal::buffer::SubRange {
                         offset,
                         size: size.map(|s| s.get()),
                     };
-                    cmd_buf.bind_vertex_buffers(slot, iter::once((&buffer.raw, range)));
+                    cmd_buf.bind_vertex_buffers(slot, iter::once((buffer, range)));
                 }
                 RenderCommand::SetPushConstant {
                     stages,
@@ -287,8 +305,13 @@ impl RenderBundle {
                     count: None,
                     indexed: false,
                 } => {
-                    let buffer = buffer_guard.get(buffer_id).unwrap();
-                    cmd_buf.draw_indirect(&buffer.raw, offset, 1, 0);
+                    let &(ref buffer, _) = buffer_guard
+                        .get(buffer_id)
+                        .unwrap()
+                        .raw
+                        .as_ref()
+                        .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+                    cmd_buf.draw_indirect(buffer, offset, 1, 0);
                 }
                 RenderCommand::MultiDrawIndirect {
                     buffer_id,
@@ -296,8 +319,13 @@ impl RenderBundle {
                     count: None,
                     indexed: true,
                 } => {
-                    let buffer = buffer_guard.get(buffer_id).unwrap();
-                    cmd_buf.draw_indexed_indirect(&buffer.raw, offset, 1, 0);
+                    let &(ref buffer, _) = buffer_guard
+                        .get(buffer_id)
+                        .unwrap()
+                        .raw
+                        .as_ref()
+                        .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+                    cmd_buf.draw_indexed_indirect(buffer, offset, 1, 0);
                 }
                 RenderCommand::MultiDrawIndirect { .. }
                 | RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@@ -311,6 +339,8 @@ impl RenderBundle {
                 | RenderCommand::SetScissor(_) => unreachable!(),
             }
         }
+
+        Ok(())
     }
 }
 
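With the hunks above, `RenderBundle::execute` returns `Result<(), ExecutionError>` instead of `()`, so the code that replays a bundle has to translate that error into its own error type; the render-pass hunks further down do exactly that with `RenderCommandError::DestroyedBuffer`. A reduced sketch of the propagation, using placeholder types rather than the real wgpu-core ones:

// Placeholder for id::BufferId.
type BufferId = u64;

#[derive(Debug)]
enum ExecutionError {
    DestroyedBuffer(BufferId),
}

#[derive(Debug)]
enum RenderCommandError {
    DestroyedBuffer(BufferId),
}

struct RenderBundle {
    // Pretend state: a buffer the bundle refers to that has been destroyed.
    destroyed_buffer: Option<BufferId>,
}

impl RenderBundle {
    // Previously returned (); now the only failure is a destroyed buffer.
    fn execute(&self) -> Result<(), ExecutionError> {
        match self.destroyed_buffer {
            Some(id) => Err(ExecutionError::DestroyedBuffer(id)),
            None => Ok(()),
        }
    }
}

// The render pass maps the bundle error into its own error space.
fn replay(bundle: &RenderBundle) -> Result<(), RenderCommandError> {
    bundle.execute().map_err(|e| match e {
        ExecutionError::DestroyedBuffer(id) => RenderCommandError::DestroyedBuffer(id),
    })
}

fn main() {
    let bundle = RenderBundle {
        destroyed_buffer: Some(7),
    };
    println!("{:?}", replay(&bundle)); // Err(DestroyedBuffer(7))
}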
@@ -121,7 +121,7 @@ pub enum ComputePassError {
     BindGroupIndexOutOfRange { index: u8, max: u32 },
     #[error("compute pipeline {0:?} is invalid")]
     InvalidPipeline(id::ComputePipelineId),
-    #[error("indirect buffer {0:?} is invalid")]
+    #[error("indirect buffer {0:?} is invalid or destroyed")]
     InvalidIndirectBuffer(id::BufferId),
     #[error(transparent)]
     ResourceUsageConflict(#[from] UsageConflict),
@@ -411,6 +411,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
                         .map_err(|_| ComputePassError::InvalidIndirectBuffer(buffer_id))?;
                     check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
+                    let &(ref buf_raw, _) = indirect_buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(ComputePassError::InvalidIndirectBuffer(buffer_id))?;
 
                     state.flush_states(
                         raw,
@@ -420,7 +424,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         &*texture_guard,
                     )?;
                     unsafe {
-                        raw.dispatch_indirect(&indirect_buffer.raw, offset);
+                        raw.dispatch_indirect(buf_raw, offset);
                     }
                 }
                 ComputeCommand::PushDebugGroup { color, len } => {
@@ -65,6 +65,8 @@ pub enum RenderCommandError {
     IncompatibleReadOnlyDepthStencil,
     #[error("buffer {0:?} is in error {1:?}")]
     Buffer(id::BufferId, BufferError),
+    #[error("buffer {0:?} is destroyed")]
+    DestroyedBuffer(id::BufferId),
     #[error(transparent)]
     MissingBufferUsage(#[from] MissingBufferUsageError),
     #[error(transparent)]
@@ -6,8 +6,8 @@ use crate::{
     binding_model::BindError,
     command::{
         bind::{Binder, LayoutChange},
-        BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, RenderCommand,
-        RenderCommandError,
+        BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, ExecutionError,
+        RenderCommand, RenderCommandError,
     },
     conv,
     device::{
@@ -197,7 +197,7 @@ impl OptionalState {
 
 #[derive(Debug, Default)]
 struct IndexState {
-    bound_buffer_view: Option<(id::BufferId, Range<BufferAddress>)>,
+    bound_buffer_view: Option<(id::Valid<id::BufferId>, Range<BufferAddress>)>,
     format: IndexFormat,
     limit: u32,
 }
@@ -1013,7 +1013,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                     let pipeline = trackers
                         .render_pipes
                         .use_extend(&*pipeline_guard, pipeline_id, (), ())
-                        .unwrap();
+                        .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))?;
 
                     context
                         .check_compatible(&pipeline.pass_context)
@@ -1101,13 +1101,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                     state.index.update_limit();
 
                     if let Some((buffer_id, ref range)) = state.index.bound_buffer_view {
-                        let buffer = trackers
-                            .buffers
-                            .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
-                            .unwrap();
+                        let &(ref buffer, _) = buffer_guard[buffer_id].raw.as_ref().unwrap();
 
                         let view = hal::buffer::IndexBufferView {
-                            buffer: &buffer.raw,
+                            buffer,
                             range: hal::buffer::SubRange {
                                 offset: range.start,
                                 size: Some(range.end - range.start),
@@ -1142,18 +1139,22 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                     let buffer = trackers
                         .buffers
                         .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
-                        .unwrap();
+                        .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
                     check_buffer_usage(buffer.usage, BufferUsage::INDEX)?;
+                    let &(ref buf_raw, _) = buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
 
                     let end = match size {
                         Some(s) => offset + s.get(),
                         None => buffer.size,
                     };
-                    state.index.bound_buffer_view = Some((buffer_id, offset..end));
+                    state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end));
                     state.index.update_limit();
 
                     let view = hal::buffer::IndexBufferView {
-                        buffer: &buffer.raw,
+                        buffer: buf_raw,
                         range: hal::buffer::SubRange {
                             offset,
                             size: Some(end - offset),
@@ -1174,8 +1175,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                     let buffer = trackers
                         .buffers
                         .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
-                        .unwrap();
+                        .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
                     check_buffer_usage(buffer.usage, BufferUsage::VERTEX)?;
+                    let &(ref buf_raw, _) = buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
+
                     let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len());
                     state
                         .vertex
@@ -1191,7 +1197,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         size: size.map(|s| s.get()),
                     };
                     unsafe {
-                        raw.bind_vertex_buffers(slot, iter::once((&buffer.raw, range)));
+                        raw.bind_vertex_buffers(slot, iter::once((buf_raw, range)));
                     }
                     state.vertex.update_limits();
                 }
@@ -1373,33 +1379,37 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT)?;
                     }
 
-                    let buffer = trackers
+                    let indirect_buffer = trackers
                         .buffers
                         .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
-                        .unwrap();
-                    check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?;
+                        .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
+                    check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
+                    let &(ref indirect_raw, _) = indirect_buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
 
                     let actual_count = count.map_or(1, |c| c.get());
 
                     let begin_offset = offset;
                     let end_offset = offset + stride * actual_count as u64;
-                    if end_offset > buffer.size {
+                    if end_offset > indirect_buffer.size {
                         return Err(RenderPassError::IndirectBufferOverrun {
                             offset,
                             count,
                             begin_offset,
                             end_offset,
-                            buffer_size: buffer.size,
+                            buffer_size: indirect_buffer.size,
                         });
                     }
 
                     match indexed {
                         false => unsafe {
-                            raw.draw_indirect(&buffer.raw, offset, actual_count, stride as u32);
+                            raw.draw_indirect(indirect_raw, offset, actual_count, stride as u32);
                         },
                         true => unsafe {
                             raw.draw_indexed_indirect(
-                                &buffer.raw,
+                                indirect_raw,
                                 offset,
                                 actual_count,
                                 stride as u32,
@@ -1427,26 +1437,35 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                             wgt::Features::MULTI_DRAW_INDIRECT_COUNT,
                         )?;
 
-                    let buffer = trackers
+                    let indirect_buffer = trackers
                         .buffers
                         .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
-                        .unwrap();
-                    check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?;
+                        .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
+                    check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
+                    let &(ref indirect_raw, _) = indirect_buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
+
                     let count_buffer = trackers
                         .buffers
                         .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT)
-                        .unwrap();
+                        .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e))?;
                     check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT)?;
+                    let &(ref count_raw, _) = count_buffer
+                        .raw
+                        .as_ref()
+                        .ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id))?;
 
                     let begin_offset = offset;
                     let end_offset = offset + stride * max_count as u64;
-                    if end_offset > buffer.size {
+                    if end_offset > indirect_buffer.size {
                         return Err(RenderPassError::IndirectBufferOverrun {
                             offset,
                             count: None,
                             begin_offset,
                             end_offset,
-                            buffer_size: buffer.size,
+                            buffer_size: indirect_buffer.size,
                         });
                     }
 
@@ -1463,9 +1482,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                     match indexed {
                         false => unsafe {
                             raw.draw_indirect_count(
-                                &buffer.raw,
+                                indirect_raw,
                                 offset,
-                                &count_buffer.raw,
+                                count_raw,
                                 count_buffer_offset,
                                 max_count,
                                 stride as u32,
@@ -1473,9 +1492,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         },
                         true => unsafe {
                             raw.draw_indexed_indirect_count(
-                                &buffer.raw,
+                                indirect_raw,
                                 offset,
-                                &count_buffer.raw,
+                                count_raw,
                                 count_buffer_offset,
                                 max_count,
                                 stride as u32,
@@ -1525,6 +1544,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                             &*buffer_guard,
                         )
                     }
+                    .map_err(|e| match e {
+                        ExecutionError::DestroyedBuffer(id) => {
+                            RenderCommandError::DestroyedBuffer(id)
+                        }
+                    })?;
 
                     trackers.merge_extend(&bundle.used)?;
                     state.reset_bundle();
@@ -35,9 +35,9 @@ pub enum CopySide {
 /// Error encountered while attempting a data transfer.
 #[derive(Clone, Debug, Error)]
 pub enum TransferError {
-    #[error("buffer {0:?} is invalid")]
+    #[error("buffer {0:?} is invalid or destroyed")]
     InvalidBuffer(BufferId),
-    #[error("texture {0:?} is invalid")]
+    #[error("texture {0:?} is invalid or destroyed")]
     InvalidTexture(TextureId),
     #[error("Source and destination cannot be the same buffer")]
     SameSourceDestinationBuffer,
@@ -330,6 +330,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .buffers
             .use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC)
             .map_err(TransferError::InvalidBuffer)?;
+        let &(ref src_raw, _) = src_buffer
+            .raw
+            .as_ref()
+            .ok_or(TransferError::InvalidBuffer(source))?;
         if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
             Err(TransferError::MissingCopySrcUsageFlag)?
         }
@@ -340,6 +344,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .buffers
             .use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
             .map_err(TransferError::InvalidBuffer)?;
+        let &(ref dst_raw, _) = dst_buffer
+            .raw
+            .as_ref()
+            .ok_or(TransferError::InvalidBuffer(destination))?;
         if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
             Err(TransferError::MissingCopyDstUsageFlag)?
         }
@@ -391,7 +399,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                 hal::memory::Dependencies::empty(),
                 barriers,
             );
-            cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region));
+            cmb_raw.copy_buffer(src_raw, dst_raw, iter::once(region));
         }
         Ok(())
     }
@@ -433,6 +441,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .buffers
             .use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC)
             .map_err(TransferError::InvalidBuffer)?;
+        let &(ref src_raw, _) = src_buffer
+            .raw
+            .as_ref()
+            .ok_or(TransferError::InvalidBuffer(source.buffer))?;
         if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
             Err(TransferError::MissingCopySrcUsageFlag)?
         }
@@ -505,7 +517,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                 src_barriers.chain(dst_barriers),
             );
             cmb_raw.copy_buffer_to_image(
-                &src_buffer.raw,
+                src_raw,
                 &dst_texture.raw,
                 hal::image::Layout::TransferDstOptimal,
                 iter::once(region),
@@ -566,6 +578,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .buffers
             .use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST)
             .map_err(TransferError::InvalidBuffer)?;
+        let &(ref dst_raw, _) = dst_buffer
+            .raw
+            .as_ref()
+            .ok_or(TransferError::InvalidBuffer(destination.buffer))?;
        if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
             Err(TransferError::MissingCopyDstUsageFlag)?
         }
@@ -625,7 +641,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             cmb_raw.copy_image_to_buffer(
                 &src_texture.raw,
                 hal::image::Layout::TransferSrcOptimal,
-                &dst_buffer.raw,
+                dst_raw,
                 iter::once(region),
             );
         }
@@ -477,7 +477,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
                 .find(|a| a.index == submit_index)
                 .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                 .buffers
-                .push((res.raw, res.memory));
+                .extend(res.raw);
         }
     }
 }
@@ -691,7 +691,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
                 .buffers
                 .unregister_locked(buffer_id.0, &mut *buffer_guard)
             {
-                self.free_resources.buffers.push((buf.raw, buf.memory));
+                self.free_resources.buffers.extend(buf.raw);
             }
         } else {
             let mapping = match std::mem::replace(
@@ -164,12 +164,16 @@ fn map_buffer<B: hal::Backend>(
     sub_range: hal::buffer::SubRange,
     kind: HostMap,
 ) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
+    let &mut (_, ref mut memory) = buffer
+        .raw
+        .as_mut()
+        .ok_or(resource::BufferAccessError::Destroyed)?;
     let (ptr, segment, needs_sync) = {
         let segment = hal::memory::Segment {
             offset: sub_range.offset,
             size: sub_range.size,
         };
-        let mapped = buffer.memory.map(raw, segment)?;
+        let mapped = memory.map(raw, segment)?;
         let mr = mapped.range();
         let segment = hal::memory::Segment {
             offset: mr.start,
@@ -180,7 +184,7 @@ fn map_buffer<B: hal::Backend>(
 
     buffer.sync_mapped_writes = match kind {
         HostMap::Read if needs_sync => unsafe {
-            raw.invalidate_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment)))
+            raw.invalidate_mapped_memory_ranges(iter::once((memory.memory(), segment)))
                 .or(Err(DeviceError::OutOfMemory))?;
             None
         },
@@ -194,9 +198,13 @@ fn unmap_buffer<B: hal::Backend>(
     raw: &B::Device,
     buffer: &mut resource::Buffer<B>,
 ) -> Result<(), resource::BufferAccessError> {
+    let &(_, ref memory) = buffer
+        .raw
+        .as_ref()
+        .ok_or(resource::BufferAccessError::Destroyed)?;
     if let Some(segment) = buffer.sync_mapped_writes.take() {
         unsafe {
-            raw.flush_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment)))
+            raw.flush_mapped_memory_ranges(iter::once((memory.memory(), segment)))
                 .or(Err(DeviceError::OutOfMemory))?;
         }
     }
@@ -481,13 +489,12 @@ impl<B: GfxBackend> Device<B> {
             .map_err(DeviceError::from_bind)?;
 
         Ok(resource::Buffer {
-            raw: buffer,
+            raw: Some((buffer, memory)),
             device_id: Stored {
                 value: id::Valid(self_id),
                 ref_count: self.life_guard.add_ref(),
             },
             usage: desc.usage,
-            memory,
             size: desc.size,
             full_range: (),
             sync_mapped_writes: None,
@@ -872,9 +879,11 @@ impl<B: hal::Backend> Device<B> {
     }
 
     pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) {
-        unsafe {
-            self.mem_allocator.lock().free(&self.raw, buffer.memory);
-            self.raw.destroy_buffer(buffer.raw);
+        if let Some((raw, memory)) = buffer.raw {
+            unsafe {
+                self.mem_allocator.lock().free(&self.raw, memory);
+                self.raw.destroy_buffer(raw);
+            }
         }
     }
 
@@ -1032,7 +1041,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             resource::BufferUse::MAP_WRITE
         } else {
             // buffer needs staging area for initialization only
-            let mut stage = device.create_buffer(
+            let stage = device.create_buffer(
                 device_id,
                 &wgt::BufferDescriptor {
                     label: Some(Cow::Borrowed("<init_buffer>")),
@@ -1042,15 +1051,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                 },
                 gfx_memory::Kind::Linear,
             )?;
-            let mapped = stage
-                .memory
+            let (stage_buffer, mut stage_memory) = stage.raw.unwrap();
+            let mapped = stage_memory
                 .map(&device.raw, hal::memory::Segment::ALL)
                 .map_err(resource::BufferAccessError::from)?;
             buffer.map_state = resource::BufferMapState::Init {
                 ptr: mapped.ptr(),
                 needs_flush: !mapped.is_coherent(),
-                stage_buffer: stage.raw,
-                stage_memory: stage.memory,
+                stage_buffer,
+                stage_memory,
             };
             resource::BufferUse::COPY_DST
         };
@@ -1118,7 +1127,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .map_err(|_| DeviceError::Invalid)?;
         let mut buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::InvalidBuffer)?;
+            .map_err(|_| resource::BufferAccessError::Invalid)?;
         check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?;
         //assert!(buffer isn't used by the GPU);
 
@@ -1172,7 +1181,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .map_err(|_| DeviceError::Invalid)?;
         let mut buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::InvalidBuffer)?;
+            .map_err(|_| resource::BufferAccessError::Invalid)?;
         check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
         //assert!(buffer isn't used by the GPU);
 
@@ -1201,6 +1210,34 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .register_error(id_in, &mut Token::root())
     }
 
+    pub fn buffer_destroy<B: GfxBackend>(
+        &self,
+        buffer_id: id::BufferId,
+    ) -> Result<(), resource::DestroyError> {
+        span!(_guard, INFO, "Buffer::destroy");
+
+        let hub = B::hub(self);
+        let mut token = Token::root();
+
+        let (device_guard, mut token) = hub.devices.read(&mut token);
+
+        tracing::info!("Buffer {:?} is destroyed", buffer_id);
+        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+        let buffer = buffer_guard
+            .get_mut(buffer_id)
+            .map_err(|_| resource::DestroyError::Invalid)?;
+
+        let device = &device_guard[buffer.device_id.value];
+
+        #[cfg(feature = "trace")]
+        if let Some(ref trace) = device.trace {
+            trace.lock().add(trace::Action::FreeBuffer(buffer_id));
+        }
+
+        let _ = device; //TODO: schedule buffer destruction
+        Ok(())
+    }
+
     pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, now: bool) {
         span!(_guard, INFO, "Buffer::drop");
 
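When tracing is enabled, `buffer_destroy` records `trace::Action::FreeBuffer`, and the trace player in the first hunk replays `FreeBuffer` by calling `buffer_destroy` again, while `DestroyBuffer` continues to mean "drop the id". A toy model of that record/replay symmetry, with placeholder types standing in for the wgpu-core hub machinery:

use std::collections::HashSet;

// Placeholder for id::BufferId.
type BufferId = u64;

// Mirrors the shape of the traced actions touched by this change.
#[derive(Clone, Debug)]
enum Action {
    CreateBuffer(BufferId),
    FreeBuffer(BufferId),    // contents destroyed, id still alive
    DestroyBuffer(BufferId), // id dropped
}

#[derive(Default)]
struct Player {
    // ids whose contents are currently alive
    live_contents: HashSet<BufferId>,
    known_ids: HashSet<BufferId>,
}

impl Player {
    fn play(&mut self, action: &Action) {
        match *action {
            Action::CreateBuffer(id) => {
                self.known_ids.insert(id);
                self.live_contents.insert(id);
            }
            // Mirrors `self.buffer_destroy::<B>(id).unwrap()` in the player.
            Action::FreeBuffer(id) => {
                self.live_contents.remove(&id);
            }
            // Mirrors `self.buffer_drop::<B>(id, true)`.
            Action::DestroyBuffer(id) => {
                self.known_ids.remove(&id);
            }
        }
    }
}

fn main() {
    // What a traced application might have recorded.
    let trace = vec![
        Action::CreateBuffer(1),
        Action::FreeBuffer(1),
        Action::DestroyBuffer(1),
    ];

    let mut player = Player::default();
    for action in &trace {
        player.play(action);
    }
    assert!(player.live_contents.is_empty());
    assert!(player.known_ids.is_empty());
}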
@@ -2006,8 +2043,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         let buffer = used
                             .buffers
                             .use_extend(&*buffer_guard, bb.buffer_id, (), internal_use)
-                            .unwrap();
+                            .map_err(|_| CreateBindGroupError::InvalidBuffer(bb.buffer_id))?;
                         check_buffer_usage(buffer.usage, pub_usage)?;
+                        let &(ref buffer_raw, _) = buffer
+                            .raw
+                            .as_ref()
+                            .ok_or(CreateBindGroupError::InvalidBuffer(bb.buffer_id))?;
+
                         let (bind_size, bind_end) = match bb.size {
                             Some(size) => {
                                 let end = bb.offset + size.get();
@@ -2049,7 +2091,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                             offset: bb.offset,
                             size: Some(bind_size),
                         };
-                        SmallVec::from([hal::pso::Descriptor::Buffer(&buffer.raw, sub_range)])
+                        SmallVec::from([hal::pso::Descriptor::Buffer(buffer_raw, sub_range)])
                     }
                     Br::Sampler(id) => {
                         match decl.ty {
@@ -2057,7 +2099,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                                 let sampler = used
                                     .samplers
                                     .use_extend(&*sampler_guard, id, (), ())
-                                    .unwrap();
+                                    .map_err(|_| CreateBindGroupError::InvalidSampler(id))?;
 
                                 // Check the actual sampler to also (not) be a comparison sampler
                                 if sampler.comparison != comparison {
@@ -2079,7 +2121,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         let view = used
                             .views
                             .use_extend(&*texture_view_guard, id, (), ())
-                            .unwrap();
+                            .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?;
                         let (pub_usage, internal_use) = match decl.ty {
                             wgt::BindingType::SampledTexture { .. } => (
                                 wgt::TextureUsage::SAMPLED,
@@ -2168,7 +2210,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         let view = used
                             .views
                             .use_extend(&*texture_view_guard, id, (), ())
-                            .unwrap();
+                            .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?;
                         match view.inner {
                             resource::TextureViewInner::Native {
                                 ref raw,
@@ -3598,7 +3640,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let (mut buffer_guard, _) = hub.buffers.write(&mut token);
         let buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::InvalidBuffer)?;
+            .map_err(|_| resource::BufferAccessError::Invalid)?;
 
         check_buffer_usage(buffer.usage, pub_usage)?;
         buffer.map_state = match buffer.map_state {
@@ -3653,7 +3695,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let (buffer_guard, _) = hub.buffers.read(&mut token);
         let buffer = buffer_guard
             .get(buffer_id)
-            .map_err(|_| resource::BufferAccessError::InvalidBuffer)?;
+            .map_err(|_| resource::BufferAccessError::Invalid)?;
 
         match buffer.map_state {
             resource::BufferMapState::Init { ptr, .. }
@@ -3679,7 +3721,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let (mut buffer_guard, _) = hub.buffers.write(&mut token);
         let buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::InvalidBuffer)?;
+            .map_err(|_| resource::BufferAccessError::Invalid)?;
         let device = &mut device_guard[buffer.device_id.value];
 
         tracing::debug!("Buffer {:?} map state -> Idle", buffer_id);
@@ -3719,6 +3761,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                 };
             }
 
+            let &(ref buf_raw, _) = buffer
+                .raw
+                .as_ref()
+                .ok_or(resource::BufferAccessError::Destroyed)?;
+
             buffer.life_guard.use_at(device.active_submission_index + 1);
             let region = hal::command::BufferCopy {
                 src: 0,
@@ -3733,7 +3780,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             };
             let transition_dst = hal::memory::Barrier::Buffer {
                 states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE,
-                target: &buffer.raw,
+                target: buf_raw,
                 range: hal::buffer::SubRange::WHOLE,
                 families: None,
             };
@@ -3745,7 +3792,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                 iter::once(transition_src).chain(iter::once(transition_dst)),
             );
             if buffer.size > 0 {
-                cmdbuf.copy_buffer(&stage_buffer, &buffer.raw, iter::once(region));
+                cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region));
             }
         }
         device
@@ -143,8 +143,12 @@ pub enum QueueSubmitError {
     Queue(#[from] DeviceError),
     #[error("command buffer {0:?} is invalid")]
     InvalidCommandBuffer(id::CommandBufferId),
+    #[error("buffer {0:?} is destroyed")]
+    DestroyedBuffer(id::BufferId),
+    #[error("texture {0:?} is destroyed")]
+    DestroyedTexture(id::TextureId),
     #[error(transparent)]
-    BufferAccess(#[from] BufferAccessError),
+    Unmap(#[from] BufferAccessError),
     #[error("swap chain output was dropped before the command buffer got submitted")]
     SwapChainOutputDropped,
     #[error("GPU got stuck :(")]
@@ -209,6 +213,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .buffers
             .use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
             .map_err(TransferError::InvalidBuffer)?;
+        let &(ref dst_raw, _) = dst
+            .raw
+            .as_ref()
+            .ok_or(TransferError::InvalidBuffer(buffer_id))?;
         if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
             Err(TransferError::MissingCopyDstUsageFlag)?;
         }
@@ -248,7 +256,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             );
             stage
                 .cmdbuf
-                .copy_buffer(&stage.buffer, &dst.raw, iter::once(region));
+                .copy_buffer(&stage.buffer, dst_raw, iter::once(region));
         }
 
         device.pending_writes.consume(stage);
@@ -497,6 +505,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         // update submission IDs
         for id in cmdbuf.trackers.buffers.used() {
             let buffer = &mut buffer_guard[id];
+            if buffer.raw.is_none() {
+                return Err(QueueSubmitError::DestroyedBuffer(id.0))?;
+            }
             if !buffer.life_guard.use_at(submit_index) {
                 if let BufferMapState::Active { .. } = buffer.map_state {
                     tracing::warn!("Dropped buffer has a pending mapping.");
@@ -35,6 +35,7 @@ pub enum Action<'a> {
         backend: wgt::Backend,
     },
     CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
+    FreeBuffer(id::BufferId),
     DestroyBuffer(id::BufferId),
     CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
     DestroyTexture(id::TextureId),
@@ -130,7 +130,9 @@ pub enum BufferAccessError {
     #[error(transparent)]
     Device(#[from] DeviceError),
     #[error("buffer is invalid")]
-    InvalidBuffer,
+    Invalid,
+    #[error("buffer is destroyed")]
+    Destroyed,
     #[error("buffer is already mapped")]
     AlreadyMapped,
     #[error(transparent)]
@@ -164,10 +166,9 @@ pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
 
 #[derive(Debug)]
 pub struct Buffer<B: hal::Backend> {
-    pub(crate) raw: B::Buffer,
+    pub(crate) raw: Option<(B::Buffer, MemoryBlock<B>)>,
     pub(crate) device_id: Stored<DeviceId>,
     pub(crate) usage: wgt::BufferUsage,
-    pub(crate) memory: MemoryBlock<B>,
     pub(crate) size: wgt::BufferAddress,
     pub(crate) full_range: (),
     pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
@@ -425,3 +426,11 @@ impl<B: hal::Backend> Borrow<()> for Sampler<B> {
         &DUMMY_SELECTOR
     }
 }
+
+#[derive(Clone, Debug, Error)]
+pub enum DestroyError {
+    #[error("resource is invalid")]
+    Invalid,
+    #[error("resource is already destroyed")]
+    AlreadyDestroyed,
+}
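The error enums touched in this commit all rely on `thiserror`'s derive, so each `#[error(...)]` string becomes the `Display` text that callers and logs see. A stand-alone sketch of how the new `DestroyError` shape behaves; it assumes the `thiserror` crate, and the lookup function is purely illustrative:

use std::collections::HashSet;

use thiserror::Error;

#[derive(Clone, Debug, Error)]
pub enum DestroyError {
    #[error("resource is invalid")]
    Invalid,
    #[error("resource is already destroyed")]
    AlreadyDestroyed,
}

// Illustrative only: a tiny resource table that can destroy contents by id.
fn destroy(id: u32, known: &[u32], destroyed: &mut HashSet<u32>) -> Result<(), DestroyError> {
    if !known.contains(&id) {
        return Err(DestroyError::Invalid);
    }
    if !destroyed.insert(id) {
        return Err(DestroyError::AlreadyDestroyed);
    }
    Ok(())
}

fn main() {
    let known = [1, 2];
    let mut destroyed = HashSet::new();

    assert!(destroy(1, &known, &mut destroyed).is_ok());
    // `Display` output comes from the #[error(...)] attributes above.
    println!("{}", destroy(1, &known, &mut destroyed).unwrap_err()); // resource is already destroyed
    println!("{}", destroy(9, &known, &mut destroyed).unwrap_err()); // resource is invalid
}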
@@ -134,10 +134,11 @@ impl PendingTransition<BufferState> {
         buf: &'a resource::Buffer<B>,
     ) -> hal::memory::Barrier<'a, B> {
         tracing::trace!("\tbuffer -> {:?}", self);
+        let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed");
         hal::memory::Barrier::Buffer {
             states: conv::map_buffer_state(self.usage.start)
                 ..conv::map_buffer_state(self.usage.end),
-            target: &buf.raw,
+            target,
             range: hal::buffer::SubRange::WHOLE,
             families: None,
         }