966: Immediate resource destruction and freeing r=cwfitzgerald a=kvark

**Connections**
Fixes #964

**Description**
We are making it so a buffer or a texture can have their native resources freed while they are still referenced, i.e. without waiting for garbage collection.

In addition, the PR adds a few missing cases where error IDs should have been handled, such as during render pass encoding.

**Testing**
Tested on wgpu-rs examples, see https://github.com/gfx-rs/wgpu-rs/pull/591

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot] 2020-10-11 17:10:42 +00:00 committed by GitHub
commit f963193be1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 458 additions and 146 deletions

View File

@ -139,6 +139,9 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
self.device_maintain_ids::<B>(device).unwrap(); self.device_maintain_ids::<B>(device).unwrap();
self.device_create_buffer::<B>(device, &desc, id).unwrap(); self.device_create_buffer::<B>(device, &desc, id).unwrap();
} }
A::FreeBuffer(id) => {
self.buffer_destroy::<B>(id).unwrap();
}
A::DestroyBuffer(id) => { A::DestroyBuffer(id) => {
self.buffer_drop::<B>(id, true); self.buffer_drop::<B>(id, true);
} }
@ -146,8 +149,11 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
self.device_maintain_ids::<B>(device).unwrap(); self.device_maintain_ids::<B>(device).unwrap();
self.device_create_texture::<B>(device, &desc, id).unwrap(); self.device_create_texture::<B>(device, &desc, id).unwrap();
} }
A::FreeTexture(id) => {
self.texture_destroy::<B>(id).unwrap();
}
A::DestroyTexture(id) => { A::DestroyTexture(id) => {
self.texture_drop::<B>(id); self.texture_drop::<B>(id, true);
} }
A::CreateTextureView { A::CreateTextureView {
id, id,

View File

@ -45,7 +45,7 @@ pub enum CreateBindGroupError {
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("bind group layout is invalid")] #[error("bind group layout is invalid")]
InvalidLayout, InvalidLayout,
#[error("buffer {0:?} is invalid")] #[error("buffer {0:?} is invalid or destroyed")]
InvalidBuffer(BufferId), InvalidBuffer(BufferId),
#[error("texture view {0:?} is invalid")] #[error("texture view {0:?} is invalid")]
InvalidTextureView(TextureViewId), InvalidTextureView(TextureViewId),

View File

@ -125,6 +125,13 @@ pub enum CreateRenderBundleError {
InvalidSampleCount(u32), InvalidSampleCount(u32),
} }
/// Error type returned from `RenderBundleEncoder::new` if the sample count is invalid.
#[derive(Clone, Debug, Error)]
pub enum ExecutionError {
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
}
pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>; pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
//Note: here, `RenderBundle` is just wrapping a raw stream of render commands. //Note: here, `RenderBundle` is just wrapping a raw stream of render commands.
@ -151,8 +158,9 @@ impl RenderBundle {
/// However the point of this function is to be lighter, since we already had /// However the point of this function is to be lighter, since we already had
/// a chance to go through the commands in `render_bundle_encoder_finish`. /// a chance to go through the commands in `render_bundle_encoder_finish`.
/// ///
/// Note that the function isn't expected to fail. /// Note that the function isn't expected to fail, generally.
/// All the validation has already been done by this point. /// All the validation has already been done by this point.
/// The only failure condition is if some of the used buffers are destroyed.
pub(crate) unsafe fn execute<B: GfxBackend>( pub(crate) unsafe fn execute<B: GfxBackend>(
&self, &self,
cmd_buf: &mut B::CommandBuffer, cmd_buf: &mut B::CommandBuffer,
@ -163,7 +171,7 @@ impl RenderBundle {
bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>, bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>,
pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>, pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>,
buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>, buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>,
) { ) -> Result<(), ExecutionError> {
use hal::command::CommandBuffer as _; use hal::command::CommandBuffer as _;
let mut offsets = self.base.dynamic_offsets.as_slice(); let mut offsets = self.base.dynamic_offsets.as_slice();
@ -197,9 +205,14 @@ impl RenderBundle {
offset, offset,
size, size,
} => { } => {
let buffer = buffer_guard.get(buffer_id).unwrap(); let &(ref buffer, _) = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
let view = hal::buffer::IndexBufferView { let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw, buffer,
range: hal::buffer::SubRange { range: hal::buffer::SubRange {
offset, offset,
size: size.map(|s| s.get()), size: size.map(|s| s.get()),
@ -215,12 +228,17 @@ impl RenderBundle {
offset, offset,
size, size,
} => { } => {
let buffer = buffer_guard.get(buffer_id).unwrap(); let &(ref buffer, _) = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
let range = hal::buffer::SubRange { let range = hal::buffer::SubRange {
offset, offset,
size: size.map(|s| s.get()), size: size.map(|s| s.get()),
}; };
cmd_buf.bind_vertex_buffers(slot, iter::once((&buffer.raw, range))); cmd_buf.bind_vertex_buffers(slot, iter::once((buffer, range)));
} }
RenderCommand::SetPushConstant { RenderCommand::SetPushConstant {
stages, stages,
@ -288,8 +306,13 @@ impl RenderBundle {
count: None, count: None,
indexed: false, indexed: false,
} => { } => {
let buffer = buffer_guard.get(buffer_id).unwrap(); let &(ref buffer, _) = buffer_guard
cmd_buf.draw_indirect(&buffer.raw, offset, 1, 0); .get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
cmd_buf.draw_indirect(buffer, offset, 1, 0);
} }
RenderCommand::MultiDrawIndirect { RenderCommand::MultiDrawIndirect {
buffer_id, buffer_id,
@ -297,8 +320,13 @@ impl RenderBundle {
count: None, count: None,
indexed: true, indexed: true,
} => { } => {
let buffer = buffer_guard.get(buffer_id).unwrap(); let &(ref buffer, _) = buffer_guard
cmd_buf.draw_indexed_indirect(&buffer.raw, offset, 1, 0); .get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
cmd_buf.draw_indexed_indirect(buffer, offset, 1, 0);
} }
RenderCommand::MultiDrawIndirect { .. } RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(), | RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@ -312,6 +340,8 @@ impl RenderBundle {
| RenderCommand::SetScissor(_) => unreachable!(), | RenderCommand::SetScissor(_) => unreachable!(),
} }
} }
Ok(())
} }
} }

View File

@ -121,7 +121,7 @@ pub enum ComputePassError {
BindGroupIndexOutOfRange { index: u8, max: u32 }, BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("compute pipeline {0:?} is invalid")] #[error("compute pipeline {0:?} is invalid")]
InvalidPipeline(id::ComputePipelineId), InvalidPipeline(id::ComputePipelineId),
#[error("indirect buffer {0:?} is invalid")] #[error("indirect buffer {0:?} is invalid or destroyed")]
InvalidIndirectBuffer(id::BufferId), InvalidIndirectBuffer(id::BufferId),
#[error(transparent)] #[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict), ResourceUsageConflict(#[from] UsageConflict),
@ -412,6 +412,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.map_err(|_| ComputePassError::InvalidIndirectBuffer(buffer_id))?; .map_err(|_| ComputePassError::InvalidIndirectBuffer(buffer_id))?;
check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?; check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
let &(ref buf_raw, _) = indirect_buffer
.raw
.as_ref()
.ok_or(ComputePassError::InvalidIndirectBuffer(buffer_id))?;
state.flush_states( state.flush_states(
raw, raw,
@ -421,7 +425,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*texture_guard, &*texture_guard,
)?; )?;
unsafe { unsafe {
raw.dispatch_indirect(&indirect_buffer.raw, offset); raw.dispatch_indirect(buf_raw, offset);
} }
} }
ComputeCommand::PushDebugGroup { color, len } => { ComputeCommand::PushDebugGroup { color, len } => {

View File

@ -65,6 +65,8 @@ pub enum RenderCommandError {
IncompatibleReadOnlyDepthStencil, IncompatibleReadOnlyDepthStencil,
#[error("buffer {0:?} is in error {1:?}")] #[error("buffer {0:?} is in error {1:?}")]
Buffer(id::BufferId, BufferError), Buffer(id::BufferId, BufferError),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error(transparent)] #[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError), MissingBufferUsage(#[from] MissingBufferUsageError),
#[error(transparent)] #[error(transparent)]

View File

@ -6,8 +6,8 @@ use crate::{
binding_model::BindError, binding_model::BindError,
command::{ command::{
bind::{Binder, LayoutChange}, bind::{Binder, LayoutChange},
BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, RenderCommand, BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, ExecutionError,
RenderCommandError, RenderCommand, RenderCommandError,
}, },
conv, conv,
device::{ device::{
@ -197,7 +197,7 @@ impl OptionalState {
#[derive(Debug, Default)] #[derive(Debug, Default)]
struct IndexState { struct IndexState {
bound_buffer_view: Option<(id::BufferId, Range<BufferAddress>)>, bound_buffer_view: Option<(id::Valid<id::BufferId>, Range<BufferAddress>)>,
format: IndexFormat, format: IndexFormat,
limit: u32, limit: u32,
} }
@ -1013,7 +1013,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let pipeline = trackers let pipeline = trackers
.render_pipes .render_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ()) .use_extend(&*pipeline_guard, pipeline_id, (), ())
.unwrap(); .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))?;
context context
.check_compatible(&pipeline.pass_context) .check_compatible(&pipeline.pass_context)
@ -1101,13 +1101,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
state.index.update_limit(); state.index.update_limit();
if let Some((buffer_id, ref range)) = state.index.bound_buffer_view { if let Some((buffer_id, ref range)) = state.index.bound_buffer_view {
let buffer = trackers let &(ref buffer, _) = buffer_guard[buffer_id].raw.as_ref().unwrap();
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
.unwrap();
let view = hal::buffer::IndexBufferView { let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw, buffer,
range: hal::buffer::SubRange { range: hal::buffer::SubRange {
offset: range.start, offset: range.start,
size: Some(range.end - range.start), size: Some(range.end - range.start),
@ -1142,18 +1139,22 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = trackers let buffer = trackers
.buffers .buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX) .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
.unwrap(); .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
check_buffer_usage(buffer.usage, BufferUsage::INDEX)?; check_buffer_usage(buffer.usage, BufferUsage::INDEX)?;
let &(ref buf_raw, _) = buffer
.raw
.as_ref()
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
let end = match size { let end = match size {
Some(s) => offset + s.get(), Some(s) => offset + s.get(),
None => buffer.size, None => buffer.size,
}; };
state.index.bound_buffer_view = Some((buffer_id, offset..end)); state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end));
state.index.update_limit(); state.index.update_limit();
let view = hal::buffer::IndexBufferView { let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw, buffer: buf_raw,
range: hal::buffer::SubRange { range: hal::buffer::SubRange {
offset, offset,
size: Some(end - offset), size: Some(end - offset),
@ -1174,8 +1175,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = trackers let buffer = trackers
.buffers .buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX) .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
.unwrap(); .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
check_buffer_usage(buffer.usage, BufferUsage::VERTEX)?; check_buffer_usage(buffer.usage, BufferUsage::VERTEX)?;
let &(ref buf_raw, _) = buffer
.raw
.as_ref()
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len()); let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len());
state state
.vertex .vertex
@ -1191,7 +1197,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
size: size.map(|s| s.get()), size: size.map(|s| s.get()),
}; };
unsafe { unsafe {
raw.bind_vertex_buffers(slot, iter::once((&buffer.raw, range))); raw.bind_vertex_buffers(slot, iter::once((buf_raw, range)));
} }
state.vertex.update_limits(); state.vertex.update_limits();
} }
@ -1374,33 +1380,37 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT)?; check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT)?;
} }
let buffer = trackers let indirect_buffer = trackers
.buffers .buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.unwrap(); .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?; check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
let &(ref indirect_raw, _) = indirect_buffer
.raw
.as_ref()
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
let actual_count = count.map_or(1, |c| c.get()); let actual_count = count.map_or(1, |c| c.get());
let begin_offset = offset; let begin_offset = offset;
let end_offset = offset + stride * actual_count as u64; let end_offset = offset + stride * actual_count as u64;
if end_offset > buffer.size { if end_offset > indirect_buffer.size {
return Err(RenderPassError::IndirectBufferOverrun { return Err(RenderPassError::IndirectBufferOverrun {
offset, offset,
count, count,
begin_offset, begin_offset,
end_offset, end_offset,
buffer_size: buffer.size, buffer_size: indirect_buffer.size,
}); });
} }
match indexed { match indexed {
false => unsafe { false => unsafe {
raw.draw_indirect(&buffer.raw, offset, actual_count, stride as u32); raw.draw_indirect(indirect_raw, offset, actual_count, stride as u32);
}, },
true => unsafe { true => unsafe {
raw.draw_indexed_indirect( raw.draw_indexed_indirect(
&buffer.raw, indirect_raw,
offset, offset,
actual_count, actual_count,
stride as u32, stride as u32,
@ -1428,26 +1438,35 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
wgt::Features::MULTI_DRAW_INDIRECT_COUNT, wgt::Features::MULTI_DRAW_INDIRECT_COUNT,
)?; )?;
let buffer = trackers let indirect_buffer = trackers
.buffers .buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.unwrap(); .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?;
check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?; check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
let &(ref indirect_raw, _) = indirect_buffer
.raw
.as_ref()
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?;
let count_buffer = trackers let count_buffer = trackers
.buffers .buffers
.use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT) .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT)
.unwrap(); .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e))?;
check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT)?; check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT)?;
let &(ref count_raw, _) = count_buffer
.raw
.as_ref()
.ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id))?;
let begin_offset = offset; let begin_offset = offset;
let end_offset = offset + stride * max_count as u64; let end_offset = offset + stride * max_count as u64;
if end_offset > buffer.size { if end_offset > indirect_buffer.size {
return Err(RenderPassError::IndirectBufferOverrun { return Err(RenderPassError::IndirectBufferOverrun {
offset, offset,
count: None, count: None,
begin_offset, begin_offset,
end_offset, end_offset,
buffer_size: buffer.size, buffer_size: indirect_buffer.size,
}); });
} }
@ -1464,9 +1483,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
match indexed { match indexed {
false => unsafe { false => unsafe {
raw.draw_indirect_count( raw.draw_indirect_count(
&buffer.raw, indirect_raw,
offset, offset,
&count_buffer.raw, count_raw,
count_buffer_offset, count_buffer_offset,
max_count, max_count,
stride as u32, stride as u32,
@ -1474,9 +1493,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}, },
true => unsafe { true => unsafe {
raw.draw_indexed_indirect_count( raw.draw_indexed_indirect_count(
&buffer.raw, indirect_raw,
offset, offset,
&count_buffer.raw, count_raw,
count_buffer_offset, count_buffer_offset,
max_count, max_count,
stride as u32, stride as u32,
@ -1526,6 +1545,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*buffer_guard, &*buffer_guard,
) )
} }
.map_err(|e| match e {
ExecutionError::DestroyedBuffer(id) => {
RenderCommandError::DestroyedBuffer(id)
}
})?;
trackers.merge_extend(&bundle.used)?; trackers.merge_extend(&bundle.used)?;
state.reset_bundle(); state.reset_bundle();

View File

@ -35,9 +35,9 @@ pub enum CopySide {
/// Error encountered while attempting a data transfer. /// Error encountered while attempting a data transfer.
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum TransferError { pub enum TransferError {
#[error("buffer {0:?} is invalid")] #[error("buffer {0:?} is invalid or destroyed")]
InvalidBuffer(BufferId), InvalidBuffer(BufferId),
#[error("texture {0:?} is invalid")] #[error("texture {0:?} is invalid or destroyed")]
InvalidTexture(TextureId), InvalidTexture(TextureId),
#[error("Source and destination cannot be the same buffer")] #[error("Source and destination cannot be the same buffer")]
SameSourceDestinationBuffer, SameSourceDestinationBuffer,
@ -330,6 +330,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers .buffers
.use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC) .use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?; .map_err(TransferError::InvalidBuffer)?;
let &(ref src_raw, _) = src_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(source))?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) { if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
@ -340,6 +344,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers .buffers
.use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST) .use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?; .map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination))?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) { if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)? Err(TransferError::MissingCopyDstUsageFlag)?
} }
@ -391,7 +399,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::memory::Dependencies::empty(), hal::memory::Dependencies::empty(),
barriers, barriers,
); );
cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region)); cmb_raw.copy_buffer(src_raw, dst_raw, iter::once(region));
} }
Ok(()) Ok(())
} }
@ -433,6 +441,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers .buffers
.use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC) .use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?; .map_err(TransferError::InvalidBuffer)?;
let &(ref src_raw, _) = src_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(source.buffer))?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) { if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
@ -448,6 +460,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureUse::COPY_DST, TextureUse::COPY_DST,
) )
.unwrap(); .unwrap();
let &(ref dst_raw, _) = dst_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) { if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)? Err(TransferError::MissingCopyDstUsageFlag)?
} }
@ -505,8 +521,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
src_barriers.chain(dst_barriers), src_barriers.chain(dst_barriers),
); );
cmb_raw.copy_buffer_to_image( cmb_raw.copy_buffer_to_image(
&src_buffer.raw, src_raw,
&dst_texture.raw, dst_raw,
hal::image::Layout::TransferDstOptimal, hal::image::Layout::TransferDstOptimal,
iter::once(region), iter::once(region),
); );
@ -556,6 +572,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureUse::COPY_SRC, TextureUse::COPY_SRC,
) )
.unwrap(); .unwrap();
let &(ref src_raw, _) = src_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) { if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
@ -566,6 +586,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers .buffers
.use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST) .use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?; .map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination.buffer))?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) { if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)? Err(TransferError::MissingCopyDstUsageFlag)?
} }
@ -623,9 +647,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
src_barriers.chain(dst_barrier), src_barriers.chain(dst_barrier),
); );
cmb_raw.copy_image_to_buffer( cmb_raw.copy_image_to_buffer(
&src_texture.raw, src_raw,
hal::image::Layout::TransferSrcOptimal, hal::image::Layout::TransferSrcOptimal,
&dst_buffer.raw, dst_raw,
iter::once(region), iter::once(region),
); );
} }
@ -683,6 +707,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureUse::COPY_SRC, TextureUse::COPY_SRC,
) )
.unwrap(); .unwrap();
let &(ref src_raw, _) = src_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) { if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
@ -698,6 +726,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureUse::COPY_DST, TextureUse::COPY_DST,
) )
.unwrap(); .unwrap();
let &(ref dst_raw, _) = dst_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) { if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)? Err(TransferError::MissingCopyDstUsageFlag)?
} }
@ -733,9 +765,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
barriers, barriers,
); );
cmb_raw.copy_image( cmb_raw.copy_image(
&src_texture.raw, src_raw,
hal::image::Layout::TransferSrcOptimal, hal::image::Layout::TransferSrcOptimal,
&dst_texture.raw, dst_raw,
hal::image::Layout::TransferDstOptimal, hal::image::Layout::TransferDstOptimal,
iter::once(region), iter::once(region),
); );

View File

@ -5,7 +5,7 @@
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
use crate::device::trace; use crate::device::trace;
use crate::{ use crate::{
device::DeviceError, device::{queue::TempResource, DeviceError},
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token}, hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
id, resource, id, resource,
track::TrackerSet, track::TrackerSet,
@ -241,10 +241,16 @@ impl<B: hal::Backend> LifetimeTracker<B> {
index: SubmissionIndex, index: SubmissionIndex,
fence: B::Fence, fence: B::Fence,
new_suspects: &SuspectedResources, new_suspects: &SuspectedResources,
temp_buffers: impl Iterator<Item = (B::Buffer, MemoryBlock<B>)>, temp_resources: impl Iterator<Item = (TempResource<B>, MemoryBlock<B>)>,
) { ) {
let mut last_resources = NonReferencedResources::new(); let mut last_resources = NonReferencedResources::new();
last_resources.buffers.extend(temp_buffers); for (res, memory) in temp_resources {
match res {
TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
TempResource::Image(raw) => last_resources.images.push((raw, memory)),
}
}
self.suspected_resources.buffers.extend( self.suspected_resources.buffers.extend(
self.future_suspected_buffers self.future_suspected_buffers
.drain(..) .drain(..)
@ -256,6 +262,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
.map(|stored| stored.value), .map(|stored| stored.value),
); );
self.suspected_resources.extend(new_suspects); self.suspected_resources.extend(new_suspects);
self.active.alloc().init(ActiveSubmission { self.active.alloc().init(ActiveSubmission {
index, index,
fence, fence,
@ -336,6 +343,23 @@ impl<B: hal::Backend> LifetimeTracker<B> {
descriptor_allocator_mutex.lock().cleanup(device); descriptor_allocator_mutex.lock().cleanup(device);
} }
} }
pub fn schedule_resource_destruction(
&mut self,
temp_resource: TempResource<B>,
memory: MemoryBlock<B>,
last_submit_index: SubmissionIndex,
) {
let resources = self
.active
.iter_mut()
.find(|a| a.index == last_submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources);
match temp_resource {
TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
TempResource::Image(raw) => resources.images.push((raw, memory)),
}
}
} }
impl<B: GfxBackend> LifetimeTracker<B> { impl<B: GfxBackend> LifetimeTracker<B> {
@ -432,7 +456,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.find(|a| a.index == submit_index) .find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources) .map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images .images
.push((res.raw, res.memory)); .extend(res.raw);
} }
} }
} }
@ -477,7 +501,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.find(|a| a.index == submit_index) .find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources) .map_or(&mut self.free_resources, |a| &mut a.last_resources)
.buffers .buffers
.push((res.raw, res.memory)); .extend(res.raw);
} }
} }
} }
@ -691,7 +715,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.buffers .buffers
.unregister_locked(buffer_id.0, &mut *buffer_guard) .unregister_locked(buffer_id.0, &mut *buffer_guard)
{ {
self.free_resources.buffers.push((buf.raw, buf.memory)); self.free_resources.buffers.extend(buf.raw);
} }
} else { } else {
let mapping = match std::mem::replace( let mapping = match std::mem::replace(

View File

@ -164,12 +164,16 @@ fn map_buffer<B: hal::Backend>(
sub_range: hal::buffer::SubRange, sub_range: hal::buffer::SubRange,
kind: HostMap, kind: HostMap,
) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> { ) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
let &mut (_, ref mut memory) = buffer
.raw
.as_mut()
.ok_or(resource::BufferAccessError::Destroyed)?;
let (ptr, segment, needs_sync) = { let (ptr, segment, needs_sync) = {
let segment = hal::memory::Segment { let segment = hal::memory::Segment {
offset: sub_range.offset, offset: sub_range.offset,
size: sub_range.size, size: sub_range.size,
}; };
let mapped = buffer.memory.map(raw, segment)?; let mapped = memory.map(raw, segment)?;
let mr = mapped.range(); let mr = mapped.range();
let segment = hal::memory::Segment { let segment = hal::memory::Segment {
offset: mr.start, offset: mr.start,
@ -180,7 +184,7 @@ fn map_buffer<B: hal::Backend>(
buffer.sync_mapped_writes = match kind { buffer.sync_mapped_writes = match kind {
HostMap::Read if needs_sync => unsafe { HostMap::Read if needs_sync => unsafe {
raw.invalidate_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment))) raw.invalidate_mapped_memory_ranges(iter::once((memory.memory(), segment)))
.or(Err(DeviceError::OutOfMemory))?; .or(Err(DeviceError::OutOfMemory))?;
None None
}, },
@ -194,9 +198,13 @@ fn unmap_buffer<B: hal::Backend>(
raw: &B::Device, raw: &B::Device,
buffer: &mut resource::Buffer<B>, buffer: &mut resource::Buffer<B>,
) -> Result<(), resource::BufferAccessError> { ) -> Result<(), resource::BufferAccessError> {
let &(_, ref memory) = buffer
.raw
.as_ref()
.ok_or(resource::BufferAccessError::Destroyed)?;
if let Some(segment) = buffer.sync_mapped_writes.take() { if let Some(segment) = buffer.sync_mapped_writes.take() {
unsafe { unsafe {
raw.flush_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment))) raw.flush_mapped_memory_ranges(iter::once((memory.memory(), segment)))
.or(Err(DeviceError::OutOfMemory))?; .or(Err(DeviceError::OutOfMemory))?;
} }
} }
@ -481,13 +489,12 @@ impl<B: GfxBackend> Device<B> {
.map_err(DeviceError::from_bind)?; .map_err(DeviceError::from_bind)?;
Ok(resource::Buffer { Ok(resource::Buffer {
raw: buffer, raw: Some((buffer, memory)),
device_id: Stored { device_id: Stored {
value: id::Valid(self_id), value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(), ref_count: self.life_guard.add_ref(),
}, },
usage: desc.usage, usage: desc.usage,
memory,
size: desc.size, size: desc.size,
full_range: (), full_range: (),
sync_mapped_writes: None, sync_mapped_writes: None,
@ -579,7 +586,7 @@ impl<B: GfxBackend> Device<B> {
.map_err(DeviceError::from_bind)?; .map_err(DeviceError::from_bind)?;
Ok(resource::Texture { Ok(resource::Texture {
raw: image, raw: Some((image, memory)),
device_id: Stored { device_id: Stored {
value: id::Valid(self_id), value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(), ref_count: self.life_guard.add_ref(),
@ -593,7 +600,6 @@ impl<B: GfxBackend> Device<B> {
levels: 0..desc.mip_level_count as hal::image::Level, levels: 0..desc.mip_level_count as hal::image::Level,
layers: 0..kind.num_layers(), layers: 0..kind.num_layers(),
}, },
memory,
life_guard: LifeGuard::new(), life_guard: LifeGuard::new(),
}) })
} }
@ -872,16 +878,20 @@ impl<B: hal::Backend> Device<B> {
} }
pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) { pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) {
if let Some((raw, memory)) = buffer.raw {
unsafe { unsafe {
self.mem_allocator.lock().free(&self.raw, buffer.memory); self.mem_allocator.lock().free(&self.raw, memory);
self.raw.destroy_buffer(buffer.raw); self.raw.destroy_buffer(raw);
}
} }
} }
pub(crate) fn destroy_texture(&self, texture: resource::Texture<B>) { pub(crate) fn destroy_texture(&self, texture: resource::Texture<B>) {
if let Some((raw, memory)) = texture.raw {
unsafe { unsafe {
self.mem_allocator.lock().free(&self.raw, texture.memory); self.mem_allocator.lock().free(&self.raw, memory);
self.raw.destroy_image(texture.raw); self.raw.destroy_image(raw);
}
} }
} }
@ -1032,7 +1042,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
resource::BufferUse::MAP_WRITE resource::BufferUse::MAP_WRITE
} else { } else {
// buffer needs staging area for initialization only // buffer needs staging area for initialization only
let mut stage = device.create_buffer( let stage = device.create_buffer(
device_id, device_id,
&wgt::BufferDescriptor { &wgt::BufferDescriptor {
label: Some(Cow::Borrowed("<init_buffer>")), label: Some(Cow::Borrowed("<init_buffer>")),
@ -1042,15 +1052,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}, },
gfx_memory::Kind::Linear, gfx_memory::Kind::Linear,
)?; )?;
let mapped = stage let (stage_buffer, mut stage_memory) = stage.raw.unwrap();
.memory let mapped = stage_memory
.map(&device.raw, hal::memory::Segment::ALL) .map(&device.raw, hal::memory::Segment::ALL)
.map_err(resource::BufferAccessError::from)?; .map_err(resource::BufferAccessError::from)?;
buffer.map_state = resource::BufferMapState::Init { buffer.map_state = resource::BufferMapState::Init {
ptr: mapped.ptr(), ptr: mapped.ptr(),
needs_flush: !mapped.is_coherent(), needs_flush: !mapped.is_coherent(),
stage_buffer: stage.raw, stage_buffer,
stage_memory: stage.memory, stage_memory,
}; };
resource::BufferUse::COPY_DST resource::BufferUse::COPY_DST
}; };
@ -1118,7 +1128,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| DeviceError::Invalid)?; .map_err(|_| DeviceError::Invalid)?;
let mut buffer = buffer_guard let mut buffer = buffer_guard
.get_mut(buffer_id) .get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::InvalidBuffer)?; .map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?; check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU); //assert!(buffer isn't used by the GPU);
@ -1172,7 +1182,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| DeviceError::Invalid)?; .map_err(|_| DeviceError::Invalid)?;
let mut buffer = buffer_guard let mut buffer = buffer_guard
.get_mut(buffer_id) .get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::InvalidBuffer)?; .map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?; check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
//assert!(buffer isn't used by the GPU); //assert!(buffer isn't used by the GPU);
@ -1201,7 +1211,53 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.register_error(id_in, &mut Token::root()) .register_error(id_in, &mut Token::root())
} }
pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, now: bool) { pub fn buffer_destroy<B: GfxBackend>(
&self,
buffer_id: id::BufferId,
) -> Result<(), resource::DestroyError> {
span!(_guard, INFO, "Buffer::destroy");
let hub = B::hub(self);
let mut token = Token::root();
//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);
tracing::info!("Buffer {:?} is destroyed", buffer_id);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeBuffer(buffer_id));
}
let (raw, memory) = buffer
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Buffer(raw);
if device.pending_writes.dst_buffers.contains(&buffer_id) {
device.pending_writes.temp_resources.push((temp, memory));
} else {
let last_submit_index = buffer.life_guard.submission_index.load(Ordering::Acquire);
drop(buffer_guard);
device.lock_life(&mut token).schedule_resource_destruction(
temp,
memory,
last_submit_index,
);
}
Ok(())
}
pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, wait: bool) {
span!(_guard, INFO, "Buffer::drop"); span!(_guard, INFO, "Buffer::drop");
let hub = B::hub(self); let hub = B::hub(self);
@ -1226,25 +1282,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token); let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id]; let device = &device_guard[device_id];
if now { let mut life_lock = device_guard[device_id].lock_life(&mut token);
if device.pending_writes.dst_buffers.contains(&buffer_id) {
life_lock.future_suspected_buffers.push(Stored {
value: id::Valid(buffer_id),
ref_count,
});
} else {
drop(ref_count); drop(ref_count);
device life_lock
.lock_life(&mut token)
.suspected_resources .suspected_resources
.buffers .buffers
.push(id::Valid(buffer_id)); .push(id::Valid(buffer_id));
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) { match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (), Ok(()) => (),
Err(e) => tracing::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), Err(e) => tracing::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
} }
} else {
device
.lock_life(&mut token)
.future_suspected_buffers
.push(Stored {
value: id::Valid(buffer_id),
ref_count,
});
} }
} }
@ -1301,19 +1358,67 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.register_error(id_in, &mut Token::root()) .register_error(id_in, &mut Token::root())
} }
pub fn texture_drop<B: GfxBackend>(&self, texture_id: id::TextureId) { pub fn texture_destroy<B: GfxBackend>(
&self,
texture_id: id::TextureId,
) -> Result<(), resource::DestroyError> {
span!(_guard, INFO, "Texture::destroy");
let hub = B::hub(self);
let mut token = Token::root();
//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);
tracing::info!("Buffer {:?} is destroyed", texture_id);
let (mut texture_guard, _) = hub.textures.write(&mut token);
let texture = texture_guard
.get_mut(texture_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[texture.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeTexture(texture_id));
}
let (raw, memory) = texture
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Image(raw);
if device.pending_writes.dst_textures.contains(&texture_id) {
device.pending_writes.temp_resources.push((temp, memory));
} else {
let last_submit_index = texture.life_guard.submission_index.load(Ordering::Acquire);
drop(texture_guard);
device.lock_life(&mut token).schedule_resource_destruction(
temp,
memory,
last_submit_index,
);
}
Ok(())
}
pub fn texture_drop<B: GfxBackend>(&self, texture_id: id::TextureId, wait: bool) {
span!(_guard, INFO, "Texture::drop"); span!(_guard, INFO, "Texture::drop");
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let (ref_count, device_id) = { let (ref_count, last_submit_index, device_id) = {
let (mut texture_guard, _) = hub.textures.write(&mut token); let (mut texture_guard, _) = hub.textures.write(&mut token);
match texture_guard.get_mut(texture_id) { match texture_guard.get_mut(texture_id) {
Ok(texture) => ( Ok(texture) => {
texture.life_guard.ref_count.take().unwrap(), let ref_count = texture.life_guard.ref_count.take().unwrap();
texture.device_id.value, let last_submit_index =
), texture.life_guard.submission_index.load(Ordering::Acquire);
(ref_count, last_submit_index, texture.device_id.value)
}
Err(InvalidId) => { Err(InvalidId) => {
hub.textures hub.textures
.unregister_locked(texture_id, &mut *texture_guard); .unregister_locked(texture_id, &mut *texture_guard);
@ -1323,13 +1428,28 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}; };
let (device_guard, mut token) = hub.devices.read(&mut token); let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id] let device = &device_guard[device_id];
.lock_life(&mut token) let mut life_lock = device_guard[device_id].lock_life(&mut token);
.future_suspected_textures
.push(Stored { if device.pending_writes.dst_textures.contains(&texture_id) {
life_lock.future_suspected_textures.push(Stored {
value: id::Valid(texture_id), value: id::Valid(texture_id),
ref_count, ref_count,
}); });
} else {
drop(ref_count);
life_lock
.suspected_resources
.textures
.push(id::Valid(texture_id));
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => tracing::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
}
}
} }
pub fn texture_create_view<B: GfxBackend>( pub fn texture_create_view<B: GfxBackend>(
@ -1348,6 +1468,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let texture = texture_guard let texture = texture_guard
.get(texture_id) .get(texture_id)
.map_err(|_| resource::CreateTextureViewError::InvalidTexture)?; .map_err(|_| resource::CreateTextureViewError::InvalidTexture)?;
let &(ref texture_raw, _) = texture
.raw
.as_ref()
.ok_or(resource::CreateTextureViewError::InvalidTexture)?;
let device = &device_guard[texture.device_id.value]; let device = &device_guard[texture.device_id.value];
let view_kind = match desc.dimension { let view_kind = match desc.dimension {
@ -1404,7 +1528,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device device
.raw .raw
.create_image_view( .create_image_view(
&texture.raw, texture_raw,
view_kind, view_kind,
conv::map_texture_format(format, device.private_features), conv::map_texture_format(format, device.private_features),
hal::format::Swizzle::NO, hal::format::Swizzle::NO,
@ -2006,8 +2130,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = used let buffer = used
.buffers .buffers
.use_extend(&*buffer_guard, bb.buffer_id, (), internal_use) .use_extend(&*buffer_guard, bb.buffer_id, (), internal_use)
.unwrap(); .map_err(|_| CreateBindGroupError::InvalidBuffer(bb.buffer_id))?;
check_buffer_usage(buffer.usage, pub_usage)?; check_buffer_usage(buffer.usage, pub_usage)?;
let &(ref buffer_raw, _) = buffer
.raw
.as_ref()
.ok_or(CreateBindGroupError::InvalidBuffer(bb.buffer_id))?;
let (bind_size, bind_end) = match bb.size { let (bind_size, bind_end) = match bb.size {
Some(size) => { Some(size) => {
let end = bb.offset + size.get(); let end = bb.offset + size.get();
@ -2049,7 +2178,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
offset: bb.offset, offset: bb.offset,
size: Some(bind_size), size: Some(bind_size),
}; };
SmallVec::from([hal::pso::Descriptor::Buffer(&buffer.raw, sub_range)]) SmallVec::from([hal::pso::Descriptor::Buffer(buffer_raw, sub_range)])
} }
Br::Sampler(id) => { Br::Sampler(id) => {
match decl.ty { match decl.ty {
@ -2057,7 +2186,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let sampler = used let sampler = used
.samplers .samplers
.use_extend(&*sampler_guard, id, (), ()) .use_extend(&*sampler_guard, id, (), ())
.unwrap(); .map_err(|_| CreateBindGroupError::InvalidSampler(id))?;
// Check the actual sampler to also (not) be a comparison sampler // Check the actual sampler to also (not) be a comparison sampler
if sampler.comparison != comparison { if sampler.comparison != comparison {
@ -2079,7 +2208,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view = used let view = used
.views .views
.use_extend(&*texture_view_guard, id, (), ()) .use_extend(&*texture_view_guard, id, (), ())
.unwrap(); .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?;
let (pub_usage, internal_use) = match decl.ty { let (pub_usage, internal_use) = match decl.ty {
wgt::BindingType::SampledTexture { .. } => ( wgt::BindingType::SampledTexture { .. } => (
wgt::TextureUsage::SAMPLED, wgt::TextureUsage::SAMPLED,
@ -2168,7 +2297,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view = used let view = used
.views .views
.use_extend(&*texture_view_guard, id, (), ()) .use_extend(&*texture_view_guard, id, (), ())
.unwrap(); .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?;
match view.inner { match view.inner {
resource::TextureViewInner::Native { resource::TextureViewInner::Native {
ref raw, ref raw,
@ -3598,7 +3727,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (mut buffer_guard, _) = hub.buffers.write(&mut token); let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard let buffer = buffer_guard
.get_mut(buffer_id) .get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::InvalidBuffer)?; .map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, pub_usage)?; check_buffer_usage(buffer.usage, pub_usage)?;
buffer.map_state = match buffer.map_state { buffer.map_state = match buffer.map_state {
@ -3653,7 +3782,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (buffer_guard, _) = hub.buffers.read(&mut token); let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = buffer_guard let buffer = buffer_guard
.get(buffer_id) .get(buffer_id)
.map_err(|_| resource::BufferAccessError::InvalidBuffer)?; .map_err(|_| resource::BufferAccessError::Invalid)?;
match buffer.map_state { match buffer.map_state {
resource::BufferMapState::Init { ptr, .. } resource::BufferMapState::Init { ptr, .. }
@ -3679,7 +3808,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (mut buffer_guard, _) = hub.buffers.write(&mut token); let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard let buffer = buffer_guard
.get_mut(buffer_id) .get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::InvalidBuffer)?; .map_err(|_| resource::BufferAccessError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value]; let device = &mut device_guard[buffer.device_id.value];
tracing::debug!("Buffer {:?} map state -> Idle", buffer_id); tracing::debug!("Buffer {:?} map state -> Idle", buffer_id);
@ -3719,6 +3848,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}; };
} }
let &(ref buf_raw, _) = buffer
.raw
.as_ref()
.ok_or(resource::BufferAccessError::Destroyed)?;
buffer.life_guard.use_at(device.active_submission_index + 1); buffer.life_guard.use_at(device.active_submission_index + 1);
let region = hal::command::BufferCopy { let region = hal::command::BufferCopy {
src: 0, src: 0,
@ -3733,7 +3867,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}; };
let transition_dst = hal::memory::Barrier::Buffer { let transition_dst = hal::memory::Barrier::Buffer {
states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE, states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE,
target: &buffer.raw, target: buf_raw,
range: hal::buffer::SubRange::WHOLE, range: hal::buffer::SubRange::WHOLE,
families: None, families: None,
}; };
@ -3745,12 +3879,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
iter::once(transition_src).chain(iter::once(transition_dst)), iter::once(transition_src).chain(iter::once(transition_dst)),
); );
if buffer.size > 0 { if buffer.size > 0 {
cmdbuf.copy_buffer(&stage_buffer, &buffer.raw, iter::once(region)); cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region));
} }
} }
device device
.pending_writes .pending_writes
.consume_temp(stage_buffer, stage_memory); .consume_temp(queue::TempResource::Buffer(stage_buffer), stage_memory);
} }
resource::BufferMapState::Idle => { resource::BufferMapState::Idle => {
return Err(resource::BufferAccessError::NotMapped); return Err(resource::BufferAccessError::NotMapped);

View File

@ -14,7 +14,7 @@ use crate::{
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id, id,
resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse}, resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
span, span, FastHashSet,
}; };
use gfx_memory::{Block, Heaps, MemoryBlock}; use gfx_memory::{Block, Heaps, MemoryBlock};
@ -29,17 +29,27 @@ struct StagingData<B: hal::Backend> {
cmdbuf: B::CommandBuffer, cmdbuf: B::CommandBuffer,
} }
#[derive(Debug, Default)] #[derive(Debug)]
pub enum TempResource<B: hal::Backend> {
Buffer(B::Buffer),
Image(B::Image),
}
#[derive(Debug)]
pub(crate) struct PendingWrites<B: hal::Backend> { pub(crate) struct PendingWrites<B: hal::Backend> {
pub command_buffer: Option<B::CommandBuffer>, pub command_buffer: Option<B::CommandBuffer>,
pub temp_buffers: Vec<(B::Buffer, MemoryBlock<B>)>, pub temp_resources: Vec<(TempResource<B>, MemoryBlock<B>)>,
pub dst_buffers: FastHashSet<id::BufferId>,
pub dst_textures: FastHashSet<id::TextureId>,
} }
impl<B: hal::Backend> PendingWrites<B> { impl<B: hal::Backend> PendingWrites<B> {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
command_buffer: None, command_buffer: None,
temp_buffers: Vec::new(), temp_resources: Vec::new(),
dst_buffers: FastHashSet::default(),
dst_textures: FastHashSet::default(),
} }
} }
@ -52,22 +62,38 @@ impl<B: hal::Backend> PendingWrites<B> {
if let Some(raw) = self.command_buffer { if let Some(raw) = self.command_buffer {
cmd_allocator.discard_internal(raw); cmd_allocator.discard_internal(raw);
} }
for (buffer, memory) in self.temp_buffers { for (resource, memory) in self.temp_resources {
mem_allocator.free(device, memory); mem_allocator.free(device, memory);
unsafe { match resource {
TempResource::Buffer(buffer) => unsafe {
device.destroy_buffer(buffer); device.destroy_buffer(buffer);
},
TempResource::Image(image) => unsafe {
device.destroy_image(image);
},
} }
} }
} }
pub fn consume_temp(&mut self, buffer: B::Buffer, memory: MemoryBlock<B>) { pub fn consume_temp(&mut self, resource: TempResource<B>, memory: MemoryBlock<B>) {
self.temp_buffers.push((buffer, memory)); self.temp_resources.push((resource, memory));
} }
fn consume(&mut self, stage: StagingData<B>) { fn consume(&mut self, stage: StagingData<B>) {
self.temp_buffers.push((stage.buffer, stage.memory)); self.temp_resources
.push((TempResource::Buffer(stage.buffer), stage.memory));
self.command_buffer = Some(stage.cmdbuf); self.command_buffer = Some(stage.cmdbuf);
} }
#[must_use]
fn finish(&mut self) -> Option<B::CommandBuffer> {
self.dst_buffers.clear();
self.dst_textures.clear();
self.command_buffer.take().map(|mut cmd_buf| unsafe {
cmd_buf.finish();
cmd_buf
})
}
} }
impl<B: hal::Backend> super::Device<B> { impl<B: hal::Backend> super::Device<B> {
@ -143,8 +169,12 @@ pub enum QueueSubmitError {
Queue(#[from] DeviceError), Queue(#[from] DeviceError),
#[error("command buffer {0:?} is invalid")] #[error("command buffer {0:?} is invalid")]
InvalidCommandBuffer(id::CommandBufferId), InvalidCommandBuffer(id::CommandBufferId),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error("texture {0:?} is destroyed")]
DestroyedTexture(id::TextureId),
#[error(transparent)] #[error(transparent)]
BufferAccess(#[from] BufferAccessError), Unmap(#[from] BufferAccessError),
#[error("swap chain output was dropped before the command buffer got submitted")] #[error("swap chain output was dropped before the command buffer got submitted")]
SwapChainOutputDropped, SwapChainOutputDropped,
#[error("GPU got stuck :(")] #[error("GPU got stuck :(")]
@ -209,6 +239,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers .buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST) .use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?; .map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
if !dst.usage.contains(wgt::BufferUsage::COPY_DST) { if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)?; Err(TransferError::MissingCopyDstUsageFlag)?;
} }
@ -248,10 +282,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
); );
stage stage
.cmdbuf .cmdbuf
.copy_buffer(&stage.buffer, &dst.raw, iter::once(region)); .copy_buffer(&stage.buffer, dst_raw, iter::once(region));
} }
device.pending_writes.consume(stage); device.pending_writes.consume(stage);
device.pending_writes.dst_buffers.insert(buffer_id);
Ok(()) Ok(())
} }
@ -336,6 +371,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureUse::COPY_DST, TextureUse::COPY_DST,
) )
.unwrap(); .unwrap();
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) { if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)? Err(TransferError::MissingCopyDstUsageFlag)?
@ -403,13 +442,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
); );
stage.cmdbuf.copy_buffer_to_image( stage.cmdbuf.copy_buffer_to_image(
&stage.buffer, &stage.buffer,
&dst.raw, dst_raw,
hal::image::Layout::TransferDstOptimal, hal::image::Layout::TransferDstOptimal,
iter::once(region), iter::once(region),
); );
} }
device.pending_writes.consume(stage); device.pending_writes.consume(stage);
device
.pending_writes
.dst_textures
.insert(destination.texture);
Ok(()) Ok(())
} }
@ -429,15 +472,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let device = device_guard let device = device_guard
.get_mut(queue_id) .get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?; .map_err(|_| DeviceError::Invalid)?;
let pending_write_command_buffer = let pending_write_command_buffer = device.pending_writes.finish();
device
.pending_writes
.command_buffer
.take()
.map(|mut comb_raw| unsafe {
comb_raw.finish();
comb_raw
});
device.temp_suspected.clear(); device.temp_suspected.clear();
device.active_submission_index += 1; device.active_submission_index += 1;
let submit_index = device.active_submission_index; let submit_index = device.active_submission_index;
@ -497,6 +532,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// update submission IDs // update submission IDs
for id in cmdbuf.trackers.buffers.used() { for id in cmdbuf.trackers.buffers.used() {
let buffer = &mut buffer_guard[id]; let buffer = &mut buffer_guard[id];
if buffer.raw.is_none() {
return Err(QueueSubmitError::DestroyedBuffer(id.0))?;
}
if !buffer.life_guard.use_at(submit_index) { if !buffer.life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer.map_state { if let BufferMapState::Active { .. } = buffer.map_state {
tracing::warn!("Dropped buffer has a pending mapping."); tracing::warn!("Dropped buffer has a pending mapping.");
@ -511,7 +549,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
} }
for id in cmdbuf.trackers.textures.used() { for id in cmdbuf.trackers.textures.used() {
if !texture_guard[id].life_guard.use_at(submit_index) { let texture = &texture_guard[id];
if texture.raw.is_none() {
return Err(QueueSubmitError::DestroyedTexture(id.0))?;
}
if !texture.life_guard.use_at(submit_index) {
device.temp_suspected.textures.push(id); device.temp_suspected.textures.push(id);
} }
} }
@ -604,7 +646,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
submit_index, submit_index,
fence, fence,
&device.temp_suspected, &device.temp_suspected,
device.pending_writes.temp_buffers.drain(..), device.pending_writes.temp_resources.drain(..),
); );
// finally, return the command buffers to the allocator // finally, return the command buffers to the allocator

View File

@ -35,8 +35,10 @@ pub enum Action<'a> {
backend: wgt::Backend, backend: wgt::Backend,
}, },
CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>), CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
FreeBuffer(id::BufferId),
DestroyBuffer(id::BufferId), DestroyBuffer(id::BufferId),
CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>), CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
FreeTexture(id::TextureId),
DestroyTexture(id::TextureId), DestroyTexture(id::TextureId),
CreateTextureView { CreateTextureView {
id: id::TextureViewId, id: id::TextureViewId,

View File

@ -232,6 +232,8 @@ macro_rules! span {
/// Fast hash map used internally. /// Fast hash map used internally.
type FastHashMap<K, V> = type FastHashMap<K, V> =
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>; std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
/// Fast hash set used internally.
type FastHashSet<K> = std::collections::HashSet<K, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
#[test] #[test]
fn test_default_limits() { fn test_default_limits() {

View File

@ -130,7 +130,9 @@ pub enum BufferAccessError {
#[error(transparent)] #[error(transparent)]
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("buffer is invalid")] #[error("buffer is invalid")]
InvalidBuffer, Invalid,
#[error("buffer is destroyed")]
Destroyed,
#[error("buffer is already mapped")] #[error("buffer is already mapped")]
AlreadyMapped, AlreadyMapped,
#[error(transparent)] #[error(transparent)]
@ -164,10 +166,9 @@ pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
#[derive(Debug)] #[derive(Debug)]
pub struct Buffer<B: hal::Backend> { pub struct Buffer<B: hal::Backend> {
pub(crate) raw: B::Buffer, pub(crate) raw: Option<(B::Buffer, MemoryBlock<B>)>,
pub(crate) device_id: Stored<DeviceId>, pub(crate) device_id: Stored<DeviceId>,
pub(crate) usage: wgt::BufferUsage, pub(crate) usage: wgt::BufferUsage,
pub(crate) memory: MemoryBlock<B>,
pub(crate) size: wgt::BufferAddress, pub(crate) size: wgt::BufferAddress,
pub(crate) full_range: (), pub(crate) full_range: (),
pub(crate) sync_mapped_writes: Option<hal::memory::Segment>, pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
@ -203,7 +204,7 @@ pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>>;
#[derive(Debug)] #[derive(Debug)]
pub struct Texture<B: hal::Backend> { pub struct Texture<B: hal::Backend> {
pub(crate) raw: B::Image, pub(crate) raw: Option<(B::Image, MemoryBlock<B>)>,
pub(crate) device_id: Stored<DeviceId>, pub(crate) device_id: Stored<DeviceId>,
pub(crate) usage: wgt::TextureUsage, pub(crate) usage: wgt::TextureUsage,
pub(crate) aspects: hal::format::Aspects, pub(crate) aspects: hal::format::Aspects,
@ -211,7 +212,6 @@ pub struct Texture<B: hal::Backend> {
pub(crate) kind: hal::image::Kind, pub(crate) kind: hal::image::Kind,
pub(crate) format: wgt::TextureFormat, pub(crate) format: wgt::TextureFormat,
pub(crate) full_range: TextureSelector, pub(crate) full_range: TextureSelector,
pub(crate) memory: MemoryBlock<B>,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }
@ -313,7 +313,7 @@ pub struct TextureView<B: hal::Backend> {
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum CreateTextureViewError { pub enum CreateTextureViewError {
#[error("parent texture is invalid")] #[error("parent texture is invalid or destroyed")]
InvalidTexture, InvalidTexture,
#[error("not enough memory left")] #[error("not enough memory left")]
OutOfMemory, OutOfMemory,
@ -425,3 +425,11 @@ impl<B: hal::Backend> Borrow<()> for Sampler<B> {
&DUMMY_SELECTOR &DUMMY_SELECTOR
} }
} }
#[derive(Clone, Debug, Error)]
pub enum DestroyError {
#[error("resource is invalid")]
Invalid,
#[error("resource is already destroyed")]
AlreadyDestroyed,
}

View File

@ -134,10 +134,11 @@ impl PendingTransition<BufferState> {
buf: &'a resource::Buffer<B>, buf: &'a resource::Buffer<B>,
) -> hal::memory::Barrier<'a, B> { ) -> hal::memory::Barrier<'a, B> {
tracing::trace!("\tbuffer -> {:?}", self); tracing::trace!("\tbuffer -> {:?}", self);
let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed");
hal::memory::Barrier::Buffer { hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(self.usage.start) states: conv::map_buffer_state(self.usage.start)
..conv::map_buffer_state(self.usage.end), ..conv::map_buffer_state(self.usage.end),
target: &buf.raw, target,
range: hal::buffer::SubRange::WHOLE, range: hal::buffer::SubRange::WHOLE,
families: None, families: None,
} }
@ -151,11 +152,12 @@ impl PendingTransition<TextureState> {
tex: &'a resource::Texture<B>, tex: &'a resource::Texture<B>,
) -> hal::memory::Barrier<'a, B> { ) -> hal::memory::Barrier<'a, B> {
tracing::trace!("\ttexture -> {:?}", self); tracing::trace!("\ttexture -> {:?}", self);
let &(ref target, _) = tex.raw.as_ref().expect("Texture is destroyed");
let aspects = tex.aspects; let aspects = tex.aspects;
hal::memory::Barrier::Image { hal::memory::Barrier::Image {
states: conv::map_texture_state(self.usage.start, aspects) states: conv::map_texture_state(self.usage.start, aspects)
..conv::map_texture_state(self.usage.end, aspects), ..conv::map_texture_state(self.usage.end, aspects),
target: &tex.raw, target,
range: hal::image::SubresourceRange { range: hal::image::SubresourceRange {
aspects, aspects,
level_start: self.selector.levels.start, level_start: self.selector.levels.start,