diff --git a/player/src/lib.rs b/player/src/lib.rs index 5d52748bf..5ded59d70 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -139,6 +139,9 @@ impl GlobalPlay for wgc::hub::Global { self.device_maintain_ids::(device).unwrap(); self.device_create_buffer::(device, &desc, id).unwrap(); } + A::FreeBuffer(id) => { + self.buffer_destroy::(id).unwrap(); + } A::DestroyBuffer(id) => { self.buffer_drop::(id, true); } @@ -146,8 +149,11 @@ impl GlobalPlay for wgc::hub::Global { self.device_maintain_ids::(device).unwrap(); self.device_create_texture::(device, &desc, id).unwrap(); } + A::FreeTexture(id) => { + self.texture_destroy::(id).unwrap(); + } A::DestroyTexture(id) => { - self.texture_drop::(id); + self.texture_drop::(id, true); } A::CreateTextureView { id, diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 9f8748b4a..98159fbbd 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -45,7 +45,7 @@ pub enum CreateBindGroupError { Device(#[from] DeviceError), #[error("bind group layout is invalid")] InvalidLayout, - #[error("buffer {0:?} is invalid")] + #[error("buffer {0:?} is invalid or destroyed")] InvalidBuffer(BufferId), #[error("texture view {0:?} is invalid")] InvalidTextureView(TextureViewId), diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 8e4fa4038..fe22efe3a 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -125,6 +125,13 @@ pub enum CreateRenderBundleError { InvalidSampleCount(u32), } +/// Error type returned from `RenderBundle::execute` if a buffer used by the bundle has been destroyed. +#[derive(Clone, Debug, Error)] +pub enum ExecutionError { + #[error("buffer {0:?} is destroyed")] + DestroyedBuffer(id::BufferId), +} + pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor>; //Note: here, `RenderBundle` is just wrapping a raw stream of render commands. 
@@ -151,8 +158,9 @@ impl RenderBundle { /// However the point of this function is to be lighter, since we already had /// a chance to go through the commands in `render_bundle_encoder_finish`. /// - /// Note that the function isn't expected to fail. + /// Note that the function isn't expected to fail, generally. /// All the validation has already been done by this point. + /// The only failure condition is if some of the used buffers are destroyed. pub(crate) unsafe fn execute( &self, cmd_buf: &mut B::CommandBuffer, @@ -163,7 +171,7 @@ impl RenderBundle { bind_group_guard: &Storage, id::BindGroupId>, pipeline_guard: &Storage, id::RenderPipelineId>, buffer_guard: &Storage, id::BufferId>, - ) { + ) -> Result<(), ExecutionError> { use hal::command::CommandBuffer as _; let mut offsets = self.base.dynamic_offsets.as_slice(); @@ -197,9 +205,14 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard.get(buffer_id).unwrap(); + let &(ref buffer, _) = buffer_guard + .get(buffer_id) + .unwrap() + .raw + .as_ref() + .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let view = hal::buffer::IndexBufferView { - buffer: &buffer.raw, + buffer, range: hal::buffer::SubRange { offset, size: size.map(|s| s.get()), @@ -215,12 +228,17 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard.get(buffer_id).unwrap(); + let &(ref buffer, _) = buffer_guard + .get(buffer_id) + .unwrap() + .raw + .as_ref() + .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let range = hal::buffer::SubRange { offset, size: size.map(|s| s.get()), }; - cmd_buf.bind_vertex_buffers(slot, iter::once((&buffer.raw, range))); + cmd_buf.bind_vertex_buffers(slot, iter::once((buffer, range))); } RenderCommand::SetPushConstant { stages, @@ -288,8 +306,13 @@ impl RenderBundle { count: None, indexed: false, } => { - let buffer = buffer_guard.get(buffer_id).unwrap(); - cmd_buf.draw_indirect(&buffer.raw, offset, 1, 0); + let &(ref buffer, _) = buffer_guard + .get(buffer_id) + .unwrap() + 
.raw + .as_ref() + .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + cmd_buf.draw_indirect(buffer, offset, 1, 0); } RenderCommand::MultiDrawIndirect { buffer_id, @@ -297,8 +320,13 @@ impl RenderBundle { count: None, indexed: true, } => { - let buffer = buffer_guard.get(buffer_id).unwrap(); - cmd_buf.draw_indexed_indirect(&buffer.raw, offset, 1, 0); + let &(ref buffer, _) = buffer_guard + .get(buffer_id) + .unwrap() + .raw + .as_ref() + .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + cmd_buf.draw_indexed_indirect(buffer, offset, 1, 0); } RenderCommand::MultiDrawIndirect { .. } | RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(), @@ -312,6 +340,8 @@ impl RenderBundle { | RenderCommand::SetScissor(_) => unreachable!(), } } + + Ok(()) } } diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 2f01cfdde..a6897c342 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -121,7 +121,7 @@ pub enum ComputePassError { BindGroupIndexOutOfRange { index: u8, max: u32 }, #[error("compute pipeline {0:?} is invalid")] InvalidPipeline(id::ComputePipelineId), - #[error("indirect buffer {0:?} is invalid")] + #[error("indirect buffer {0:?} is invalid or destroyed")] InvalidIndirectBuffer(id::BufferId), #[error(transparent)] ResourceUsageConflict(#[from] UsageConflict), @@ -412,6 +412,10 @@ impl Global { .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) .map_err(|_| ComputePassError::InvalidIndirectBuffer(buffer_id))?; check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?; + let &(ref buf_raw, _) = indirect_buffer + .raw + .as_ref() + .ok_or(ComputePassError::InvalidIndirectBuffer(buffer_id))?; state.flush_states( raw, @@ -421,7 +425,7 @@ impl Global { &*texture_guard, )?; unsafe { - raw.dispatch_indirect(&indirect_buffer.raw, offset); + raw.dispatch_indirect(buf_raw, offset); } } ComputeCommand::PushDebugGroup { color, len } => { diff --git 
a/wgpu-core/src/command/draw.rs b/wgpu-core/src/command/draw.rs index 2efcef469..c2dc454b0 100644 --- a/wgpu-core/src/command/draw.rs +++ b/wgpu-core/src/command/draw.rs @@ -65,6 +65,8 @@ pub enum RenderCommandError { IncompatibleReadOnlyDepthStencil, #[error("buffer {0:?} is in error {1:?}")] Buffer(id::BufferId, BufferError), + #[error("buffer {0:?} is destroyed")] + DestroyedBuffer(id::BufferId), #[error(transparent)] MissingBufferUsage(#[from] MissingBufferUsageError), #[error(transparent)] diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index fa09d5ec6..21457956c 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -6,8 +6,8 @@ use crate::{ binding_model::BindError, command::{ bind::{Binder, LayoutChange}, - BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, RenderCommand, - RenderCommandError, + BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, ExecutionError, + RenderCommand, RenderCommandError, }, conv, device::{ @@ -197,7 +197,7 @@ impl OptionalState { #[derive(Debug, Default)] struct IndexState { - bound_buffer_view: Option<(id::BufferId, Range)>, + bound_buffer_view: Option<(id::Valid, Range)>, format: IndexFormat, limit: u32, } @@ -1013,7 +1013,7 @@ impl Global { let pipeline = trackers .render_pipes .use_extend(&*pipeline_guard, pipeline_id, (), ()) - .unwrap(); + .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))?; context .check_compatible(&pipeline.pass_context) @@ -1101,13 +1101,10 @@ impl Global { state.index.update_limit(); if let Some((buffer_id, ref range)) = state.index.bound_buffer_view { - let buffer = trackers - .buffers - .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX) - .unwrap(); + let &(ref buffer, _) = buffer_guard[buffer_id].raw.as_ref().unwrap(); let view = hal::buffer::IndexBufferView { - buffer: &buffer.raw, + buffer, range: hal::buffer::SubRange { offset: range.start, size: Some(range.end - range.start), 
@@ -1142,18 +1139,22 @@ impl Global { let buffer = trackers .buffers .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX) - .unwrap(); + .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?; check_buffer_usage(buffer.usage, BufferUsage::INDEX)?; + let &(ref buf_raw, _) = buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?; let end = match size { Some(s) => offset + s.get(), None => buffer.size, }; - state.index.bound_buffer_view = Some((buffer_id, offset..end)); + state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end)); state.index.update_limit(); let view = hal::buffer::IndexBufferView { - buffer: &buffer.raw, + buffer: buf_raw, range: hal::buffer::SubRange { offset, size: Some(end - offset), @@ -1174,8 +1175,13 @@ impl Global { let buffer = trackers .buffers .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX) - .unwrap(); + .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?; check_buffer_usage(buffer.usage, BufferUsage::VERTEX)?; + let &(ref buf_raw, _) = buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?; + let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len()); state .vertex @@ -1191,7 +1197,7 @@ impl Global { size: size.map(|s| s.get()), }; unsafe { - raw.bind_vertex_buffers(slot, iter::once((&buffer.raw, range))); + raw.bind_vertex_buffers(slot, iter::once((buf_raw, range))); } state.vertex.update_limits(); } @@ -1374,33 +1380,37 @@ impl Global { check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT)?; } - let buffer = trackers + let indirect_buffer = trackers .buffers .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) - .unwrap(); - check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?; + .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?; + check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?; + let &(ref indirect_raw, _) = indirect_buffer + .raw + .as_ref() + 
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?; let actual_count = count.map_or(1, |c| c.get()); let begin_offset = offset; let end_offset = offset + stride * actual_count as u64; - if end_offset > buffer.size { + if end_offset > indirect_buffer.size { return Err(RenderPassError::IndirectBufferOverrun { offset, count, begin_offset, end_offset, - buffer_size: buffer.size, + buffer_size: indirect_buffer.size, }); } match indexed { false => unsafe { - raw.draw_indirect(&buffer.raw, offset, actual_count, stride as u32); + raw.draw_indirect(indirect_raw, offset, actual_count, stride as u32); }, true => unsafe { raw.draw_indexed_indirect( - &buffer.raw, + indirect_raw, offset, actual_count, stride as u32, @@ -1428,26 +1438,35 @@ impl Global { wgt::Features::MULTI_DRAW_INDIRECT_COUNT, )?; - let buffer = trackers + let indirect_buffer = trackers .buffers .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) - .unwrap(); - check_buffer_usage(buffer.usage, BufferUsage::INDIRECT)?; + .map_err(|e| RenderCommandError::Buffer(buffer_id, e))?; + check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?; + let &(ref indirect_raw, _) = indirect_buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id))?; + let count_buffer = trackers .buffers .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT) - .unwrap(); + .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e))?; check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT)?; + let &(ref count_raw, _) = count_buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id))?; let begin_offset = offset; let end_offset = offset + stride * max_count as u64; - if end_offset > buffer.size { + if end_offset > indirect_buffer.size { return Err(RenderPassError::IndirectBufferOverrun { offset, count: None, begin_offset, end_offset, - buffer_size: buffer.size, + buffer_size: indirect_buffer.size, }); } @@ -1464,9 +1483,9 @@ impl Global { match 
indexed { false => unsafe { raw.draw_indirect_count( - &buffer.raw, + indirect_raw, offset, - &count_buffer.raw, + count_raw, count_buffer_offset, max_count, stride as u32, @@ -1474,9 +1493,9 @@ impl Global { }, true => unsafe { raw.draw_indexed_indirect_count( - &buffer.raw, + indirect_raw, offset, - &count_buffer.raw, + count_raw, count_buffer_offset, max_count, stride as u32, @@ -1526,6 +1545,11 @@ impl Global { &*buffer_guard, ) } + .map_err(|e| match e { + ExecutionError::DestroyedBuffer(id) => { + RenderCommandError::DestroyedBuffer(id) + } + })?; trackers.merge_extend(&bundle.used)?; state.reset_bundle(); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 5561cc13a..a77cda52d 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -35,9 +35,9 @@ pub enum CopySide { /// Error encountered while attempting a data transfer. #[derive(Clone, Debug, Error)] pub enum TransferError { - #[error("buffer {0:?} is invalid")] + #[error("buffer {0:?} is invalid or destroyed")] InvalidBuffer(BufferId), - #[error("texture {0:?} is invalid")] + #[error("texture {0:?} is invalid or destroyed")] InvalidTexture(TextureId), #[error("Source and destination cannot be the same buffer")] SameSourceDestinationBuffer, @@ -330,6 +330,10 @@ impl Global { .buffers .use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC) .map_err(TransferError::InvalidBuffer)?; + let &(ref src_raw, _) = src_buffer + .raw + .as_ref() + .ok_or(TransferError::InvalidBuffer(source))?; if !src_buffer.usage.contains(BufferUsage::COPY_SRC) { Err(TransferError::MissingCopySrcUsageFlag)? 
} @@ -340,6 +344,10 @@ impl Global { .buffers .use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST) .map_err(TransferError::InvalidBuffer)?; + let &(ref dst_raw, _) = dst_buffer + .raw + .as_ref() + .ok_or(TransferError::InvalidBuffer(destination))?; if !dst_buffer.usage.contains(BufferUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)? } @@ -391,7 +399,7 @@ impl Global { hal::memory::Dependencies::empty(), barriers, ); - cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region)); + cmb_raw.copy_buffer(src_raw, dst_raw, iter::once(region)); } Ok(()) } @@ -433,6 +441,10 @@ impl Global { .buffers .use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC) .map_err(TransferError::InvalidBuffer)?; + let &(ref src_raw, _) = src_buffer + .raw + .as_ref() + .ok_or(TransferError::InvalidBuffer(source.buffer))?; if !src_buffer.usage.contains(BufferUsage::COPY_SRC) { Err(TransferError::MissingCopySrcUsageFlag)? } @@ -448,6 +460,10 @@ impl Global { TextureUse::COPY_DST, ) .unwrap(); + let &(ref dst_raw, _) = dst_texture + .raw + .as_ref() + .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.usage.contains(TextureUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)? } @@ -505,8 +521,8 @@ impl Global { src_barriers.chain(dst_barriers), ); cmb_raw.copy_buffer_to_image( - &src_buffer.raw, - &dst_texture.raw, + src_raw, + dst_raw, hal::image::Layout::TransferDstOptimal, iter::once(region), ); @@ -556,6 +572,10 @@ impl Global { TextureUse::COPY_SRC, ) .unwrap(); + let &(ref src_raw, _) = src_texture + .raw + .as_ref() + .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.usage.contains(TextureUsage::COPY_SRC) { Err(TransferError::MissingCopySrcUsageFlag)? 
} @@ -566,6 +586,10 @@ impl Global { .buffers .use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST) .map_err(TransferError::InvalidBuffer)?; + let &(ref dst_raw, _) = dst_buffer + .raw + .as_ref() + .ok_or(TransferError::InvalidBuffer(destination.buffer))?; if !dst_buffer.usage.contains(BufferUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)? } @@ -623,9 +647,9 @@ impl Global { src_barriers.chain(dst_barrier), ); cmb_raw.copy_image_to_buffer( - &src_texture.raw, + src_raw, hal::image::Layout::TransferSrcOptimal, - &dst_buffer.raw, + dst_raw, iter::once(region), ); } @@ -683,6 +707,10 @@ impl Global { TextureUse::COPY_SRC, ) .unwrap(); + let &(ref src_raw, _) = src_texture + .raw + .as_ref() + .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.usage.contains(TextureUsage::COPY_SRC) { Err(TransferError::MissingCopySrcUsageFlag)? } @@ -698,6 +726,10 @@ impl Global { TextureUse::COPY_DST, ) .unwrap(); + let &(ref dst_raw, _) = dst_texture + .raw + .as_ref() + .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.usage.contains(TextureUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)? 
} @@ -733,9 +765,9 @@ impl Global { barriers, ); cmb_raw.copy_image( - &src_texture.raw, + src_raw, hal::image::Layout::TransferSrcOptimal, - &dst_texture.raw, + dst_raw, hal::image::Layout::TransferDstOptimal, iter::once(region), ); diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index d6f4b02f7..0d0fbc5a8 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -5,7 +5,7 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - device::DeviceError, + device::{queue::TempResource, DeviceError}, hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token}, id, resource, track::TrackerSet, @@ -241,10 +241,16 @@ impl LifetimeTracker { index: SubmissionIndex, fence: B::Fence, new_suspects: &SuspectedResources, - temp_buffers: impl Iterator)>, + temp_resources: impl Iterator, MemoryBlock)>, ) { let mut last_resources = NonReferencedResources::new(); - last_resources.buffers.extend(temp_buffers); + for (res, memory) in temp_resources { + match res { + TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)), + TempResource::Image(raw) => last_resources.images.push((raw, memory)), + } + } + self.suspected_resources.buffers.extend( self.future_suspected_buffers .drain(..) 
@@ -256,6 +262,7 @@ impl LifetimeTracker { .map(|stored| stored.value), ); self.suspected_resources.extend(new_suspects); + self.active.alloc().init(ActiveSubmission { index, fence, @@ -336,6 +343,23 @@ impl LifetimeTracker { descriptor_allocator_mutex.lock().cleanup(device); } } + + pub fn schedule_resource_destruction( + &mut self, + temp_resource: TempResource, + memory: MemoryBlock, + last_submit_index: SubmissionIndex, + ) { + let resources = self + .active + .iter_mut() + .find(|a| a.index == last_submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources); + match temp_resource { + TempResource::Buffer(raw) => resources.buffers.push((raw, memory)), + TempResource::Image(raw) => resources.images.push((raw, memory)), + } + } } impl LifetimeTracker { @@ -432,7 +456,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .images - .push((res.raw, res.memory)); + .extend(res.raw); } } } @@ -477,7 +501,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .buffers - .push((res.raw, res.memory)); + .extend(res.raw); } } } @@ -691,7 +715,7 @@ impl LifetimeTracker { .buffers .unregister_locked(buffer_id.0, &mut *buffer_guard) { - self.free_resources.buffers.push((buf.raw, buf.memory)); + self.free_resources.buffers.extend(buf.raw); } } else { let mapping = match std::mem::replace( diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 963f0b817..7589f6676 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -164,12 +164,16 @@ fn map_buffer( sub_range: hal::buffer::SubRange, kind: HostMap, ) -> Result, resource::BufferAccessError> { + let &mut (_, ref mut memory) = buffer + .raw + .as_mut() + .ok_or(resource::BufferAccessError::Destroyed)?; let (ptr, segment, needs_sync) = { let segment = hal::memory::Segment { offset: sub_range.offset, size: sub_range.size, }; - let 
mapped = buffer.memory.map(raw, segment)?; + let mapped = memory.map(raw, segment)?; let mr = mapped.range(); let segment = hal::memory::Segment { offset: mr.start, @@ -180,7 +184,7 @@ fn map_buffer( buffer.sync_mapped_writes = match kind { HostMap::Read if needs_sync => unsafe { - raw.invalidate_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment))) + raw.invalidate_mapped_memory_ranges(iter::once((memory.memory(), segment))) .or(Err(DeviceError::OutOfMemory))?; None }, @@ -194,9 +198,13 @@ fn unmap_buffer( raw: &B::Device, buffer: &mut resource::Buffer, ) -> Result<(), resource::BufferAccessError> { + let &(_, ref memory) = buffer + .raw + .as_ref() + .ok_or(resource::BufferAccessError::Destroyed)?; if let Some(segment) = buffer.sync_mapped_writes.take() { unsafe { - raw.flush_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment))) + raw.flush_mapped_memory_ranges(iter::once((memory.memory(), segment))) .or(Err(DeviceError::OutOfMemory))?; } } @@ -481,13 +489,12 @@ impl Device { .map_err(DeviceError::from_bind)?; Ok(resource::Buffer { - raw: buffer, + raw: Some((buffer, memory)), device_id: Stored { value: id::Valid(self_id), ref_count: self.life_guard.add_ref(), }, usage: desc.usage, - memory, size: desc.size, full_range: (), sync_mapped_writes: None, @@ -579,7 +586,7 @@ impl Device { .map_err(DeviceError::from_bind)?; Ok(resource::Texture { - raw: image, + raw: Some((image, memory)), device_id: Stored { value: id::Valid(self_id), ref_count: self.life_guard.add_ref(), @@ -593,7 +600,6 @@ impl Device { levels: 0..desc.mip_level_count as hal::image::Level, layers: 0..kind.num_layers(), }, - memory, life_guard: LifeGuard::new(), }) } @@ -872,16 +878,20 @@ impl Device { } pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer) { - unsafe { - self.mem_allocator.lock().free(&self.raw, buffer.memory); - self.raw.destroy_buffer(buffer.raw); + if let Some((raw, memory)) = buffer.raw { + unsafe { + self.mem_allocator.lock().free(&self.raw, 
memory); + self.raw.destroy_buffer(raw); + } } } pub(crate) fn destroy_texture(&self, texture: resource::Texture) { - unsafe { - self.mem_allocator.lock().free(&self.raw, texture.memory); - self.raw.destroy_image(texture.raw); + if let Some((raw, memory)) = texture.raw { + unsafe { + self.mem_allocator.lock().free(&self.raw, memory); + self.raw.destroy_image(raw); + } } } @@ -1032,7 +1042,7 @@ impl Global { resource::BufferUse::MAP_WRITE } else { // buffer needs staging area for initialization only - let mut stage = device.create_buffer( + let stage = device.create_buffer( device_id, &wgt::BufferDescriptor { label: Some(Cow::Borrowed("")), @@ -1042,15 +1052,15 @@ impl Global { }, gfx_memory::Kind::Linear, )?; - let mapped = stage - .memory + let (stage_buffer, mut stage_memory) = stage.raw.unwrap(); + let mapped = stage_memory .map(&device.raw, hal::memory::Segment::ALL) .map_err(resource::BufferAccessError::from)?; buffer.map_state = resource::BufferMapState::Init { ptr: mapped.ptr(), needs_flush: !mapped.is_coherent(), - stage_buffer: stage.raw, - stage_memory: stage.memory, + stage_buffer, + stage_memory, }; resource::BufferUse::COPY_DST }; @@ -1118,7 +1128,7 @@ impl Global { .map_err(|_| DeviceError::Invalid)?; let mut buffer = buffer_guard .get_mut(buffer_id) - .map_err(|_| resource::BufferAccessError::InvalidBuffer)?; + .map_err(|_| resource::BufferAccessError::Invalid)?; check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?; //assert!(buffer isn't used by the GPU); @@ -1172,7 +1182,7 @@ impl Global { .map_err(|_| DeviceError::Invalid)?; let mut buffer = buffer_guard .get_mut(buffer_id) - .map_err(|_| resource::BufferAccessError::InvalidBuffer)?; + .map_err(|_| resource::BufferAccessError::Invalid)?; check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?; //assert!(buffer isn't used by the GPU); @@ -1201,7 +1211,53 @@ impl Global { .register_error(id_in, &mut Token::root()) } - pub fn buffer_drop(&self, buffer_id: id::BufferId, now: bool) { + 
pub fn buffer_destroy( + &self, + buffer_id: id::BufferId, + ) -> Result<(), resource::DestroyError> { + span!(_guard, INFO, "Buffer::destroy"); + + let hub = B::hub(self); + let mut token = Token::root(); + + //TODO: lock pending writes separately, keep the device read-only + let (mut device_guard, mut token) = hub.devices.write(&mut token); + + tracing::info!("Buffer {:?} is destroyed", buffer_id); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let buffer = buffer_guard + .get_mut(buffer_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &mut device_guard[buffer.device_id.value]; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeBuffer(buffer_id)); + } + + let (raw, memory) = buffer + .raw + .take() + .ok_or(resource::DestroyError::AlreadyDestroyed)?; + let temp = queue::TempResource::Buffer(raw); + + if device.pending_writes.dst_buffers.contains(&buffer_id) { + device.pending_writes.temp_resources.push((temp, memory)); + } else { + let last_submit_index = buffer.life_guard.submission_index.load(Ordering::Acquire); + drop(buffer_guard); + device.lock_life(&mut token).schedule_resource_destruction( + temp, + memory, + last_submit_index, + ); + } + + Ok(()) + } + + pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { span!(_guard, INFO, "Buffer::drop"); let hub = B::hub(self); @@ -1226,25 +1282,26 @@ impl Global { let (device_guard, mut token) = hub.devices.read(&mut token); let device = &device_guard[device_id]; - if now { + let mut life_lock = device_guard[device_id].lock_life(&mut token); + + if device.pending_writes.dst_buffers.contains(&buffer_id) { + life_lock.future_suspected_buffers.push(Stored { + value: id::Valid(buffer_id), + ref_count, + }); + } else { drop(ref_count); - device - .lock_life(&mut token) + life_lock .suspected_resources .buffers .push(id::Valid(buffer_id)); + } + + if wait { match device.wait_for_submit(last_submit_index, &mut 
token) { Ok(()) => (), Err(e) => tracing::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), } - } else { - device - .lock_life(&mut token) - .future_suspected_buffers - .push(Stored { - value: id::Valid(buffer_id), - ref_count, - }); } } @@ -1301,19 +1358,67 @@ impl Global { .register_error(id_in, &mut Token::root()) } - pub fn texture_drop(&self, texture_id: id::TextureId) { + pub fn texture_destroy( + &self, + texture_id: id::TextureId, + ) -> Result<(), resource::DestroyError> { + span!(_guard, INFO, "Texture::destroy"); + + let hub = B::hub(self); + let mut token = Token::root(); + + //TODO: lock pending writes separately, keep the device read-only + let (mut device_guard, mut token) = hub.devices.write(&mut token); + + tracing::info!("Texture {:?} is destroyed", texture_id); + let (mut texture_guard, _) = hub.textures.write(&mut token); + let texture = texture_guard + .get_mut(texture_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &mut device_guard[texture.device_id.value]; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeTexture(texture_id)); + } + + let (raw, memory) = texture + .raw + .take() + .ok_or(resource::DestroyError::AlreadyDestroyed)?; + let temp = queue::TempResource::Image(raw); + + if device.pending_writes.dst_textures.contains(&texture_id) { + device.pending_writes.temp_resources.push((temp, memory)); + } else { + let last_submit_index = texture.life_guard.submission_index.load(Ordering::Acquire); + drop(texture_guard); + device.lock_life(&mut token).schedule_resource_destruction( + temp, + memory, + last_submit_index, + ); + } + + Ok(()) + } + + pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { + span!(_guard, INFO, "Texture::drop"); let hub = B::hub(self); let mut token = Token::root(); - let (ref_count, device_id) = { + let (ref_count, last_submit_index, device_id) = { let (mut texture_guard, _) = hub.textures.write(&mut token); 
match texture_guard.get_mut(texture_id) { - Ok(texture) => ( - texture.life_guard.ref_count.take().unwrap(), - texture.device_id.value, - ), + Ok(texture) => { + let ref_count = texture.life_guard.ref_count.take().unwrap(); + let last_submit_index = + texture.life_guard.submission_index.load(Ordering::Acquire); + (ref_count, last_submit_index, texture.device_id.value) + } Err(InvalidId) => { hub.textures .unregister_locked(texture_id, &mut *texture_guard); @@ -1323,13 +1428,28 @@ impl Global { }; let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .future_suspected_textures - .push(Stored { + let device = &device_guard[device_id]; + let mut life_lock = device_guard[device_id].lock_life(&mut token); + + if device.pending_writes.dst_textures.contains(&texture_id) { + life_lock.future_suspected_textures.push(Stored { value: id::Valid(texture_id), ref_count, }); + } else { + drop(ref_count); + life_lock + .suspected_resources + .textures + .push(id::Valid(texture_id)); + } + + if wait { + match device.wait_for_submit(last_submit_index, &mut token) { + Ok(()) => (), + Err(e) => tracing::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + } + } } pub fn texture_create_view( @@ -1348,6 +1468,10 @@ impl Global { let texture = texture_guard .get(texture_id) .map_err(|_| resource::CreateTextureViewError::InvalidTexture)?; + let &(ref texture_raw, _) = texture + .raw + .as_ref() + .ok_or(resource::CreateTextureViewError::InvalidTexture)?; let device = &device_guard[texture.device_id.value]; let view_kind = match desc.dimension { @@ -1404,7 +1528,7 @@ impl Global { device .raw .create_image_view( - &texture.raw, + texture_raw, view_kind, conv::map_texture_format(format, device.private_features), hal::format::Swizzle::NO, @@ -2006,8 +2130,13 @@ impl Global { let buffer = used .buffers .use_extend(&*buffer_guard, bb.buffer_id, (), internal_use) - .unwrap(); + .map_err(|_| 
CreateBindGroupError::InvalidBuffer(bb.buffer_id))?; check_buffer_usage(buffer.usage, pub_usage)?; + let &(ref buffer_raw, _) = buffer + .raw + .as_ref() + .ok_or(CreateBindGroupError::InvalidBuffer(bb.buffer_id))?; + let (bind_size, bind_end) = match bb.size { Some(size) => { let end = bb.offset + size.get(); @@ -2049,7 +2178,7 @@ impl Global { offset: bb.offset, size: Some(bind_size), }; - SmallVec::from([hal::pso::Descriptor::Buffer(&buffer.raw, sub_range)]) + SmallVec::from([hal::pso::Descriptor::Buffer(buffer_raw, sub_range)]) } Br::Sampler(id) => { match decl.ty { @@ -2057,7 +2186,7 @@ impl Global { let sampler = used .samplers .use_extend(&*sampler_guard, id, (), ()) - .unwrap(); + .map_err(|_| CreateBindGroupError::InvalidSampler(id))?; // Check the actual sampler to also (not) be a comparison sampler if sampler.comparison != comparison { @@ -2079,7 +2208,7 @@ impl Global { let view = used .views .use_extend(&*texture_view_guard, id, (), ()) - .unwrap(); + .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?; let (pub_usage, internal_use) = match decl.ty { wgt::BindingType::SampledTexture { .. 
} => ( wgt::TextureUsage::SAMPLED, @@ -2168,7 +2297,7 @@ impl Global { let view = used .views .use_extend(&*texture_view_guard, id, (), ()) - .unwrap(); + .map_err(|_| CreateBindGroupError::InvalidTextureView(id))?; match view.inner { resource::TextureViewInner::Native { ref raw, @@ -3598,7 +3727,7 @@ impl Global { let (mut buffer_guard, _) = hub.buffers.write(&mut token); let buffer = buffer_guard .get_mut(buffer_id) - .map_err(|_| resource::BufferAccessError::InvalidBuffer)?; + .map_err(|_| resource::BufferAccessError::Invalid)?; check_buffer_usage(buffer.usage, pub_usage)?; buffer.map_state = match buffer.map_state { @@ -3653,7 +3782,7 @@ impl Global { let (buffer_guard, _) = hub.buffers.read(&mut token); let buffer = buffer_guard .get(buffer_id) - .map_err(|_| resource::BufferAccessError::InvalidBuffer)?; + .map_err(|_| resource::BufferAccessError::Invalid)?; match buffer.map_state { resource::BufferMapState::Init { ptr, .. } @@ -3679,7 +3808,7 @@ impl Global { let (mut buffer_guard, _) = hub.buffers.write(&mut token); let buffer = buffer_guard .get_mut(buffer_id) - .map_err(|_| resource::BufferAccessError::InvalidBuffer)?; + .map_err(|_| resource::BufferAccessError::Invalid)?; let device = &mut device_guard[buffer.device_id.value]; tracing::debug!("Buffer {:?} map state -> Idle", buffer_id); @@ -3719,6 +3848,11 @@ impl Global { }; } + let &(ref buf_raw, _) = buffer + .raw + .as_ref() + .ok_or(resource::BufferAccessError::Destroyed)?; + buffer.life_guard.use_at(device.active_submission_index + 1); let region = hal::command::BufferCopy { src: 0, @@ -3733,7 +3867,7 @@ impl Global { }; let transition_dst = hal::memory::Barrier::Buffer { states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE, - target: &buffer.raw, + target: buf_raw, range: hal::buffer::SubRange::WHOLE, families: None, }; @@ -3745,12 +3879,12 @@ impl Global { iter::once(transition_src).chain(iter::once(transition_dst)), ); if buffer.size > 0 { - cmdbuf.copy_buffer(&stage_buffer, 
&buffer.raw, iter::once(region)); + cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region)); } } device .pending_writes - .consume_temp(stage_buffer, stage_memory); + .consume_temp(queue::TempResource::Buffer(stage_buffer), stage_memory); } resource::BufferMapState::Idle => { return Err(resource::BufferAccessError::NotMapped); diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 7056e990d..ac7b3429b 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -14,7 +14,7 @@ use crate::{ hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, id, resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse}, - span, + span, FastHashSet, }; use gfx_memory::{Block, Heaps, MemoryBlock}; @@ -29,17 +29,27 @@ struct StagingData { cmdbuf: B::CommandBuffer, } -#[derive(Debug, Default)] +#[derive(Debug)] +pub enum TempResource { + Buffer(B::Buffer), + Image(B::Image), +} + +#[derive(Debug)] pub(crate) struct PendingWrites { pub command_buffer: Option, - pub temp_buffers: Vec<(B::Buffer, MemoryBlock)>, + pub temp_resources: Vec<(TempResource, MemoryBlock)>, + pub dst_buffers: FastHashSet, + pub dst_textures: FastHashSet, } impl PendingWrites { pub fn new() -> Self { Self { command_buffer: None, - temp_buffers: Vec::new(), + temp_resources: Vec::new(), + dst_buffers: FastHashSet::default(), + dst_textures: FastHashSet::default(), } } @@ -52,22 +62,38 @@ impl PendingWrites { if let Some(raw) = self.command_buffer { cmd_allocator.discard_internal(raw); } - for (buffer, memory) in self.temp_buffers { + for (resource, memory) in self.temp_resources { mem_allocator.free(device, memory); - unsafe { - device.destroy_buffer(buffer); + match resource { + TempResource::Buffer(buffer) => unsafe { + device.destroy_buffer(buffer); + }, + TempResource::Image(image) => unsafe { + device.destroy_image(image); + }, } } } - pub fn consume_temp(&mut self, buffer: B::Buffer, memory: MemoryBlock) { - 
self.temp_buffers.push((buffer, memory)); + pub fn consume_temp(&mut self, resource: TempResource, memory: MemoryBlock) { + self.temp_resources.push((resource, memory)); } fn consume(&mut self, stage: StagingData) { - self.temp_buffers.push((stage.buffer, stage.memory)); + self.temp_resources + .push((TempResource::Buffer(stage.buffer), stage.memory)); self.command_buffer = Some(stage.cmdbuf); } + + #[must_use] + fn finish(&mut self) -> Option { + self.dst_buffers.clear(); + self.dst_textures.clear(); + self.command_buffer.take().map(|mut cmd_buf| unsafe { + cmd_buf.finish(); + cmd_buf + }) + } } impl super::Device { @@ -143,8 +169,12 @@ pub enum QueueSubmitError { Queue(#[from] DeviceError), #[error("command buffer {0:?} is invalid")] InvalidCommandBuffer(id::CommandBufferId), + #[error("buffer {0:?} is destroyed")] + DestroyedBuffer(id::BufferId), + #[error("texture {0:?} is destroyed")] + DestroyedTexture(id::TextureId), #[error(transparent)] - BufferAccess(#[from] BufferAccessError), + Unmap(#[from] BufferAccessError), #[error("swap chain output was dropped before the command buffer got submitted")] SwapChainOutputDropped, #[error("GPU got stuck :(")] @@ -209,6 +239,10 @@ impl Global { .buffers .use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST) .map_err(TransferError::InvalidBuffer)?; + let &(ref dst_raw, _) = dst + .raw + .as_ref() + .ok_or(TransferError::InvalidBuffer(buffer_id))?; if !dst.usage.contains(wgt::BufferUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)?; } @@ -248,10 +282,11 @@ impl Global { ); stage .cmdbuf - .copy_buffer(&stage.buffer, &dst.raw, iter::once(region)); + .copy_buffer(&stage.buffer, dst_raw, iter::once(region)); } device.pending_writes.consume(stage); + device.pending_writes.dst_buffers.insert(buffer_id); Ok(()) } @@ -336,6 +371,10 @@ impl Global { TextureUse::COPY_DST, ) .unwrap(); + let &(ref dst_raw, _) = dst + .raw + .as_ref() + .ok_or(TransferError::InvalidTexture(destination.texture))?; if 
!dst.usage.contains(wgt::TextureUsage::COPY_DST) { Err(TransferError::MissingCopyDstUsageFlag)? @@ -403,13 +442,17 @@ impl Global { ); stage.cmdbuf.copy_buffer_to_image( &stage.buffer, - &dst.raw, + dst_raw, hal::image::Layout::TransferDstOptimal, iter::once(region), ); } device.pending_writes.consume(stage); + device + .pending_writes + .dst_textures + .insert(destination.texture); Ok(()) } @@ -429,15 +472,7 @@ impl Global { let device = device_guard .get_mut(queue_id) .map_err(|_| DeviceError::Invalid)?; - let pending_write_command_buffer = - device - .pending_writes - .command_buffer - .take() - .map(|mut comb_raw| unsafe { - comb_raw.finish(); - comb_raw - }); + let pending_write_command_buffer = device.pending_writes.finish(); device.temp_suspected.clear(); device.active_submission_index += 1; let submit_index = device.active_submission_index; @@ -497,6 +532,9 @@ impl Global { // update submission IDs for id in cmdbuf.trackers.buffers.used() { let buffer = &mut buffer_guard[id]; + if buffer.raw.is_none() { + return Err(QueueSubmitError::DestroyedBuffer(id.0))?; + } if !buffer.life_guard.use_at(submit_index) { if let BufferMapState::Active { .. 
} = buffer.map_state { tracing::warn!("Dropped buffer has a pending mapping."); @@ -511,7 +549,11 @@ impl Global { } } for id in cmdbuf.trackers.textures.used() { - if !texture_guard[id].life_guard.use_at(submit_index) { + let texture = &texture_guard[id]; + if texture.raw.is_none() { + return Err(QueueSubmitError::DestroyedTexture(id.0))?; + } + if !texture.life_guard.use_at(submit_index) { device.temp_suspected.textures.push(id); } } @@ -604,7 +646,7 @@ impl Global { submit_index, fence, &device.temp_suspected, - device.pending_writes.temp_buffers.drain(..), + device.pending_writes.temp_resources.drain(..), ); // finally, return the command buffers to the allocator diff --git a/wgpu-core/src/device/trace.rs b/wgpu-core/src/device/trace.rs index d41e8c9e6..d64174702 100644 --- a/wgpu-core/src/device/trace.rs +++ b/wgpu-core/src/device/trace.rs @@ -35,8 +35,10 @@ pub enum Action<'a> { backend: wgt::Backend, }, CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>), + FreeBuffer(id::BufferId), DestroyBuffer(id::BufferId), CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>), + FreeTexture(id::TextureId), DestroyTexture(id::TextureId), CreateTextureView { id: id::TextureViewId, diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index 63df2334a..0ec245a31 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -232,6 +232,8 @@ macro_rules! span { /// Fast hash map used internally. type FastHashMap = std::collections::HashMap>; +/// Fast hash set used internally. 
+type FastHashSet = std::collections::HashSet>; #[test] fn test_default_limits() { diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index f88fa2211..5f4e2427e 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -130,7 +130,9 @@ pub enum BufferAccessError { #[error(transparent)] Device(#[from] DeviceError), #[error("buffer is invalid")] - InvalidBuffer, + Invalid, + #[error("buffer is destroyed")] + Destroyed, #[error("buffer is already mapped")] AlreadyMapped, #[error(transparent)] @@ -164,10 +166,9 @@ pub type BufferDescriptor<'a> = wgt::BufferDescriptor>; #[derive(Debug)] pub struct Buffer { - pub(crate) raw: B::Buffer, + pub(crate) raw: Option<(B::Buffer, MemoryBlock)>, pub(crate) device_id: Stored, pub(crate) usage: wgt::BufferUsage, - pub(crate) memory: MemoryBlock, pub(crate) size: wgt::BufferAddress, pub(crate) full_range: (), pub(crate) sync_mapped_writes: Option, @@ -203,7 +204,7 @@ pub type TextureDescriptor<'a> = wgt::TextureDescriptor>; #[derive(Debug)] pub struct Texture { - pub(crate) raw: B::Image, + pub(crate) raw: Option<(B::Image, MemoryBlock)>, pub(crate) device_id: Stored, pub(crate) usage: wgt::TextureUsage, pub(crate) aspects: hal::format::Aspects, @@ -211,7 +212,6 @@ pub struct Texture { pub(crate) kind: hal::image::Kind, pub(crate) format: wgt::TextureFormat, pub(crate) full_range: TextureSelector, - pub(crate) memory: MemoryBlock, pub(crate) life_guard: LifeGuard, } @@ -313,7 +313,7 @@ pub struct TextureView { #[derive(Clone, Debug, Error)] pub enum CreateTextureViewError { - #[error("parent texture is invalid")] + #[error("parent texture is invalid or destroyed")] InvalidTexture, #[error("not enough memory left")] OutOfMemory, @@ -425,3 +425,11 @@ impl Borrow<()> for Sampler { &DUMMY_SELECTOR } } + +#[derive(Clone, Debug, Error)] +pub enum DestroyError { + #[error("resource is invalid")] + Invalid, + #[error("resource is already destroyed")] + AlreadyDestroyed, +} diff --git 
a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 5c2db99e1..e12f28aae 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -134,10 +134,11 @@ impl PendingTransition { buf: &'a resource::Buffer, ) -> hal::memory::Barrier<'a, B> { tracing::trace!("\tbuffer -> {:?}", self); + let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed"); hal::memory::Barrier::Buffer { states: conv::map_buffer_state(self.usage.start) ..conv::map_buffer_state(self.usage.end), - target: &buf.raw, + target, range: hal::buffer::SubRange::WHOLE, families: None, } @@ -151,11 +152,12 @@ impl PendingTransition { tex: &'a resource::Texture, ) -> hal::memory::Barrier<'a, B> { tracing::trace!("\ttexture -> {:?}", self); + let &(ref target, _) = tex.raw.as_ref().expect("Texture is destroyed"); let aspects = tex.aspects; hal::memory::Barrier::Image { states: conv::map_texture_state(self.usage.start, aspects) ..conv::map_texture_state(self.usage.end, aspects), - target: &tex.raw, + target, range: hal::image::SubresourceRange { aspects, level_start: self.selector.levels.start,