diff --git a/CHANGELOG.md b/CHANGELOG.md
index 89c9c4e09..95d4fde68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -63,6 +63,7 @@ the same every time it is rendered, we now warn if it is missing.
 ### Bug Fixes
 
 #### General
+- Free `StagingBuffers` even when an error occurs in the operation that consumes them. By @jimblandy in [#2961](https://github.com/gfx-rs/wgpu/pull/2961)
 - Improve the validation and error reporting of buffer mappings by @nical in [#2848](https://github.com/gfx-rs/wgpu/pull/2848)
 - Fix compilation errors when using wgpu-core in isolation while targetting `wasm32-unknown-unknown` by @Seamooo in [#2922](https://github.com/gfx-rs/wgpu/pull/2922)
 - Fixed opening of RenderDoc library by @abuffseagull in [#2930](https://github.com/gfx-rs/wgpu/pull/2930)
diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs
index 102f59fda..748f1057d 100644
--- a/wgpu-core/src/device/queue.rs
+++ b/wgpu-core/src/device/queue.rs
@@ -243,30 +243,28 @@ impl<A: hal::Api> PendingWrites<A> {
         }
     }
 }
 
-impl<A: HalApi> super::Device<A> {
-    fn prepare_staging_buffer(
-        &mut self,
-        size: wgt::BufferAddress,
-    ) -> Result<(StagingBuffer<A>, *mut u8), DeviceError> {
-        profiling::scope!("prepare_staging_buffer");
-        let stage_desc = hal::BufferDescriptor {
-            label: Some("(wgpu internal) Staging"),
-            size,
-            usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC,
-            memory_flags: hal::MemoryFlags::TRANSIENT,
-        };
+fn prepare_staging_buffer<A: HalApi>(
+    device: &mut A::Device,
+    size: wgt::BufferAddress,
+) -> Result<(StagingBuffer<A>, *mut u8), DeviceError> {
+    profiling::scope!("prepare_staging_buffer");
+    let stage_desc = hal::BufferDescriptor {
+        label: Some("(wgpu internal) Staging"),
+        size,
+        usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC,
+        memory_flags: hal::MemoryFlags::TRANSIENT,
+    };
 
-        let buffer = unsafe { self.raw.create_buffer(&stage_desc)? };
-        let mapping = unsafe { self.raw.map_buffer(&buffer, 0..size) }?;
+    let buffer = unsafe { device.create_buffer(&stage_desc)? };
+    let mapping = unsafe { device.map_buffer(&buffer, 0..size) }?;
 
-        let staging_buffer = StagingBuffer {
-            raw: buffer,
-            size,
-            is_coherent: mapping.is_coherent,
-        };
+    let staging_buffer = StagingBuffer {
+        raw: buffer,
+        size,
+        is_coherent: mapping.is_coherent,
+    };
 
-        Ok((staging_buffer, mapping.ptr.as_ptr()))
-    }
+    Ok((staging_buffer, mapping.ptr.as_ptr()))
 }
 
 impl<A: hal::Api> StagingBuffer<A> {
@@ -350,21 +348,31 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             return Ok(());
         }
 
-        let (staging_buffer, staging_buffer_ptr) = device.prepare_staging_buffer(data_size)?;
+        // Platform validation requires that the staging buffer always be
+        // freed, even if an error occurs. All paths from here must call
+        // `device.pending_writes.consume`.
+        let (staging_buffer, staging_buffer_ptr) =
+            prepare_staging_buffer(&mut device.raw, data_size)?;
 
-        unsafe {
+        if let Err(flush_error) = unsafe {
             profiling::scope!("copy");
             ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len());
-            staging_buffer.flush(&device.raw)?;
-        };
+            staging_buffer.flush(&device.raw)
+        } {
+            device.pending_writes.consume(staging_buffer);
+            return Err(flush_error.into());
+        }
 
-        self.queue_write_staging_buffer_impl(
+        let result = self.queue_write_staging_buffer_impl(
             device,
             device_token,
-            staging_buffer,
+            &staging_buffer,
             buffer_id,
             buffer_offset,
-        )
+        );
+
+        device.pending_writes.consume(staging_buffer);
+        result
     }
 
     pub fn queue_create_staging_buffer<A: HalApi>(
@@ -382,7 +390,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .map_err(|_| DeviceError::Invalid)?;
 
         let (staging_buffer, staging_buffer_ptr) =
-            device.prepare_staging_buffer(buffer_size.get())?;
+            prepare_staging_buffer(&mut device.raw, buffer_size.get())?;
 
         let fid = hub.staging_buffers.prepare(id_in);
         let id = fid.assign(staging_buffer, device_token);
@@ -413,15 +421,25 @@
             .0
             .ok_or(TransferError::InvalidBuffer(buffer_id))?;
 
-        unsafe { staging_buffer.flush(&device.raw)? };
+        // At this point, we have taken ownership of the staging_buffer from the
+        // user. Platform validation requires that the staging buffer always
+        // be freed, even if an error occurs. All paths from here must call
+        // `device.pending_writes.consume`.
+        if let Err(flush_error) = unsafe { staging_buffer.flush(&device.raw) } {
+            device.pending_writes.consume(staging_buffer);
+            return Err(flush_error.into());
+        }
 
-        self.queue_write_staging_buffer_impl(
+        let result = self.queue_write_staging_buffer_impl(
             device,
             device_token,
-            staging_buffer,
+            &staging_buffer,
             buffer_id,
             buffer_offset,
-        )
+        );
+
+        device.pending_writes.consume(staging_buffer);
+        result
     }
 
     pub fn queue_validate_write_buffer<A: HalApi>(
@@ -481,7 +499,7 @@
         &self,
         device: &mut super::Device<A>,
         device_token: &mut Token<super::Device<A>>,
-        staging_buffer: StagingBuffer<A>,
+        staging_buffer: &StagingBuffer<A>,
         buffer_id: id::BufferId,
         buffer_offset: u64,
     ) -> Result<(), QueueWriteError> {
@@ -520,7 +538,6 @@
             encoder.copy_buffer_to_buffer(&staging_buffer.raw, dst_raw, region.into_iter());
         }
 
-        device.pending_writes.consume(staging_buffer);
         device.pending_writes.dst_buffers.insert(buffer_id);
 
         // Ensure the overwritten bytes are marked as initialized so they don't need to be nulled prior to mapping or binding.
@@ -613,7 +630,6 @@
         let block_rows_in_copy =
             (size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks;
         let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
-        let (staging_buffer, staging_buffer_ptr) = device.prepare_staging_buffer(stage_size)?;
 
         let dst = texture_guard.get_mut(destination.texture).unwrap();
         if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) {
@@ -676,12 +692,23 @@
         validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?;
         dst.life_guard.use_at(device.active_submission_index + 1);
 
+        let dst_raw = dst
+            .inner
+            .as_raw()
+            .ok_or(TransferError::InvalidTexture(destination.texture))?;
+
         let bytes_per_row = if let Some(bytes_per_row) = data_layout.bytes_per_row {
             bytes_per_row.get()
         } else {
             width_blocks * format_desc.block_size as u32
         };
 
+        // Platform validation requires that the staging buffer always be
+        // freed, even if an error occurs. All paths from here must call
+        // `device.pending_writes.consume`.
+        let (staging_buffer, staging_buffer_ptr) =
+            prepare_staging_buffer(&mut device.raw, stage_size)?;
+
         if stage_bytes_per_row == bytes_per_row {
             profiling::scope!("copy aligned");
             // Fast path if the data is already being aligned optimally.
@@ -715,7 +742,10 @@
             }
         }
 
-        unsafe { staging_buffer.flush(&device.raw) }?;
+        if let Err(e) = unsafe { staging_buffer.flush(&device.raw) } {
+            device.pending_writes.consume(staging_buffer);
+            return Err(e.into());
+        }
 
         let regions = (0..array_layer_count).map(|rel_array_layer| {
             let mut texture_base = dst_base.clone();
@@ -737,11 +767,6 @@
             usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
         };
 
-        let dst_raw = dst
-            .inner
-            .as_raw()
-            .ok_or(TransferError::InvalidTexture(destination.texture))?;
-
         unsafe {
             encoder
                 .transition_textures(transition.map(|pending| pending.into_hal(dst)).into_iter());
diff --git a/wgpu/tests/buffer_copy.rs b/wgpu/tests/buffer_copy.rs
new file mode 100644
index 000000000..56d584edb
--- /dev/null
+++ b/wgpu/tests/buffer_copy.rs
@@ -0,0 +1,42 @@
+//! Tests for buffer copy validation.
+
+use wgt::BufferAddress;
+
+use crate::common::{initialize_test, TestParameters};
+
+#[test]
+fn copy_alignment() {
+    fn try_copy(offset: BufferAddress, size: BufferAddress, should_panic: bool) {
+        let mut parameters = TestParameters::default();
+        if should_panic {
+            parameters = parameters.failure();
+        }
+
+        initialize_test(parameters, |ctx| {
+            let buffer = ctx.device.create_buffer(&BUFFER_DESCRIPTOR);
+            let data = vec![255; size as usize];
+            ctx.queue.write_buffer(&buffer, offset, &data);
+        });
+    }
+
+    try_copy(0, 0, false);
+    try_copy(4, 16 + 1, true);
+    try_copy(64, 20 + 2, true);
+    try_copy(256, 44 + 3, true);
+    try_copy(1024, 8 + 4, false);
+
+    try_copy(0, 4, false);
+    try_copy(4 + 1, 8, true);
+    try_copy(64 + 2, 12, true);
+    try_copy(256 + 3, 16, true);
+    try_copy(1024 + 4, 4, false);
+}
+
+const BUFFER_SIZE: BufferAddress = 1234;
+
+const BUFFER_DESCRIPTOR: wgpu::BufferDescriptor = wgpu::BufferDescriptor {
+    label: None,
+    size: BUFFER_SIZE,
+    usage: wgpu::BufferUsages::COPY_SRC.union(wgpu::BufferUsages::COPY_DST),
+    mapped_at_creation: false,
+};
diff --git a/wgpu/tests/root.rs b/wgpu/tests/root.rs
index d034418dc..c8ad63e17 100644
--- a/wgpu/tests/root.rs
+++ b/wgpu/tests/root.rs
@@ -1,6 +1,7 @@
 // All files containing tests
 mod common;
 
+mod buffer_copy;
 mod clear_texture;
 mod device;
 mod example_wgsl;
@@ -8,5 +9,6 @@ mod instance;
 mod poll;
 mod resource_descriptor_accessor;
 mod shader_primitive_index;
+mod texture_bounds;
 mod vertex_indices;
 mod zero_init_texture_after_discard;
diff --git a/wgpu/tests/texture_bounds.rs b/wgpu/tests/texture_bounds.rs
new file mode 100644
index 000000000..37fc3d82e
--- /dev/null
+++ b/wgpu/tests/texture_bounds.rs
@@ -0,0 +1,61 @@
+//! Tests for texture copy bounds checks.
+
+use crate::common::{initialize_test, TestParameters};
+use std::num::NonZeroU32;
+
+#[test]
+fn bad_copy_origin() {
+    fn try_origin(origin: wgpu::Origin3d, should_panic: bool) {
+        let mut parameters = TestParameters::default();
+        if should_panic {
+            parameters = parameters.failure();
+        }
+
+        initialize_test(parameters, |ctx| {
+            let texture = ctx.device.create_texture(&TEXTURE_DESCRIPTOR);
+            let data = vec![255; BUFFER_SIZE as usize];
+            ctx.queue.write_texture(
+                wgpu::ImageCopyTexture {
+                    texture: &texture,
+                    mip_level: 0,
+                    origin,
+                    aspect: wgpu::TextureAspect::All,
+                },
+                &data,
+                BUFFER_COPY_LAYOUT,
+                TEXTURE_SIZE,
+            );
+        });
+    }
+
+    try_origin(wgpu::Origin3d { x: 0, y: 0, z: 0 }, false);
+    try_origin(wgpu::Origin3d { x: 1, y: 0, z: 0 }, true);
+    try_origin(wgpu::Origin3d { x: 0, y: 1, z: 0 }, true);
+    try_origin(wgpu::Origin3d { x: 0, y: 0, z: 1 }, true);
+}
+
+const TEXTURE_SIZE: wgpu::Extent3d = wgpu::Extent3d {
+    width: 64,
+    height: 64,
+    depth_or_array_layers: 1,
+};
+
+const TEXTURE_DESCRIPTOR: wgpu::TextureDescriptor = wgpu::TextureDescriptor {
+    label: Some("CopyOrigin"),
+    size: TEXTURE_SIZE,
+    mip_level_count: 1,
+    sample_count: 1,
+    dimension: wgpu::TextureDimension::D2,
+    format: wgpu::TextureFormat::Rgba8UnormSrgb,
+    usage: wgpu::TextureUsages::COPY_DST.union(wgpu::TextureUsages::COPY_SRC),
+};
+
+const BYTES_PER_PIXEL: u32 = 4;
+
+const BUFFER_SIZE: u32 = TEXTURE_SIZE.width * TEXTURE_SIZE.height * BYTES_PER_PIXEL;
+
+const BUFFER_COPY_LAYOUT: wgpu::ImageDataLayout = wgpu::ImageDataLayout {
+    offset: 0,
+    bytes_per_row: NonZeroU32::new(TEXTURE_SIZE.width * BYTES_PER_PIXEL),
+    rows_per_image: None,
+};
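
Note on the pattern the patch enforces: once a `StagingBuffer` exists, every exit from the function, including error returns, must hand it to `device.pending_writes.consume`, which parks it for destruction after the next submission; an early `?` return would otherwise drop the buffer without freeing its hal resources. The sketch below is illustrative only and uses stand-in types (`Staging`, `Pending`, `flush`) rather than wgpu-core's real ones; it shows the same consume-on-every-path shape in a runnable form.

struct Staging {
    bytes: Vec<u8>,
}

#[derive(Default)]
struct Pending {
    // Buffers parked here are freed in bulk later, which is the role
    // `device.pending_writes` plays in the patch above.
    free_list: Vec<Staging>,
}

impl Pending {
    fn consume(&mut self, buf: Staging) {
        self.free_list.push(buf);
    }
}

// Stand-in for the fallible flush after which the buffer must still be freed.
fn flush(fail: bool) -> Result<(), &'static str> {
    if fail {
        Err("flush failed")
    } else {
        Ok(())
    }
}

// Every exit path routes the staging buffer through `consume`, mirroring
// the control flow the patch adds to `queue_write_buffer`.
fn write(pending: &mut Pending, fail: bool) -> Result<(), &'static str> {
    let staging = Staging { bytes: vec![0u8; 16] };

    if let Err(e) = flush(fail) {
        pending.consume(staging); // freed even on the error path
        return Err(e);
    }

    pending.consume(staging); // freed on the success path
    Ok(())
}

fn main() {
    let mut pending = Pending::default();
    assert!(write(&mut pending, true).is_err());
    assert!(write(&mut pending, false).is_ok());
    assert_eq!(pending.free_list.len(), 2); // both buffers were parked for cleanup
}

An RAII drop guard could achieve the same guarantee, but explicit `consume` calls on each path match how wgpu-core already frees staging buffers through the pending-writes queue.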