[wgpu-hal] remove return type from Device.unmap_buffer()

It's already documented that a buffer must have been successfully mapped before it can be unmapped.
Vulkan was the only backend that returned an OOM error when `Buffer.block` was missing, but `Device.map_buffer` already returns an error in that case, so the unmap path can never observe it.
This commit is contained in:
teoxoy 2024-07-11 16:45:40 +02:00 committed by Teodor Tanasoaia
parent 5e2df1406d
commit 26f65ddffd
11 changed files with 25 additions and 57 deletions

View File

@ -321,10 +321,7 @@ impl Global {
.raw()
.flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64));
}
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
device.raw().unmap_buffer(raw_buf);
}
Ok(())
@ -370,10 +367,7 @@ impl Global {
);
}
ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len());
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
device.raw().unmap_buffer(raw_buf);
}
Ok(())

View File

@ -410,17 +410,14 @@ impl Global {
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if let Err(flush_error) = unsafe {
unsafe {
profiling::scope!("copy");
ptr::copy_nonoverlapping(
data.as_ptr(),
staging_buffer_ptr.as_ptr(),
data_size.get() as usize,
);
staging_buffer.flush()
} {
pending_writes.consume(staging_buffer);
return Err(flush_error.into());
staging_buffer.flush();
}
let result = self.queue_write_staging_buffer_impl(
@ -492,10 +489,7 @@ impl Global {
// user. Platform validation requires that the staging buffer always
// be freed, even if an error occurs. All paths from here must call
// `device.pending_writes.consume`.
if let Err(flush_error) = unsafe { staging_buffer.flush() } {
pending_writes.consume(staging_buffer);
return Err(flush_error.into());
}
unsafe { staging_buffer.flush() };
let result = self.queue_write_staging_buffer_impl(
&queue,
@ -823,10 +817,7 @@ impl Global {
}
}
if let Err(e) = unsafe { staging_buffer.flush() } {
pending_writes.consume(staging_buffer);
return Err(e.into());
}
unsafe { staging_buffer.flush() };
let regions = (0..array_layer_count).map(|rel_array_layer| {
let mut texture_base = dst_base.clone();

View File

@ -672,10 +672,7 @@ impl<A: HalApi> Buffer<A> {
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if let Err(e) = unsafe { staging_buffer.flush() } {
pending_writes.consume(staging_buffer);
return Err(e.into());
}
unsafe { staging_buffer.flush() };
self.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let region = wgt::BufferSize::new(self.size).map(|size| hal::BufferCopy {
@ -730,12 +727,7 @@ impl<A: HalApi> Buffer<A> {
}
let _ = (ptr, range);
}
unsafe {
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?
};
unsafe { device.raw().unmap_buffer(raw_buf) };
}
}
Ok(None)
@ -899,14 +891,13 @@ impl<A: HalApi> StagingBuffer<A> {
&self.raw
}
pub(crate) unsafe fn flush(&self) -> Result<(), DeviceError> {
pub(crate) unsafe fn flush(&self) {
use hal::Device;
let device = self.device.raw();
if !self.is_coherent {
unsafe { device.flush_mapped_ranges(self.raw(), iter::once(0..self.size.get())) };
}
unsafe { device.unmap_buffer(self.raw())? };
Ok(())
unsafe { device.unmap_buffer(self.raw()) };
}
}

View File

@ -301,7 +301,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
texture_data.len(),
);
device.unmap_buffer(&staging_buffer).unwrap();
device.unmap_buffer(&staging_buffer);
assert!(mapping.is_coherent);
}
@ -410,7 +410,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
mem::size_of::<Globals>(),
);
device.unmap_buffer(&buffer).unwrap();
device.unmap_buffer(&buffer);
assert!(mapping.is_coherent);
buffer
};
@ -647,7 +647,7 @@ impl<A: hal::Api> Example<A> {
size,
);
assert!(mapping.is_coherent);
self.device.unmap_buffer(&self.local_buffer).unwrap();
self.device.unmap_buffer(&self.local_buffer);
}
}

View File

@ -413,7 +413,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
vertices_size_in_bytes,
);
device.unmap_buffer(&vertices_buffer).unwrap();
device.unmap_buffer(&vertices_buffer);
assert!(mapping.is_coherent);
vertices_buffer
@ -438,7 +438,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
indices_size_in_bytes,
);
device.unmap_buffer(&indices_buffer).unwrap();
device.unmap_buffer(&indices_buffer);
assert!(mapping.is_coherent);
indices_buffer
@ -537,7 +537,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
uniforms_size,
);
device.unmap_buffer(&uniform_buffer).unwrap();
device.unmap_buffer(&uniform_buffer);
assert!(mapping.is_coherent);
uniform_buffer
};
@ -680,7 +680,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
instances_buffer_size,
);
device.unmap_buffer(&instances_buffer).unwrap();
device.unmap_buffer(&instances_buffer);
assert!(mapping.is_coherent);
instances_buffer
@ -848,7 +848,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
instances_buffer_size,
);
self.device.unmap_buffer(&self.instances_buffer).unwrap();
self.device.unmap_buffer(&self.instances_buffer);
assert!(mapping.is_coherent);
}

View File

@ -437,9 +437,8 @@ impl crate::Device for super::Device {
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
unsafe { (*buffer.resource).Unmap(0, ptr::null()) };
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

View File

@ -151,9 +151,7 @@ impl crate::Device for Context {
) -> DeviceResult<crate::BufferMapping> {
Err(crate::DeviceError::Lost)
}
unsafe fn unmap_buffer(&self, buffer: &Resource) -> DeviceResult<()> {
Ok(())
}
unsafe fn unmap_buffer(&self, buffer: &Resource) {}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}

View File

@ -691,7 +691,7 @@ impl crate::Device for super::Device {
is_coherent,
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
if let Some(raw) = buffer.raw {
if buffer.data.is_none() {
let gl = &self.shared.context.lock();
@ -700,7 +700,6 @@ impl crate::Device for super::Device {
unsafe { gl.bind_buffer(buffer.target, None) };
}
}
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
where

View File

@ -728,7 +728,7 @@ pub trait Device: WasmNotSendSync {
/// # Safety
///
/// - The given `buffer` must be currently mapped.
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer) -> Result<(), DeviceError>;
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer);
/// Indicate that CPU writes to mapped buffer memory should be made visible to the GPU.
///

View File

@ -370,9 +370,7 @@ impl crate::Device for super::Device {
})
}
unsafe fn unmap_buffer(&self, _buffer: &super::Buffer) -> DeviceResult<()> {
Ok(())
}
unsafe fn unmap_buffer(&self, _buffer: &super::Buffer) {}
unsafe fn flush_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

View File

@ -951,12 +951,10 @@ impl crate::Device for super::Device {
Err(crate::DeviceError::OutOfMemory)
}
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
// We can only unmap the buffer if it was already mapped successfully.
if let Some(ref block) = buffer.block {
unsafe { block.lock().unmap(&*self.shared) };
Ok(())
} else {
Err(crate::DeviceError::OutOfMemory)
}
}