Simplify some code around buffer unmapping (#4892)

This commit is contained in:
Nicolas Silva 2023-12-20 21:21:32 +01:00 committed by GitHub
parent 4de4ddcc3c
commit 164e478fcb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 80 additions and 86 deletions

View File

@ -487,10 +487,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = A::hub(self);
let mut buffer_guard = hub.buffers.write();
let buffer = buffer_guard
.get_and_mark_destroyed(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let buffer = {
let mut buffer_guard = hub.buffers.write();
buffer_guard
.get_and_mark_destroyed(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?
};
let _ = buffer.unmap();
buffer.destroy()
}
@ -500,37 +505,40 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = A::hub(self);
if let Some(buffer) = hub.buffers.unregister(buffer_id) {
if buffer.ref_count() == 1 {
buffer.destroy().ok();
let buffer = match hub.buffers.unregister(buffer_id) {
Some(buffer) => buffer,
None => {
return;
}
};
let last_submit_index = buffer.info.submission_index();
let _ = buffer.unmap();
let device = buffer.device.clone();
let last_submit_index = buffer.info.submission_index();
if device
.pending_writes
.lock()
.as_ref()
.unwrap()
.dst_buffers
.contains_key(&buffer_id)
{
device.lock_life().future_suspected_buffers.push(buffer);
} else {
device
.lock_life()
.suspected_resources
.buffers
.insert(buffer_id, buffer);
}
let device = buffer.device.clone();
if wait {
match device.wait_for_submit(last_submit_index) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
}
if device
.pending_writes
.lock()
.as_ref()
.unwrap()
.dst_buffers
.contains_key(&buffer_id)
{
device.lock_life().future_suspected_buffers.push(buffer);
} else {
device
.lock_life()
.suspected_resources
.buffers
.insert(buffer_id, buffer);
}
if wait {
match device.wait_for_submit(last_submit_index) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
}
}
}
@ -2511,27 +2519,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
profiling::scope!("unmap", "Buffer");
api_log!("Buffer::unmap {buffer_id:?}");
let closure;
{
// Restrict the locks to this scope.
let hub = A::hub(self);
let hub = A::hub(self);
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
if !buffer.device.is_valid() {
return Err(DeviceError::Lost.into());
}
closure = buffer.buffer_unmap_inner()
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
if !buffer.device.is_valid() {
return Err(DeviceError::Lost.into());
}
// Note: outside the scope where locks are held when calling the callback
if let Some((mut operation, status)) = closure? {
if let Some(callback) = operation.callback.take() {
callback.call(status);
}
}
Ok(())
buffer.unmap()
}
}

View File

@ -427,9 +427,18 @@ impl<A: HalApi> Buffer<A> {
self.raw.get(guard)
}
pub(crate) fn buffer_unmap_inner(
self: &Arc<Self>,
) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
// Note: This must not be called while holding a lock.
/// Unmaps the buffer and, if a map operation was pending, invokes its
/// completion callback with the resulting status.
///
/// The callback is fired only after `unmap_inner` has returned, i.e.
/// after any internal locking it performs is finished — per the note
/// above, this must not be called while holding a lock.
pub(crate) fn unmap(self: &Arc<Self>) -> Result<(), BufferAccessError> {
    // Resolve the pending map operation first; `?` propagates any
    // BufferAccessError from the actual unmap work.
    match self.unmap_inner()? {
        Some((mut operation, status)) => {
            // `take()` moves the callback out so it runs at most once.
            if let Some(callback) = operation.callback.take() {
                callback.call(status);
            }
            Ok(())
        }
        None => Ok(()),
    }
}
fn unmap_inner(self: &Arc<Self>) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
use hal::Device;
let device = &self.device;
@ -535,43 +544,30 @@ impl<A: HalApi> Buffer<A> {
}
pub(crate) fn destroy(self: &Arc<Self>) -> Result<(), DestroyError> {
let map_closure;
// Restrict the locks to this scope.
{
let device = &self.device;
let buffer_id = self.info.id();
let device = &self.device;
let buffer_id = self.info.id();
map_closure = self.buffer_unmap_inner().unwrap_or(None);
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::FreeBuffer(buffer_id));
}
// Note: a future commit will replace this with a read guard
// and actually snatch the buffer.
let snatch_guard = device.snatchable_lock.read();
if self.raw.get(&snatch_guard).is_none() {
return Err(resource::DestroyError::AlreadyDestroyed);
}
let temp = queue::TempResource::Buffer(self.clone());
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if pending_writes.dst_buffers.contains_key(&buffer_id) {
pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = self.info.submission_index();
device
.lock_life()
.schedule_resource_destruction(temp, last_submit_index);
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::FreeBuffer(buffer_id));
}
// Note: a future commit will replace this with a read guard
// and actually snatch the buffer.
let snatch_guard = device.snatchable_lock.read();
if self.raw.get(&snatch_guard).is_none() {
return Err(resource::DestroyError::AlreadyDestroyed);
}
// Note: outside the scope where locks are held when calling the callback
if let Some((mut operation, status)) = map_closure {
if let Some(callback) = operation.callback.take() {
callback.call(status);
}
let temp = queue::TempResource::Buffer(self.clone());
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if pending_writes.dst_buffers.contains_key(&buffer_id) {
pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = self.info.submission_index();
device
.lock_life()
.schedule_resource_destruction(temp, last_submit_index);
}
Ok(())