map buffers immediately in map_async if the Queue has been dropped

Authored by teoxoy on 2024-10-15 12:58:17 +02:00; committed by Teodor Tanasoaia
parent 6cc53421bf
commit 5061ae3e5e
4 changed files with 74 additions and 72 deletions
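
In summary: `map_async` stores the pending request in the buffer's `map_state` and normally relies on queue maintenance to perform the mapping once the GPU is done with the buffer. If the `Queue` has already been dropped, no maintenance will ever run, so the request used to hang forever; after this change, `map_async` maps the buffer on the spot and fires the callback. A condensed, runnable model of the new control flow (every type below is a simplified stand-in, not the real wgpu-core API):

use std::sync::Mutex;

type Callback = Box<dyn FnOnce(Result<(), &'static str>) + Send>;

enum MapState {
    Idle,
    Waiting(Callback),
    Active,
}

struct Buffer {
    map_state: Mutex<MapState>,
}

struct Device {
    queue_alive: bool, // `false` models a dropped Queue
}

// Tail end of a simplified `map_async`: record the request, then decide
// whether to defer the mapping or resolve it immediately.
fn map_async_tail(buffer: &Buffer, device: &Device, callback: Callback) -> u64 {
    *buffer.map_state.lock().unwrap() = MapState::Waiting(callback);
    if device.queue_alive {
        // Old and new behavior: queue maintenance maps the buffer later,
        // once the GPU is done with it, and fires the callback then.
        1 // pretend submission index to wait for
    } else {
        // New behavior: with no Queue alive nothing can be in flight, so
        // map immediately and resolve the callback instead of waiting forever.
        let state = std::mem::replace(&mut *buffer.map_state.lock().unwrap(), MapState::Idle);
        if let MapState::Waiting(cb) = state {
            *buffer.map_state.lock().unwrap() = MapState::Active;
            cb(Ok(()));
        }
        0 // '0' means no wait is necessary
    }
}

fn main() {
    let buffer = Buffer { map_state: Mutex::new(MapState::Idle) };
    let device = Device { queue_alive: false }; // the Queue has been dropped
    let idx = map_async_tail(&buffer, &device, Box::new(|r| println!("mapped: {r:?}")));
    assert_eq!(idx, 0); // callback already fired
}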

@@ -3,7 +3,7 @@ use crate::{
         queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
         DeviceError,
     },
-    resource::{self, Buffer, Texture, Trackable},
+    resource::{Buffer, Texture, Trackable},
     snatch::SnatchGuard,
     SubmissionIndex,
 };
@@ -388,7 +388,6 @@ impl LifetimeTracker {
     #[must_use]
     pub(crate) fn handle_mapping(
         &mut self,
-        raw: &dyn hal::DynDevice,
         snatch_guard: &SnatchGuard,
     ) -> Vec<super::BufferMapPendingClosure> {
         if self.ready_to_map.is_empty() {
@@ -398,61 +397,10 @@ impl LifetimeTracker {
             Vec::with_capacity(self.ready_to_map.len());
         for buffer in self.ready_to_map.drain(..) {
-            // This _cannot_ be inlined into the match. If it is, the lock will be held
-            // open through the whole match, resulting in a deadlock when we try to re-lock
-            // the buffer back to active.
-            let mapping = std::mem::replace(
-                &mut *buffer.map_state.lock(),
-                resource::BufferMapState::Idle,
-            );
-            let pending_mapping = match mapping {
-                resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
-                // Mapping cancelled
-                resource::BufferMapState::Idle => continue,
-                // Mapping queued at least twice by map -> unmap -> map
-                // and was already successfully mapped below
-                resource::BufferMapState::Active { .. } => {
-                    *buffer.map_state.lock() = mapping;
-                    continue;
-                }
-                _ => panic!("No pending mapping."),
-            };
-            let status = if pending_mapping.range.start != pending_mapping.range.end {
-                let host = pending_mapping.op.host;
-                let size = pending_mapping.range.end - pending_mapping.range.start;
-                match super::map_buffer(
-                    raw,
-                    &buffer,
-                    pending_mapping.range.start,
-                    size,
-                    host,
-                    snatch_guard,
-                ) {
-                    Ok(mapping) => {
-                        *buffer.map_state.lock() = resource::BufferMapState::Active {
-                            mapping,
-                            range: pending_mapping.range.clone(),
-                            host,
-                        };
-                        Ok(())
-                    }
-                    Err(e) => {
-                        log::error!("Mapping failed: {e}");
-                        Err(e)
-                    }
-                }
-            } else {
-                *buffer.map_state.lock() = resource::BufferMapState::Active {
-                    mapping: hal::BufferMapping {
-                        ptr: std::ptr::NonNull::dangling(),
-                        is_coherent: true,
-                    },
-                    range: pending_mapping.range,
-                    host: pending_mapping.op.host,
-                };
-                Ok(())
-            };
-            pending_callbacks.push((pending_mapping.op, status));
+            match buffer.map(snatch_guard) {
+                Some(cb) => pending_callbacks.push(cb),
+                None => continue,
+            }
         }
         pending_callbacks
     }

@@ -298,24 +298,25 @@ impl DeviceLostClosure {
     }
 }

-fn map_buffer(
-    raw: &dyn hal::DynDevice,
+pub(crate) fn map_buffer(
     buffer: &Buffer,
     offset: BufferAddress,
     size: BufferAddress,
     kind: HostMap,
     snatch_guard: &SnatchGuard,
 ) -> Result<hal::BufferMapping, BufferAccessError> {
+    let raw_device = buffer.device.raw();
     let raw_buffer = buffer.try_raw(snatch_guard)?;
     let mapping = unsafe {
-        raw.map_buffer(raw_buffer, offset..offset + size)
+        raw_device
+            .map_buffer(raw_buffer, offset..offset + size)
             .map_err(|e| buffer.device.handle_hal_error(e))?
     };
     if !mapping.is_coherent && kind == HostMap::Read {
         #[allow(clippy::single_range_in_vec_init)]
         unsafe {
-            raw.invalidate_mapped_ranges(raw_buffer, &[offset..offset + size]);
+            raw_device.invalidate_mapped_ranges(raw_buffer, &[offset..offset + size]);
         }
     }
@@ -370,7 +371,7 @@ fn map_buffer(
             && kind == HostMap::Read
             && buffer.usage.contains(wgt::BufferUsages::MAP_WRITE)
         {
-            unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
+            unsafe { raw_device.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
         }
     }
 }
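
Since `map_buffer` can reach the hal device through `buffer.device.raw()`, the `raw: &dyn hal::DynDevice` parameter was redundant, and dropping it is what lets `Buffer::map` (further down) call into `crate::device::map_buffer` without a device handle. A minimal sketch of the pattern, with hypothetical stand-in types rather than the wgpu-core definitions:

use std::sync::Arc;

struct RawDevice;
impl RawDevice {
    fn map_buffer(&self) { /* hal mapping would happen here */ }
}

struct Device {
    raw: RawDevice,
}

struct Buffer {
    device: Arc<Device>,
}

// Before: map_buffer(raw: &RawDevice, buffer: &Buffer) — every caller had to
// thread the raw device through. After: the buffer already holds a reference
// to its device, so the extra parameter disappears.
fn map_buffer(buffer: &Buffer) {
    let raw_device = &buffer.device.raw;
    raw_device.map_buffer();
}

fn main() {
    let buffer = Buffer { device: Arc::new(Device { raw: RawDevice }) };
    map_buffer(&buffer);
}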

@@ -447,7 +447,7 @@ impl Device {
         let submission_closures =
             life_tracker.triage_submissions(submission_index, &self.command_allocator);

-        let mapping_closures = life_tracker.handle_mapping(self.raw(), &snatch_guard);
+        let mapping_closures = life_tracker.handle_mapping(&snatch_guard);

         let queue_empty = life_tracker.queue_empty();
@@ -620,14 +620,7 @@ impl Device {
             }
         } else {
             let snatch_guard: SnatchGuard = self.snatchable_lock.read();
-            map_buffer(
-                self.raw(),
-                &buffer,
-                0,
-                map_size,
-                HostMap::Write,
-                &snatch_guard,
-            )?
+            map_buffer(&buffer, 0, map_size, HostMap::Write, &snatch_guard)?
         };
         *buffer.map_state.lock() = resource::BufferMapState::Active {
             mapping,

@@ -637,13 +637,73 @@ impl Buffer {
         let submit_index = if let Some(queue) = device.get_queue() {
             queue.lock_life().map(self).unwrap_or(0) // '0' means no wait is necessary
         } else {
-            // TODO: map immediately
+            // We can safely unwrap below since we just set the `map_state` to `BufferMapState::Waiting`.
+            let (mut operation, status) = self.map(&device.snatchable_lock.read()).unwrap();
+            if let Some(callback) = operation.callback.take() {
+                callback.call(status);
+            }
             0
         };

         Ok(submit_index)
     }

+    /// This function returns [`None`] only if [`Self::map_state`] is not [`BufferMapState::Waiting`].
+    #[must_use]
+    pub(crate) fn map(&self, snatch_guard: &SnatchGuard) -> Option<BufferMapPendingClosure> {
+        // This _cannot_ be inlined into the match. If it is, the lock will be held
+        // open through the whole match, resulting in a deadlock when we try to re-lock
+        // the buffer back to active.
+        let mapping = mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle);
+        let pending_mapping = match mapping {
+            BufferMapState::Waiting(pending_mapping) => pending_mapping,
+            // Mapping cancelled
+            BufferMapState::Idle => return None,
+            // Mapping queued at least twice by map -> unmap -> map
+            // and was already successfully mapped below
+            BufferMapState::Active { .. } => {
+                *self.map_state.lock() = mapping;
+                return None;
+            }
+            _ => panic!("No pending mapping."),
+        };
+        let status = if pending_mapping.range.start != pending_mapping.range.end {
+            let host = pending_mapping.op.host;
+            let size = pending_mapping.range.end - pending_mapping.range.start;
+            match crate::device::map_buffer(
+                self,
+                pending_mapping.range.start,
+                size,
+                host,
+                snatch_guard,
+            ) {
+                Ok(mapping) => {
+                    *self.map_state.lock() = BufferMapState::Active {
+                        mapping,
+                        range: pending_mapping.range.clone(),
+                        host,
+                    };
+                    Ok(())
+                }
+                Err(e) => {
+                    log::error!("Mapping failed: {e}");
+                    Err(e)
+                }
+            }
+        } else {
+            *self.map_state.lock() = BufferMapState::Active {
+                mapping: hal::BufferMapping {
+                    ptr: NonNull::dangling(),
+                    is_coherent: true,
+                },
+                range: pending_mapping.range,
+                host: pending_mapping.op.host,
+            };
+            Ok(())
+        };
+        Some((pending_mapping.op, status))
+    }
+
     // Note: This must not be called while holding a lock.
     pub(crate) fn unmap(
         self: &Arc<Self>,
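
The comment carried into `Buffer::map` points at a real hazard: a lock acquired in a `match` scrutinee stays held for the whole `match` (scrutinee temporaries live until the end of the expression), so re-locking inside an arm self-deadlocks. A standalone illustration of the `mem::replace` pattern used above (plain `std` only, not wgpu code):

use std::mem;
use std::sync::Mutex;

#[derive(Debug)]
enum State {
    Idle,
    Waiting(u32),
    Active,
}

fn drain(state: &Mutex<State>) {
    // Take the state out while holding the lock only for this one statement.
    // Writing `match *state.lock().unwrap() { ... }` instead would keep the
    // guard alive across the whole match; re-locking in an arm would then
    // deadlock or panic (std's Mutex is not reentrant).
    let taken = mem::replace(&mut *state.lock().unwrap(), State::Idle);
    match taken {
        State::Waiting(n) => {
            // Safe: the guard from the `mem::replace` line is already dropped.
            *state.lock().unwrap() = State::Active;
            println!("mapped request {n}");
        }
        other => {
            // Put back whatever we found.
            *state.lock().unwrap() = other;
        }
    }
}

fn main() {
    let state = Mutex::new(State::Waiting(1));
    drain(&state);
    assert!(matches!(*state.lock().unwrap(), State::Active));
}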