diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs
index dc9d4051e..c7d3a59d6 100644
--- a/wgpu-core/src/device/life.rs
+++ b/wgpu-core/src/device/life.rs
@@ -3,7 +3,7 @@ use crate::{
         queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
         DeviceError,
     },
-    resource::{self, Buffer, Texture, Trackable},
+    resource::{Buffer, Texture, Trackable},
     snatch::SnatchGuard,
     SubmissionIndex,
 };
@@ -388,7 +388,6 @@ impl LifetimeTracker {
     #[must_use]
     pub(crate) fn handle_mapping(
         &mut self,
-        raw: &dyn hal::DynDevice,
         snatch_guard: &SnatchGuard,
     ) -> Vec<super::BufferMapPendingClosure> {
         if self.ready_to_map.is_empty() {
@@ -398,61 +397,10 @@ impl LifetimeTracker {
             Vec::with_capacity(self.ready_to_map.len());
 
         for buffer in self.ready_to_map.drain(..) {
-            // This _cannot_ be inlined into the match. If it is, the lock will be held
-            // open through the whole match, resulting in a deadlock when we try to re-lock
-            // the buffer back to active.
-            let mapping = std::mem::replace(
-                &mut *buffer.map_state.lock(),
-                resource::BufferMapState::Idle,
-            );
-            let pending_mapping = match mapping {
-                resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
-                // Mapping cancelled
-                resource::BufferMapState::Idle => continue,
-                // Mapping queued at least twice by map -> unmap -> map
-                // and was already successfully mapped below
-                resource::BufferMapState::Active { .. } => {
-                    *buffer.map_state.lock() = mapping;
-                    continue;
-                }
-                _ => panic!("No pending mapping."),
-            };
-            let status = if pending_mapping.range.start != pending_mapping.range.end {
-                let host = pending_mapping.op.host;
-                let size = pending_mapping.range.end - pending_mapping.range.start;
-                match super::map_buffer(
-                    raw,
-                    &buffer,
-                    pending_mapping.range.start,
-                    size,
-                    host,
-                    snatch_guard,
-                ) {
-                    Ok(mapping) => {
-                        *buffer.map_state.lock() = resource::BufferMapState::Active {
-                            mapping,
-                            range: pending_mapping.range.clone(),
-                            host,
-                        };
-                        Ok(())
-                    }
-                    Err(e) => {
-                        log::error!("Mapping failed: {e}");
-                        Err(e)
-                    }
-                }
-            } else {
-                *buffer.map_state.lock() = resource::BufferMapState::Active {
-                    mapping: hal::BufferMapping {
-                        ptr: std::ptr::NonNull::dangling(),
-                        is_coherent: true,
-                    },
-                    range: pending_mapping.range,
-                    host: pending_mapping.op.host,
-                };
-                Ok(())
-            };
-            pending_callbacks.push((pending_mapping.op, status));
+            match buffer.map(snatch_guard) {
+                Some(cb) => pending_callbacks.push(cb),
+                None => continue,
+            }
         }
         pending_callbacks
     }
diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs
index 81cb4654d..18e35206b 100644
--- a/wgpu-core/src/device/mod.rs
+++ b/wgpu-core/src/device/mod.rs
@@ -298,24 +298,25 @@ impl DeviceLostClosure {
     }
 }
 
-fn map_buffer(
-    raw: &dyn hal::DynDevice,
+pub(crate) fn map_buffer(
     buffer: &Buffer,
     offset: BufferAddress,
     size: BufferAddress,
     kind: HostMap,
     snatch_guard: &SnatchGuard,
 ) -> Result<hal::BufferMapping, BufferAccessError> {
+    let raw_device = buffer.device.raw();
     let raw_buffer = buffer.try_raw(snatch_guard)?;
     let mapping = unsafe {
-        raw.map_buffer(raw_buffer, offset..offset + size)
+        raw_device
+            .map_buffer(raw_buffer, offset..offset + size)
             .map_err(|e| buffer.device.handle_hal_error(e))?
     };
 
     if !mapping.is_coherent && kind == HostMap::Read {
         #[allow(clippy::single_range_in_vec_init)]
         unsafe {
-            raw.invalidate_mapped_ranges(raw_buffer, &[offset..offset + size]);
+            raw_device.invalidate_mapped_ranges(raw_buffer, &[offset..offset + size]);
         }
     }
 
@@ -370,7 +371,7 @@ fn map_buffer(
                 && kind == HostMap::Read
                 && buffer.usage.contains(wgt::BufferUsages::MAP_WRITE)
             {
-                unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
+                unsafe { raw_device.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
             }
         }
     }
diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs
index ee1f25233..95274c4d0 100644
--- a/wgpu-core/src/device/resource.rs
+++ b/wgpu-core/src/device/resource.rs
@@ -447,7 +447,7 @@ impl Device {
         let submission_closures =
             life_tracker.triage_submissions(submission_index, &self.command_allocator);
 
-        let mapping_closures = life_tracker.handle_mapping(self.raw(), &snatch_guard);
+        let mapping_closures = life_tracker.handle_mapping(&snatch_guard);
 
         let queue_empty = life_tracker.queue_empty();
 
@@ -620,14 +620,7 @@ impl Device {
             }
         } else {
             let snatch_guard: SnatchGuard = self.snatchable_lock.read();
-            map_buffer(
-                self.raw(),
-                &buffer,
-                0,
-                map_size,
-                HostMap::Write,
-                &snatch_guard,
-            )?
+            map_buffer(&buffer, 0, map_size, HostMap::Write, &snatch_guard)?
         };
         *buffer.map_state.lock() = resource::BufferMapState::Active {
             mapping,
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index 6d7544d9b..0b9cdf821 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -637,13 +637,73 @@ impl Buffer {
         let submit_index = if let Some(queue) = device.get_queue() {
             queue.lock_life().map(self).unwrap_or(0) // '0' means no wait is necessary
         } else {
-            // TODO: map immediately
+            // We can safely unwrap below since we just set the `map_state` to `BufferMapState::Waiting`.
+            let (mut operation, status) = self.map(&device.snatchable_lock.read()).unwrap();
+            if let Some(callback) = operation.callback.take() {
+                callback.call(status);
+            }
             0
         };
 
         Ok(submit_index)
     }
 
+    /// This function returns [`None`] only if [`Self::map_state`] is not [`BufferMapState::Waiting`].
+    #[must_use]
+    pub(crate) fn map(&self, snatch_guard: &SnatchGuard) -> Option<BufferMapPendingClosure> {
+        // This _cannot_ be inlined into the match. If it is, the lock will be held
+        // open through the whole match, resulting in a deadlock when we try to re-lock
+        // the buffer back to active.
+        let mapping = mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle);
+        let pending_mapping = match mapping {
+            BufferMapState::Waiting(pending_mapping) => pending_mapping,
+            // Mapping cancelled
+            BufferMapState::Idle => return None,
+            // Mapping queued at least twice by map -> unmap -> map
+            // and was already successfully mapped below
+            BufferMapState::Active { .. } => {
+                *self.map_state.lock() = mapping;
+                return None;
+            }
+            _ => panic!("No pending mapping."),
+        };
+        let status = if pending_mapping.range.start != pending_mapping.range.end {
+            let host = pending_mapping.op.host;
+            let size = pending_mapping.range.end - pending_mapping.range.start;
+            match crate::device::map_buffer(
+                self,
+                pending_mapping.range.start,
+                size,
+                host,
+                snatch_guard,
+            ) {
+                Ok(mapping) => {
+                    *self.map_state.lock() = BufferMapState::Active {
+                        mapping,
+                        range: pending_mapping.range.clone(),
+                        host,
+                    };
+                    Ok(())
+                }
+                Err(e) => {
+                    log::error!("Mapping failed: {e}");
+                    Err(e)
+                }
+            }
+        } else {
+            *self.map_state.lock() = BufferMapState::Active {
+                mapping: hal::BufferMapping {
+                    ptr: NonNull::dangling(),
+                    is_coherent: true,
+                },
+                range: pending_mapping.range,
+                host: pending_mapping.op.host,
+            };
+            Ok(())
+        };
+        Some((pending_mapping.op, status))
+    }
+
     // Note: This must not be called while holding a lock.
     pub(crate) fn unmap(
         self: &Arc<Self>,
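The comment carried into `Buffer::map` ("This _cannot_ be inlined into the match") is the subtle part of this refactor: the guard returned by `map_state.lock()` must be dropped before any arm re-locks the state, or the thread deadlocks against itself. Below is a minimal, self-contained sketch of that pattern using `std::sync::Mutex`; `Slot`, `State`, and `step` are illustrative stand-ins only, not wgpu-core API:

    use std::sync::Mutex;

    enum State {
        Idle,
        Waiting(String), // stand-in for a pending mapping operation
        Active,
    }

    struct Slot {
        state: Mutex<State>,
    }

    impl Slot {
        // Mirrors the shape of `Buffer::map`: move the state out of the lock
        // first, so the guard is dropped before the re-locks below.
        fn step(&self) -> Option<String> {
            // Matching on `*self.state.lock().unwrap()` directly would hold
            // the guard across the whole `match` and deadlock on the re-locks.
            let taken = std::mem::replace(&mut *self.state.lock().unwrap(), State::Idle);
            let pending = match taken {
                State::Waiting(p) => p,
                State::Idle => return None,
                s @ State::Active => {
                    // Put the untouched state back, as `Buffer::map` does.
                    *self.state.lock().unwrap() = s;
                    return None;
                }
            };
            // Safe to re-lock: the first guard was a temporary inside the
            // `mem::replace` statement and has already been dropped.
            *self.state.lock().unwrap() = State::Active;
            Some(pending)
        }
    }

    fn main() {
        let slot = Slot {
            state: Mutex::new(State::Waiting("pending map".to_string())),
        };
        assert_eq!(slot.step(), Some("pending map".to_string()));
        assert_eq!(slot.step(), None); // already Active: a repeat call is a no-op
    }

Taking the state out with `mem::replace` ends the guard's lifetime within that single statement, which is why both re-locks in the sketch, like the ones in `Buffer::map`, are safe.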