Put raw texture access behind snatch guards

Nicolas Silva 2024-01-02 13:56:11 +01:00 committed by Teodor Tanasoaia
parent 2f87a8ff83
commit bfe235a295
11 changed files with 104 additions and 124 deletions
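In short: the raw hal handle no longer lives behind a per-texture `RwLock<Option<TextureInner<A>>>` that gets `unwrap()`ed at every use. It now sits in a `Snatchable<TextureInner<A>>`, every raw access has to present a `SnatchGuard` taken from the device-wide `snatchable_lock`, and destruction "snatches" the value out under the exclusive write guard, so later accesses see `None` instead of panicking. Below is a minimal, self-contained sketch of that pattern for illustration only; the std `RwLock`, the `SnatchLock` wrapper, and the `String` stand-in for a hal texture are assumptions of the sketch, not wgpu-core's actual internals.

```rust
use std::cell::UnsafeCell;
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

/// Shared guard: while any of these exist, nothing can be snatched.
pub struct SnatchGuard<'a>(RwLockReadGuard<'a, ()>);
/// Exclusive guard: proves no reader is inspecting raw handles right now.
pub struct ExclusiveSnatchGuard<'a>(RwLockWriteGuard<'a, ()>);

/// One lock per device; every raw-handle accessor takes a guard from it.
pub struct SnatchLock {
    lock: RwLock<()>,
}

impl SnatchLock {
    pub fn new() -> Self {
        Self { lock: RwLock::new(()) }
    }
    pub fn read(&self) -> SnatchGuard<'_> {
        SnatchGuard(self.lock.read().unwrap())
    }
    pub fn write(&self) -> ExclusiveSnatchGuard<'_> {
        ExclusiveSnatchGuard(self.lock.write().unwrap())
    }
}

/// A value that can be read under the shared guard and taken ("snatched")
/// exactly once under the exclusive guard, e.g. when a texture is destroyed.
/// (The real type also carries a Sync impl justified by the lock discipline.)
pub struct Snatchable<T> {
    value: UnsafeCell<Option<T>>,
}

impl<T> Snatchable<T> {
    pub fn new(value: T) -> Self {
        Self { value: UnsafeCell::new(Some(value)) }
    }

    /// Shared access to the value; holding the guard keeps `snatch` out.
    pub fn get<'a>(&'a self, _guard: &'a SnatchGuard<'_>) -> Option<&'a T> {
        unsafe { (*self.value.get()).as_ref() }
    }

    /// Mutable access without removing the value; needs the exclusive guard.
    pub fn get_mut<'a>(&'a self, _guard: &'a mut ExclusiveSnatchGuard<'_>) -> Option<&'a mut T> {
        unsafe { (*self.value.get()).as_mut() }
    }

    /// Take the value out; later `get` calls return None ("destroyed").
    pub fn snatch(&self, _guard: ExclusiveSnatchGuard<'_>) -> Option<T> {
        unsafe { (*self.value.get()).take() }
    }
}

fn main() {
    let snatchable_lock = SnatchLock::new();
    // Stand-in for `TextureInner::Native { raw }`.
    let inner = Snatchable::new(String::from("hal texture"));

    {
        // Command recording / barrier emission path: read guard + `get`.
        let guard = snatchable_lock.read();
        let raw = inner.get(&guard).expect("texture was destroyed");
        assert_eq!(raw, "hal texture");
    } // guard dropped: destruction may proceed again

    // Destroy path: snatch the raw handle under the exclusive guard.
    let raw = inner.snatch(snatchable_lock.write());
    assert!(raw.is_some());

    // Any later access observes the destroyed state instead of panicking.
    let guard = snatchable_lock.read();
    assert!(inner.get(&guard).is_none());
}
```

The point of threading the guard through `as_raw`, `into_hal`, and `drain_transitions` in the diff below is that the borrow checker then forces every raw-handle use to happen while the device's snatchable lock is held for reading.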


@ -252,11 +252,9 @@ pub(crate) fn clear_texture<A: HalApi>(
alignments: &hal::Alignments,
zero_buffer: &A::Buffer,
) -> Result<(), ClearError> {
let dst_inner = dst_texture.inner();
let dst_raw = dst_inner
.as_ref()
.unwrap()
.as_raw()
let snatch_guard = dst_texture.device.snatchable_lock.read();
let dst_raw = dst_texture
.as_raw(&snatch_guard)
.ok_or_else(|| ClearError::InvalidTexture(dst_texture.as_info().id()))?;
// Issue the right barrier.
@ -296,7 +294,7 @@ pub(crate) fn clear_texture<A: HalApi>(
let dst_barrier = texture_tracker
.set_single(dst_texture, selector, clear_usage)
.unwrap()
.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap()));
.map(|pending| pending.into_hal(dst_raw));
unsafe {
encoder.transition_textures(dst_barrier.into_iter());
}


@ -226,11 +226,11 @@ impl<A: HalApi> CommandBuffer<A> {
profiling::scope!("drain_barriers");
let buffer_barriers = base.buffers.drain_transitions(snatch_guard);
let (transitions, textures) = base.textures.drain_transitions();
let (transitions, textures) = base.textures.drain_transitions(snatch_guard);
let texture_barriers = transitions
.into_iter()
.enumerate()
.map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap()));
.map(|(i, p)| p.into_hal(textures[i].unwrap().as_raw().unwrap()));
unsafe {
raw.transition_buffers(buffer_barriers);


@ -811,18 +811,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_inner = dst_texture.inner();
let dst_raw = dst_inner
.as_ref()
.unwrap()
.as_raw()
let dst_raw = dst_texture
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap()));
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_raw));
if !dst_base.aspect.is_one() {
return Err(TransferError::CopyAspectNotOne.into());
@ -951,11 +948,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_inner = src_texture.inner();
let src_raw = src_inner
.as_ref()
.unwrap()
.as_raw()
let src_raw = src_texture
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
@ -973,7 +967,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
.into());
}
let src_barrier = src_pending.map(|pending| pending.into_hal(src_inner.as_ref().unwrap()));
let src_barrier = src_pending.map(|pending| pending.into_hal(src_raw));
let (dst_buffer, dst_pending) = {
let buffer_guard = hub.buffers.read();
@ -1077,6 +1071,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into());
}
let snatch_guard = device.snatchable_lock.read();
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
@ -1101,12 +1097,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.get(source.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
let src_inner = src_texture.inner();
let dst_texture = hub
.textures
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
let dst_inner = dst_texture.inner();
// src and dst texture format must be copy-compatible
// https://gpuweb.github.io/gpuweb/#copy-compatible
@ -1168,10 +1162,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_raw = src_inner
.as_ref()
.unwrap()
.as_raw()
let src_raw = src_texture
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
@ -1180,7 +1172,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//TODO: try to avoid this collection. It's needed because both
// `src_pending` and `dst_pending` try to hold `trackers.textures` mutably.
let mut barriers: ArrayVec<_, 2> = src_pending
.map(|pending| pending.into_hal(src_inner.as_ref().unwrap()))
.map(|pending| pending.into_hal(src_raw))
.collect();
let dst_pending = cmd_buf_data
@ -1188,10 +1180,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst_inner
.as_ref()
.unwrap()
.as_raw()
let dst_raw = dst_texture
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) {
return Err(
@ -1199,7 +1189,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
}
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())));
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_raw)));
let hal_copy_size = hal::CopyExtent {
width: src_copy_size.width.min(dst_copy_size.width),


@ -745,21 +745,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let last_submit_index = texture.info.submission_index();
if let resource::TextureInner::Native { ref raw } = *texture.inner().as_ref().unwrap() {
if !raw.is_none() {
let temp = queue::TempResource::Texture(texture.clone());
let mut guard = device.pending_writes.lock();
let pending_writes = guard.as_mut().unwrap();
if pending_writes.dst_textures.contains_key(&texture_id) {
pending_writes.temp_resources.push(temp);
} else {
drop(guard);
device
.lock_life()
.schedule_resource_destruction(temp, last_submit_index);
}
let snatch_guard = texture.device.snatchable_lock.read();
if let Some(resource::TextureInner::Native { .. }) = texture.inner.get(&snatch_guard) {
let temp = queue::TempResource::Texture(texture.clone());
let mut guard = device.pending_writes.lock();
let pending_writes = guard.as_mut().unwrap();
if pending_writes.dst_textures.contains_key(&texture_id) {
pending_writes.temp_resources.push(temp);
} else {
return Err(resource::DestroyError::AlreadyDestroyed);
drop(guard);
device
.lock_life()
.schedule_resource_destruction(temp, last_submit_index);
}
}


@ -802,6 +802,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
let snatch_guard = device.snatchable_lock.read();
// Re-get `dst` immutably here, so that the mutable borrow of the
// `texture_guard.get` above ends in time for the `clear_texture`
// call above. Since we've held `texture_guard` the whole time, we know
@ -810,11 +812,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
dst.info
.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let dst_inner = dst.inner();
let dst_raw = dst_inner
.as_ref()
.unwrap()
.as_raw()
let dst_raw = dst
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let bytes_per_row = data_layout
@ -897,9 +896,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.set_single(&dst, selector, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
unsafe {
encoder.transition_textures(
transition.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())),
);
encoder.transition_textures(transition.map(|pending| pending.into_hal(dst_raw)));
encoder.transition_buffers(iter::once(barrier));
encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions);
}
@ -1076,11 +1073,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
dst.info
.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let dst_inner = dst.inner();
let dst_raw = dst_inner
.as_ref()
.unwrap()
.as_raw()
let snatch_guard = device.snatchable_lock.read();
let dst_raw = dst
.as_raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let regions = hal::TextureCopy {
@ -1100,9 +1095,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.textures
.set_single(&dst, selector, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
encoder.transition_textures(
transitions.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())),
);
encoder.transition_textures(transitions.map(|pending| pending.into_hal(dst_raw)));
encoder.copy_external_image_to_texture(
source,
dst_raw,
@ -1238,12 +1231,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
for texture in cmd_buf_trackers.textures.used_resources() {
let id = texture.info.id();
let should_extend = match *texture.inner().as_ref().unwrap() {
TextureInner::Native { raw: None } => {
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(id));
}
TextureInner::Native { raw: Some(_) } => false,
TextureInner::Surface { ref has_work, .. } => {
Some(TextureInner::Native { .. }) => false,
Some(TextureInner::Surface { ref has_work, .. }) => {
has_work.store(true, Ordering::Relaxed);
true
}
@ -1399,11 +1392,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trackers
.textures
.set_from_usage_scope(&used_surface_textures);
let (transitions, textures) = trackers.textures.drain_transitions();
let (transitions, textures) =
trackers.textures.drain_transitions(&snatch_guard);
let texture_barriers = transitions
.into_iter()
.enumerate()
.map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap()));
.map(|(i, p)| p.into_hal(textures[i].unwrap().as_raw().unwrap()));
let present = unsafe {
baked.encoder.transition_textures(texture_barriers);
baked.encoder.end_encoding().unwrap()
@ -1429,12 +1423,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
{
used_surface_textures.set_size(hub.textures.read().len());
for (&id, texture) in pending_writes.dst_textures.iter() {
match *texture.inner().as_ref().unwrap() {
TextureInner::Native { raw: None } => {
match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(id));
}
TextureInner::Native { raw: Some(_) } => {}
TextureInner::Surface { ref has_work, .. } => {
Some(TextureInner::Native { .. }) => {}
Some(TextureInner::Surface { ref has_work, .. }) => {
has_work.store(true, Ordering::Relaxed);
unsafe {
used_surface_textures
@ -1451,11 +1445,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trackers
.textures
.set_from_usage_scope(&used_surface_textures);
let (transitions, textures) = trackers.textures.drain_transitions();
let (transitions, textures) =
trackers.textures.drain_transitions(&snatch_guard);
let texture_barriers = transitions
.into_iter()
.enumerate()
.map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap()));
.map(|(i, p)| p.into_hal(textures[i].unwrap().as_raw().unwrap()));
unsafe {
pending_writes
.command_encoder


@ -587,9 +587,7 @@ impl<A: HalApi> Device<A> {
debug_assert_eq!(self.as_info().id().backend(), A::VARIANT);
Texture {
inner: RwLock::new(Some(resource::TextureInner::Native {
raw: Some(hal_texture),
})),
inner: Snatchable::new(resource::TextureInner::Native { raw: hal_texture }),
device: self.clone(),
desc: desc.map_label(|_| ()),
hal_usage,
@ -907,15 +905,14 @@ impl<A: HalApi> Device<A> {
texture: &Arc<Texture<A>>,
desc: &resource::TextureViewDescriptor,
) -> Result<TextureView<A>, resource::CreateTextureViewError> {
let inner = texture.inner();
let texture_raw = inner
.as_ref()
.unwrap()
.as_raw()
let snatch_guard = texture.device.snatchable_lock.read();
let texture_raw = texture
.as_raw(&snatch_guard)
.ok_or(resource::CreateTextureViewError::InvalidTexture)?;
// resolve TextureViewDescriptor defaults
// https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults
let resolved_format = desc.format.unwrap_or_else(|| {
texture
.desc


@ -27,6 +27,7 @@ use crate::{
identity::{GlobalIdentityHandlerFactory, Input},
init_tracker::TextureInitTracker,
resource::{self, ResourceInfo},
snatch::Snatchable,
track,
};
@ -215,11 +216,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut presentation = surface.presentation.lock();
let present = presentation.as_mut().unwrap();
let texture = resource::Texture {
inner: RwLock::new(Some(resource::TextureInner::Surface {
inner: Snatchable::new(resource::TextureInner::Surface {
raw: Some(ast.texture),
parent_id: surface_id,
has_work: AtomicBool::new(false),
})),
}),
device: device.clone(),
desc: texture_desc,
hal_usage,
@ -325,8 +326,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
let mut exclusive_snatch_guard = device.snatchable_lock.write();
let suf = A::get_surface(&surface);
let mut inner = texture.inner_mut();
let mut inner = texture.inner_mut(&mut exclusive_snatch_guard);
let inner = inner.as_mut().unwrap();
match *inner {
@ -420,13 +422,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
let suf = A::get_surface(&surface);
match *texture.inner_mut().as_mut().take().as_mut().unwrap() {
&mut resource::TextureInner::Surface {
ref mut raw,
ref parent_id,
let exclusive_snatch_guard = device.snatchable_lock.write();
match texture.inner.snatch(exclusive_snatch_guard).unwrap() {
resource::TextureInner::Surface {
mut raw,
parent_id,
has_work: _,
} => {
if surface_id == *parent_id {
if surface_id == parent_id {
unsafe { suf.unwrap().raw.discard_texture(raw.take().unwrap()) };
} else {
log::warn!("Surface texture is outdated");


@ -14,14 +14,14 @@ use crate::{
identity::{GlobalIdentityHandlerFactory, IdentityManager},
init_tracker::{BufferInitTracker, TextureInitTracker},
resource, resource_log,
snatch::{SnatchGuard, Snatchable},
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
track::TextureSelector,
validation::MissingBufferUsageError,
Label, SubmissionIndex,
};
use hal::CommandEncoder;
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use parking_lot::{Mutex, RwLock};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::WasmNotSendSync;
@ -709,7 +709,7 @@ pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, Vec<wgt::Text
#[derive(Debug)]
pub(crate) enum TextureInner<A: HalApi> {
Native {
raw: Option<A::Texture>,
raw: A::Texture,
},
Surface {
raw: Option<A::SurfaceTexture>,
@ -720,11 +720,9 @@ pub(crate) enum TextureInner<A: HalApi> {
impl<A: HalApi> TextureInner<A> {
pub fn as_raw(&self) -> Option<&A::Texture> {
match *self {
Self::Native { raw: Some(ref tex) } => Some(tex),
Self::Surface {
raw: Some(ref tex), ..
} => Some(tex.borrow()),
match self {
Self::Native { raw } => Some(raw),
Self::Surface { raw: Some(tex), .. } => Some(tex.borrow()),
_ => None,
}
}
@ -748,7 +746,7 @@ pub enum TextureClearMode<A: HalApi> {
#[derive(Debug)]
pub struct Texture<A: HalApi> {
pub(crate) inner: RwLock<Option<TextureInner<A>>>,
pub(crate) inner: Snatchable<TextureInner<A>>,
pub(crate) device: Arc<Device<A>>,
pub(crate) desc: wgt::TextureDescriptor<(), Vec<wgt::TextureFormat>>,
pub(crate) hal_usage: hal::TextureUses,
@ -789,11 +787,8 @@ impl<A: HalApi> Drop for Texture<A> {
}
_ => {}
};
if self.inner.read().is_none() {
return;
}
let inner = self.inner.write().take().unwrap();
if let TextureInner::Native { raw: Some(raw) } = inner {
if let Some(TextureInner::Native { raw }) = self.inner.take() {
unsafe {
self.device.raw().destroy_texture(raw);
}
@ -802,11 +797,15 @@ impl<A: HalApi> Drop for Texture<A> {
}
impl<A: HalApi> Texture<A> {
pub(crate) fn inner<'a>(&'a self) -> RwLockReadGuard<'a, Option<TextureInner<A>>> {
self.inner.read()
pub(crate) fn as_raw<'a>(&'a self, snatch_guard: &'a SnatchGuard) -> Option<&'a A::Texture> {
self.inner.get(snatch_guard)?.as_raw()
}
pub(crate) fn inner_mut<'a>(&'a self) -> RwLockWriteGuard<'a, Option<TextureInner<A>>> {
self.inner.write()
pub(crate) fn inner_mut<'a>(
&'a self,
guard: &mut ExclusiveSnatchGuard,
) -> Option<&'a mut TextureInner<A>> {
self.inner.get_mut(guard)
}
pub(crate) fn get_clear_view<'a>(
clear_mode: &'a TextureClearMode<A>,
@ -850,9 +849,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
profiling::scope!("Texture::as_hal");
let hub = A::hub(self);
let texture = { hub.textures.try_get(id).ok().flatten() };
let inner = texture.as_ref().unwrap().inner();
let hal_texture = inner.as_ref().unwrap().as_raw();
let texture_opt = { hub.textures.try_get(id).ok().flatten() };
let texture = texture_opt.as_ref().unwrap();
let snatch_guard = texture.device.snatchable_lock.read();
let hal_texture = texture.as_raw(&snatch_guard);
hal_texture_callback(hal_texture);
}


@ -30,6 +30,11 @@ impl<T> Snatchable<T> {
unsafe { (*self.value.get()).as_ref() }
}
/// Get write access to the value. Requires the snatchable lock's write guard.
pub fn get_mut(&self, _guard: &mut ExclusiveSnatchGuard) -> Option<&mut T> {
unsafe { (*self.value.get()).as_mut() }
}
/// Take the value. Requires the snatchable lock's write guard.
pub fn snatch(&self, _guard: ExclusiveSnatchGuard) -> Option<T> {
unsafe { (*self.value.get()).take() }


@ -151,12 +151,7 @@ impl PendingTransition<hal::BufferUses> {
impl PendingTransition<hal::TextureUses> {
/// Produce the hal barrier corresponding to the transition.
pub fn into_hal<'a, A: HalApi>(
self,
tex: &'a resource::TextureInner<A>,
) -> hal::TextureBarrier<'a, A> {
let texture = tex.as_raw().expect("Texture is destroyed");
pub fn into_hal<'a, A: HalApi>(self, texture: &'a A::Texture) -> hal::TextureBarrier<'a, A> {
// These showing up in a barrier is always a bug
strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN);
strict_assert_ne!(self.usage.end, hal::TextureUses::UNKNOWN);


@ -24,6 +24,7 @@ use crate::{
hal_api::HalApi,
id::{TextureId, TypedId},
resource::{Resource, Texture, TextureInner},
snatch::SnatchGuard,
track::{
invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
ResourceUses, UsageConflict,
@ -34,7 +35,7 @@ use hal::TextureUses;
use arrayvec::ArrayVec;
use naga::FastHashMap;
use parking_lot::{Mutex, RwLockReadGuard};
use parking_lot::Mutex;
use wgt::{strict_assert, strict_assert_eq};
use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain};
@ -497,17 +498,15 @@ impl<A: HalApi> TextureTracker<A> {
/// Drain all currently pending transitions.
pub fn drain_transitions<'a>(
&'a mut self,
) -> (
PendingTransitionList,
Vec<RwLockReadGuard<'a, Option<TextureInner<A>>>>,
) {
snatch_guard: &'a SnatchGuard<'a>,
) -> (PendingTransitionList, Vec<Option<&'a TextureInner<A>>>) {
let mut textures = Vec::new();
let transitions = self
.temp
.drain(..)
.map(|pending| {
let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) };
textures.push(tex.inner());
textures.push(tex.inner.get(snatch_guard));
pending
})
.collect();