[wgpu-core] use .strict_get() & .strict_unregister() for command buffers

We now rely solely on the presence of the inner `CommandBufferMutable` to determine whether a command buffer is valid: if the `Mutex<Option<CommandBufferMutable>>` holds `None`, the command buffer is invalid.
This commit is contained in:
teoxoy 2024-09-07 02:11:50 +02:00 committed by Teodor Tanasoaia
parent c0c594eff2
commit 31edbfd0dc
14 changed files with 463 additions and 592 deletions

View File

@ -88,17 +88,11 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
@ -177,17 +171,11 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {

View File

@ -292,12 +292,15 @@ impl Global {
let make_err = |e, arc_desc| (ComputePass::new(None, arc_desc), Some(e));
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc),
};
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
match cmd_buf.lock_encoder() {
match cmd_buf
.try_get()
.map_err(|e| e.into())
.and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder())
{
Ok(_) => {}
Err(e) => return make_err(e, arc_desc),
};
@ -320,25 +323,8 @@ impl Global {
(ComputePass::new(Some(cmd_buf), arc_desc), None)
}
pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> {
let scope = PassErrorScope::Pass;
let cmd_buf = pass
.parent
.as_ref()
.ok_or(ComputePassErrorInner::InvalidParentEncoder)
.map_pass_err(scope)?;
cmd_buf.unlock_encoder().map_pass_err(scope)?;
let base = pass
.base
.take()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?;
self.compute_pass_end_impl(cmd_buf, base, pass.timestamp_writes.take())
}
/// Note that this differs from [`Self::compute_pass_end`], it will
/// create a new pass, replay the commands and end the pass.
#[doc(hidden)]
#[cfg(any(feature = "serde", feature = "replay"))]
pub fn compute_pass_end_with_unresolved_commands(
@ -347,19 +333,16 @@ impl Global {
base: BasePass<super::ComputeCommand>,
timestamp_writes: Option<&PassTimestampWrites>,
) -> Result<(), ComputePassError> {
let hub = &self.hub;
let scope = PassErrorScope::Pass;
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(scope),
};
cmd_buf.check_recording().map_pass_err(scope)?;
let pass_scope = PassErrorScope::Pass;
#[cfg(feature = "trace")]
{
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let cmd_buf = self
.hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?;
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(crate::device::trace::Command::RunComputePass {
base: BasePass {
@ -374,50 +357,61 @@ impl Global {
}
}
let commands =
super::ComputeCommand::resolve_compute_command_ids(&self.hub, &base.commands)?;
let BasePass {
label,
commands,
dynamic_offsets,
string_data,
push_constant_data,
} = base;
let timestamp_writes = if let Some(tw) = timestamp_writes {
Some(ArcPassTimestampWrites {
query_set: hub
.query_sets
.strict_get(tw.query_set)
.get()
.map_pass_err(scope)?,
beginning_of_pass_write_index: tw.beginning_of_pass_write_index,
end_of_pass_write_index: tw.end_of_pass_write_index,
})
} else {
None
let (mut compute_pass, encoder_error) = self.command_encoder_create_compute_pass(
encoder_id,
&ComputePassDescriptor {
label: label.as_deref().map(std::borrow::Cow::Borrowed),
timestamp_writes,
},
);
if let Some(err) = encoder_error {
return Err(ComputePassError {
scope: pass_scope,
inner: err.into(),
});
};
self.compute_pass_end_impl(
&cmd_buf,
BasePass {
label: base.label,
commands,
dynamic_offsets: base.dynamic_offsets,
string_data: base.string_data,
push_constant_data: base.push_constant_data,
},
timestamp_writes,
)
compute_pass.base = Some(BasePass {
label,
commands: super::ComputeCommand::resolve_compute_command_ids(&self.hub, &commands)?,
dynamic_offsets,
string_data,
push_constant_data,
});
self.compute_pass_end(&mut compute_pass)
}
fn compute_pass_end_impl(
&self,
cmd_buf: &CommandBuffer,
base: BasePass<ArcComputeCommand>,
mut timestamp_writes: Option<ArcPassTimestampWrites>,
) -> Result<(), ComputePassError> {
pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> {
profiling::scope!("CommandEncoder::run_compute_pass");
let pass_scope = PassErrorScope::Pass;
let cmd_buf = pass
.parent
.as_ref()
.ok_or(ComputePassErrorInner::InvalidParentEncoder)
.map_pass_err(pass_scope)?;
let base = pass
.base
.take()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(pass_scope)?;
let device = &cmd_buf.device;
device.check_is_valid().map_pass_err(pass_scope)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?;
cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?;
let cmd_buf_data = &mut *cmd_buf_data;
let encoder = &mut cmd_buf_data.encoder;
let status = &mut cmd_buf_data.status;
@ -459,9 +453,9 @@ impl Global {
state.tracker.textures.set_size(indices.textures.size());
let timestamp_writes: Option<hal::PassTimestampWrites<'_, dyn hal::DynQuerySet>> =
if let Some(tw) = timestamp_writes.take() {
if let Some(tw) = pass.timestamp_writes.take() {
tw.query_set
.same_device_as(cmd_buf)
.same_device_as(cmd_buf.as_ref())
.map_pass_err(pass_scope)?;
let query_set = state.tracker.query_sets.insert_single(tw.query_set);

View File

@ -83,7 +83,7 @@ pub(crate) enum CommandEncoderStatus {
/// When a `CommandEncoder` is left in this state, we have also
/// returned an error result from the function that encountered
/// the problem. Future attempts to use the encoder (for example,
/// calls to [`CommandBuffer::check_recording`]) will also return
/// calls to [`CommandBufferMutable::check_recording`]) will also return
/// errors.
///
/// Calling [`Global::command_encoder_finish`] in this state
@ -288,6 +288,106 @@ impl CommandBufferMutable {
Ok((encoder, tracker))
}
fn lock_encoder_impl(&mut self, lock: bool) -> Result<(), CommandEncoderError> {
match self.status {
CommandEncoderStatus::Recording => {
if lock {
self.status = CommandEncoderStatus::Locked;
}
Ok(())
}
CommandEncoderStatus::Locked => {
// Any operation on a locked encoder is required to put it into the invalid/error state.
// See https://www.w3.org/TR/webgpu/#encoder-state-locked
self.encoder.discard();
self.status = CommandEncoderStatus::Error;
Err(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
}
/// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state.
fn check_recording(&mut self) -> Result<(), CommandEncoderError> {
self.lock_encoder_impl(false)
}
/// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state.
///
/// Call [`CommandBufferMutable::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state.
fn lock_encoder(&mut self) -> Result<(), CommandEncoderError> {
self.lock_encoder_impl(true)
}
/// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state.
///
/// This function is the counterpart to [`CommandBufferMutable::lock_encoder`].
/// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state.
fn unlock_encoder(&mut self) -> Result<(), CommandEncoderError> {
match self.status {
CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Locked => {
self.status = CommandEncoderStatus::Recording;
Ok(())
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
}
pub fn check_finished(&self) -> Result<(), CommandEncoderError> {
match self.status {
CommandEncoderStatus::Finished => Ok(()),
_ => Err(CommandEncoderError::Invalid),
}
}
pub(crate) fn finish(&mut self, device: &Device) -> Result<(), CommandEncoderError> {
match self.status {
CommandEncoderStatus::Recording => {
if let Err(e) = self.encoder.close(device) {
Err(e.into())
} else {
self.status = CommandEncoderStatus::Finished;
// Note: if we want to stop tracking the swapchain texture view,
// this is the place to do it.
Ok(())
}
}
CommandEncoderStatus::Locked => {
self.encoder.discard();
self.status = CommandEncoderStatus::Error;
Err(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => {
self.encoder.discard();
Err(CommandEncoderError::Invalid)
}
}
}
pub(crate) fn into_baked_commands(self) -> BakedCommands {
BakedCommands {
encoder: self.encoder.raw,
list: self.encoder.list,
trackers: self.trackers,
buffer_memory_init_actions: self.buffer_memory_init_actions,
texture_memory_actions: self.texture_memory_actions,
}
}
pub(crate) fn destroy(mut self, device: &Device) {
self.encoder.discard();
unsafe {
self.encoder.raw.reset_all(self.encoder.list);
}
unsafe {
device.raw().destroy_command_encoder(self.encoder.raw);
}
}
}
/// A buffer of commands to be submitted to the GPU for execution.
@ -319,22 +419,15 @@ pub struct CommandBuffer {
/// This `Option` is populated when the command buffer is first created.
/// When this is submitted, dropped, or destroyed, its contents are
/// extracted into a [`BakedCommands`] by
/// [`CommandBuffer::extract_baked_commands`].
/// [`CommandBufferMutable::into_baked_commands`].
pub(crate) data: Mutex<Option<CommandBufferMutable>>,
}
impl Drop for CommandBuffer {
fn drop(&mut self) {
resource_log!("Drop {}", self.error_ident());
if self.data.lock().is_none() {
return;
}
let mut baked = self.extract_baked_commands();
unsafe {
baked.encoder.reset_all(baked.list);
}
unsafe {
self.device.raw().destroy_command_encoder(baked.encoder);
if let Some(data) = self.data.lock().take() {
data.destroy(&self.device);
}
}
}
@ -374,6 +467,15 @@ impl CommandBuffer {
}
}
pub(crate) fn new_invalid(device: &Arc<Device>, label: &Label) -> Self {
CommandBuffer {
device: device.clone(),
support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE),
label: label.to_string(),
data: Mutex::new(rank::COMMAND_BUFFER_DATA, None),
}
}
pub(crate) fn insert_barriers_from_tracker(
raw: &mut dyn hal::DynCommandEncoder,
base: &mut Tracker,
@ -452,80 +554,19 @@ impl CommandBuffer {
}
impl CommandBuffer {
fn lock_encoder_impl(&self, lock: bool) -> Result<(), CommandEncoderError> {
let mut cmd_buf_data_guard = self.data.lock();
let cmd_buf_data = cmd_buf_data_guard.as_mut().unwrap();
match cmd_buf_data.status {
CommandEncoderStatus::Recording => {
if lock {
cmd_buf_data.status = CommandEncoderStatus::Locked;
}
Ok(())
}
CommandEncoderStatus::Locked => {
// Any operation on a locked encoder is required to put it into the invalid/error state.
// See https://www.w3.org/TR/webgpu/#encoder-state-locked
cmd_buf_data.encoder.discard();
cmd_buf_data.status = CommandEncoderStatus::Error;
Err(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
pub fn try_get<'a>(
&'a self,
) -> Result<parking_lot::MappedMutexGuard<'a, CommandBufferMutable>, InvalidResourceError> {
let g = self.data.lock();
crate::lock::MutexGuard::try_map(g, |data| data.as_mut())
.map_err(|_| InvalidResourceError(self.error_ident()))
}
/// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state.
fn check_recording(&self) -> Result<(), CommandEncoderError> {
self.lock_encoder_impl(false)
}
/// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state.
///
/// Call [`CommandBuffer::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state.
fn lock_encoder(&self) -> Result<(), CommandEncoderError> {
self.lock_encoder_impl(true)
}
/// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state.
///
/// This function is the counterpart to [`CommandBuffer::lock_encoder`].
/// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state.
fn unlock_encoder(&self) -> Result<(), CommandEncoderError> {
let mut data_lock = self.data.lock();
let status = &mut data_lock.as_mut().unwrap().status;
match *status {
CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Locked => {
*status = CommandEncoderStatus::Recording;
Ok(())
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
}
pub fn is_finished(&self) -> bool {
match self.data.lock().as_ref().unwrap().status {
CommandEncoderStatus::Finished => true,
_ => false,
}
}
pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands {
let data = self.data.lock().take().unwrap();
BakedCommands {
encoder: data.encoder.raw,
list: data.encoder.list,
trackers: data.trackers,
buffer_memory_init_actions: data.buffer_memory_init_actions,
texture_memory_actions: data.texture_memory_actions,
}
}
pub(crate) fn from_arc_into_baked(self: Arc<Self>) -> BakedCommands {
let mut command_buffer = Arc::into_inner(self)
.expect("CommandBuffer cannot be destroyed because is still in use");
command_buffer.extract_baked_commands()
pub fn try_take<'a>(&'a self) -> Result<CommandBufferMutable, InvalidResourceError> {
self.data
.lock()
.take()
.ok_or_else(|| InvalidResourceError(self.error_ident()))
}
}
@ -613,34 +654,17 @@ impl Global {
let hub = &self.hub;
let error = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => {
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
match cmd_buf_data.status {
CommandEncoderStatus::Recording => {
if let Err(e) = cmd_buf_data.encoder.close(&cmd_buf.device) {
Some(e.into())
} else {
cmd_buf_data.status = CommandEncoderStatus::Finished;
//Note: if we want to stop tracking the swapchain texture view,
// this is the place to do it.
None
}
}
CommandEncoderStatus::Locked => {
cmd_buf_data.encoder.discard();
cmd_buf_data.status = CommandEncoderStatus::Error;
Some(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => {
cmd_buf_data.encoder.discard();
Some(CommandEncoderError::Invalid)
}
}
}
Err(_) => Some(CommandEncoderError::Invalid),
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let error = match cmd_buf
.try_get()
.map_err(|e| e.into())
.and_then(|mut cmd_buf_data| cmd_buf_data.finish(&cmd_buf.device))
{
Ok(_) => None,
Err(e) => Some(e),
};
(encoder_id.into_command_buffer_id(), error)
@ -656,14 +680,12 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid),
};
cmd_buf.check_recording()?;
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::PushDebugGroup(label.to_string()));
@ -692,14 +714,11 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
@ -728,14 +747,11 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {

View File

@ -318,22 +318,16 @@ impl Global {
) -> Result<(), QueryError> {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
cmd_buf
.device
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::WriteTimestamp {
@ -342,17 +336,14 @@ impl Global {
});
}
let encoder = &mut cmd_buf_data.encoder;
let tracker = &mut cmd_buf_data.trackers;
let raw_encoder = encoder.open(&cmd_buf.device)?;
let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?;
let query_set = hub.query_sets.strict_get(query_set_id).get()?;
let query_set = tracker.query_sets.insert_single(query_set);
query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?;
cmd_buf_data.trackers.query_sets.insert_single(query_set);
Ok(())
}
@ -367,17 +358,11 @@ impl Global {
) -> Result<(), QueryError> {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
@ -390,26 +375,20 @@ impl Global {
});
}
let encoder = &mut cmd_buf_data.encoder;
let tracker = &mut cmd_buf_data.trackers;
let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions;
let raw_encoder = encoder.open(&cmd_buf.device)?;
if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 {
return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment));
}
let query_set = hub.query_sets.strict_get(query_set_id).get()?;
let query_set = tracker.query_sets.insert_single(query_set);
query_set.same_device_as(cmd_buf.as_ref())?;
let dst_buffer = hub.buffers.strict_get(destination).get()?;
dst_buffer.same_device_as(cmd_buf.as_ref())?;
let dst_pending = tracker
let dst_pending = cmd_buf_data
.trackers
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
@ -455,14 +434,16 @@ impl Global {
}
// TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state.
buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action(
&dst_buffer,
buffer_start_offset..buffer_end_offset,
MemoryInitKind::ImplicitlyInitialized,
));
cmd_buf_data.buffer_memory_init_actions.extend(
dst_buffer.initialization_status.read().create_action(
&dst_buffer,
buffer_start_offset..buffer_end_offset,
MemoryInitKind::ImplicitlyInitialized,
),
);
let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?;
let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?;
unsafe {
raw_encoder.transition_buffers(dst_barrier.as_slice());
raw_encoder.copy_query_results(
@ -474,6 +455,8 @@ impl Global {
);
}
cmd_buf_data.trackers.query_sets.insert_single(query_set);
Ok(())
}
}

View File

@ -1427,12 +1427,15 @@ impl Global {
let make_err = |e, arc_desc| (RenderPass::new(None, arc_desc), Some(e));
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc),
};
let cmd_buf = hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
match cmd_buf.lock_encoder() {
match cmd_buf
.try_get()
.map_err(|e| e.into())
.and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder())
{
Ok(_) => {}
Err(e) => return make_err(e, arc_desc),
};
@ -1442,6 +1445,8 @@ impl Global {
(RenderPass::new(Some(cmd_buf), arc_desc), err)
}
/// Note that this differs from [`Self::render_pass_end`], it will
/// create a new pass, replay the commands and end the pass.
#[doc(hidden)]
#[cfg(any(feature = "serde", feature = "replay"))]
pub fn render_pass_end_with_unresolved_commands(
@ -1457,15 +1462,11 @@ impl Global {
#[cfg(feature = "trace")]
{
let hub = &self.hub;
let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) {
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(pass_scope)?,
};
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let cmd_buf = self
.hub
.command_buffers
.strict_get(encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?;
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(crate::device::trace::Command::RunRenderPass {
@ -1509,29 +1510,26 @@ impl Global {
});
};
let hub = &self.hub;
render_pass.base = Some(BasePass {
label,
commands: super::RenderCommand::resolve_render_command_ids(hub, &commands)?,
commands: super::RenderCommand::resolve_render_command_ids(&self.hub, &commands)?,
dynamic_offsets,
string_data,
push_constant_data,
});
if let Some(err) = encoder_error {
Err(RenderPassError {
scope: pass_scope,
inner: err.into(),
})
} else {
self.render_pass_end(&mut render_pass)
}
self.render_pass_end(&mut render_pass)
}
#[doc(hidden)]
pub fn render_pass_end(&self, pass: &mut RenderPass) -> Result<(), RenderPassError> {
let pass_scope = PassErrorScope::Pass;
let cmd_buf = pass
.parent
.as_ref()
.ok_or(RenderPassErrorInner::InvalidParentEncoder)
.map_pass_err(pass_scope)?;
let base = pass
.base
.take()
@ -1543,10 +1541,9 @@ impl Global {
base.label.as_deref().unwrap_or("")
);
let Some(cmd_buf) = pass.parent.as_ref() else {
return Err(RenderPassErrorInner::InvalidParentEncoder).map_pass_err(pass_scope);
};
cmd_buf.unlock_encoder().map_pass_err(pass_scope)?;
let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?;
cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?;
let cmd_buf_data = &mut *cmd_buf_data;
let device = &cmd_buf.device;
let snatch_guard = &device.snatchable_lock.read();
@ -1554,9 +1551,6 @@ impl Global {
let hal_label = hal_label(base.label.as_deref(), device.instance_flags);
let (scope, pending_discard_init_fixups) = {
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
device.check_is_valid().map_pass_err(pass_scope)?;
let encoder = &mut cmd_buf_data.encoder;
@ -1881,9 +1875,6 @@ impl Global {
(trackers, pending_discard_init_fixups)
};
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let encoder = &mut cmd_buf_data.encoder;
let status = &mut cmd_buf_data.status;
let tracker = &mut cmd_buf_data.trackers;

View File

@ -16,7 +16,7 @@ use crate::{
MissingTextureUsageError, ParentDevice, Texture, TextureErrorDimension,
},
snatch::SnatchGuard,
track::{TextureSelector, Tracker},
track::TextureSelector,
};
use arrayvec::ArrayVec;
@ -25,7 +25,7 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages};
use std::sync::Arc;
use super::{memory_init::CommandBufferTextureMemoryActions, ClearError, CommandEncoder};
use super::{ClearError, CommandBufferMutable};
pub type ImageCopyBuffer = wgt::ImageCopyBuffer<BufferId>;
pub type ImageCopyTexture = wgt::ImageCopyTexture<TextureId>;
@ -406,9 +406,7 @@ pub(crate) fn validate_texture_copy_range(
fn handle_texture_init(
init_kind: MemoryInitKind,
encoder: &mut CommandEncoder,
trackers: &mut Tracker,
texture_memory_actions: &mut CommandBufferTextureMemoryActions,
cmd_buf_data: &mut CommandBufferMutable,
device: &Device,
copy_texture: &ImageCopyTexture,
copy_size: &Extent3d,
@ -426,11 +424,13 @@ fn handle_texture_init(
};
// Register the init action.
let immediate_inits = texture_memory_actions.register_init_action(&{ init_action });
let immediate_inits = cmd_buf_data
.texture_memory_actions
.register_init_action(&{ init_action });
// In rare cases we may need to insert an init operation immediately onto the command buffer.
if !immediate_inits.is_empty() {
let cmd_buf_raw = encoder.open(device)?;
let cmd_buf_raw = cmd_buf_data.encoder.open(device)?;
for init in immediate_inits {
clear_texture(
&init.texture,
@ -439,7 +439,7 @@ fn handle_texture_init(
layer_range: init.layer..(init.layer + 1),
},
cmd_buf_raw,
&mut trackers.textures,
&mut cmd_buf_data.trackers.textures,
&device.alignments,
device.zero_buffer.as_ref(),
snatch_guard,
@ -455,9 +455,7 @@ fn handle_texture_init(
/// Ensure the source texture of a transfer is in the right initialization
/// state, and record the state for after the transfer operation.
fn handle_src_texture_init(
encoder: &mut CommandEncoder,
trackers: &mut Tracker,
texture_memory_actions: &mut CommandBufferTextureMemoryActions,
cmd_buf_data: &mut CommandBufferMutable,
device: &Device,
source: &ImageCopyTexture,
copy_size: &Extent3d,
@ -466,9 +464,7 @@ fn handle_src_texture_init(
) -> Result<(), TransferError> {
handle_texture_init(
MemoryInitKind::NeedsInitializedMemory,
encoder,
trackers,
texture_memory_actions,
cmd_buf_data,
device,
source,
copy_size,
@ -483,9 +479,7 @@ fn handle_src_texture_init(
/// Ensure the destination texture of a transfer is in the right initialization
/// state, and record the state for after the transfer operation.
fn handle_dst_texture_init(
encoder: &mut CommandEncoder,
trackers: &mut Tracker,
texture_memory_actions: &mut CommandBufferTextureMemoryActions,
cmd_buf_data: &mut CommandBufferMutable,
device: &Device,
destination: &ImageCopyTexture,
copy_size: &Extent3d,
@ -508,9 +502,7 @@ fn handle_dst_texture_init(
handle_texture_init(
dst_init_kind,
encoder,
trackers,
texture_memory_actions,
cmd_buf_data,
device,
destination,
copy_size,
@ -540,17 +532,11 @@ impl Global {
}
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
let device = &cmd_buf.device;
device.check_is_valid()?;
@ -704,21 +690,15 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
let device = &cmd_buf.device;
device.check_is_valid()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::CopyBufferToTexture {
@ -728,11 +708,6 @@ impl Global {
});
}
let encoder = &mut cmd_buf_data.encoder;
let tracker = &mut cmd_buf_data.trackers;
let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions;
let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions;
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
log::trace!("Ignoring copy_buffer_to_texture of size 0");
return Ok(());
@ -757,9 +732,7 @@ impl Global {
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_dst_texture_init(
encoder,
tracker,
texture_memory_actions,
&mut cmd_buf_data,
device,
destination,
copy_size,
@ -771,7 +744,8 @@ impl Global {
src_buffer.same_device_as(cmd_buf.as_ref())?;
let src_pending = tracker
let src_pending = cmd_buf_data
.trackers
.buffers
.set_single(&src_buffer, hal::BufferUses::COPY_SRC);
@ -781,10 +755,11 @@ impl Global {
.map_err(TransferError::MissingBufferUsage)?;
let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard));
let dst_pending =
tracker
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST);
let dst_pending = cmd_buf_data.trackers.textures.set_single(
&dst_texture,
dst_range,
hal::TextureUses::COPY_DST,
);
let dst_raw = dst_texture.try_raw(&snatch_guard)?;
dst_texture
.check_usage(TextureUsages::COPY_DST)
@ -821,11 +796,13 @@ impl Global {
.map_err(TransferError::from)?;
}
buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action(
&src_buffer,
source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy),
MemoryInitKind::NeedsInitializedMemory,
));
cmd_buf_data.buffer_memory_init_actions.extend(
src_buffer.initialization_status.read().create_action(
&src_buffer,
source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy),
MemoryInitKind::NeedsInitializedMemory,
),
);
let regions = (0..array_layer_count)
.map(|rel_array_layer| {
@ -841,7 +818,7 @@ impl Global {
})
.collect::<Vec<_>>();
let cmd_buf_raw = encoder.open(&cmd_buf.device)?;
let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?;
unsafe {
cmd_buf_raw.transition_textures(&dst_barrier);
cmd_buf_raw.transition_buffers(src_barrier.as_slice());
@ -866,21 +843,15 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
let device = &cmd_buf.device;
device.check_is_valid()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::CopyTextureToBuffer {
@ -889,10 +860,6 @@ impl Global {
size: *copy_size,
});
}
let encoder = &mut cmd_buf_data.encoder;
let tracker = &mut cmd_buf_data.trackers;
let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions;
let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions;
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
log::trace!("Ignoring copy_texture_to_buffer of size 0");
@ -914,9 +881,7 @@ impl Global {
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_src_texture_init(
encoder,
tracker,
texture_memory_actions,
&mut cmd_buf_data,
device,
source,
copy_size,
@ -924,10 +889,11 @@ impl Global {
&snatch_guard,
)?;
let src_pending =
tracker
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC);
let src_pending = cmd_buf_data.trackers.textures.set_single(
&src_texture,
src_range,
hal::TextureUses::COPY_SRC,
);
let src_raw = src_texture.try_raw(&snatch_guard)?;
src_texture
.check_usage(TextureUsages::COPY_SRC)
@ -953,7 +919,8 @@ impl Global {
dst_buffer.same_device_as(cmd_buf.as_ref())?;
let dst_pending = tracker
let dst_pending = cmd_buf_data
.trackers
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
@ -991,11 +958,14 @@ impl Global {
.map_err(TransferError::from)?;
}
buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action(
&dst_buffer,
destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy),
MemoryInitKind::ImplicitlyInitialized,
));
cmd_buf_data.buffer_memory_init_actions.extend(
dst_buffer.initialization_status.read().create_action(
&dst_buffer,
destination.layout.offset
..(destination.layout.offset + required_buffer_bytes_in_copy),
MemoryInitKind::ImplicitlyInitialized,
),
);
let regions = (0..array_layer_count)
.map(|rel_array_layer| {
@ -1010,7 +980,7 @@ impl Global {
}
})
.collect::<Vec<_>>();
let cmd_buf_raw = encoder.open(&cmd_buf.device)?;
let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?;
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier.as_slice());
cmd_buf_raw.transition_textures(&src_barrier);
@ -1040,23 +1010,17 @@ impl Global {
let hub = &self.hub;
let cmd_buf = match hub
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id())
{
Ok(cmd_buf) => cmd_buf,
Err(_) => return Err(CommandEncoderError::Invalid.into()),
};
cmd_buf.check_recording()?;
.strict_get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.try_get()?;
cmd_buf_data.check_recording()?;
let device = &cmd_buf.device;
device.check_is_valid()?;
let snatch_guard = device.snatchable_lock.read();
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::CopyTextureToTexture {
@ -1065,9 +1029,6 @@ impl Global {
size: *copy_size,
});
}
let encoder = &mut cmd_buf_data.encoder;
let tracker = &mut cmd_buf_data.trackers;
let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions;
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
log::trace!("Ignoring copy_texture_to_texture of size 0");
@ -1117,9 +1078,7 @@ impl Global {
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_src_texture_init(
encoder,
tracker,
texture_memory_actions,
&mut cmd_buf_data,
device,
source,
copy_size,
@ -1127,9 +1086,7 @@ impl Global {
&snatch_guard,
)?;
handle_dst_texture_init(
encoder,
tracker,
texture_memory_actions,
&mut cmd_buf_data,
device,
destination,
copy_size,

View File

@ -6,7 +6,8 @@ use crate::{
self, BindGroupEntry, BindingResource, BufferBinding, ResolvedBindGroupDescriptor,
ResolvedBindGroupEntry, ResolvedBindingResource, ResolvedBufferBinding,
},
command, conv,
command::{self, CommandBuffer},
conv,
device::{bgl, life::WaitIdleError, DeviceError, DeviceLostClosure, DeviceLostReason},
global::Global,
hal_api::HalApi,
@ -1012,9 +1013,9 @@ impl Global {
id_in.map(|id| id.into_command_buffer_id()),
);
let error = 'error: {
let device = self.hub.devices.strict_get(device_id);
let device = self.hub.devices.strict_get(device_id);
let error = 'error: {
let command_buffer = match device.create_command_encoder(&desc.label) {
Ok(command_buffer) => command_buffer,
Err(e) => break 'error e,
@ -1025,7 +1026,7 @@ impl Global {
return (id.into_command_encoder_id(), None);
};
let id = fid.assign_error();
let id = fid.assign(Arc::new(CommandBuffer::new_invalid(&device, &desc.label)));
(id.into_command_encoder_id(), Some(error))
}
@ -1035,12 +1036,9 @@ impl Global {
let hub = &self.hub;
if let Some(cmd_buf) = hub
let _cmd_buf = hub
.command_buffers
.unregister(command_encoder_id.into_command_buffer_id())
{
cmd_buf.data.lock().as_mut().unwrap().encoder.discard();
}
.strict_unregister(command_encoder_id.into_command_buffer_id());
}
pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) {

View File

@ -4,7 +4,8 @@ use crate::{
api_log,
command::{
extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
ClearError, CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError,
ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide,
ImageCopyTexture, TransferError,
},
conv,
device::{DeviceError, WaitIdleError},
@ -353,6 +354,10 @@ pub enum QueueSubmitError {
SurfaceUnconfigured,
#[error("GPU got stuck :(")]
StuckGpu,
#[error(transparent)]
InvalidResource(#[from] InvalidResourceError),
#[error(transparent)]
CommandEncoder(#[from] CommandEncoderError),
}
//TODO: move out common parts of write_xxx.
@ -1050,105 +1055,69 @@ impl Global {
let mut submit_surface_textures_owned = FastHashMap::default();
{
let mut command_buffer_guard = hub.command_buffers.write();
let command_buffer_guard = hub.command_buffers.read();
if !command_buffer_ids.is_empty() {
profiling::scope!("prepare");
let mut first_error = None;
//TODO: if multiple command buffers are submitted, we can re-use the last
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
for command_buffer_id in command_buffer_ids {
profiling::scope!("process command buffer");
// we reset the used surface textures every time we use
// it, so make sure to set_size on it.
used_surface_textures.set_size(device.tracker_indices.textures.size());
let command_buffer = command_buffer_guard.strict_get(*command_buffer_id);
// Note that we are required to invalidate all command buffers in both the success and failure paths.
// This is why we `continue` and don't early return via `?`.
#[allow(unused_mut)]
let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) {
Ok(cmdbuf) => cmdbuf,
Err(_) => continue,
};
let mut cmd_buf_data = command_buffer.try_take();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(Action::Submit(
submit_index,
cmdbuf
.data
.lock()
.as_mut()
.unwrap()
.commands
.take()
.unwrap(),
));
if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
trace.add(Action::Submit(
submit_index,
cmd_buf_data.commands.take().unwrap(),
));
}
}
cmdbuf.same_device_as(queue.as_ref())?;
let mut baked = match cmd_buf_data {
Ok(cmd_buf_data) => {
let res = validate_command_buffer(
&command_buffer,
&queue,
&cmd_buf_data,
&snatch_guard,
&mut submit_surface_textures_owned,
&mut used_surface_textures,
);
if let Err(err) = res {
first_error.get_or_insert(err);
cmd_buf_data.destroy(&command_buffer.device);
continue;
}
cmd_buf_data.into_baked_commands()
}
Err(err) => {
first_error.get_or_insert(err.into());
continue;
}
};
if !cmdbuf.is_finished() {
let cmdbuf = Arc::into_inner(cmdbuf).expect(
"Command buffer cannot be destroyed because is still in use",
);
device.destroy_command_buffer(cmdbuf);
if first_error.is_some() {
continue;
}
{
profiling::scope!("check resource state");
let cmd_buf_data = cmdbuf.data.lock();
let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers;
// update submission IDs
{
profiling::scope!("buffers");
for buffer in cmd_buf_trackers.buffers.used_resources() {
buffer.check_destroyed(&snatch_guard)?;
match *buffer.map_state.lock() {
BufferMapState::Idle => (),
_ => {
return Err(QueueSubmitError::BufferStillMapped(
buffer.error_ident(),
))
}
}
}
}
{
profiling::scope!("textures");
for texture in cmd_buf_trackers.textures.used_resources() {
let should_extend = match texture.try_inner(&snatch_guard)? {
TextureInner::Native { .. } => false,
TextureInner::Surface { .. } => {
// Compare the Arcs by pointer as Textures don't implement Eq.
submit_surface_textures_owned
.insert(Arc::as_ptr(&texture), texture.clone());
true
}
};
if should_extend {
unsafe {
used_surface_textures
.merge_single(
&texture,
None,
hal::TextureUses::PRESENT,
)
.unwrap();
};
}
}
}
}
let mut baked = cmdbuf.from_arc_into_baked();
// execute resource transitions
unsafe {
baked.encoder.begin_encoding(hal_label(
@ -1209,6 +1178,10 @@ impl Global {
pending_textures: FastHashMap::default(),
});
}
if let Some(first_error) = first_error {
return Err(first_error);
}
}
}
@ -1340,3 +1313,54 @@ impl Global {
queue.device.lock_life().add_work_done_closure(closure);
}
}
/// Validates a command buffer prior to queue submission.
///
/// Checks, in order:
/// - the command buffer belongs to the same device as `queue`;
/// - recording has been finished (`check_finished`);
/// - every tracked buffer is alive and not currently mapped;
/// - every tracked texture is alive; surface textures are additionally
///   recorded into `submit_surface_textures_owned` and merged into
///   `used_surface_textures` with the `PRESENT` usage.
///
/// Returns the first validation failure as a [`QueueSubmitError`].
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &crate::snatch::SnatchGuard<'_>,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;
    cmd_buf_data.check_finished()?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;
                // A buffer still mapped on the CPU must not be used by the GPU.
                if !matches!(*buffer.map_state.lock(), BufferMapState::Idle) {
                    return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident()));
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let is_surface_texture = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        // Compare the Arcs by pointer as Textures don't implement Eq.
                        submit_surface_textures_owned
                            .insert(Arc::as_ptr(&texture), texture.clone());
                        true
                    }
                };
                if is_surface_texture {
                    unsafe {
                        used_surface_textures
                            .merge_single(&texture, None, hal::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }
    }

    Ok(())
}

View File

@ -3630,16 +3630,6 @@ impl Device {
}
impl Device {
pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) {
let mut baked = cmd_buf.extract_baked_commands();
unsafe {
baked.encoder.reset_all(baked.list);
}
unsafe {
self.raw().destroy_command_encoder(baked.encoder);
}
}
/// Wait for idle and remove resources that we can, before we die.
pub(crate) fn prepare_to_die(&self) {
self.pending_writes.lock().deactivate();

View File

@ -78,6 +78,15 @@ impl<T> Mutex<T> {
}
}
impl<'a, T> MutexGuard<'a, T> {
    /// Attempts to narrow the guard to a component of the locked data.
    ///
    /// Returns `Err(())` when `f` returns `None`; otherwise yields a
    /// [`parking_lot::MappedMutexGuard`] borrowing the selected component.
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<parking_lot::MappedMutexGuard<'a, U>, ()>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        match parking_lot::MutexGuard::try_map(s.inner, f) {
            Ok(mapped) => Ok(mapped),
            Err(_guard) => Err(()),
        }
    }
}
impl<'a, T> std::ops::Deref for MutexGuard<'a, T> {
type Target = T;

View File

@ -30,6 +30,15 @@ impl<T> Mutex<T> {
}
}
impl<'a, T> MutexGuard<'a, T> {
    /// Attempts to narrow the guard to a component of the locked data.
    ///
    /// Returns `Err(())` when `f` returns `None`; otherwise yields a
    /// [`parking_lot::MappedMutexGuard`] borrowing the selected component.
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<parking_lot::MappedMutexGuard<'a, U>, ()>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        match parking_lot::MutexGuard::try_map(s.0, f) {
            Ok(mapped) => Ok(mapped),
            Err(_guard) => Err(()),
        }
    }
}
impl<'a, T> std::ops::Deref for MutexGuard<'a, T> {
type Target = T;

View File

@ -4,7 +4,7 @@ use crate::{
id::Id,
identity::IdentityManager,
lock::{rank, RwLock, RwLockReadGuard, RwLockWriteGuard},
storage::{Element, InvalidId, Storage, StorageItem},
storage::{Element, Storage, StorageItem},
};
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
@ -12,7 +12,6 @@ pub struct RegistryReport {
pub num_allocated: usize,
pub num_kept_from_user: usize,
pub num_released_from_user: usize,
pub num_error: usize,
pub element_size: usize,
}
@ -73,11 +72,6 @@ impl<T: StorageItem> FutureId<'_, T> {
data.insert(self.id, value);
self.id
}
pub fn assign_error(self) -> Id<T::Marker> {
self.data.write().insert_error(self.id);
self.id
}
}
impl<T: StorageItem> Registry<T> {
@ -106,15 +100,6 @@ impl<T: StorageItem> Registry<T> {
pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage<T>> {
self.storage.write()
}
pub(crate) fn unregister(&self, id: Id<T::Marker>) -> Option<T> {
let value = self.storage.write().remove(id);
// This needs to happen *after* removing it from the storage, to maintain the
// invariant that `self.identity` only contains ids which are actually available
// See https://github.com/gfx-rs/wgpu/issues/5372
self.identity.free(id);
//Returning None is legal if it's an error ID
value
}
pub(crate) fn strict_unregister(&self, id: Id<T::Marker>) -> T {
let value = self.storage.write().strict_remove(id);
// This needs to happen *after* removing it from the storage, to maintain the
@ -136,7 +121,6 @@ impl<T: StorageItem> Registry<T> {
match *element {
Element::Occupied(..) => report.num_kept_from_user += 1,
Element::Vacant => report.num_released_from_user += 1,
Element::Error(_) => report.num_error += 1,
}
}
report
@ -144,10 +128,6 @@ impl<T: StorageItem> Registry<T> {
}
impl<T: StorageItem + Clone> Registry<T> {
pub(crate) fn get(&self, id: Id<T::Marker>) -> Result<T, InvalidId> {
self.read().get_owned(id)
}
/// Returns a clone of the item stored under `id`.
///
/// Delegates to `Storage::strict_get`, which panics on an epoch
/// mismatch or a vacant entry.
pub(crate) fn strict_get(&self, id: Id<T::Marker>) -> T {
    let storage = self.read();
    storage.strict_get(id)
}
@ -181,7 +161,7 @@ mod tests {
let value = Arc::new(TestData);
let new_id = registry.prepare(wgt::Backend::Empty, None);
let id = new_id.assign(value);
registry.unregister(id);
registry.strict_unregister(id);
}
});
}

View File

@ -1383,9 +1383,10 @@ impl Global {
let hub = &self.hub;
if let Ok(cmd_buf) = hub.command_buffers.get(id.into_command_buffer_id()) {
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let cmd_buf = hub.command_buffers.strict_get(id.into_command_buffer_id());
let cmd_buf_data = cmd_buf.try_get();
if let Ok(mut cmd_buf_data) = cmd_buf_data {
let cmd_buf_raw = cmd_buf_data
.encoder
.open(&cmd_buf.device)

View File

@ -18,15 +18,8 @@ where
/// There is one live id with this index, allocated at the given
/// epoch.
Occupied(T, Epoch),
/// Like `Occupied`, but an error occurred when creating the
/// resource.
Error(Epoch),
}
#[derive(Clone, Debug)]
pub(crate) struct InvalidId;
pub(crate) trait StorageItem: ResourceType {
type Marker: Marker;
}
@ -81,23 +74,6 @@ impl<T> Storage<T>
where
T: StorageItem,
{
/// Get a reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get(&self, id: Id<T::Marker>) -> Result<&T, InvalidId> {
let (index, epoch, _) = id.unzip();
let (result, storage_epoch) = match self.map.get(index as usize) {
Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch),
None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id),
Some(&Element::Error(epoch)) => (Err(InvalidId), epoch),
};
assert_eq!(
epoch, storage_epoch,
"{}[{:?}] is no longer alive",
self.kind, id
);
result
}
fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element<T>) {
if index >= self.map.len() {
self.map.resize_with(index + 1, || Element::Vacant);
@ -112,14 +88,6 @@ where
T::TYPE
);
}
Element::Error(storage_epoch) => {
assert_ne!(
epoch,
storage_epoch,
"Index {index:?} of {} is already occupied with Error",
T::TYPE
);
}
}
}
@ -128,35 +96,6 @@ where
self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch))
}
pub(crate) fn insert_error(&mut self, id: Id<T::Marker>) {
let (index, epoch, _) = id.unzip();
self.insert_impl(index as usize, epoch, Element::Error(epoch))
}
pub(crate) fn replace_with_error(&mut self, id: Id<T::Marker>) -> Result<T, InvalidId> {
let (index, epoch, _) = id.unzip();
match std::mem::replace(&mut self.map[index as usize], Element::Error(epoch)) {
Element::Vacant => panic!("Cannot access vacant resource"),
Element::Occupied(value, storage_epoch) => {
assert_eq!(epoch, storage_epoch);
Ok(value)
}
_ => Err(InvalidId),
}
}
pub(crate) fn remove(&mut self, id: Id<T::Marker>) -> Option<T> {
let (index, epoch, _) = id.unzip();
match std::mem::replace(&mut self.map[index as usize], Element::Vacant) {
Element::Occupied(value, storage_epoch) => {
assert_eq!(epoch, storage_epoch);
Some(value)
}
Element::Error(_) => None,
Element::Vacant => panic!("Cannot remove a vacant resource"),
}
}
pub(crate) fn strict_remove(&mut self, id: Id<T::Marker>) -> T {
let (index, epoch, _) = id.unzip();
match std::mem::replace(&mut self.map[index as usize], Element::Vacant) {
@ -164,7 +103,6 @@ where
assert_eq!(epoch, storage_epoch);
value
}
Element::Error(_) => unreachable!(),
Element::Vacant => panic!("Cannot remove a vacant resource"),
}
}
@ -190,12 +128,6 @@ impl<T> Storage<T>
where
T: StorageItem + Clone,
{
/// Get an owned reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get_owned(&self, id: Id<T::Marker>) -> Result<T, InvalidId> {
Ok(self.get(id)?.clone())
}
/// Get an owned reference to an item.
/// Panics if there is an epoch mismatch, the entry is empty or in error.
pub(crate) fn strict_get(&self, id: Id<T::Marker>) -> T {
@ -203,7 +135,6 @@ where
let (result, storage_epoch) = match self.map.get(index as usize) {
Some(&Element::Occupied(ref v, epoch)) => (v.clone(), epoch),
None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id),
Some(&Element::Error(_)) => unreachable!(),
};
assert_eq!(
epoch, storage_epoch,