Make compute pass end consume the pass (#5575)

* rename `command_encoder_run_*_pass` to `*_pass_end` and make it a method of compute/render pass instead of encoder

* executing a compute pass consumes it now such that it can't be executed again

* use handle_error instead of handle_error_nolabel for wgpu compute pass

* use handle_error instead of handle_error_nolabel for render_pass_end

* changelog addition

* feat: `compute_pass_set_push_constant`: move panics to error variants

Co-Authored-By: Erich Gubler <erichdongubler@gmail.com>

---------

Co-authored-by: Erich Gubler <erichdongubler@gmail.com>
This commit is contained in:
Andreas Reich 2024-05-25 18:54:48 +02:00 committed by GitHub
parent aaefc7c10d
commit 2fd09945cd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 498 additions and 352 deletions

View File

@ -47,7 +47,7 @@ TODO(wumpf): This is still work in progress. Should write a bit more about it. A
`wgpu::ComputePass` recording methods (e.g. `wgpu::ComputePass::set_render_pipeline`) no longer impose a lifetime constraint on passed-in resources.
By @wumpf in [#5569](https://github.com/gfx-rs/wgpu/pull/5569).
By @wumpf in [#5569](https://github.com/gfx-rs/wgpu/pull/5569), [#5575](https://github.com/gfx-rs/wgpu/pull/5575).
#### Querying shader compilation errors

View File

@ -57,7 +57,7 @@ pub fn op_webgpu_compute_pass_dispatch_workgroups(
compute_pass_resource
.0
.borrow_mut()
.dispatch_workgroups(state.borrow(), x, y, z);
.dispatch_workgroups(state.borrow(), x, y, z)?;
Ok(WebGpuResult::empty())
}
@ -95,7 +95,7 @@ pub fn op_webgpu_compute_pass_end(
.resource_table
.take::<WebGpuComputePass>(compute_pass_rid)?;
compute_pass_resource.0.borrow_mut().run(state.borrow())?;
compute_pass_resource.0.borrow_mut().end(state.borrow())?;
Ok(WebGpuResult::empty())
}
@ -152,7 +152,7 @@ pub fn op_webgpu_compute_pass_push_debug_group(
state.borrow(),
group_label,
0, // wgpu#975
);
)?;
Ok(WebGpuResult::empty())
}
@ -170,7 +170,7 @@ pub fn op_webgpu_compute_pass_pop_debug_group(
compute_pass_resource
.0
.borrow_mut()
.pop_debug_group(state.borrow());
.pop_debug_group(state.borrow())?;
Ok(WebGpuResult::empty())
}
@ -190,7 +190,7 @@ pub fn op_webgpu_compute_pass_insert_debug_marker(
state.borrow(),
marker_label,
0, // wgpu#975
);
)?;
Ok(WebGpuResult::empty())
}

View File

@ -186,21 +186,16 @@ pub fn op_webgpu_render_pass_execute_bundles(
#[serde]
pub fn op_webgpu_render_pass_end(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] render_pass_rid: ResourceId,
) -> Result<WebGpuResult, AnyError> {
let command_encoder_resource =
state
.resource_table
.get::<super::command_encoder::WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let render_pass_resource = state
.resource_table
.take::<WebGpuRenderPass>(render_pass_rid)?;
let render_pass = &render_pass_resource.0.borrow();
let command_encoder = render_pass.parent_id();
let instance = state.borrow::<super::Instance>();
gfx_ok!(command_encoder => instance.command_encoder_run_render_pass(command_encoder, render_pass))
gfx_ok!(command_encoder => instance.render_pass_end(render_pass))
}
#[op2]

View File

@ -99,9 +99,9 @@ impl GlobalPlay for wgc::global::Global {
base,
timestamp_writes,
} => {
self.command_encoder_run_compute_pass_with_unresolved_commands::<A>(
self.compute_pass_end_with_unresolved_commands::<A>(
encoder,
base.as_ref(),
base,
timestamp_writes.as_ref(),
)
.unwrap();
@ -113,7 +113,7 @@ impl GlobalPlay for wgc::global::Global {
timestamp_writes,
occlusion_query_set_id,
} => {
self.command_encoder_run_render_pass_impl::<A>(
self.render_pass_end_impl::<A>(
encoder,
base.as_ref(),
&target_colors,

View File

@ -888,7 +888,7 @@ unsafe impl<A: HalApi> Sync for RenderBundle<A> {}
impl<A: HalApi> RenderBundle<A> {
/// Actually encode the contents into a native command buffer.
///
/// This is partially duplicating the logic of `command_encoder_run_render_pass`.
/// This is partially duplicating the logic of `render_pass_end`.
/// However the point of this function is to be lighter, since we already had
/// a chance to go through the commands in `render_bundle_encoder_finish`.
///

View File

@ -5,8 +5,8 @@ use crate::{
compute_command::{ArcComputeCommand, ComputeCommand},
end_pipeline_statistics_query,
memory_init::{fixup_discarded_surfaces, SurfacesInDiscardState},
BasePass, BasePassRef, BindGroupStateChange, CommandBuffer, CommandEncoderError,
CommandEncoderStatus, MapPassErr, PassErrorScope, QueryUseError, StateChange,
BasePass, BindGroupStateChange, CommandBuffer, CommandEncoderError, CommandEncoderStatus,
MapPassErr, PassErrorScope, QueryUseError, StateChange,
},
device::{DeviceError, MissingDownlevelFlags, MissingFeatures},
error::{ErrorFormatter, PrettyError},
@ -35,7 +35,12 @@ use std::sync::Arc;
use std::{fmt, mem, str};
pub struct ComputePass<A: HalApi> {
base: BasePass<ArcComputeCommand<A>>,
/// All pass data & records is stored here.
///
/// If this is `None`, the pass has been ended and can no longer be used.
/// Any attempt to record more commands will result in a validation error.
base: Option<BasePass<ArcComputeCommand<A>>>,
parent_id: id::CommandEncoderId,
timestamp_writes: Option<ComputePassTimestampWrites>,
@ -47,7 +52,7 @@ pub struct ComputePass<A: HalApi> {
impl<A: HalApi> ComputePass<A> {
fn new(parent_id: id::CommandEncoderId, desc: &ComputePassDescriptor) -> Self {
Self {
base: BasePass::<ArcComputeCommand<A>>::new(&desc.label),
base: Some(BasePass::<ArcComputeCommand<A>>::new(&desc.label)),
parent_id,
timestamp_writes: desc.timestamp_writes.cloned(),
@ -56,9 +61,25 @@ impl<A: HalApi> ComputePass<A> {
}
}
#[inline]
pub fn parent_id(&self) -> id::CommandEncoderId {
self.parent_id
}
#[inline]
pub fn label(&self) -> Option<&str> {
self.base.as_ref().and_then(|base| base.label.as_deref())
}
fn base_mut<'a>(
&'a mut self,
scope: PassErrorScope,
) -> Result<&'a mut BasePass<ArcComputeCommand<A>>, ComputePassError> {
self.base
.as_mut()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)
}
}
impl<A: HalApi> fmt::Debug for ComputePass<A> {
@ -140,12 +161,20 @@ pub enum ComputePassErrorInner {
Bind(#[from] BindError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
#[error("Push constant offset must be aligned to 4 bytes")]
PushConstantOffsetAlignment,
#[error("Push constant size must be aligned to 4 bytes")]
PushConstantSizeAlignment,
#[error("Ran out of push constant space. Don't set 4gb of push constants per ComputePass.")]
PushConstantOutOfMemory,
#[error(transparent)]
QueryUse(#[from] QueryUseError),
#[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
MissingDownlevelFlags(#[from] MissingDownlevelFlags),
#[error("The compute pass has already been ended and no further commands can be recorded")]
PassEnded,
}
impl PrettyError for ComputePassErrorInner {
@ -279,32 +308,31 @@ impl Global {
Box::new(ComputePass::<A>::new(parent_id, desc))
}
pub fn command_encoder_run_compute_pass<A: HalApi>(
pub fn compute_pass_end<A: HalApi>(
&self,
pass: &ComputePass<A>,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
self.command_encoder_run_compute_pass_impl(
pass.parent_id,
pass.base.as_ref(),
pass.timestamp_writes.as_ref(),
)
let base = pass.base.take().ok_or(ComputePassError {
scope: PassErrorScope::Pass(pass.parent_id),
inner: ComputePassErrorInner::PassEnded,
})?;
self.compute_pass_end_impl(pass.parent_id, base, pass.timestamp_writes.as_ref())
}
#[doc(hidden)]
pub fn command_encoder_run_compute_pass_with_unresolved_commands<A: HalApi>(
pub fn compute_pass_end_with_unresolved_commands<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<ComputeCommand>,
base: BasePass<ComputeCommand>,
timestamp_writes: Option<&ComputePassTimestampWrites>,
) -> Result<(), ComputePassError> {
let resolved_commands =
ComputeCommand::resolve_compute_command_ids(A::hub(self), base.commands)?;
let commands = ComputeCommand::resolve_compute_command_ids(A::hub(self), &base.commands)?;
self.command_encoder_run_compute_pass_impl::<A>(
self.compute_pass_end_impl::<A>(
encoder_id,
BasePassRef {
BasePass {
label: base.label,
commands: &resolved_commands,
commands,
dynamic_offsets: base.dynamic_offsets,
string_data: base.string_data,
push_constant_data: base.push_constant_data,
@ -313,10 +341,10 @@ impl Global {
)
}
fn command_encoder_run_compute_pass_impl<A: HalApi>(
fn compute_pass_end_impl<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<ArcComputeCommand<A>>,
base: BasePass<ArcComputeCommand<A>>,
timestamp_writes: Option<&ComputePassTimestampWrites>,
) -> Result<(), ComputePassError> {
profiling::scope!("CommandEncoder::run_compute_pass");
@ -341,7 +369,7 @@ impl Global {
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(crate::device::trace::Command::RunComputePass {
base: BasePass {
label: base.label.map(str::to_string),
label: base.label.clone(),
commands: base.commands.iter().map(Into::into).collect(),
dynamic_offsets: base.dynamic_offsets.to_vec(),
string_data: base.string_data.to_vec(),
@ -429,7 +457,7 @@ impl Global {
.flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS);
let hal_desc = hal::ComputePassDescriptor {
label: hal_label(base.label, self.instance.flags),
label: hal_label(base.label.as_deref(), self.instance.flags),
timestamp_writes,
};
@ -455,9 +483,9 @@ impl Global {
let scope = PassErrorScope::SetBindGroup(bind_group.as_info().id());
let max_bind_groups = cmd_buf.limits.max_bind_groups;
if index >= &max_bind_groups {
if index >= max_bind_groups {
return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
index: *index,
index,
max: max_bind_groups,
})
.map_pass_err(scope);
@ -470,9 +498,9 @@ impl Global {
);
dynamic_offset_count += num_dynamic_offsets;
let bind_group = tracker.bind_groups.insert_single(bind_group.clone());
let bind_group = tracker.bind_groups.insert_single(bind_group);
bind_group
.validate_dynamic_bindings(*index, &temp_offsets, &cmd_buf.limits)
.validate_dynamic_bindings(index, &temp_offsets, &cmd_buf.limits)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(
@ -494,7 +522,7 @@ impl Global {
let entries =
state
.binder
.assign_group(*index as usize, bind_group, &temp_offsets);
.assign_group(index as usize, bind_group, &temp_offsets);
if !entries.is_empty() && pipeline_layout.is_some() {
let pipeline_layout = pipeline_layout.as_ref().unwrap().raw();
for (i, e) in entries.iter().enumerate() {
@ -521,7 +549,7 @@ impl Global {
state.pipeline = Some(pipeline_id);
tracker.compute_pipelines.insert_single(pipeline.clone());
let pipeline = tracker.compute_pipelines.insert_single(pipeline);
unsafe {
raw.set_compute_pipeline(pipeline.raw());
@ -592,7 +620,7 @@ impl Global {
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice =
&base.push_constant_data[(*values_offset as usize)..values_end_offset];
&base.push_constant_data[(values_offset as usize)..values_end_offset];
let pipeline_layout = state
.binder
@ -607,7 +635,7 @@ impl Global {
pipeline_layout
.validate_push_constant_ranges(
wgt::ShaderStages::COMPUTE,
*offset,
offset,
end_offset_bytes,
)
.map_pass_err(scope)?;
@ -616,7 +644,7 @@ impl Global {
raw.set_push_constants(
pipeline_layout.raw(),
wgt::ShaderStages::COMPUTE,
*offset,
offset,
data_slice,
);
}
@ -640,7 +668,7 @@ impl Global {
{
return Err(ComputePassErrorInner::Dispatch(
DispatchError::InvalidGroupSize {
current: *groups,
current: groups,
limit: groups_size_limit,
},
))
@ -648,7 +676,7 @@ impl Global {
}
unsafe {
raw.dispatch(*groups);
raw.dispatch(groups);
}
}
ArcComputeCommand::DispatchIndirect { buffer, offset } => {
@ -675,7 +703,7 @@ impl Global {
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > buffer.size {
return Err(ComputePassErrorInner::IndirectBufferOverrun {
offset: *offset,
offset,
end_offset,
buffer_size: buffer.size,
})
@ -692,8 +720,8 @@ impl Global {
buffer_memory_init_actions.extend(
buffer.initialization_status.read().create_action(
buffer,
*offset..(*offset + stride),
&buffer,
offset..(offset + stride),
MemoryInitKind::NeedsInitializedMemory,
),
);
@ -707,7 +735,7 @@ impl Global {
)
.map_pass_err(scope)?;
unsafe {
raw.dispatch_indirect(buf_raw, *offset);
raw.dispatch_indirect(buf_raw, offset);
}
}
ArcComputeCommand::PushDebugGroup { color: _, len } => {
@ -756,10 +784,10 @@ impl Global {
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES)
.map_pass_err(scope)?;
let query_set = tracker.query_sets.insert_single(query_set.clone());
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_write_timestamp(raw, query_set_id, *query_index, None)
.validate_and_write_timestamp(raw, query_set_id, query_index, None)
.map_pass_err(scope)?;
}
ArcComputeCommand::BeginPipelineStatisticsQuery {
@ -769,13 +797,13 @@ impl Global {
let query_set_id = query_set.as_info().id();
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = tracker.query_sets.insert_single(query_set.clone());
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_begin_pipeline_statistics_query(
raw,
query_set_id,
*query_index,
query_index,
None,
&mut active_query,
)
@ -834,10 +862,17 @@ impl Global {
bind_group_id: id::BindGroupId,
offsets: &[DynamicOffset],
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::SetBindGroup(bind_group_id);
let base = pass
.base
.as_mut()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?; // Can't use base_mut() utility here because of borrow checker.
let redundant = pass.current_bind_groups.set_and_check_redundant(
bind_group_id,
index,
&mut pass.base.dynamic_offsets,
&mut base.dynamic_offsets,
offsets,
);
@ -850,13 +885,11 @@ impl Global {
.bind_groups
.read()
.get(bind_group_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::SetBindGroup(bind_group_id),
inner: ComputePassErrorInner::InvalidBindGroup(index),
})?
.map_err(|_| ComputePassErrorInner::InvalidBindGroup(index))
.map_pass_err(scope)?
.clone();
pass.base.commands.push(ArcComputeCommand::SetBindGroup {
base.commands.push(ArcComputeCommand::SetBindGroup {
index,
num_dynamic_offsets: offsets.len(),
bind_group,
@ -870,7 +903,13 @@ impl Global {
pass: &mut ComputePass<A>,
pipeline_id: id::ComputePipelineId,
) -> Result<(), ComputePassError> {
if pass.current_pipeline.set_and_check_redundant(pipeline_id) {
let redundant = pass.current_pipeline.set_and_check_redundant(pipeline_id);
let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
let base = pass.base_mut(scope)?;
if redundant {
// Do redundant early-out **after** checking whether the pass is ended or not.
return Ok(());
}
@ -879,15 +918,11 @@ impl Global {
.compute_pipelines
.read()
.get(pipeline_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::SetPipelineCompute(pipeline_id),
inner: ComputePassErrorInner::InvalidPipeline(pipeline_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
.push(ArcComputeCommand::SetPipeline(pipeline));
base.commands.push(ArcComputeCommand::SetPipeline(pipeline));
Ok(())
}
@ -897,33 +932,36 @@ impl Global {
pass: &mut ComputePass<A>,
offset: u32,
data: &[u8],
) {
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant offset must be aligned to 4 bytes."
);
assert_eq!(
data.len() as u32 & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant size must be aligned to 4 bytes."
);
let value_offset = pass.base.push_constant_data.len().try_into().expect(
"Ran out of push constant space. Don't set 4gb of push constants per ComputePass.",
); // TODO: make this an error that can be handled
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::SetPushConstant;
let base = pass.base_mut(scope)?;
pass.base.push_constant_data.extend(
if offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1) != 0 {
return Err(ComputePassErrorInner::PushConstantOffsetAlignment).map_pass_err(scope);
}
if data.len() as u32 & (wgt::PUSH_CONSTANT_ALIGNMENT - 1) != 0 {
return Err(ComputePassErrorInner::PushConstantSizeAlignment).map_pass_err(scope);
}
let value_offset = base
.push_constant_data
.len()
.try_into()
.map_err(|_| ComputePassErrorInner::PushConstantOutOfMemory)
.map_pass_err(scope)?;
base.push_constant_data.extend(
data.chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
.map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
);
pass.base
.commands
.push(ArcComputeCommand::<A>::SetPushConstant {
offset,
size_bytes: data.len() as u32,
values_offset: value_offset,
});
base.commands.push(ArcComputeCommand::<A>::SetPushConstant {
offset,
size_bytes: data.len() as u32,
values_offset: value_offset,
});
Ok(())
}
pub fn compute_pass_dispatch_workgroups<A: HalApi>(
@ -932,10 +970,18 @@ impl Global {
groups_x: u32,
groups_y: u32,
groups_z: u32,
) {
pass.base.commands.push(ArcComputeCommand::<A>::Dispatch([
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::Dispatch {
indirect: false,
pipeline: pass.current_pipeline.last_state,
};
let base = pass.base_mut(scope)?;
base.commands.push(ArcComputeCommand::<A>::Dispatch([
groups_x, groups_y, groups_z,
]));
Ok(())
}
pub fn compute_pass_dispatch_workgroups_indirect<A: HalApi>(
@ -945,21 +991,21 @@ impl Global {
offset: BufferAddress,
) -> Result<(), ComputePassError> {
let hub = A::hub(self);
let scope = PassErrorScope::Dispatch {
indirect: true,
pipeline: pass.current_pipeline.last_state,
};
let base = pass.base_mut(scope)?;
let buffer = hub
.buffers
.read()
.get(buffer_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::Dispatch {
indirect: true,
pipeline: pass.current_pipeline.last_state,
},
inner: ComputePassErrorInner::InvalidBuffer(buffer_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidBuffer(buffer_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
base.commands
.push(ArcComputeCommand::<A>::DispatchIndirect { buffer, offset });
Ok(())
@ -970,22 +1016,29 @@ impl Global {
pass: &mut ComputePass<A>,
label: &str,
color: u32,
) {
let bytes = label.as_bytes();
pass.base.string_data.extend_from_slice(bytes);
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::PushDebugGroup)?;
pass.base
.commands
.push(ArcComputeCommand::<A>::PushDebugGroup {
color,
len: bytes.len(),
});
let bytes = label.as_bytes();
base.string_data.extend_from_slice(bytes);
base.commands.push(ArcComputeCommand::<A>::PushDebugGroup {
color,
len: bytes.len(),
});
Ok(())
}
pub fn compute_pass_pop_debug_group<A: HalApi>(&self, pass: &mut ComputePass<A>) {
pass.base
.commands
.push(ArcComputeCommand::<A>::PopDebugGroup);
pub fn compute_pass_pop_debug_group<A: HalApi>(
&self,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::PopDebugGroup)?;
base.commands.push(ArcComputeCommand::<A>::PopDebugGroup);
Ok(())
}
pub fn compute_pass_insert_debug_marker<A: HalApi>(
@ -993,16 +1046,19 @@ impl Global {
pass: &mut ComputePass<A>,
label: &str,
color: u32,
) {
let bytes = label.as_bytes();
pass.base.string_data.extend_from_slice(bytes);
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::InsertDebugMarker)?;
pass.base
.commands
let bytes = label.as_bytes();
base.string_data.extend_from_slice(bytes);
base.commands
.push(ArcComputeCommand::<A>::InsertDebugMarker {
color,
len: bytes.len(),
});
Ok(())
}
pub fn compute_pass_write_timestamp<A: HalApi>(
@ -1011,18 +1067,19 @@ impl Global {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::WriteTimestamp;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::WriteTimestamp,
inner: ComputePassErrorInner::InvalidQuerySet(query_set_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
pass.base.commands.push(ArcComputeCommand::WriteTimestamp {
base.commands.push(ArcComputeCommand::WriteTimestamp {
query_set,
query_index,
});
@ -1036,19 +1093,19 @@ impl Global {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::WriteTimestamp,
inner: ComputePassErrorInner::InvalidQuerySet(query_set_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
base.commands
.push(ArcComputeCommand::BeginPipelineStatisticsQuery {
query_set,
query_index,
@ -1057,9 +1114,15 @@ impl Global {
Ok(())
}
pub fn compute_pass_end_pipeline_statistics_query<A: HalApi>(&self, pass: &mut ComputePass<A>) {
pass.base
.commands
pub fn compute_pass_end_pipeline_statistics_query<A: HalApi>(
&self,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::EndPipelineStatisticsQuery;
let base = pass.base_mut(scope)?;
base.commands
.push(ArcComputeCommand::<A>::EndPipelineStatisticsQuery);
Ok(())
}
}

View File

@ -9,7 +9,6 @@ use super::{ComputePass, ComputePassError};
// Practically speaking this allows us merge gfx_select with type erasure:
// The alternative would be to introduce ComputePassId which then first needs to be looked up and then dispatch via gfx_select.
pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
fn run(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn set_bind_group(
&mut self,
context: &global::Global,
@ -22,23 +21,38 @@ pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
context: &global::Global,
pipeline_id: id::ComputePipelineId,
) -> Result<(), ComputePassError>;
fn set_push_constant(&mut self, context: &global::Global, offset: u32, data: &[u8]);
fn set_push_constant(
&mut self,
context: &global::Global,
offset: u32,
data: &[u8],
) -> Result<(), ComputePassError>;
fn dispatch_workgroups(
&mut self,
context: &global::Global,
groups_x: u32,
groups_y: u32,
groups_z: u32,
);
) -> Result<(), ComputePassError>;
fn dispatch_workgroups_indirect(
&mut self,
context: &global::Global,
buffer_id: id::BufferId,
offset: wgt::BufferAddress,
) -> Result<(), ComputePassError>;
fn push_debug_group(&mut self, context: &global::Global, label: &str, color: u32);
fn pop_debug_group(&mut self, context: &global::Global);
fn insert_debug_marker(&mut self, context: &global::Global, label: &str, color: u32);
fn push_debug_group(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError>;
fn pop_debug_group(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn insert_debug_marker(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError>;
fn write_timestamp(
&mut self,
context: &global::Global,
@ -51,14 +65,16 @@ pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError>;
fn end_pipeline_statistics_query(&mut self, context: &global::Global);
fn end_pipeline_statistics_query(
&mut self,
context: &global::Global,
) -> Result<(), ComputePassError>;
fn end(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn label(&self) -> Option<&str>;
}
impl<A: HalApi> DynComputePass for ComputePass<A> {
fn run(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.command_encoder_run_compute_pass(self)
}
fn set_bind_group(
&mut self,
context: &global::Global,
@ -77,7 +93,12 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_set_pipeline(self, pipeline_id)
}
fn set_push_constant(&mut self, context: &global::Global, offset: u32, data: &[u8]) {
fn set_push_constant(
&mut self,
context: &global::Global,
offset: u32,
data: &[u8],
) -> Result<(), ComputePassError> {
context.compute_pass_set_push_constant(self, offset, data)
}
@ -87,7 +108,7 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
groups_x: u32,
groups_y: u32,
groups_z: u32,
) {
) -> Result<(), ComputePassError> {
context.compute_pass_dispatch_workgroups(self, groups_x, groups_y, groups_z)
}
@ -100,15 +121,25 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_dispatch_workgroups_indirect(self, buffer_id, offset)
}
fn push_debug_group(&mut self, context: &global::Global, label: &str, color: u32) {
fn push_debug_group(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError> {
context.compute_pass_push_debug_group(self, label, color)
}
fn pop_debug_group(&mut self, context: &global::Global) {
fn pop_debug_group(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.compute_pass_pop_debug_group(self)
}
fn insert_debug_marker(&mut self, context: &global::Global, label: &str, color: u32) {
fn insert_debug_marker(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError> {
context.compute_pass_insert_debug_marker(self, label, color)
}
@ -130,7 +161,18 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_begin_pipeline_statistics_query(self, query_set_id, query_index)
}
fn end_pipeline_statistics_query(&mut self, context: &global::Global) {
fn end_pipeline_statistics_query(
&mut self,
context: &global::Global,
) -> Result<(), ComputePassError> {
context.compute_pass_end_pipeline_statistics_query(self)
}
fn end(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.compute_pass_end(self)
}
fn label(&self) -> Option<&str> {
self.label()
}
}

View File

@ -48,11 +48,11 @@ pub(crate) enum CommandEncoderStatus {
/// Ready to record commands. An encoder's initial state.
///
/// Command building methods like [`command_encoder_clear_buffer`] and
/// [`command_encoder_run_compute_pass`] require the encoder to be in this
/// [`compute_pass_end`] require the encoder to be in this
/// state.
///
/// [`command_encoder_clear_buffer`]: Global::command_encoder_clear_buffer
/// [`command_encoder_run_compute_pass`]: Global::command_encoder_run_compute_pass
/// [`compute_pass_end`]: Global::compute_pass_end
Recording,
/// Command recording is complete, and the buffer is ready for submission.
@ -847,8 +847,12 @@ pub enum PassErrorScope {
indirect: bool,
pipeline: Option<id::ComputePipelineId>,
},
#[error("In a push_debug_group command")]
PushDebugGroup,
#[error("In a pop_debug_group command")]
PopDebugGroup,
#[error("In a insert_debug_marker command")]
InsertDebugMarker,
}
impl PrettyError for PassErrorScope {

View File

@ -247,10 +247,16 @@ impl RenderPass {
}
}
#[inline]
pub fn parent_id(&self) -> id::CommandEncoderId {
self.parent_id
}
#[inline]
pub fn label(&self) -> Option<&str> {
self.base.label.as_deref()
}
#[cfg(feature = "trace")]
pub fn into_command(self) -> crate::device::trace::Command {
crate::device::trace::Command::RunRenderPass {
@ -1303,13 +1309,9 @@ impl<'a, 'd, A: HalApi> RenderPassInfo<'a, 'd, A> {
// Common routines between render/compute
impl Global {
pub fn command_encoder_run_render_pass<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
pass: &RenderPass,
) -> Result<(), RenderPassError> {
self.command_encoder_run_render_pass_impl::<A>(
encoder_id,
pub fn render_pass_end<A: HalApi>(&self, pass: &RenderPass) -> Result<(), RenderPassError> {
self.render_pass_end_impl::<A>(
pass.parent_id(),
pass.base.as_ref(),
&pass.color_targets,
pass.depth_stencil_target.as_ref(),
@ -1319,7 +1321,7 @@ impl Global {
}
#[doc(hidden)]
pub fn command_encoder_run_render_pass_impl<A: HalApi>(
pub fn render_pass_end_impl<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<RenderCommand>,

View File

@ -2559,16 +2559,6 @@ impl crate::context::Context for ContextWebGpu {
)
}
fn command_encoder_end_compute_pass(
&self,
_encoder: &Self::CommandEncoderId,
_encoder_data: &Self::CommandEncoderData,
_pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
) {
pass_data.0.end();
}
fn command_encoder_begin_render_pass(
&self,
_encoder: &Self::CommandEncoderId,
@ -2667,16 +2657,6 @@ impl crate::context::Context for ContextWebGpu {
create_identified(encoder_data.0.begin_render_pass(&mapped_desc))
}
fn command_encoder_end_render_pass(
&self,
_encoder: &Self::CommandEncoderId,
_encoder_data: &Self::CommandEncoderData,
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
pass_data.0.end();
}
fn command_encoder_finish(
&self,
_encoder: Self::CommandEncoderId,
@ -3131,6 +3111,14 @@ impl crate::context::Context for ContextWebGpu {
);
}
fn compute_pass_end(
&self,
_pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
) {
pass_data.0.end();
}
fn render_bundle_encoder_set_pipeline(
&self,
_encoder: &mut Self::RenderBundleEncoderId,
@ -3710,6 +3698,14 @@ impl crate::context::Context for ContextWebGpu {
.collect::<js_sys::Array>();
pass_data.0.execute_bundles(&mapped);
}
fn render_pass_end(
&self,
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
pass_data.0.end();
}
}
pub(crate) type SurfaceOutputDetail = ();

View File

@ -484,6 +484,12 @@ pub struct ComputePass {
error_sink: ErrorSink,
}
#[derive(Debug)]
pub struct RenderPass {
pass: wgc::command::RenderPass,
error_sink: ErrorSink,
}
#[derive(Debug)]
pub struct CommandEncoder {
error_sink: ErrorSink,
@ -526,7 +532,7 @@ impl crate::Context for ContextWgpuCore {
type ComputePassId = Unused;
type ComputePassData = ComputePass;
type RenderPassId = Unused;
type RenderPassData = wgc::command::RenderPass;
type RenderPassData = RenderPass;
type CommandBufferId = wgc::id::CommandBufferId;
type CommandBufferData = ();
type RenderBundleEncoderId = Unused;
@ -1916,29 +1922,10 @@ impl crate::Context for ContextWgpuCore {
)
}
fn command_encoder_end_compute_pass(
&self,
encoder: &Self::CommandEncoderId,
encoder_data: &Self::CommandEncoderData,
_pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
) {
if let Err(cause) = pass_data.pass.run(&self.0) {
let name = wgc::gfx_select!(encoder => self.0.command_buffer_label(encoder.into_command_buffer_id()));
self.handle_error(
&encoder_data.error_sink,
cause,
"encoder",
Some(&name),
"a ComputePass",
);
}
}
fn command_encoder_begin_render_pass(
&self,
encoder: &Self::CommandEncoderId,
_encoder_data: &Self::CommandEncoderData,
encoder_data: &Self::CommandEncoderData,
desc: &crate::RenderPassDescriptor<'_, '_>,
) -> (Self::RenderPassId, Self::RenderPassData) {
if desc.color_attachments.len() > wgc::MAX_COLOR_ATTACHMENTS {
@ -1982,42 +1969,24 @@ impl crate::Context for ContextWgpuCore {
(
Unused,
wgc::command::RenderPass::new(
*encoder,
&wgc::command::RenderPassDescriptor {
label: desc.label.map(Borrowed),
color_attachments: Borrowed(&colors),
depth_stencil_attachment: depth_stencil.as_ref(),
timestamp_writes: timestamp_writes.as_ref(),
occlusion_query_set: desc
.occlusion_query_set
.map(|query_set| query_set.id.into()),
},
),
RenderPass {
pass: wgc::command::RenderPass::new(
*encoder,
&wgc::command::RenderPassDescriptor {
label: desc.label.map(Borrowed),
color_attachments: Borrowed(&colors),
depth_stencil_attachment: depth_stencil.as_ref(),
timestamp_writes: timestamp_writes.as_ref(),
occlusion_query_set: desc
.occlusion_query_set
.map(|query_set| query_set.id.into()),
},
),
error_sink: encoder_data.error_sink.clone(),
},
)
}
fn command_encoder_end_render_pass(
&self,
encoder: &Self::CommandEncoderId,
encoder_data: &Self::CommandEncoderData,
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
if let Err(cause) =
wgc::gfx_select!(encoder => self.0.command_encoder_run_render_pass(*encoder, pass_data))
{
let name = wgc::gfx_select!(encoder => self.0.command_buffer_label(encoder.into_command_buffer_id()));
self.handle_error(
&encoder_data.error_sink,
cause,
"encoder",
Some(&name),
"a RenderPass",
);
}
}
fn command_encoder_finish(
&self,
encoder: Self::CommandEncoderId,
@ -2396,7 +2365,13 @@ impl crate::Context for ContextWgpuCore {
_pipeline_data: &Self::ComputePipelineData,
) {
if let Err(cause) = pass_data.pass.set_pipeline(&self.0, *pipeline) {
self.handle_error_nolabel(&pass_data.error_sink, cause, "ComputePass::set_pipeline");
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::set_pipeline",
);
}
}
@ -2413,7 +2388,13 @@ impl crate::Context for ContextWgpuCore {
.pass
.set_bind_group(&self.0, index, *bind_group, offsets)
{
self.handle_error_nolabel(&pass_data.error_sink, cause, "ComputePass::set_bind_group");
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::set_bind_group",
);
}
}
@ -2424,7 +2405,15 @@ impl crate::Context for ContextWgpuCore {
offset: u32,
data: &[u8],
) {
pass_data.pass.set_push_constant(&self.0, offset, data);
if let Err(cause) = pass_data.pass.set_push_constant(&self.0, offset, data) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::set_push_constant",
);
}
}
fn compute_pass_insert_debug_marker(
@ -2433,7 +2422,15 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::ComputePassData,
label: &str,
) {
pass_data.pass.insert_debug_marker(&self.0, label, 0);
if let Err(cause) = pass_data.pass.insert_debug_marker(&self.0, label, 0) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::insert_debug_marker",
);
}
}
fn compute_pass_push_debug_group(
@ -2442,7 +2439,15 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::ComputePassData,
group_label: &str,
) {
pass_data.pass.push_debug_group(&self.0, group_label, 0);
if let Err(cause) = pass_data.pass.push_debug_group(&self.0, group_label, 0) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::push_debug_group",
);
}
}
fn compute_pass_pop_debug_group(
@ -2450,7 +2455,15 @@ impl crate::Context for ContextWgpuCore {
_pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
) {
pass_data.pass.pop_debug_group(&self.0);
if let Err(cause) = pass_data.pass.pop_debug_group(&self.0) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::pop_debug_group",
);
}
}
fn compute_pass_write_timestamp(
@ -2465,7 +2478,13 @@ impl crate::Context for ContextWgpuCore {
.pass
.write_timestamp(&self.0, *query_set, query_index)
{
self.handle_error_nolabel(&pass_data.error_sink, cause, "ComputePass::write_timestamp");
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::write_timestamp",
);
}
}
@ -2482,9 +2501,11 @@ impl crate::Context for ContextWgpuCore {
.pass
.begin_pipeline_statistics_query(&self.0, *query_set, query_index)
{
self.handle_error_nolabel(
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::begin_pipeline_statistics_query",
);
}
@ -2495,7 +2516,15 @@ impl crate::Context for ContextWgpuCore {
_pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
) {
pass_data.pass.end_pipeline_statistics_query(&self.0);
if let Err(cause) = pass_data.pass.end_pipeline_statistics_query(&self.0) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::end_pipeline_statistics_query",
);
}
}
fn compute_pass_dispatch_workgroups(
@ -2506,7 +2535,15 @@ impl crate::Context for ContextWgpuCore {
y: u32,
z: u32,
) {
pass_data.pass.dispatch_workgroups(&self.0, x, y, z);
if let Err(cause) = pass_data.pass.dispatch_workgroups(&self.0, x, y, z) {
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::dispatch_workgroups",
);
}
}
fn compute_pass_dispatch_workgroups_indirect(
@ -2522,14 +2559,32 @@ impl crate::Context for ContextWgpuCore {
.pass
.dispatch_workgroups_indirect(&self.0, *indirect_buffer, indirect_offset)
{
self.handle_error_nolabel(
self.handle_error(
&pass_data.error_sink,
cause,
LABEL,
pass_data.pass.label(),
"ComputePass::dispatch_workgroups_indirect",
);
}
}
fn compute_pass_end(
    &self,
    _pass: &mut Self::ComputePassId,
    pass_data: &mut Self::ComputePassData,
) {
    // Finish recording the compute pass; any validation error is routed to the
    // pass's error sink together with the pass label for context.
    match pass_data.pass.end(&self.0) {
        Ok(()) => {}
        Err(cause) => self.handle_error(
            &pass_data.error_sink,
            cause,
            LABEL,
            pass_data.pass.label(),
            "ComputePass::end",
        ),
    }
}
fn render_bundle_encoder_set_pipeline(
&self,
_encoder: &mut Self::RenderBundleEncoderId,
@ -2722,7 +2777,7 @@ impl crate::Context for ContextWgpuCore {
pipeline: &Self::RenderPipelineId,
_pipeline_data: &Self::RenderPipelineData,
) {
wgpu_render_pass_set_pipeline(pass_data, *pipeline)
wgpu_render_pass_set_pipeline(&mut pass_data.pass, *pipeline)
}
fn render_pass_set_bind_group(
@ -2734,7 +2789,7 @@ impl crate::Context for ContextWgpuCore {
_bind_group_data: &Self::BindGroupData,
offsets: &[wgt::DynamicOffset],
) {
wgpu_render_pass_set_bind_group(pass_data, index, *bind_group, offsets)
wgpu_render_pass_set_bind_group(&mut pass_data.pass, index, *bind_group, offsets)
}
fn render_pass_set_index_buffer(
@ -2747,7 +2802,9 @@ impl crate::Context for ContextWgpuCore {
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) {
pass_data.set_index_buffer(*buffer, index_format, offset, size)
pass_data
.pass
.set_index_buffer(*buffer, index_format, offset, size)
}
fn render_pass_set_vertex_buffer(
@ -2760,7 +2817,7 @@ impl crate::Context for ContextWgpuCore {
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) {
wgpu_render_pass_set_vertex_buffer(pass_data, slot, *buffer, offset, size)
wgpu_render_pass_set_vertex_buffer(&mut pass_data.pass, slot, *buffer, offset, size)
}
fn render_pass_set_push_constants(
@ -2771,7 +2828,7 @@ impl crate::Context for ContextWgpuCore {
offset: u32,
data: &[u8],
) {
wgpu_render_pass_set_push_constants(pass_data, stages, offset, data)
wgpu_render_pass_set_push_constants(&mut pass_data.pass, stages, offset, data)
}
fn render_pass_draw(
@ -2782,7 +2839,7 @@ impl crate::Context for ContextWgpuCore {
instances: Range<u32>,
) {
wgpu_render_pass_draw(
pass_data,
&mut pass_data.pass,
vertices.end - vertices.start,
instances.end - instances.start,
vertices.start,
@ -2799,7 +2856,7 @@ impl crate::Context for ContextWgpuCore {
instances: Range<u32>,
) {
wgpu_render_pass_draw_indexed(
pass_data,
&mut pass_data.pass,
indices.end - indices.start,
instances.end - instances.start,
indices.start,
@ -2816,7 +2873,7 @@ impl crate::Context for ContextWgpuCore {
_indirect_buffer_data: &Self::BufferData,
indirect_offset: wgt::BufferAddress,
) {
wgpu_render_pass_draw_indirect(pass_data, *indirect_buffer, indirect_offset)
wgpu_render_pass_draw_indirect(&mut pass_data.pass, *indirect_buffer, indirect_offset)
}
fn render_pass_draw_indexed_indirect(
@ -2827,7 +2884,11 @@ impl crate::Context for ContextWgpuCore {
_indirect_buffer_data: &Self::BufferData,
indirect_offset: wgt::BufferAddress,
) {
wgpu_render_pass_draw_indexed_indirect(pass_data, *indirect_buffer, indirect_offset)
wgpu_render_pass_draw_indexed_indirect(
&mut pass_data.pass,
*indirect_buffer,
indirect_offset,
)
}
fn render_pass_multi_draw_indirect(
@ -2839,7 +2900,12 @@ impl crate::Context for ContextWgpuCore {
indirect_offset: wgt::BufferAddress,
count: u32,
) {
wgpu_render_pass_multi_draw_indirect(pass_data, *indirect_buffer, indirect_offset, count)
wgpu_render_pass_multi_draw_indirect(
&mut pass_data.pass,
*indirect_buffer,
indirect_offset,
count,
)
}
fn render_pass_multi_draw_indexed_indirect(
@ -2852,7 +2918,7 @@ impl crate::Context for ContextWgpuCore {
count: u32,
) {
wgpu_render_pass_multi_draw_indexed_indirect(
pass_data,
&mut pass_data.pass,
*indirect_buffer,
indirect_offset,
count,
@ -2872,7 +2938,7 @@ impl crate::Context for ContextWgpuCore {
max_count: u32,
) {
wgpu_render_pass_multi_draw_indirect_count(
pass_data,
&mut pass_data.pass,
*indirect_buffer,
indirect_offset,
*count_buffer,
@ -2894,7 +2960,7 @@ impl crate::Context for ContextWgpuCore {
max_count: u32,
) {
wgpu_render_pass_multi_draw_indexed_indirect_count(
pass_data,
&mut pass_data.pass,
*indirect_buffer,
indirect_offset,
*count_buffer,
@ -2909,7 +2975,7 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::RenderPassData,
color: wgt::Color,
) {
wgpu_render_pass_set_blend_constant(pass_data, &color)
wgpu_render_pass_set_blend_constant(&mut pass_data.pass, &color)
}
fn render_pass_set_scissor_rect(
@ -2921,7 +2987,7 @@ impl crate::Context for ContextWgpuCore {
width: u32,
height: u32,
) {
wgpu_render_pass_set_scissor_rect(pass_data, x, y, width, height)
wgpu_render_pass_set_scissor_rect(&mut pass_data.pass, x, y, width, height)
}
fn render_pass_set_viewport(
@ -2935,7 +3001,15 @@ impl crate::Context for ContextWgpuCore {
min_depth: f32,
max_depth: f32,
) {
wgpu_render_pass_set_viewport(pass_data, x, y, width, height, min_depth, max_depth)
wgpu_render_pass_set_viewport(
&mut pass_data.pass,
x,
y,
width,
height,
min_depth,
max_depth,
)
}
fn render_pass_set_stencil_reference(
@ -2944,7 +3018,7 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::RenderPassData,
reference: u32,
) {
wgpu_render_pass_set_stencil_reference(pass_data, reference)
wgpu_render_pass_set_stencil_reference(&mut pass_data.pass, reference)
}
fn render_pass_insert_debug_marker(
@ -2953,7 +3027,7 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::RenderPassData,
label: &str,
) {
wgpu_render_pass_insert_debug_marker(pass_data, label, 0);
wgpu_render_pass_insert_debug_marker(&mut pass_data.pass, label, 0);
}
fn render_pass_push_debug_group(
@ -2962,7 +3036,7 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::RenderPassData,
group_label: &str,
) {
wgpu_render_pass_push_debug_group(pass_data, group_label, 0);
wgpu_render_pass_push_debug_group(&mut pass_data.pass, group_label, 0);
}
fn render_pass_pop_debug_group(
@ -2970,7 +3044,7 @@ impl crate::Context for ContextWgpuCore {
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
wgpu_render_pass_pop_debug_group(pass_data);
wgpu_render_pass_pop_debug_group(&mut pass_data.pass);
}
fn render_pass_write_timestamp(
@ -2981,7 +3055,7 @@ impl crate::Context for ContextWgpuCore {
_query_set_data: &Self::QuerySetData,
query_index: u32,
) {
wgpu_render_pass_write_timestamp(pass_data, *query_set, query_index)
wgpu_render_pass_write_timestamp(&mut pass_data.pass, *query_set, query_index)
}
fn render_pass_begin_occlusion_query(
@ -2990,7 +3064,7 @@ impl crate::Context for ContextWgpuCore {
pass_data: &mut Self::RenderPassData,
query_index: u32,
) {
wgpu_render_pass_begin_occlusion_query(pass_data, query_index)
wgpu_render_pass_begin_occlusion_query(&mut pass_data.pass, query_index)
}
fn render_pass_end_occlusion_query(
@ -2998,7 +3072,7 @@ impl crate::Context for ContextWgpuCore {
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
wgpu_render_pass_end_occlusion_query(pass_data)
wgpu_render_pass_end_occlusion_query(&mut pass_data.pass)
}
fn render_pass_begin_pipeline_statistics_query(
@ -3009,7 +3083,11 @@ impl crate::Context for ContextWgpuCore {
_query_set_data: &Self::QuerySetData,
query_index: u32,
) {
wgpu_render_pass_begin_pipeline_statistics_query(pass_data, *query_set, query_index)
wgpu_render_pass_begin_pipeline_statistics_query(
&mut pass_data.pass,
*query_set,
query_index,
)
}
fn render_pass_end_pipeline_statistics_query(
@ -3017,7 +3095,7 @@ impl crate::Context for ContextWgpuCore {
_pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
) {
wgpu_render_pass_end_pipeline_statistics_query(pass_data)
wgpu_render_pass_end_pipeline_statistics_query(&mut pass_data.pass)
}
fn render_pass_execute_bundles(
@ -3027,7 +3105,24 @@ impl crate::Context for ContextWgpuCore {
render_bundles: &mut dyn Iterator<Item = (Self::RenderBundleId, &Self::RenderBundleData)>,
) {
let temp_render_bundles = render_bundles.map(|(i, _)| i).collect::<SmallVec<[_; 4]>>();
wgpu_render_pass_execute_bundles(pass_data, &temp_render_bundles)
wgpu_render_pass_execute_bundles(&mut pass_data.pass, &temp_render_bundles)
}
fn render_pass_end(
    &self,
    _pass: &mut Self::RenderPassId,
    pass_data: &mut Self::RenderPassData,
) {
    // The backend is selected from the pass's parent encoder id; `encoder` is
    // consumed by the `gfx_select!` dispatch below.
    let encoder = pass_data.pass.parent_id();
    let result = wgc::gfx_select!(encoder => self.0.render_pass_end(&pass_data.pass));
    if let Err(cause) = result {
        // Report through the encoder's error sink, tagging the pass label.
        self.handle_error(
            &pass_data.error_sink,
            cause,
            LABEL,
            pass_data.pass.label(),
            "RenderPass::end",
        );
    }
}
}

View File

@ -467,26 +467,12 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
encoder_data: &Self::CommandEncoderData,
desc: &ComputePassDescriptor<'_>,
) -> (Self::ComputePassId, Self::ComputePassData);
fn command_encoder_end_compute_pass(
&self,
encoder: &Self::CommandEncoderId,
encoder_data: &Self::CommandEncoderData,
pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
);
fn command_encoder_begin_render_pass(
&self,
encoder: &Self::CommandEncoderId,
encoder_data: &Self::CommandEncoderData,
desc: &RenderPassDescriptor<'_, '_>,
) -> (Self::RenderPassId, Self::RenderPassData);
fn command_encoder_end_render_pass(
&self,
encoder: &Self::CommandEncoderId,
encoder_data: &Self::CommandEncoderData,
pass: &mut Self::RenderPassId,
pass_data: &mut Self::RenderPassData,
);
fn command_encoder_finish(
&self,
encoder: Self::CommandEncoderId,
@ -626,7 +612,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
fn device_start_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData);
fn device_stop_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData);
fn pipeline_cache_get_data(
&self,
cache: &Self::PipelineCacheId,
@ -710,6 +695,11 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
indirect_buffer_data: &Self::BufferData,
indirect_offset: BufferAddress,
);
fn compute_pass_end(
&self,
pass: &mut Self::ComputePassId,
pass_data: &mut Self::ComputePassData,
);
fn render_bundle_encoder_set_pipeline(
&self,
@ -1042,6 +1032,7 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
pass_data: &mut Self::RenderPassData,
render_bundles: &mut dyn Iterator<Item = (Self::RenderBundleId, &Self::RenderBundleData)>,
);
fn render_pass_end(&self, pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData);
}
/// Object id.
@ -1476,26 +1467,12 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
encoder_data: &crate::Data,
desc: &ComputePassDescriptor<'_>,
) -> (ObjectId, Box<crate::Data>);
fn command_encoder_end_compute_pass(
&self,
encoder: &ObjectId,
encoder_data: &crate::Data,
pass: &mut ObjectId,
pass_data: &mut crate::Data,
);
fn command_encoder_begin_render_pass(
&self,
encoder: &ObjectId,
encoder_data: &crate::Data,
desc: &RenderPassDescriptor<'_, '_>,
) -> (ObjectId, Box<crate::Data>);
fn command_encoder_end_render_pass(
&self,
encoder: &ObjectId,
encoder_data: &crate::Data,
pass: &mut ObjectId,
pass_data: &mut crate::Data,
);
fn command_encoder_finish(
&self,
encoder: ObjectId,
@ -1707,6 +1684,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
indirect_buffer_data: &crate::Data,
indirect_offset: BufferAddress,
);
fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data);
fn render_bundle_encoder_set_pipeline(
&self,
@ -2031,6 +2009,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
pass_data: &mut crate::Data,
render_bundles: &mut dyn Iterator<Item = (&ObjectId, &crate::Data)>,
);
fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data);
}
// Blanket impl of DynContext for all types which implement Context.
@ -2804,26 +2783,6 @@ where
(compute_pass.into(), Box::new(data) as _)
}
fn command_encoder_end_compute_pass(
&self,
encoder: &ObjectId,
encoder_data: &crate::Data,
pass: &mut ObjectId,
pass_data: &mut crate::Data,
) {
let encoder = <T::CommandEncoderId>::from(*encoder);
let encoder_data = downcast_ref(encoder_data);
let mut pass = <T::ComputePassId>::from(*pass);
let pass_data = downcast_mut(pass_data);
Context::command_encoder_end_compute_pass(
self,
&encoder,
encoder_data,
&mut pass,
pass_data,
)
}
fn command_encoder_begin_render_pass(
&self,
encoder: &ObjectId,
@ -2837,20 +2796,6 @@ where
(render_pass.into(), Box::new(data) as _)
}
fn command_encoder_end_render_pass(
&self,
encoder: &ObjectId,
encoder_data: &crate::Data,
pass: &mut ObjectId,
pass_data: &mut crate::Data,
) {
let encoder = <T::CommandEncoderId>::from(*encoder);
let encoder_data = downcast_ref(encoder_data);
let mut pass = <T::RenderPassId>::from(*pass);
let pass_data = downcast_mut(pass_data);
Context::command_encoder_end_render_pass(self, &encoder, encoder_data, &mut pass, pass_data)
}
fn command_encoder_finish(
&self,
encoder: ObjectId,
@ -3312,6 +3257,12 @@ where
)
}
fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) {
    // Recover the concrete pass id/data from their type-erased forms, then
    // forward to the strongly-typed `Context` implementation.
    let mut typed_pass = <T::ComputePassId>::from(*pass);
    Context::compute_pass_end(self, &mut typed_pass, downcast_mut(pass_data))
}
fn render_bundle_encoder_set_pipeline(
&self,
encoder: &mut ObjectId,
@ -4074,6 +4025,12 @@ where
});
Context::render_pass_execute_bundles(self, &mut pass, pass_data, &mut render_bundles)
}
fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) {
    // Recover the concrete pass id/data from their type-erased forms, then
    // forward to the strongly-typed `Context` implementation.
    let mut typed_pass = <T::RenderPassId>::from(*pass);
    Context::render_pass_end(self, &mut typed_pass, downcast_mut(pass_data))
}
}
pub trait QueueWriteBuffer: WasmNotSendSync + Debug {

View File

@ -4697,13 +4697,9 @@ impl<'a> RenderPass<'a> {
impl<'a> Drop for RenderPass<'a> {
fn drop(&mut self) {
if !thread::panicking() {
let parent_id = self.parent.id.as_ref().unwrap();
self.parent.context.command_encoder_end_render_pass(
parent_id,
self.parent.data.as_ref(),
&mut self.id,
self.data.as_mut(),
);
self.parent
.context
.render_pass_end(&mut self.id, self.data.as_mut());
}
}
}
@ -4875,13 +4871,9 @@ impl<'a> ComputePass<'a> {
impl<'a> Drop for ComputePass<'a> {
fn drop(&mut self) {
if !thread::panicking() {
let parent_id = self.parent.id.as_ref().unwrap();
self.parent.context.command_encoder_end_compute_pass(
parent_id,
self.parent.data.as_ref(),
&mut self.id,
self.data.as_mut(),
);
self.parent
.context
.compute_pass_end(&mut self.id, self.data.as_mut());
}
}
}