Make bind group an Option for set_bind_group calls. (#6216)

This is purely an API change to all `set_bind_group` calls: calls that
pass a `Some()` argument should behave exactly as before, while handling
of the `None` case is left as TODOs in the backends.
Brad Werth 2024-09-06 14:29:09 -07:00 committed by GitHub
parent c87717b814
commit 9b36a3e129
59 changed files with 283 additions and 169 deletions

View File

@ -65,6 +65,15 @@ traits that have now been implemented for `wgpu` resources.
By @teoxoy [#6134](https://github.com/gfx-rs/wgpu/pull/6134).
#### `set_bind_group` now takes an `Option` for the bind group argument.
https://gpuweb.github.io/gpuweb/#programmable-passes-bind-groups specifies that bindGroup
is nullable. This change is the start of implementing this part of the spec. Callers that
specify a `Some()` value should have unchanged behavior. Handling of `None` values still
needs to be implemented by backends.
By @bradwerth [#6216](https://github.com/gfx-rs/wgpu/pull/6216).
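A minimal before/after sketch of the call-site migration (the `render_pass` and `bind_group` names below are illustrative, not taken from this commit):

```rust
// Before: the bind group was passed by reference.
// render_pass.set_bind_group(0, &bind_group, &[]);

// After: wrap the bind group in Some().
render_pass.set_bind_group(0, Some(&bind_group), &[]);

// None is now accepted by the signature, but backends do not yet act on it,
// so unsetting a bind group this way is currently a no-op.
render_pass.set_bind_group(0, None, &[]);
```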
### New Features
#### Naga

View File

@ -389,7 +389,7 @@ impl ComputepassState {
let end_idx = start_idx + dispatch_per_pass;
for dispatch_idx in start_idx..end_idx {
compute_pass.set_pipeline(&self.pipeline);
compute_pass.set_bind_group(0, &self.bind_groups[dispatch_idx], &[]);
compute_pass.set_bind_group(0, Some(&self.bind_groups[dispatch_idx]), &[]);
compute_pass.dispatch_workgroups(1, 1, 1);
}
@ -412,7 +412,7 @@ impl ComputepassState {
});
compute_pass.set_pipeline(self.bindless_pipeline.as_ref().unwrap());
compute_pass.set_bind_group(0, self.bindless_bind_group.as_ref().unwrap(), &[]);
compute_pass.set_bind_group(0, Some(self.bindless_bind_group.as_ref().unwrap()), &[]);
for _ in 0..dispatch_count_bindless {
compute_pass.dispatch_workgroups(1, 1, 1);
}

View File

@ -367,7 +367,7 @@ impl RenderpassState {
let end_idx = start_idx + draws_per_pass;
for draw_idx in start_idx..end_idx {
render_pass.set_pipeline(&self.pipeline);
render_pass.set_bind_group(0, &self.bind_groups[draw_idx], &[]);
render_pass.set_bind_group(0, Some(&self.bind_groups[draw_idx]), &[]);
for i in 0..VERTEX_BUFFERS_PER_DRAW {
render_pass.set_vertex_buffer(
i as u32,
@ -410,7 +410,7 @@ impl RenderpassState {
});
render_pass.set_pipeline(self.bindless_pipeline.as_ref().unwrap());
render_pass.set_bind_group(0, self.bindless_bind_group.as_ref().unwrap(), &[]);
render_pass.set_bind_group(0, Some(self.bindless_bind_group.as_ref().unwrap()), &[]);
for i in 0..VERTEX_BUFFERS_PER_DRAW {
render_pass.set_vertex_buffer(i as u32, self.vertex_buffers[0].slice(..));
}

View File

@ -150,7 +150,7 @@ pub fn op_webgpu_render_bundle_encoder_set_bind_group(
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
&mut render_bundle_encoder_resource.0.borrow_mut(),
index,
bind_group_resource.1,
Some(bind_group_resource.1),
dynamic_offsets_data.as_ptr(),
dynamic_offsets_data.len(),
);

View File

@ -136,7 +136,7 @@ pub fn op_webgpu_compute_pass_set_bind_group(
.compute_pass_set_bind_group(
&mut compute_pass_resource.0.borrow_mut(),
index,
bind_group_resource.1,
Some(bind_group_resource.1),
dynamic_offsets_data,
)?;

View File

@ -231,7 +231,7 @@ pub fn op_webgpu_render_pass_set_bind_group(
.render_pass_set_bind_group(
&mut render_pass_resource.0.borrow_mut(),
index,
bind_group_resource.1,
Some(bind_group_resource.1),
dynamic_offsets_data,
)?;

View File

@ -298,7 +298,7 @@ impl crate::framework::Example for Example {
timestamp_writes: None,
});
cpass.set_pipeline(&self.compute_pipeline);
cpass.set_bind_group(0, &self.particle_bind_groups[self.frame_num % 2], &[]);
cpass.set_bind_group(0, Some(&self.particle_bind_groups[self.frame_num % 2]), &[]);
cpass.dispatch_workgroups(self.work_group_count, 1, 1);
}
command_encoder.pop_debug_group();

View File

@ -128,11 +128,11 @@ impl Example {
occlusion_query_set: None,
});
rpass.set_pipeline(&self.pipeline);
rpass.set_bind_group(0, &self.global_group, &[]);
rpass.set_bind_group(0, Some(&self.global_group), &[]);
for i in 0..self.bunnies.len() {
let offset =
(i as wgpu::DynamicOffset) * (uniform_alignment as wgpu::DynamicOffset);
rpass.set_bind_group(1, &self.local_group, &[offset]);
rpass.set_bind_group(1, Some(&self.local_group), &[offset]);
rpass.draw(0..4, 0..1);
}
}

View File

@ -305,7 +305,7 @@ impl crate::framework::Example for Example {
});
rpass.set_pipeline(&self.pipeline_upscale);
rpass.set_bind_group(0, &self.bind_group_upscale, &[]);
rpass.set_bind_group(0, Some(&self.bind_group_upscale), &[]);
rpass.draw(0..3, 0..1);
if let Some(pipeline_lines) = &self.pipeline_lines {

View File

@ -361,7 +361,7 @@ impl crate::framework::Example for Example {
});
rpass.push_debug_group("Prepare data for draw.");
rpass.set_pipeline(&self.pipeline);
rpass.set_bind_group(0, &self.bind_group, &[]);
rpass.set_bind_group(0, Some(&self.bind_group), &[]);
rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint16);
rpass.set_vertex_buffer(0, self.vertex_buf.slice(..));
rpass.pop_debug_group();

View File

@ -135,7 +135,7 @@ async fn execute_gpu_inner(
timestamp_writes: None,
});
cpass.set_pipeline(&compute_pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.insert_debug_marker("compute collatz iterations");
cpass.dispatch_workgroups(numbers.len() as u32, 1, 1); // Number of cells to run, the (x,y,z) size of item being processed
}

View File

@ -128,7 +128,7 @@ async fn execute(
timestamp_writes: None,
});
compute_pass.set_pipeline(&patient_pipeline);
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.set_bind_group(0, Some(&bind_group), &[]);
compute_pass.dispatch_workgroups(local_patient_workgroup_results.len() as u32, 1, 1);
}
queue.submit(Some(command_encoder.finish()));
@ -150,7 +150,7 @@ async fn execute(
timestamp_writes: None,
});
compute_pass.set_pipeline(&hasty_pipeline);
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.set_bind_group(0, Some(&bind_group), &[]);
compute_pass.dispatch_workgroups(local_patient_workgroup_results.len() as u32, 1, 1);
}
queue.submit(Some(command_encoder.finish()));

View File

@ -127,7 +127,7 @@ async fn run() {
timestamp_writes: None,
});
compute_pass.set_pipeline(&pipeline);
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.set_bind_group(0, Some(&bind_group), &[]);
/* Note that since each workgroup will cover both arrays, we only need to
cover the length of one array. */
compute_pass.dispatch_workgroups(local_a.len() as u32, 1, 1);

View File

@ -180,7 +180,7 @@ impl Example {
);
}
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.draw(0..3, 0..1);
if let Some(ref query_sets) = query_sets {
rpass.write_timestamp(&query_sets.timestamp, timestamp_query_index_base + 1);
@ -497,7 +497,7 @@ impl crate::framework::Example for Example {
occlusion_query_set: None,
});
rpass.set_pipeline(&self.draw_pipeline);
rpass.set_bind_group(0, &self.bind_group, &[]);
rpass.set_bind_group(0, Some(&self.bind_group), &[]);
rpass.draw(0..4, 0..1);
}

View File

@ -59,7 +59,7 @@ async fn compute(local_buffer: &mut [u32], context: &WgpuContext) {
timestamp_writes: None,
});
compute_pass.set_pipeline(&context.pipeline);
compute_pass.set_bind_group(0, &context.bind_group, &[]);
compute_pass.set_bind_group(0, Some(&context.bind_group), &[]);
compute_pass.dispatch_workgroups(local_buffer.len() as u32, 1, 1);
}
// We finish the compute pass by dropping it.

View File

@ -779,10 +779,10 @@ impl crate::framework::Example for Example {
occlusion_query_set: None,
});
pass.set_pipeline(&self.shadow_pass.pipeline);
pass.set_bind_group(0, &self.shadow_pass.bind_group, &[]);
pass.set_bind_group(0, Some(&self.shadow_pass.bind_group), &[]);
for entity in &self.entities {
pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]);
pass.set_bind_group(1, Some(&self.entity_bind_group), &[entity.uniform_offset]);
pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format);
pass.set_vertex_buffer(0, entity.vertex_buf.slice(..));
pass.draw_indexed(0..entity.index_count as u32, 0, 0..1);
@ -823,10 +823,10 @@ impl crate::framework::Example for Example {
occlusion_query_set: None,
});
pass.set_pipeline(&self.forward_pass.pipeline);
pass.set_bind_group(0, &self.forward_pass.bind_group, &[]);
pass.set_bind_group(0, Some(&self.forward_pass.bind_group), &[]);
for entity in &self.entities {
pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]);
pass.set_bind_group(1, Some(&self.entity_bind_group), &[entity.uniform_offset]);
pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format);
pass.set_vertex_buffer(0, entity.vertex_buf.slice(..));
pass.draw_indexed(0..entity.index_count as u32, 0, 0..1);

View File

@ -451,7 +451,7 @@ impl crate::framework::Example for Example {
occlusion_query_set: None,
});
rpass.set_bind_group(0, &self.bind_group, &[]);
rpass.set_bind_group(0, Some(&self.bind_group), &[]);
rpass.set_pipeline(&self.entity_pipeline);
for entity in self.entities.iter() {

View File

@ -202,7 +202,7 @@ impl<const SRGB: bool> crate::framework::Example for Example<SRGB> {
});
rpass.push_debug_group("Prepare data for draw.");
rpass.set_pipeline(&self.pipeline);
rpass.set_bind_group(0, &self.bind_group, &[]);
rpass.set_bind_group(0, Some(&self.bind_group), &[]);
rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint16);
rpass.set_vertex_buffer(0, self.vertex_buf.slice(..));
rpass.pop_debug_group();

View File

@ -117,7 +117,7 @@ async fn run(_path: Option<String>) {
label: None,
timestamp_writes: None,
});
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.set_bind_group(0, Some(&bind_group), &[]);
compute_pass.set_pipeline(&pipeline);
compute_pass.dispatch_workgroups(TEXTURE_DIMS.0 as u32, TEXTURE_DIMS.1 as u32, 1);
}

View File

@ -391,12 +391,12 @@ impl crate::framework::Example for Example {
rpass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
rpass.set_index_buffer(self.index_buffer.slice(..), self.index_format);
if self.uniform_workaround {
rpass.set_bind_group(0, &self.bind_group, &[0]);
rpass.set_bind_group(0, Some(&self.bind_group), &[0]);
rpass.draw_indexed(0..6, 0, 0..1);
rpass.set_bind_group(0, &self.bind_group, &[256]);
rpass.set_bind_group(0, Some(&self.bind_group), &[256]);
rpass.draw_indexed(6..12, 0, 0..1);
} else {
rpass.set_bind_group(0, &self.bind_group, &[0]);
rpass.set_bind_group(0, Some(&self.bind_group), &[0]);
rpass.draw_indexed(0..12, 0, 0..1);
}

View File

@ -324,7 +324,7 @@ fn compute_pass(
});
*next_unused_query += 2;
cpass.set_pipeline(&compute_pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.dispatch_workgroups(1, 1, 1);
if device
.features()

View File

@ -327,7 +327,11 @@ async fn run(event_loop: EventLoop<()>, window: Arc<Window>) {
});
render_pass.set_pipeline(&wgpu_context_ref.pipeline);
// (9)
render_pass.set_bind_group(0, &wgpu_context_ref.bind_group, &[]);
render_pass.set_bind_group(
0,
Some(&wgpu_context_ref.bind_group),
&[],
);
render_pass.draw(0..3, 0..1);
}
wgpu_context_ref.queue.submit(Some(encoder.finish()));

View File

@ -630,7 +630,7 @@ impl crate::framework::Example for Example {
multiview: None,
});
encoder.set_pipeline(&terrain_pipeline);
encoder.set_bind_group(0, &terrain_flipped_bind_group, &[]);
encoder.set_bind_group(0, Some(&terrain_flipped_bind_group), &[]);
encoder.set_vertex_buffer(0, terrain_vertex_buf.slice(..));
encoder.draw(0..terrain_vertices.len() as u32, 0..1);
encoder.finish(&wgpu::RenderBundleDescriptor::default())
@ -784,7 +784,7 @@ impl crate::framework::Example for Example {
occlusion_query_set: None,
});
rpass.set_pipeline(&self.terrain_pipeline);
rpass.set_bind_group(0, &self.terrain_normal_bind_group, &[]);
rpass.set_bind_group(0, Some(&self.terrain_normal_bind_group), &[]);
rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..));
rpass.draw(0..self.terrain_vertex_count as u32, 0..1);
}
@ -811,7 +811,7 @@ impl crate::framework::Example for Example {
});
rpass.set_pipeline(&self.water_pipeline);
rpass.set_bind_group(0, &self.water_bind_group, &[]);
rpass.set_bind_group(0, Some(&self.water_bind_group), &[]);
rpass.set_vertex_buffer(0, self.water_vertex_buf.slice(..));
rpass.draw(0..self.water_vertex_count as u32, 0..1);
}

View File

@ -70,7 +70,7 @@
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
bind_group_id: Some(Id(0, 1, Empty)),
),
SetPipeline(Id(0, 1, Empty)),
],

View File

@ -149,7 +149,7 @@
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
bind_group_id: Some(Id(0, 1, Empty)),
),
Dispatch((4, 1, 1)),
],

View File

@ -150,7 +150,7 @@
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
bind_group_id: Some(Id(0, 1, Empty)),
),
Dispatch((4, 1, 1)),
],

View File

@ -377,7 +377,7 @@ fn copy_via_compute(
let mut pass = encoder.begin_compute_pass(&ComputePassDescriptor::default());
pass.set_pipeline(&pipeline_copy);
pass.set_bind_group(0, &bg, &[]);
pass.set_bind_group(0, Some(&bg), &[]);
pass.dispatch_workgroups(1, 1, 1);
}

View File

@ -110,7 +110,7 @@ static BGRA8_UNORM_STORAGE: GpuTestConfiguration = GpuTestConfiguration::new()
timestamp_writes: None,
});
pass.set_bind_group(0, &bg, &[]);
pass.set_bind_group(0, Some(&bg), &[]);
pass.set_pipeline(&pipeline);
pass.dispatch_workgroups(256, 256, 1);
}

View File

@ -92,11 +92,11 @@ async fn bgl_dedupe(ctx: TestingContext) {
timestamp_writes: None,
});
pass.set_bind_group(0, &bg_1b, &[]);
pass.set_bind_group(0, Some(&bg_1b), &[]);
pass.set_pipeline(&pipeline);
pass.dispatch_workgroups(1, 1, 1);
pass.set_bind_group(0, &bg_1a, &[]);
pass.set_bind_group(0, Some(&bg_1a), &[]);
pass.dispatch_workgroups(1, 1, 1);
drop(pass);
@ -179,7 +179,7 @@ fn bgl_dedupe_with_dropped_user_handle(ctx: TestingContext) {
timestamp_writes: None,
});
pass.set_bind_group(0, &bg, &[]);
pass.set_bind_group(0, Some(&bg), &[]);
pass.set_pipeline(&pipeline);
pass.dispatch_workgroups(1, 1, 1);
@ -250,10 +250,10 @@ fn get_derived_bgl(ctx: TestingContext) {
pass.set_pipeline(&pipeline);
pass.set_bind_group(0, &bg1, &[]);
pass.set_bind_group(0, Some(&bg1), &[]);
pass.dispatch_workgroups(1, 1, 1);
pass.set_bind_group(0, &bg2, &[]);
pass.set_bind_group(0, Some(&bg2), &[]);
pass.dispatch_workgroups(1, 1, 1);
drop(pass);
@ -313,7 +313,7 @@ fn separate_pipelines_have_incompatible_derived_bgls(ctx: TestingContext) {
pass.set_pipeline(&pipeline1);
// We use the wrong bind group for this pipeline here. This should fail.
pass.set_bind_group(0, &bg2, &[]);
pass.set_bind_group(0, Some(&bg2), &[]);
pass.dispatch_workgroups(1, 1, 1);
fail(
@ -385,7 +385,7 @@ fn derived_bgls_incompatible_with_regular_bgls(ctx: TestingContext) {
pass.set_pipeline(&pipeline);
pass.set_bind_group(0, &bg, &[]);
pass.set_bind_group(0, Some(&bg), &[]);
pass.dispatch_workgroups(1, 1, 1);
fail(
@ -476,8 +476,8 @@ fn bgl_dedupe_derived(ctx: TestingContext) {
timestamp_writes: None,
});
pass.set_pipeline(&pipeline);
pass.set_bind_group(0, &bind_group_0, &[]);
pass.set_bind_group(1, &bind_group_1, &[]);
pass.set_bind_group(0, Some(&bind_group_0), &[]);
pass.set_bind_group(1, Some(&bind_group_1), &[]);
pass.dispatch_workgroups(1, 1, 1);
drop(pass);

View File

@ -328,7 +328,7 @@ static MINIMUM_BUFFER_BINDING_SIZE_DISPATCH: GpuTestConfiguration = GpuTestConfi
timestamp_writes: None,
});
pass.set_bind_group(0, &bind_group, &[]);
pass.set_bind_group(0, Some(&bind_group), &[]);
pass.set_pipeline(&pipeline);
pass.dispatch_workgroups(1, 1, 1);

View File

@ -45,7 +45,7 @@ async fn compute_pass_resource_ownership(ctx: TestingContext) {
{
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default());
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.dispatch_workgroups_indirect(&indirect_buffer, 0);
// Now drop all resources we set. Then do a device poll to make sure the resources are really not dropped too early, no matter what.
@ -95,7 +95,7 @@ async fn compute_pass_query_set_ownership_pipeline_statistics(ctx: TestingContex
{
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default());
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.begin_pipeline_statistics_query(&query_set, 0);
cpass.dispatch_workgroups(1, 1, 1);
cpass.end_pipeline_statistics_query();
@ -153,7 +153,7 @@ async fn compute_pass_query_set_ownership_timestamps(ctx: TestingContext) {
}),
});
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.write_timestamp(&query_set_write_timestamp, 0);
cpass.dispatch_workgroups(1, 1, 1);
@ -203,7 +203,7 @@ async fn compute_pass_keep_encoder_alive(ctx: TestingContext) {
// Record some draw commands.
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.dispatch_workgroups_indirect(&indirect_buffer, 0);
// Dropping the pass will still execute the pass, even though there's no way to submit it.

View File

@ -194,7 +194,7 @@ async fn draw_test_with_reports(
});
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bg, &[]);
rpass.set_bind_group(0, Some(&bg), &[]);
let global_report = ctx.instance.generate_report().unwrap();
let report = global_report.hub_report();

View File

@ -115,7 +115,7 @@ static NV12_TEXTURE_CREATION_SAMPLING: GpuTestConfiguration = GpuTestConfigurati
occlusion_query_set: None,
});
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.draw(0..4, 0..1);
drop(rpass);
ctx.queue.submit(Some(encoder.finish()));

View File

@ -90,7 +90,7 @@ static PARTIALLY_BOUNDED_ARRAY: GpuTestConfiguration = GpuTestConfiguration::new
timestamp_writes: None,
});
cpass.set_pipeline(&compute_pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.dispatch_workgroups(1, 1, 1);
}

View File

@ -167,7 +167,7 @@ async fn validate_pipeline(
timestamp_writes: None,
});
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, bind_group, &[]);
cpass.set_bind_group(0, Some(bind_group), &[]);
cpass.dispatch_workgroups(1, 1, 1);
}

View File

@ -46,7 +46,7 @@ fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer {
.create_command_encoder(&CommandEncoderDescriptor::default());
let mut cpass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::default());
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
drop(cpass);
cmd_buf.finish()

View File

@ -119,7 +119,7 @@ async fn partial_update_test(ctx: TestingContext) {
timestamp_writes: None,
});
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
// -- Dispatch 0 --

View File

@ -163,7 +163,7 @@ async fn multi_stage_data_binding_test(ctx: TestingContext) {
});
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bg, &[]);
rpass.set_bind_group(0, Some(&bg), &[]);
rpass.set_push_constants(
wgpu::ShaderStages::VERTEX_FRAGMENT,
0,

View File

@ -87,7 +87,7 @@ async fn render_pass_resource_ownership(ctx: TestingContext) {
drop(depth_stencil_view);
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.set_vertex_buffer(0, vertex_buffer.slice(..));
rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);
rpass.begin_occlusion_query(0);
@ -163,7 +163,7 @@ async fn render_pass_query_set_ownership_pipeline_statistics(ctx: TestingContext
..Default::default()
});
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.set_vertex_buffer(0, vertex_buffer.slice(..));
rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);
rpass.begin_pipeline_statistics_query(&query_set, 0);
@ -242,7 +242,7 @@ async fn render_pass_query_set_ownership_timestamps(ctx: TestingContext) {
rpass.write_timestamp(&query_set_write_timestamp, 0);
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.set_vertex_buffer(0, vertex_buffer.slice(..));
rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);
rpass.draw(0..3, 0..1);
@ -305,7 +305,7 @@ async fn render_pass_keep_encoder_alive(ctx: TestingContext) {
// Record some a draw command.
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.set_vertex_buffer(0, vertex_buffer.slice(..));
rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);
rpass.draw(0..3, 0..1);

View File

@ -349,7 +349,7 @@ async fn shader_input_output_test(
timestamp_writes: None,
});
cpass.set_pipeline(&pipeline);
cpass.set_bind_group(0, &bg, &[]);
cpass.set_bind_group(0, Some(&bg), &[]);
if let InputStorageType::PushConstant = storage_type {
cpass.set_push_constants(0, bytemuck::cast_slice(&test.input_values))

View File

@ -119,7 +119,7 @@ static ZERO_INIT_WORKGROUP_MEMORY: GpuTestConfiguration = GpuTestConfiguration::
cpass.set_pipeline(&pipeline_read);
for i in 0..NR_OF_DISPATCHES {
cpass.set_bind_group(0, &bg, &[i * BUFFER_BINDING_SIZE]);
cpass.set_bind_group(0, Some(&bg), &[i * BUFFER_BINDING_SIZE]);
cpass.dispatch_workgroups(DISPATCH_SIZE.0, DISPATCH_SIZE.1, DISPATCH_SIZE.2);
}
drop(cpass);

View File

@ -148,7 +148,7 @@ async fn reinterpret(
occlusion_query_set: None,
});
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bind_group, &[]);
rpass.set_bind_group(0, Some(&bind_group), &[]);
rpass.draw(0..3, 0..1);
drop(rpass);
ctx.queue.submit(Some(encoder.finish()));

View File

@ -93,7 +93,7 @@ static SUBGROUP_OPERATIONS: GpuTestConfiguration = GpuTestConfiguration::new()
timestamp_writes: None,
});
cpass.set_pipeline(&compute_pipeline);
cpass.set_bind_group(0, &bind_group, &[]);
cpass.set_bind_group(0, Some(&bind_group), &[]);
cpass.dispatch_workgroups(1, 1, 1);
}
ctx.queue.submit(Some(encoder.finish()));

View File

@ -315,7 +315,7 @@ async fn vertex_formats_common(ctx: TestingContext, tests: &[Test<'_>]) {
rpass.set_vertex_buffer(0, buffer_input.slice(..));
rpass.set_pipeline(&pipeline);
rpass.set_bind_group(0, &bg, &[]);
rpass.set_bind_group(0, Some(&bg), &[]);
// Draw three vertices and no instance, which is enough to generate the
// checksums.

View File

@ -409,7 +409,7 @@ async fn vertex_index_common(ctx: TestingContext) {
render_encoder.set_vertex_buffer(1, identity_buffer.slice(..));
render_encoder.set_index_buffer(identity_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_encoder.set_pipeline(pipeline);
render_encoder.set_bind_group(0, &bg, &[]);
render_encoder.set_bind_group(0, Some(&bg), &[]);
let draws = test.case.draws();

View File

@ -582,8 +582,15 @@ fn set_bind_group(
dynamic_offsets: &[u32],
index: u32,
num_dynamic_offsets: usize,
bind_group_id: id::Id<id::markers::BindGroup>,
bind_group_id: Option<id::Id<id::markers::BindGroup>>,
) -> Result<(), RenderBundleErrorInner> {
if bind_group_id.is_none() {
// TODO: do appropriate cleanup for null bind_group.
return Ok(());
}
let bind_group_id = bind_group_id.unwrap();
let bind_group = bind_group_guard
.get_owned(bind_group_id)
.map_err(|_| RenderCommandError::InvalidBindGroupId(bind_group_id))?;
@ -981,12 +988,17 @@ impl RenderBundle {
num_dynamic_offsets,
bind_group,
} => {
let mut bg = None;
if bind_group.is_some() {
let bind_group = bind_group.as_ref().unwrap();
let raw_bg = bind_group.try_raw(snatch_guard)?;
bg = Some(raw_bg);
}
unsafe {
raw.set_bind_group(
pipeline_layout.as_ref().unwrap().raw(),
*index,
raw_bg,
bg,
&offsets[..*num_dynamic_offsets],
)
};
@ -1501,7 +1513,7 @@ impl State {
let offsets = &contents.dynamic_offsets;
return Some(ArcRenderCommand::SetBindGroup {
index: i.try_into().unwrap(),
bind_group: contents.bind_group.clone(),
bind_group: Some(contents.bind_group.clone()),
num_dynamic_offsets: offsets.end - offsets.start,
});
}
@ -1581,7 +1593,7 @@ pub mod bundle_ffi {
pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group(
bundle: &mut RenderBundleEncoder,
index: u32,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
offsets: *const DynamicOffset,
offset_length: usize,
) {

View File

@ -649,10 +649,8 @@ fn set_bind_group(
dynamic_offsets: &[DynamicOffset],
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup>,
bind_group: Option<Arc<BindGroup>>,
) -> Result<(), ComputePassErrorInner> {
bind_group.same_device_as(cmd_buf)?;
let max_bind_groups = state.device.limits.max_bind_groups;
if index >= max_bind_groups {
return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
@ -668,7 +666,16 @@ fn set_bind_group(
);
state.dynamic_offset_count += num_dynamic_offsets;
if bind_group.is_none() {
// TODO: Handle bind_group None.
return Ok(());
}
let bind_group = bind_group.unwrap();
let bind_group = state.tracker.bind_groups.insert_single(bind_group);
bind_group.same_device_as(cmd_buf)?;
bind_group.validate_dynamic_bindings(index, &state.temp_offsets)?;
state
@ -700,7 +707,7 @@ fn set_bind_group(
state.raw_encoder.set_bind_group(
pipeline_layout,
index + i as u32,
raw_bg,
Some(raw_bg),
&e.dynamic_offsets,
);
}
@ -745,7 +752,7 @@ fn set_pipeline(
state.raw_encoder.set_bind_group(
pipeline.layout.raw(),
start_index as u32 + i as u32,
raw_bg,
Some(raw_bg),
&e.dynamic_offsets,
);
}
@ -952,7 +959,7 @@ impl Global {
&self,
pass: &mut ComputePass,
index: u32,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
offsets: &[DynamicOffset],
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::SetBindGroup;
@ -973,12 +980,18 @@ impl Global {
return Ok(());
}
let mut bind_group = None;
if bind_group_id.is_some() {
let bind_group_id = bind_group_id.unwrap();
let hub = &self.hub;
let bind_group = hub
let bg = hub
.bind_groups
.get(bind_group_id)
.map_err(|_| ComputePassErrorInner::InvalidBindGroupId(bind_group_id))
.map_pass_err(scope)?;
bind_group = Some(bg);
}
base.commands.push(ArcComputeCommand::SetBindGroup {
index,

View File

@ -13,7 +13,7 @@ pub enum ComputeCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
},
SetPipeline(id::ComputePipelineId),
@ -89,16 +89,29 @@ impl ComputeCommand {
index,
num_dynamic_offsets,
bind_group_id,
} => ArcComputeCommand::SetBindGroup {
} => {
if bind_group_id.is_none() {
return Ok(ArcComputeCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| {
bind_group: None,
});
}
let bind_group_id = bind_group_id.unwrap();
let bg = bind_group_guard.get_owned(bind_group_id).map_err(|_| {
ComputePassError {
scope: PassErrorScope::SetBindGroup,
inner: ComputePassErrorInner::InvalidBindGroupId(bind_group_id),
}
})?,
},
})?;
ArcComputeCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group: Some(bg),
}
}
ComputeCommand::SetPipeline(pipeline_id) => ArcComputeCommand::SetPipeline(
pipelines_guard
@ -185,7 +198,7 @@ pub enum ArcComputeCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup>,
bind_group: Option<Arc<BindGroup>>,
},
SetPipeline(Arc<ComputePipeline>),

View File

@ -805,7 +805,7 @@ impl<T: Copy + PartialEq> Default for StateChange<T> {
#[derive(Debug)]
struct BindGroupStateChange {
last_states: [StateChange<id::BindGroupId>; hal::MAX_BIND_GROUPS],
last_states: [StateChange<Option<id::BindGroupId>>; hal::MAX_BIND_GROUPS],
}
impl BindGroupStateChange {
@ -817,7 +817,7 @@ impl BindGroupStateChange {
fn set_and_check_redundant(
&mut self,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
index: u32,
dynamic_offsets: &mut Vec<u32>,
offsets: &[wgt::DynamicOffset],

View File

@ -635,8 +635,8 @@ pub enum RenderPassErrorInner {
SurfaceTextureDropped,
#[error("Not enough memory left for render pass")]
OutOfMemory,
#[error("The bind group at index {0:?} is invalid")]
InvalidBindGroup(u32),
#[error("BindGroupId {0:?} is invalid")]
InvalidBindGroupId(id::BindGroupId),
#[error("Unable to clear non-present/read-only depth")]
InvalidDepthOps,
#[error("Unable to clear non-present/read-only stencil")]
@ -1934,12 +1934,16 @@ fn set_bind_group(
dynamic_offsets: &[DynamicOffset],
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup>,
bind_group: Option<Arc<BindGroup>>,
) -> Result<(), RenderPassErrorInner> {
if bind_group.is_none() {
api_log!("RenderPass::set_bind_group {index} None");
} else {
api_log!(
"RenderPass::set_bind_group {index} {}",
bind_group.error_ident()
bind_group.as_ref().unwrap().error_ident()
);
}
let max_bind_groups = state.device.limits.max_bind_groups;
if index >= max_bind_groups {
@ -1957,6 +1961,12 @@ fn set_bind_group(
);
state.dynamic_offset_count += num_dynamic_offsets;
if bind_group.is_none() {
// TODO: Handle bind_group None.
return Ok(());
}
let bind_group = bind_group.unwrap();
let bind_group = state.tracker.bind_groups.insert_single(bind_group);
bind_group.same_device_as(cmd_buf.as_ref())?;
@ -1999,7 +2009,7 @@ fn set_bind_group(
state.raw_encoder.set_bind_group(
pipeline_layout,
index + i as u32,
raw_bg,
Some(raw_bg),
&e.dynamic_offsets,
);
}
@ -2073,7 +2083,7 @@ fn set_pipeline(
state.raw_encoder.set_bind_group(
pipeline.layout.raw(),
start_index as u32 + i as u32,
raw_bg,
Some(raw_bg),
&e.dynamic_offsets,
);
}
@ -2788,7 +2798,7 @@ impl Global {
&self,
pass: &mut RenderPass,
index: u32,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
offsets: &[DynamicOffset],
) -> Result<(), RenderPassError> {
let scope = PassErrorScope::SetBindGroup;
@ -2808,12 +2818,18 @@ impl Global {
return Ok(());
}
let mut bind_group = None;
if bind_group_id.is_some() {
let bind_group_id = bind_group_id.unwrap();
let hub = &self.hub;
let bind_group = hub
let bg = hub
.bind_groups
.get(bind_group_id)
.map_err(|_| RenderPassErrorInner::InvalidBindGroup(index))
.map_err(|_| RenderPassErrorInner::InvalidBindGroupId(bind_group_id))
.map_pass_err(scope)?;
bind_group = Some(bg);
}
base.commands.push(ArcRenderCommand::SetBindGroup {
index,

View File

@ -17,7 +17,7 @@ pub enum RenderCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group_id: id::BindGroupId,
bind_group_id: Option<id::BindGroupId>,
},
SetPipeline(id::RenderPipelineId),
SetIndexBuffer {
@ -147,16 +147,29 @@ impl RenderCommand {
index,
num_dynamic_offsets,
bind_group_id,
} => ArcRenderCommand::SetBindGroup {
} => {
if bind_group_id.is_none() {
return Ok(ArcRenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| {
bind_group: None,
});
}
let bind_group_id = bind_group_id.unwrap();
let bg = bind_group_guard.get_owned(bind_group_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::SetBindGroup,
inner: RenderPassErrorInner::InvalidBindGroup(index),
inner: RenderPassErrorInner::InvalidBindGroupId(bind_group_id),
}
})?;
ArcRenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group: Some(bg),
}
}
})?,
},
RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline(
pipelines_guard
@ -384,7 +397,7 @@ pub enum ArcRenderCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup>,
bind_group: Option<Arc<BindGroup>>,
},
SetPipeline(Arc<RenderPipeline>),
SetIndexBuffer {

View File

@ -61,7 +61,7 @@ pub trait DynCommandEncoder: DynResource + std::fmt::Debug {
&mut self,
layout: &dyn DynPipelineLayout,
index: u32,
group: &dyn DynBindGroup,
group: Option<&dyn DynBindGroup>,
dynamic_offsets: &[wgt::DynamicOffset],
);
@ -282,9 +282,15 @@ impl<C: CommandEncoder + DynResource> DynCommandEncoder for C {
&mut self,
layout: &dyn DynPipelineLayout,
index: u32,
group: &dyn DynBindGroup,
group: Option<&dyn DynBindGroup>,
dynamic_offsets: &[wgt::DynamicOffset],
) {
if group.is_none() {
// TODO: Handle group None correctly.
return;
}
let group = group.unwrap();
let layout = layout.expect_downcast_ref();
let group = group.expect_downcast_ref();
unsafe { C::set_bind_group(self, layout, index, group, dynamic_offsets) };

View File

@ -48,14 +48,15 @@ impl<'encoder> ComputePass<'encoder> {
pub fn set_bind_group(
&mut self,
index: u32,
bind_group: &BindGroup,
bind_group: Option<&BindGroup>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.map(|x| x.data.as_ref());
DynContext::compute_pass_set_bind_group(
&*self.inner.context,
self.inner.data.as_mut(),
index,
bind_group.data.as_ref(),
bg,
offsets,
);
}

View File

@ -66,14 +66,15 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_bind_group(
&mut self,
index: u32,
bind_group: &'a BindGroup,
bind_group: Option<&'a BindGroup>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.map(|x| x.data.as_ref());
DynContext::render_bundle_encoder_set_bind_group(
&*self.parent.context,
self.data.as_mut(),
index,
bind_group.data.as_ref(),
bg,
offsets,
)
}

View File

@ -77,14 +77,15 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_bind_group(
&mut self,
index: u32,
bind_group: &BindGroup,
bind_group: Option<&BindGroup>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.map(|x| x.data.as_ref());
DynContext::render_pass_set_bind_group(
&*self.inner.context,
self.inner.data.as_mut(),
index,
bind_group.data.as_ref(),
bg,
offsets,
)
}

View File

@ -2757,9 +2757,14 @@ impl crate::context::Context for ContextWebGpu {
&self,
pass_data: &mut Self::ComputePassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
if bind_group_data.is_none() {
// TODO: Handle the None case.
return;
}
let bind_group_data = bind_group_data.unwrap();
if offsets.is_empty() {
pass_data.0.set_bind_group(index, Some(&bind_group_data.0));
} else {
@ -2869,9 +2874,14 @@ impl crate::context::Context for ContextWebGpu {
&self,
encoder_data: &mut Self::RenderBundleEncoderData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
if bind_group_data.is_none() {
// TODO: Handle the None case.
return;
}
let bind_group_data = bind_group_data.unwrap();
if offsets.is_empty() {
encoder_data
.0
@ -3021,9 +3031,14 @@ impl crate::context::Context for ContextWebGpu {
&self,
pass_data: &mut Self::RenderPassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
if bind_group_data.is_none() {
// TODO: Handle the None case.
return;
}
let bind_group_data = bind_group_data.unwrap();
if offsets.is_empty() {
pass_data.0.set_bind_group(index, Some(&bind_group_data.0));
} else {

View File

@ -2168,15 +2168,14 @@ impl crate::Context for ContextWgpuCore {
&self,
pass_data: &mut Self::ComputePassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
if let Err(cause) = self.0.compute_pass_set_bind_group(
&mut pass_data.pass,
index,
*bind_group_data,
offsets,
) {
let bg = bind_group_data.cloned();
if let Err(cause) =
self.0
.compute_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets)
{
self.handle_error(
&pass_data.error_sink,
cause,
@ -2364,14 +2363,15 @@ impl crate::Context for ContextWgpuCore {
&self,
encoder_data: &mut Self::RenderBundleEncoderData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
let bg = bind_group_data.cloned();
unsafe {
wgpu_render_bundle_set_bind_group(
encoder_data,
index,
*bind_group_data,
bg,
offsets.as_ptr(),
offsets.len(),
)
@ -2494,12 +2494,13 @@ impl crate::Context for ContextWgpuCore {
&self,
pass_data: &mut Self::RenderPassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[wgt::DynamicOffset],
) {
let bg = bind_group_data.cloned();
if let Err(cause) =
self.0
.render_pass_set_bind_group(&mut pass_data.pass, index, *bind_group_data, offsets)
.render_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets)
{
self.handle_error(
&pass_data.error_sink,

View File

@ -441,7 +441,7 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
&self,
pass_data: &mut Self::ComputePassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[DynamicOffset],
);
fn compute_pass_set_push_constants(
@ -494,7 +494,7 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
&self,
encoder_data: &mut Self::RenderBundleEncoderData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[DynamicOffset],
);
#[allow(clippy::too_many_arguments)]
@ -557,7 +557,7 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
&self,
pass_data: &mut Self::RenderPassData,
index: u32,
bind_group_data: &Self::BindGroupData,
bind_group_data: Option<&Self::BindGroupData>,
offsets: &[DynamicOffset],
);
#[allow(clippy::too_many_arguments)]
@ -1112,7 +1112,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
&self,
pass_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
);
fn compute_pass_set_push_constants(
@ -1155,7 +1155,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
&self,
encoder_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
);
#[allow(clippy::too_many_arguments)]
@ -1214,7 +1214,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
&self,
pass_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
);
#[allow(clippy::too_many_arguments)]
@ -2161,12 +2161,12 @@ where
&self,
pass_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
) {
let pass_data = downcast_mut::<T::ComputePassData>(pass_data);
let bind_group_data = downcast_ref(bind_group_data);
Context::compute_pass_set_bind_group(self, pass_data, index, bind_group_data, offsets)
let bg = bind_group_data.map(downcast_ref);
Context::compute_pass_set_bind_group(self, pass_data, index, bg, offsets)
}
fn compute_pass_set_push_constants(
@ -2272,18 +2272,12 @@ where
&self,
encoder_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
) {
let encoder_data = downcast_mut::<T::RenderBundleEncoderData>(encoder_data);
let bind_group_data = downcast_ref(bind_group_data);
Context::render_bundle_encoder_set_bind_group(
self,
encoder_data,
index,
bind_group_data,
offsets,
)
let bg = bind_group_data.map(downcast_ref);
Context::render_bundle_encoder_set_bind_group(self, encoder_data, index, bg, offsets)
}
fn render_bundle_encoder_set_index_buffer(
@ -2406,12 +2400,12 @@ where
&self,
pass_data: &mut crate::Data,
index: u32,
bind_group_data: &crate::Data,
bind_group_data: Option<&crate::Data>,
offsets: &[DynamicOffset],
) {
let pass_data = downcast_mut::<T::RenderPassData>(pass_data);
let bind_group_data = downcast_ref(bind_group_data);
Context::render_pass_set_bind_group(self, pass_data, index, bind_group_data, offsets)
let bg = bind_group_data.map(downcast_ref);
Context::render_pass_set_bind_group(self, pass_data, index, bg, offsets)
}
fn render_pass_set_index_buffer(

View File

@ -10,7 +10,12 @@ pub trait RenderEncoder<'a> {
/// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
///
/// If the bind group have dynamic offsets, provide them in order of their declaration.
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]);
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&'a BindGroup>,
offsets: &[DynamicOffset],
);
/// Sets the active render pipeline.
///
@ -101,7 +106,12 @@ pub trait RenderEncoder<'a> {
impl<'a> RenderEncoder<'a> for RenderPass<'a> {
#[inline(always)]
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&'a BindGroup>,
offsets: &[DynamicOffset],
) {
Self::set_bind_group(self, index, bind_group, offsets);
}
@ -152,7 +162,12 @@ impl<'a> RenderEncoder<'a> for RenderPass<'a> {
impl<'a> RenderEncoder<'a> for RenderBundleEncoder<'a> {
#[inline(always)]
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&'a BindGroup>,
offsets: &[DynamicOffset],
) {
Self::set_bind_group(self, index, bind_group, offsets);
}