Properly Deal with Timeouts (#7030)

This commit is contained in:
Connor Fitzgerald 2025-02-14 18:19:51 -05:00 committed by GitHub
parent f90f19c7e8
commit 7e119968ce
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
74 changed files with 475 additions and 358 deletions

View File

@ -40,7 +40,7 @@ Bottom level categories:
## Unreleased
### Major Changes
### Major Features
#### Hashmaps Removed from APIs
@ -51,6 +51,66 @@ also allows more easily creating these structures inline.
By @cwfitzgerald in [#7133](https://github.com/gfx-rs/wgpu/pull/7133)
#### `device.poll` Api Reworked
This release reworked the poll API significantly to allow polling to return errors when polling hits internal timeout limits.
`Maintain` was renamed `PollType`. Additionally, `poll` now returns a result containing information about what happened during the poll.
```diff
-pub fn wgpu::Device::poll(&self, maintain: wgpu::Maintain) -> wgpu::MaintainResult
+pub fn wgpu::Device::poll(&self, poll_type: wgpu::PollType) -> Result<wgpu::PollStatus, wgpu::PollError>
-device.poll(wgpu::Maintain::Poll);
+device.poll(wgpu::PollType::Poll).unwrap();
```
```rust
pub enum PollType<T> {
/// On wgpu-core based backends, block until the given submission has
/// completed execution, and any callbacks have been invoked.
///
/// On WebGPU, this has no effect. Callbacks are invoked from the
/// window event loop.
WaitForSubmissionIndex(T),
/// Same as WaitForSubmissionIndex but waits for the most recent submission.
Wait,
/// Check the device for a single time without blocking.
Poll,
}
pub enum PollStatus {
/// There are no active submissions in flight as of the beginning of the poll call.
/// Other submissions may have been queued on other threads during the call.
///
/// This implies that the given Wait was satisfied before the timeout.
QueueEmpty,
/// The requested Wait was satisfied before the timeout.
WaitSucceeded,
/// This was a poll.
Poll,
}
pub enum PollError {
/// The requested Wait timed out before the submission was completed.
Timeout,
}
```
> [!WARNING]
> As part of this change, WebGL's default behavior has changed. Previously `device.poll(Wait)` appeared as though it functioned correctly. This was a quirk caused by the bug that these PRs fixed. Now it will always return `Timeout` if the submission has not already completed. As many people rely on this behavior on WebGL, there is a new option in `BackendOptions`. If you want the old behavior, set the following on instance creation:
>
> ```rust
> instance_desc.backend_options.gl.fence_behavior = wgpu::GlFenceBehavior::AutoFinish;
> ```
>
> You will lose the ability to know exactly when a submission has completed, but `device.poll(Wait)` will behave the same as it does on native.
By @cwfitzgerald in [#6942](https://github.com/gfx-rs/wgpu/pull/6942).
By @cwfitzgerald in [#7030](https://github.com/gfx-rs/wgpu/pull/7030).
### New Features
#### General

1
Cargo.lock generated
View File

@ -4734,6 +4734,7 @@ dependencies = [
"log",
"serde",
"serde_json",
"thiserror 2.0.11",
"web-sys",
]

View File

@ -152,7 +152,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
drop(bind_group);
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration

View File

@ -486,7 +486,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
}
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration
@ -531,7 +535,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
state.device_state.queue.submit(buffers);
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration
@ -573,7 +581,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
state.device_state.queue.submit([buffer]);
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration

View File

@ -492,7 +492,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
}
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration
@ -535,7 +539,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
state.device_state.queue.submit(buffers);
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration
@ -571,7 +579,11 @@ fn run_bench(ctx: &mut Criterion) {
duration += start.elapsed();
state.device_state.queue.submit([buffer]);
state.device_state.device.poll(wgpu::Maintain::Wait);
state
.device_state
.device
.poll(wgpu::PollType::Wait)
.unwrap();
}
duration

View File

@ -61,7 +61,7 @@ fn run_bench(ctx: &mut Criterion) {
drop(buffers);
state.queue.submit([]);
state.device.poll(wgpu::Maintain::Wait);
state.device.poll(wgpu::PollType::Wait).unwrap();
}
duration

View File

@ -161,7 +161,7 @@ impl GPUBuffer {
while !*done.borrow() {
{
self.instance
.device_poll(self.device, wgpu_types::Maintain::wait())
.device_poll(self.device, wgpu_types::PollType::wait())
.unwrap();
}
tokio::time::sleep(Duration::from_millis(10)).await;

View File

@ -615,7 +615,7 @@ impl GPUDevice {
#[fast]
fn stop_capture(&self) {
self.instance
.device_poll(self.id, wgpu_types::Maintain::wait())
.device_poll(self.id, wgpu_types::PollType::wait())
.unwrap();
self.instance.device_stop_capture(self.id);
}

View File

@ -592,9 +592,7 @@ impl<E: Example + wgpu::WasmNotSendSync> From<ExampleTestParams<E>>
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(

View File

@ -183,7 +183,7 @@ async fn get_data<T: bytemuck::Pod>(
let buffer_slice = staging_buffer.slice(..);
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
receiver.recv_async().await.unwrap().unwrap();
output.copy_from_slice(bytemuck::cast_slice(&buffer_slice.get_mapped_range()[..]));
staging_buffer.unmap();

View File

@ -172,7 +172,7 @@ async fn get_data<T: bytemuck::Pod>(
let buffer_slice = staging_buffer.slice(..);
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
receiver.recv_async().await.unwrap().unwrap();
output.copy_from_slice(bytemuck::cast_slice(&buffer_slice.get_mapped_range()[..]));
staging_buffer.unmap();

View File

@ -410,7 +410,7 @@ impl crate::framework::Example for Example {
.slice(..)
.map_async(wgpu::MapMode::Read, |_| ());
// Wait for device to be done rendering mipmaps
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
// This is guaranteed to be ready.
let timestamp_view = query_sets
.mapping_buffer

View File

@ -355,7 +355,7 @@ impl crate::framework::Example for Example {
rpass.draw_indexed(0..12, 0, 0..1);
}
queue.submit(Some(encoder.finish()));
device.poll(wgpu::Maintain::Wait);
device.poll(wgpu::PollType::Wait).unwrap();
}
}

View File

@ -132,7 +132,7 @@ async fn run(_path: Option<String>) {
let buffer_slice = output_staging_buffer.slice(..);
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
receiver.recv_async().await.unwrap().unwrap();
log::info!("Output buffer mapped.");
{

View File

@ -106,11 +106,8 @@ async fn compute(local_buffer: &mut [u32], context: &WgpuContext) {
// In order for the mapping to be completed, one of three things must happen.
// One of those can be calling `Device::poll`. This isn't necessary on the web as devices
// are polled automatically but natively, we need to make sure this happens manually.
// `Maintain::Wait` will cause the thread to wait on native but not on WebGpu.
context
.device
.poll(wgpu::Maintain::wait())
.panic_on_timeout();
// `PollType::Wait` will cause the thread to wait on native but not on WebGpu.
context.device.poll(wgpu::PollType::wait()).unwrap();
log::info!("Device polled.");
// Now we await the receiving and panic if anything went wrong because we're lazy.
receiver.recv_async().await.unwrap().unwrap();

View File

@ -143,7 +143,7 @@ async fn run(_path: Option<String>) {
let buffer_slice = output_staging_buffer.slice(..);
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
receiver.recv_async().await.unwrap().unwrap();
log::info!("Output buffer mapped");
{

View File

@ -161,7 +161,7 @@ impl Queries {
self.destination_buffer
.slice(..)
.map_async(wgpu::MapMode::Read, |_| ());
device.poll(wgpu::Maintain::wait()).panic_on_timeout();
device.poll(wgpu::PollType::wait()).unwrap();
let timestamps = {
let timestamp_view = self

View File

@ -243,7 +243,7 @@ fn main() {
// Wait for the GPU to finish working on the submitted work. This doesn't work on WebGPU, so we would need
// to rely on the callback to know when the buffer is mapped.
device.poll(wgpu::Maintain::Wait);
device.poll(wgpu::PollType::Wait).unwrap();
// We can now read the data from the buffer.
let data = buffer_slice.get_mapped_range();

View File

@ -111,7 +111,7 @@ fn main() {
}
global.device_stop_capture(device);
global.device_poll(device, wgt::Maintain::wait()).unwrap();
global.device_poll(device, wgt::PollType::wait()).unwrap();
}
#[cfg(feature = "winit")]
{
@ -203,7 +203,7 @@ fn main() {
},
Event::LoopExiting => {
log::info!("Closing");
global.device_poll(device, wgt::Maintain::wait()).unwrap();
global.device_poll(device, wgt::PollType::wait()).unwrap();
}
_ => {}
}

View File

@ -133,7 +133,7 @@ impl Test<'_> {
println!("\t\t\tWaiting...");
global
.device_poll(device_id, wgt::Maintain::wait())
.device_poll(device_id, wgt::PollType::wait())
.unwrap();
for expect in self.expectations {

View File

@ -574,7 +574,7 @@ impl ReadbackBuffers {
) -> Vec<u8> {
let buffer_slice = buffer.slice(..);
buffer_slice.map_async(MapMode::Read, |_| ());
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let (block_width, block_height) = self.texture_format.block_dimensions();
let expected_bytes_per_row = (self.texture_width / block_width)
* self.texture_format.block_copy_size(aspect).unwrap_or(4);

View File

@ -42,7 +42,20 @@ pub fn initialize_instance(backends: wgpu::Backends, force_fxc: bool) -> Instanc
dx12: wgpu::Dx12BackendOptions {
shader_compiler: dx12_shader_compiler,
},
gl: wgpu::GlBackendOptions::from_env_or_default(),
gl: wgpu::GlBackendOptions {
fence_behavior: if cfg!(target_family = "wasm") {
// On WebGL, you cannot call Poll(Wait) with any timeout. This is because the
// browser does not allow anything to block. However, all of our tests are written to
// expect this behavior. This is the workaround to allow this to work.
//
// However on native you can wait, so we want to ensure that behavior as well.
wgpu::GlFenceBehavior::AutoFinish
} else {
wgpu::GlFenceBehavior::Normal
},
..Default::default()
}
.with_env(),
// TODO(https://github.com/gfx-rs/wgpu/issues/7119): Enable noop backend?
noop: wgpu::NoopBackendOptions::default(),
},

View File

@ -2,7 +2,10 @@ use crate::TestingContext;
impl TestingContext {
/// Utility to allow future asynchronous polling.
pub async fn async_poll(&self, maintain: wgpu::Maintain) -> wgpu::MaintainResult {
self.device.poll(maintain)
pub async fn async_poll(
&self,
poll_type: wgpu::PollType,
) -> Result<wgpu::PollStatus, wgpu::PollError> {
self.device.poll(poll_type)
}
}

View File

@ -142,9 +142,7 @@ static BGRA8_UNORM_STORAGE: GpuTestConfiguration = GpuTestConfiguration::new()
let buffer_slice = readback_buffer.slice(..);
buffer_slice.map_async(wgpu::MapMode::Read, Result::unwrap);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
{
let texels = buffer_slice.get_mapped_range();

View File

@ -257,7 +257,7 @@ async fn binding_array_buffers(
let slice = readback_buffer.slice(..);
slice.map_async(MapMode::Read, |_| {});
ctx.device.poll(Maintain::Wait);
ctx.device.poll(PollType::Wait).unwrap();
let data = slice.get_mapped_range();

View File

@ -243,7 +243,7 @@ async fn binding_array_samplers(ctx: TestingContext, partially_bound: bool) {
ctx.queue.submit(Some(encoder.finish()));
readback_buffer.slice(..).map_async(MapMode::Read, |_| {});
ctx.device.poll(Maintain::Wait);
ctx.device.poll(PollType::Wait).unwrap();
let readback_buffer_slice = readback_buffer.slice(..).get_mapped_range();

View File

@ -14,9 +14,7 @@ async fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label:
b0.slice(0..0)
.map_async(wgpu::MapMode::Read, Result::unwrap);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
{
let view = b0.slice(0..0).get_mapped_range();
@ -50,9 +48,7 @@ async fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label:
b0.slice(0..0)
.map_async(wgpu::MapMode::Write, Result::unwrap);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
//{
// let view = b0.slice(0..0).get_mapped_range_mut();
@ -81,9 +77,7 @@ async fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label:
b1.unmap();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
#[gpu_test]
@ -122,9 +116,7 @@ static MAP_OFFSET: GpuTestConfiguration = GpuTestConfiguration::new().run_async(
result.unwrap();
});
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
{
let slice = write_buf.slice(32..48);
@ -148,9 +140,7 @@ static MAP_OFFSET: GpuTestConfiguration = GpuTestConfiguration::new().run_async(
.slice(..)
.map_async(wgpu::MapMode::Read, Result::unwrap);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let slice = read_buf.slice(..);
let view = slice.get_mapped_range();

View File

@ -139,9 +139,7 @@ async fn map_test(
buffer.destroy();
}
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
if !before_unmap && !before_destroy {
{

View File

@ -35,7 +35,7 @@ fn cloneable_buffers(ctx: TestingContext) {
assert_eq!(&*data, &cloned_buffer_contents);
});
ctx.device.poll(wgpu::Maintain::Wait);
ctx.device.poll(wgpu::PollType::Wait).unwrap();
let data = buffer.slice(..).get_mapped_range();

View File

@ -52,9 +52,7 @@ async fn compute_pass_resource_ownership(ctx: TestingContext) {
drop(pipeline);
drop(bind_group);
drop(indirect_buffer);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_compute_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -102,9 +100,7 @@ async fn compute_pass_query_set_ownership_pipeline_statistics(ctx: TestingContex
// Drop the query set. Then do a device poll to make sure it's not dropped too early, no matter what.
drop(query_set);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_compute_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -160,9 +156,7 @@ async fn compute_pass_query_set_ownership_timestamps(ctx: TestingContext) {
// Drop the query sets. Then do a device poll to make sure they're not dropped too early, no matter what.
drop(query_set_timestamp_writes);
drop(query_set_write_timestamp);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_compute_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -197,9 +191,7 @@ async fn compute_pass_keep_encoder_alive(ctx: TestingContext) {
let mut cpass = cpass.forget_lifetime();
drop(encoder);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
// Record some draw commands.
cpass.set_pipeline(&pipeline);
@ -223,9 +215,7 @@ async fn assert_compute_pass_executed_normally(
encoder.copy_buffer_to_buffer(&gpu_buffer, 0, &cpu_buffer, 0, buffer_size);
ctx.queue.submit([encoder.finish()]);
cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data = cpu_buffer.slice(..).get_mapped_range();

View File

@ -27,9 +27,7 @@ static CROSS_DEVICE_BIND_GROUP_USAGE: GpuTestConfiguration = GpuTestConfiguratio
});
}
ctx.async_poll(wgpu::Maintain::Poll)
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::Poll).await.unwrap();
});
#[cfg(not(all(target_arch = "wasm32", not(target_os = "emscripten"))))]
@ -615,8 +613,9 @@ static DEVICE_DESTROY_THEN_LOST: GpuTestConfiguration = GpuTestConfiguration::ne
// Make sure the device queues are empty, which ensures that the closure
// has been called.
assert!(ctx
.async_poll(wgpu::Maintain::wait())
.async_poll(wgpu::PollType::wait())
.await
.unwrap()
.is_queue_empty());
assert!(

View File

@ -300,9 +300,7 @@ async fn run_test(ctx: &TestingContext, num_workgroups: &[u32; 3]) -> [u32; 3] {
.slice(..)
.map_async(wgpu::MapMode::Read, |_| {});
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let view = test_resources.readback_buffer.slice(..).get_mapped_range();

View File

@ -328,9 +328,7 @@ static IMAGE_BITMAP_IMPORT: GpuTestConfiguration =
readback_buffer
.slice(..)
.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let buffer = readback_buffer.slice(..).get_mapped_range();

View File

@ -14,9 +14,7 @@ static BUFFER_DESTROY: GpuTestConfiguration =
buffer.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
fail(
&ctx.device,
@ -30,9 +28,7 @@ static BUFFER_DESTROY: GpuTestConfiguration =
buffer.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
buffer.destroy();
@ -54,9 +50,7 @@ static BUFFER_DESTROY: GpuTestConfiguration =
}
let buffer = ctx.device.create_buffer(&descriptor);
buffer.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let buffer = ctx.device.create_buffer(&descriptor);
buffer.destroy();
{
@ -65,16 +59,12 @@ static BUFFER_DESTROY: GpuTestConfiguration =
let buffer = ctx.device.create_buffer(&descriptor);
buffer.destroy();
let buffer = ctx.device.create_buffer(&descriptor);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
buffer.destroy();
}
let buffer = ctx.device.create_buffer(&descriptor);
buffer.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
});
#[gpu_test]
@ -99,15 +89,11 @@ static TEXTURE_DESTROY: GpuTestConfiguration =
texture.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
texture.destroy();
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
texture.destroy();

View File

@ -245,9 +245,9 @@ async fn draw_test_with_reports(
// let report = global_report.hub_report();
// assert_eq!(report.command_buffers.num_allocated, 0);
ctx.async_poll(wgpu::Maintain::wait_for(submit_index))
ctx.async_poll(wgpu::PollType::wait_for(submit_index))
.await
.panic_on_timeout();
.unwrap();
let global_report = ctx.instance.generate_report().unwrap();
let report = global_report.hub_report();

View File

@ -115,9 +115,7 @@ static OCCLUSION_QUERY: GpuTestConfiguration = GpuTestConfiguration::new()
mapping_buffer
.slice(..)
.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let query_buffer_view = mapping_buffer.slice(..).get_mapped_range();
let query_data: &[u64; 3] = bytemuck::from_bytes(&query_buffer_view);

View File

@ -41,9 +41,7 @@ static RESTRICT_WORKGROUP_PRIVATE_FUNCTION_LET: GpuTestConfiguration = GpuTestCo
.slice(..)
.map_async(wgpu::MapMode::Read, |_| {});
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let view = test_resources.readback_buffer.slice(..).get_mapped_range();
@ -444,9 +442,7 @@ async fn d3d12_restrict_dynamic_buffers(ctx: TestingContext) {
.slice(..)
.map_async(wgpu::MapMode::Read, |_| {});
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let view = readback_buffer.slice(..).get_mapped_range();

View File

@ -175,9 +175,7 @@ async fn validate_pipeline(
encoder.copy_buffer_to_buffer(gpu_buffer, 0, cpu_buffer, 0, ARRAY_SIZE * 4);
ctx.queue.submit([encoder.finish()]);
cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data = cpu_buffer.slice(..).get_mapped_range();

View File

@ -3,7 +3,7 @@ use std::num::NonZeroU64;
use wgpu::{
BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry,
BindingResource, BindingType, BufferBindingType, BufferDescriptor, BufferUsages, CommandBuffer,
CommandEncoderDescriptor, ComputePassDescriptor, Maintain, ShaderStages,
CommandEncoderDescriptor, ComputePassDescriptor, PollType, ShaderStages,
};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestingContext};
@ -57,7 +57,7 @@ static WAIT: GpuTestConfiguration = GpuTestConfiguration::new().run_async(|ctx|
let cmd_buf = generate_dummy_work(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
});
#[gpu_test]
@ -66,8 +66,8 @@ static DOUBLE_WAIT: GpuTestConfiguration =
let cmd_buf = generate_dummy_work(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
ctx.async_poll(PollType::wait()).await.unwrap();
});
#[gpu_test]
@ -76,9 +76,7 @@ static WAIT_ON_SUBMISSION: GpuTestConfiguration =
let cmd_buf = generate_dummy_work(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
ctx.async_poll(Maintain::wait_for(index))
.await
.panic_on_timeout();
ctx.async_poll(PollType::wait_for(index)).await.unwrap();
});
#[gpu_test]
@ -87,12 +85,10 @@ static DOUBLE_WAIT_ON_SUBMISSION: GpuTestConfiguration =
let cmd_buf = generate_dummy_work(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
ctx.async_poll(Maintain::wait_for(index.clone()))
ctx.async_poll(PollType::wait_for(index.clone()))
.await
.panic_on_timeout();
ctx.async_poll(Maintain::wait_for(index))
.await
.panic_on_timeout();
.unwrap();
ctx.async_poll(PollType::wait_for(index)).await.unwrap();
});
#[gpu_test]
@ -103,12 +99,8 @@ static WAIT_OUT_OF_ORDER: GpuTestConfiguration =
let index1 = ctx.queue.submit(Some(cmd_buf1));
let index2 = ctx.queue.submit(Some(cmd_buf2));
ctx.async_poll(Maintain::wait_for(index2))
.await
.panic_on_timeout();
ctx.async_poll(Maintain::wait_for(index1))
.await
.panic_on_timeout();
ctx.async_poll(PollType::wait_for(index2)).await.unwrap();
ctx.async_poll(PollType::wait_for(index1)).await.unwrap();
});
/// Submit a command buffer to the wrong device. A wait poll shouldn't hang.
@ -142,5 +134,5 @@ async fn wait_after_bad_submission(ctx: TestingContext) {
// Specifically, the failed submission should not cause a new fence value to
// be allocated that will not be signalled until further work is
// successfully submitted, causing a greater fence value to be signalled.
device2.poll(wgpu::Maintain::Wait);
device2.poll(wgpu::PollType::Wait).unwrap();
}

View File

@ -144,9 +144,7 @@ async fn partial_update_test(ctx: TestingContext) {
encoder.copy_buffer_to_buffer(&gpu_buffer, 0, &cpu_buffer, 0, 32);
ctx.queue.submit([encoder.finish()]);
cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data = cpu_buffer.slice(..).get_mapped_range();
@ -363,9 +361,7 @@ async fn render_pass_test(ctx: &TestingContext, use_render_bundle: bool) {
let command_buffer = command_encoder.finish();
ctx.queue.submit([command_buffer]);
cpu_buffer.slice(..).map_async(MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let mapped_data = cpu_buffer.slice(..).get_mapped_range();
let result = bytemuck::cast_slice::<u8, i32>(&mapped_data).to_vec();
drop(mapped_data);

View File

@ -7,7 +7,7 @@ use wgpu::{
BlasBuildEntry, BlasGeometries, BlasGeometrySizeDescriptors, BlasTriangleGeometry,
BlasTriangleGeometrySizeDescriptor, BufferAddress, BufferUsages, CommandEncoderDescriptor,
ComputePassDescriptor, ComputePipelineDescriptor, CreateBlasDescriptor, CreateTlasDescriptor,
Maintain, TlasInstance, TlasPackage, VertexFormat,
PollType, TlasInstance, TlasPackage, VertexFormat,
};
use wgpu_macros::gpu_test;
use wgpu_test::{FailureCase, GpuTestConfiguration, TestParameters, TestingContext};
@ -89,7 +89,7 @@ fn acceleration_structure_use_after_free(ctx: TestingContext) {
// Drop the blas and ensure that if it was going to die, it is dead.
drop(blas);
ctx.device.poll(Maintain::Wait);
ctx.device.poll(PollType::Wait).unwrap();
// build the tlas package to ensure the blas is dropped
let mut encoder = ctx
@ -124,7 +124,7 @@ fn acceleration_structure_use_after_free(ctx: TestingContext) {
// Drop the TLAS package and ensure that if it was going to die, it is dead.
drop(tlas_package);
ctx.device.poll(Maintain::Wait);
ctx.device.poll(PollType::Wait).unwrap();
// Run the pass with the bind group that references the TLAS package.
let mut encoder = ctx

View File

@ -95,7 +95,7 @@ fn acceleration_structure_build(ctx: &TestingContext, use_index_buffer: bool) {
ctx.queue.submit(Some(encoder.finish()));
ctx.device.poll(wgpu::Maintain::Wait);
ctx.device.poll(wgpu::PollType::Wait).unwrap();
}
#[gpu_test]

View File

@ -166,7 +166,7 @@ static PASS_RESET_VERTEX_BUFFER: GpuTestConfiguration =
drop(vertex_buffer2);
// Make sure the buffers are actually deleted.
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let mut encoder2 = ctx
.device

View File

@ -36,7 +36,7 @@ static QUEUE_SUBMITTED_CALLBACK_ORDERING: GpuTestConfiguration = GpuTestConfigur
// Submit the work.
ctx.queue.submit(Some(encoder.finish()));
// Ensure the work is finished.
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
#[derive(Debug)]
struct OrderingContext {
@ -74,7 +74,7 @@ static QUEUE_SUBMITTED_CALLBACK_ORDERING: GpuTestConfiguration = GpuTestConfigur
});
// No GPU work is happening at this point, but we want to process callbacks.
ctx.async_poll(MaintainBase::Poll).await.panic_on_timeout();
ctx.async_poll(MaintainBase::Poll).await.unwrap();
// Extract the ordering out of the arc.
let ordering = Arc::into_inner(ordering).unwrap().into_inner();

View File

@ -32,9 +32,7 @@ async fn fill_test(ctx: &TestingContext, range: Range<u64>, size: u64) -> bool {
ctx.queue.submit(Some(encoder.finish()));
cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let buffer_slice = cpu_buffer.slice(..);
let buffer_data = buffer_slice.get_mapped_range();

View File

@ -73,7 +73,7 @@ async fn run_test(ctx: TestingContext, use_many_writes: bool) {
let result_cell = result_cell.clone();
move |result| result_cell.set(result).unwrap()
});
device.poll(wgpu::Maintain::Wait);
device.poll(wgpu::PollType::Wait).unwrap();
result_cell
.get()
.as_ref()

View File

@ -101,9 +101,7 @@ async fn render_pass_resource_ownership(ctx: TestingContext) {
drop(vertex_buffer);
drop(index_buffer);
drop(occlusion_query_set);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_render_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -172,9 +170,7 @@ async fn render_pass_query_set_ownership_pipeline_statistics(ctx: TestingContext
// Drop the query set. Then do a device poll to make sure it's not dropped too early, no matter what.
drop(query_set);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_render_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -250,9 +246,7 @@ async fn render_pass_query_set_ownership_timestamps(ctx: TestingContext) {
// Drop the query sets. Then do a device poll to make sure they're not dropped too early, no matter what.
drop(query_set_timestamp_writes);
drop(query_set_write_timestamp);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
}
assert_render_pass_executed_normally(encoder, gpu_buffer, cpu_buffer, buffer_size, ctx).await;
@ -299,9 +293,7 @@ async fn render_pass_keep_encoder_alive(ctx: TestingContext) {
let mut rpass = rpass.forget_lifetime();
drop(encoder);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
// Record some a draw command.
rpass.set_pipeline(&pipeline);
@ -327,9 +319,7 @@ async fn assert_render_pass_executed_normally(
encoder.copy_buffer_to_buffer(&gpu_buffer, 0, &cpu_buffer, 0, buffer_size);
ctx.queue.submit([encoder.finish()]);
cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data = cpu_buffer.slice(..).get_mapped_range();

View File

@ -110,7 +110,7 @@ fn sampler_creation_failure(ctx: TestingContext) {
let failed_count = sampler_storage.len();
sampler_storage.clear();
ctx.device.poll(wgpu::Maintain::Wait);
ctx.device.poll(wgpu::PollType::Wait).unwrap();
for i in 0..failed_count {
valid(&ctx.device, || {
@ -525,7 +525,7 @@ fn sampler_bind_group(ctx: TestingContext, group_type: GroupType) {
let buffer_slice = transfer_buffer.slice(..);
buffer_slice.map_async(wgpu::MapMode::Read, |_| {});
ctx.device.poll(wgpu::Maintain::Wait);
ctx.device.poll(wgpu::PollType::Wait).unwrap();
let buffer_data = buffer_slice.get_mapped_range();

View File

@ -1,6 +1,6 @@
use std::mem::size_of_val;
use wgpu::util::DeviceExt;
use wgpu::{BufferDescriptor, BufferUsages, Maintain, MapMode};
use wgpu::{BufferDescriptor, BufferUsages, MapMode, PollType};
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration, TestParameters, TestingContext};
const SHADER: &str = r#"
@ -122,7 +122,7 @@ async fn array_size_overrides(
ctx.queue.submit(Some(encoder.finish()));
mapping_buffer.slice(..).map_async(MapMode::Read, |_| ());
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let mapped = mapping_buffer.slice(..).get_mapped_range();

View File

@ -9,7 +9,7 @@ use std::{borrow::Cow, fmt::Debug};
use wgpu::{
Backends, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry,
BindingType, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, ComputePassDescriptor,
ComputePipelineDescriptor, Maintain, MapMode, PipelineLayoutDescriptor, PushConstantRange,
ComputePipelineDescriptor, MapMode, PipelineLayoutDescriptor, PollType, PushConstantRange,
ShaderModuleDescriptor, ShaderSource, ShaderStages,
};
@ -367,7 +367,7 @@ async fn shader_input_output_test(
ctx.queue.submit(Some(encoder.finish()));
mapping_buffer.slice(..).map_async(MapMode::Read, |_| ());
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let mapped = mapping_buffer.slice(..).get_mapped_range();

View File

@ -1,6 +1,6 @@
use std::mem::size_of_val;
use wgpu::util::DeviceExt;
use wgpu::{BufferDescriptor, BufferUsages, Maintain, MapMode};
use wgpu::{BufferDescriptor, BufferUsages, MapMode, PollType};
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration, TestParameters, TestingContext};
const SHADER: &str = r#"
@ -107,7 +107,7 @@ async fn workgroup_size_overrides(
ctx.queue.submit(Some(encoder.finish()));
mapping_buffer.slice(..).map_async(MapMode::Read, |_| ());
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let mapped = mapping_buffer.slice(..).get_mapped_range();

View File

@ -4,7 +4,7 @@ use wgpu::{
include_wgsl, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor,
BindGroupLayoutEntry, BindingResource, BindingType, BufferBinding, BufferBindingType,
BufferDescriptor, BufferUsages, CommandEncoderDescriptor, ComputePassDescriptor,
ComputePipelineDescriptor, DownlevelFlags, Limits, Maintain, MapMode, PipelineLayoutDescriptor,
ComputePipelineDescriptor, DownlevelFlags, Limits, MapMode, PipelineLayoutDescriptor, PollType,
ShaderStages,
};
@ -131,7 +131,7 @@ static ZERO_INIT_WORKGROUP_MEMORY: GpuTestConfiguration = GpuTestConfiguration::
ctx.queue.submit(Some(encoder.finish()));
mapping_buffer.slice(..).map_async(MapMode::Read, |_| ());
ctx.async_poll(Maintain::wait()).await.panic_on_timeout();
ctx.async_poll(PollType::wait()).await.unwrap();
let mapped = mapping_buffer.slice(..).get_mapped_range();

View File

@ -184,9 +184,7 @@ async fn reinterpret(
let slice = read_buffer.slice(..);
slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data: Vec<u8> = slice.get_mapped_range().to_vec();
let tolerance_data: [[u8; 4]; 4] = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 1, 1, 0]];

View File

@ -2,8 +2,8 @@ use std::time::Duration;
use wgpu::wgt::BufferDescriptor;
use wgpu::{
include_wgsl, BindGroupDescriptor, BindGroupEntry, BindingResource, BufferUsages,
ComputePassDescriptor, ComputePipelineDescriptor, DownlevelFlags, Extent3d, Features, Maintain,
MapMode, Origin3d, TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo,
ComputePassDescriptor, ComputePipelineDescriptor, DownlevelFlags, Extent3d, Features, MapMode,
Origin3d, PollType, TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo,
TextureAspect, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages,
};
use wgpu_macros::gpu_test;
@ -178,7 +178,7 @@ fn single_scalar_load(ctx: TestingContext) {
send.send(()).expect("Thread should wait for receive");
});
// Poll to run map.
ctx.device.poll(Maintain::Wait);
ctx.device.poll(PollType::Wait).unwrap();
recv.recv_timeout(Duration::from_secs(10))
.expect("mapping should not take this long");
let val = *bytemuck::from_bytes::<[f32; 4]>(&buffer.slice(..).get_mapped_range());

View File

@ -376,15 +376,11 @@ async fn vertex_formats_common(ctx: TestingContext, tests: &[Test<'_>]) {
// See https://github.com/gfx-rs/wgpu/issues/4732 for why this is split between two submissions
// with a hard wait in between.
ctx.queue.submit([encoder1.finish()]);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
ctx.queue.submit([encoder2.finish()]);
let slice = cpu_buffer.slice(..);
slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data: Vec<f32> = bytemuck::cast_slice(&slice.get_mapped_range()).to_vec();
let case_name = format!("Case {:?}", test.case);

View File

@ -455,15 +455,11 @@ async fn vertex_index_common(ctx: TestingContext) {
// See https://github.com/gfx-rs/wgpu/issues/4732 for why this is split between two submissions
// with a hard wait in between.
ctx.queue.submit([encoder1.finish()]);
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
ctx.queue.submit([encoder2.finish()]);
let slice = cpu_buffer.slice(..);
slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data: Vec<u32> = bytemuck::cast_slice(&slice.get_mapped_range()).to_vec();
let case_name = format!(

View File

@ -84,9 +84,7 @@ static WRITE_TEXTURE_SUBSET_2D: GpuTestConfiguration =
let slice = read_buffer.slice(..);
slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data: Vec<u8> = slice.get_mapped_range().to_vec();
for byte in &data[..(size as usize * 2)] {
@ -179,9 +177,7 @@ static WRITE_TEXTURE_SUBSET_3D: GpuTestConfiguration =
let slice = read_buffer.slice(..);
slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
ctx.async_poll(wgpu::PollType::wait()).await.unwrap();
let data: Vec<u8> = slice.get_mapped_range().to_vec();
for byte in &data[..((size * size) as usize * 2)] {

View File

@ -41,6 +41,6 @@ fn device_and_buffers() {
assert_eq!(*result.unwrap(), [1, 2, 3, 4, 5, 6, 7, 8],);
done.store(true, Relaxed);
});
device.poll(wgpu::Maintain::Wait);
device.poll(wgpu::PollType::Wait).unwrap();
assert!(done2.load(Relaxed));
}

View File

@ -1869,9 +1869,21 @@ impl Global {
// Wait for all work to finish before configuring the surface.
let snatch_guard = device.snatchable_lock.read();
let fence = device.fence.read();
match device.maintain(fence, wgt::Maintain::Wait, snatch_guard) {
Ok((closures, _)) => {
user_callbacks = closures;
let maintain_result;
(user_callbacks, maintain_result) =
device.maintain(fence, wgt::PollType::Wait, snatch_guard);
match maintain_result {
// We're happy
Ok(wgt::PollStatus::QueueEmpty) => {}
Ok(wgt::PollStatus::WaitSucceeded) => {
// After the wait, the queue should be empty. It can only be non-empty
// if another thread is submitting at the same time.
break 'error E::GpuWaitTimeout;
}
Ok(wgt::PollStatus::Poll) => {
unreachable!("Cannot get a Poll result from a Wait action.")
}
Err(e) => {
break 'error e.into();
@ -1931,38 +1943,32 @@ impl Global {
pub fn device_poll(
&self,
device_id: DeviceId,
maintain: wgt::Maintain<crate::SubmissionIndex>,
) -> Result<bool, WaitIdleError> {
api_log!("Device::poll {maintain:?}");
poll_type: wgt::PollType<crate::SubmissionIndex>,
) -> Result<wgt::PollStatus, WaitIdleError> {
api_log!("Device::poll {poll_type:?}");
let device = self.hub.devices.get(device_id);
let DevicePoll {
closures,
queue_empty,
} = Self::poll_single_device(&device, maintain)?;
let (closures, result) = Self::poll_single_device(&device, poll_type);
closures.fire();
Ok(queue_empty)
result
}
fn poll_single_device(
device: &crate::device::Device,
maintain: wgt::Maintain<crate::SubmissionIndex>,
) -> Result<DevicePoll, WaitIdleError> {
poll_type: wgt::PollType<crate::SubmissionIndex>,
) -> (UserClosures, Result<wgt::PollStatus, WaitIdleError>) {
let snatch_guard = device.snatchable_lock.read();
let fence = device.fence.read();
let (closures, queue_empty) = device.maintain(fence, maintain, snatch_guard)?;
let maintain_result = device.maintain(fence, poll_type, snatch_guard);
// Some deferred destroys are scheduled in maintain so run this right after
// to avoid holding on to them until the next device poll.
device.deferred_resource_destruction();
Ok(DevicePoll {
closures,
queue_empty,
})
maintain_result
}
/// Poll all devices belonging to the specified backend.
@ -1974,7 +1980,7 @@ impl Global {
fn poll_all_devices_of_api(
&self,
force_wait: bool,
closures: &mut UserClosures,
closure_list: &mut UserClosures,
) -> Result<bool, WaitIdleError> {
profiling::scope!("poll_device");
@ -1984,20 +1990,19 @@ impl Global {
let device_guard = hub.devices.read();
for (_id, device) in device_guard.iter() {
let maintain = if force_wait {
wgt::Maintain::Wait
let poll_type = if force_wait {
wgt::PollType::Wait
} else {
wgt::Maintain::Poll
wgt::PollType::Poll
};
let DevicePoll {
closures: cbs,
queue_empty,
} = Self::poll_single_device(device, maintain)?;
let (closures, result) = Self::poll_single_device(device, poll_type);
all_queue_empty &= queue_empty;
let is_queue_empty = matches!(result, Ok(wgt::PollStatus::QueueEmpty));
closures.extend(cbs);
all_queue_empty &= is_queue_empty;
closure_list.extend(closures);
}
}
@ -2265,8 +2270,3 @@ impl Global {
)
}
}
struct DevicePoll {
closures: UserClosures,
queue_empty: bool,
}

View File

@ -109,6 +109,17 @@ pub enum WaitIdleError {
Device(#[from] DeviceError),
#[error("Tried to wait using a submission index ({0}) that has not been returned by a successful submission (last successful submission: {1})")]
WrongSubmissionIndex(SubmissionIndex, SubmissionIndex),
#[error("Timed out trying to wait for the given submission index.")]
Timeout,
}
impl WaitIdleError {
pub fn to_poll_error(&self) -> Option<wgt::PollError> {
match self {
WaitIdleError::Timeout => Some(wgt::PollError::Timeout),
_ => None,
}
}
}
/// Resource tracking for a device.

View File

@ -1301,17 +1301,22 @@ impl Queue {
// This will schedule destruction of all resources that are no longer needed
// by the user but used in the command stream, among other things.
let fence_guard = RwLockWriteGuard::downgrade(fence);
let (closures, _) =
match self
.device
.maintain(fence_guard, wgt::Maintain::Poll, snatch_guard)
{
Ok(closures) => closures,
Err(WaitIdleError::Device(err)) => {
break 'error Err(QueueSubmitError::Queue(err))
}
Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
};
let (closures, result) =
self.device
.maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
match result {
Ok(status) => {
debug_assert!(matches!(
status,
wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
));
}
Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
Err(WaitIdleError::WrongSubmissionIndex(..)) => {
unreachable!("Cannot get WrongSubmissionIndex from Poll")
}
Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
};
Ok(closures)
};

View File

@ -379,73 +379,133 @@ impl Device {
assert!(self.queue.set(Arc::downgrade(queue)).is_ok());
}
/// Check this device for completed commands.
/// Check the current status of the GPU and process any submissions that have
/// finished.
///
/// The `maintain` argument tells how the maintenance function should behave, either
/// blocking or just polling the current state of the gpu.
/// The `poll_type` argument tells if this function should wait for a particular
/// submission index to complete, or if it should just poll the current status.
///
/// Return a pair `(closures, queue_empty)`, where:
/// This will process _all_ completed submissions, even if the caller only asked
/// us to poll to a given submission index.
///
/// - `closures` is a list of actions to take: mapping buffers, notifying the user
/// Return a pair `(closures, result)`, where:
///
/// - `queue_empty` is a boolean indicating whether there are more queue
/// submissions still in flight. (We have to take the locks needed to
/// produce this information for other reasons, so we might as well just
/// return it to our callers.)
/// - `closures` is a list of callbacks that need to be invoked informing the user
/// about various things occurring. These happen and should be handled even if
/// this function returns an error, hence they are outside of the result.
///
/// - `result` is a `Result` describing the outcome of the poll or wait operation,
///   including whether there was a timeout or a validation error.
pub(crate) fn maintain<'this>(
&'this self,
fence: crate::lock::RwLockReadGuard<ManuallyDrop<Box<dyn hal::DynFence>>>,
maintain: wgt::Maintain<crate::SubmissionIndex>,
poll_type: wgt::PollType<crate::SubmissionIndex>,
snatch_guard: SnatchGuard,
) -> Result<(UserClosures, bool), WaitIdleError> {
) -> (UserClosures, Result<wgt::PollStatus, WaitIdleError>) {
profiling::scope!("Device::maintain");
// Determine which submission index `maintain` represents.
let submission_index = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
let mut user_closures = UserClosures::default();
// If a wait was requested, determine which submission index to wait for.
let wait_submission_index = match poll_type {
wgt::PollType::WaitForSubmissionIndex(submission_index) => {
let last_successful_submission_index = self
.last_successful_submission_index
.load(Ordering::Acquire);
if submission_index > last_successful_submission_index {
return Err(WaitIdleError::WrongSubmissionIndex(
let result = Err(WaitIdleError::WrongSubmissionIndex(
submission_index,
last_successful_submission_index,
));
return (user_closures, result);
}
submission_index
Some(submission_index)
}
wgt::Maintain::Wait => self
.last_successful_submission_index
.load(Ordering::Acquire),
wgt::Maintain::Poll => unsafe { self.raw().get_fence_value(fence.as_ref()) }
.map_err(|e| self.handle_hal_error(e))?,
wgt::PollType::Wait => Some(
self.last_successful_submission_index
.load(Ordering::Acquire),
),
wgt::PollType::Poll => None,
};
// If necessary, wait for that submission to complete.
if maintain.is_wait() {
log::trace!("Device::maintain: waiting for submission index {submission_index}");
unsafe {
self.raw()
.wait(fence.as_ref(), submission_index, CLEANUP_WAIT_MS)
}
.map_err(|e| self.handle_hal_error(e))?;
}
// Wait for the submission index if requested.
if let Some(target_submission_index) = wait_submission_index {
log::trace!("Device::maintain: waiting for submission index {target_submission_index}");
let (submission_closures, mapping_closures, queue_empty) =
if let Some(queue) = self.get_queue() {
queue.maintain(submission_index, &snatch_guard)
} else {
(SmallVec::new(), Vec::new(), true)
let wait_result = unsafe {
self.raw()
.wait(fence.as_ref(), target_submission_index, CLEANUP_WAIT_MS)
};
// This error match is only about `DeviceError`s. At this stage we do not care if
// the wait succeeded or not, and the `Ok(bool)` variant is ignored.
if let Err(e) = wait_result {
let hal_error: WaitIdleError = self.handle_hal_error(e).into();
return (user_closures, Err(hal_error));
}
}
// Get the currently finished submission index. This may be higher than the requested
// wait, or it may be less than the requested wait if the wait failed.
let fence_value_result = unsafe { self.raw().get_fence_value(fence.as_ref()) };
let current_finished_submission = match fence_value_result {
Ok(fence_value) => fence_value,
Err(e) => {
let hal_error: WaitIdleError = self.handle_hal_error(e).into();
return (user_closures, Err(hal_error));
}
};
// Maintain all finished submissions on the queue, updating the relevant user closures and collecting if the queue is empty.
//
// We don't use the result of the wait here, as we want to progress forward as far as possible
// and the wait could have been for submissions that finished long ago.
let mut queue_empty = false;
if let Some(queue) = self.get_queue() {
let queue_result = queue.maintain(current_finished_submission, &snatch_guard);
(
user_closures.submissions,
user_closures.mappings,
queue_empty,
) = queue_result
};
// Based on the queue empty status, and the current finished submission index, determine the result of the poll.
let result = if queue_empty {
if let Some(wait_submission_index) = wait_submission_index {
// Assert to ensure that if we received a queue empty status, the fence shows the correct value.
// This is defensive, as this should never be hit.
assert!(
current_finished_submission >= wait_submission_index,
"If the queue is empty, the current submission index ({}) should be at least the wait submission index ({})",
current_finished_submission,
wait_submission_index
);
}
Ok(wgt::PollStatus::QueueEmpty)
} else if let Some(wait_submission_index) = wait_submission_index {
// It is theoretically possible for this check to succeed even though the wait
// itself timed out, as submissions could have finished in the time between the
// timeout resolving, the thread getting scheduled again, and us checking the fence value.
if current_finished_submission >= wait_submission_index {
Ok(wgt::PollStatus::WaitSucceeded)
} else {
Err(WaitIdleError::Timeout)
}
} else {
Ok(wgt::PollStatus::Poll)
};
// Detect if we have been destroyed and now need to lose the device.
//
// If we are invalid (set at start of destroy) and our queue is empty,
// and we have a DeviceLostClosure, return the closure to be called by
// our caller. This will complete the steps for both destroy and for
// "lose the device".
let mut device_lost_invocations = SmallVec::new();
let mut should_release_gpu_resource = false;
if !self.is_valid() && queue_empty {
// We can release gpu resources associated with this device (but not
@ -455,11 +515,13 @@ impl Device {
// If we have a DeviceLostClosure, build an invocation with the
// reason DeviceLostReason::Destroyed and no message.
if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
device_lost_invocations.push(DeviceLostInvocation {
closure: device_lost_closure,
reason: DeviceLostReason::Destroyed,
message: String::new(),
});
user_closures
.device_lost_invocations
.push(DeviceLostInvocation {
closure: device_lost_closure,
reason: DeviceLostReason::Destroyed,
message: String::new(),
});
}
}
@ -471,12 +533,7 @@ impl Device {
self.release_gpu_resources();
}
let closures = UserClosures {
mappings: mapping_closures,
submissions: submission_closures,
device_lost_invocations,
};
Ok((closures, queue_empty))
(user_closures, result)
}
pub(crate) fn create_buffer(

View File

@ -62,6 +62,8 @@ pub enum ConfigureSurfaceError {
MissingDownlevelFlags(#[from] MissingDownlevelFlags),
#[error("`SurfaceOutput` must be dropped before a new `Surface` is made")]
PreviousOutputExists,
#[error("Failed to wait for GPU to come idle before reconfiguring the Surface")]
GpuWaitTimeout,
#[error("Both `Surface` width and height must be non-zero. Wait to recreate the `Surface` until the window has non-zero area.")]
ZeroArea,
#[error("`Surface` width and height must be within the maximum supported texture size. Requested was ({width}, {height}), maximum extent for either dimension is {max_texture_dimension_2d}.")]
@ -99,6 +101,7 @@ impl From<WaitIdleError> for ConfigureSurfaceError {
match e {
WaitIdleError::Device(d) => ConfigureSurfaceError::Device(d),
WaitIdleError::WrongSubmissionIndex(..) => unreachable!(),
WaitIdleError::Timeout => ConfigureSurfaceError::GpuWaitTimeout,
}
}
}

View File

@ -14,7 +14,7 @@ struct GLFence {
pub struct Fence {
last_completed: AtomicFenceValue,
pending: Vec<GLFence>,
fence_mode: wgt::GlFenceBehavior,
fence_behavior: wgt::GlFenceBehavior,
}
impl crate::DynFence for Fence {}
@ -29,7 +29,7 @@ impl Fence {
Self {
last_completed: AtomicFenceValue::new(0),
pending: Vec::new(),
fence_mode: options.short_circuit_fences,
fence_behavior: options.fence_behavior,
}
}
@ -38,7 +38,7 @@ impl Fence {
gl: &glow::Context,
value: crate::FenceValue,
) -> Result<(), crate::DeviceError> {
if self.fence_mode.is_auto_finish() {
if self.fence_behavior.is_auto_finish() {
*self.last_completed.get_mut() = value;
return Ok(());
}
@ -57,7 +57,7 @@ impl Fence {
pub fn get_latest(&self, gl: &glow::Context) -> crate::FenceValue {
let mut max_value = self.last_completed.load(Ordering::Acquire);
if self.fence_mode.is_auto_finish() {
if self.fence_behavior.is_auto_finish() {
return max_value;
}
@ -82,7 +82,7 @@ impl Fence {
}
pub fn maintain(&mut self, gl: &glow::Context) {
if self.fence_mode.is_auto_finish() {
if self.fence_behavior.is_auto_finish() {
return;
}
@ -105,7 +105,7 @@ impl Fence {
) -> Result<bool, crate::DeviceError> {
let last_completed = self.last_completed.load(Ordering::Acquire);
if self.fence_mode.is_auto_finish() {
if self.fence_behavior.is_auto_finish() {
return Ok(last_completed >= wait_value);
}
@ -154,7 +154,7 @@ impl Fence {
}
pub fn destroy(self, gl: &glow::Context) {
if self.fence_mode.is_auto_finish() {
if self.fence_behavior.is_auto_finish() {
return;
}

View File

@ -37,7 +37,7 @@ alloc_instead_of_core = "warn"
[features]
default = ["std"]
std = ["js-sys/std", "web-sys/std"]
std = ["js-sys/std", "web-sys/std", "thiserror/std"]
strict_asserts = []
fragile-send-sync-non-atomic-wasm = []
serde = ["dep:serde"]
@ -47,6 +47,7 @@ counters = []
[dependencies]
bitflags = { workspace = true, features = ["serde"] }
log.workspace = true
thiserror = { workspace = true, optional = true }
serde = { workspace = true, default-features = false, features = [
"alloc",
"derive",

View File

@ -230,7 +230,7 @@ pub struct GlBackendOptions {
/// Which OpenGL ES 3 minor version to request, if using OpenGL ES.
pub gles_minor_version: Gles3MinorVersion,
/// Behavior of OpenGL fences. Affects how `on_completed_work_done` and `device.poll` behave.
pub short_circuit_fences: GlFenceBehavior,
pub fence_behavior: GlFenceBehavior,
}
impl GlBackendOptions {
@ -242,7 +242,7 @@ impl GlBackendOptions {
let gles_minor_version = Gles3MinorVersion::from_env().unwrap_or_default();
Self {
gles_minor_version,
short_circuit_fences: GlFenceBehavior::Normal,
fence_behavior: GlFenceBehavior::Normal,
}
}
@ -252,10 +252,10 @@ impl GlBackendOptions {
#[must_use]
pub fn with_env(self) -> Self {
let gles_minor_version = self.gles_minor_version.with_env();
let short_circuit_fences = self.short_circuit_fences.with_env();
let short_circuit_fences = self.fence_behavior.with_env();
Self {
gles_minor_version,
short_circuit_fences,
fence_behavior: short_circuit_fences,
}
}
}
@ -472,7 +472,7 @@ pub enum GlFenceBehavior {
///
/// This solves a very specific issue that arose due to a bug in wgpu-core that made
/// many WebGL programs work when they "shouldn't" have. If you have code that is trying
/// to call `device.poll(wgpu::Maintain::Wait)` on WebGL, you need to enable this option
/// to call `device.poll(wgpu::PollType::Wait)` on WebGL, you need to enable this option
/// for the "Wait" to behave how you would expect.
///
/// Previously all `poll(Wait)` acted like the OpenGL fences were signalled even if they weren't.

View File

@ -3986,7 +3986,7 @@ impl Default for ColorWrites {
/// Passed to `Device::poll` to control how and if it should block.
#[derive(Clone, Debug)]
pub enum Maintain<T> {
pub enum PollType<T> {
/// On wgpu-core based backends, block until the given submission has
/// completed execution, and any callbacks have been invoked.
///
@ -3999,7 +3999,7 @@ pub enum Maintain<T> {
Poll,
}
impl<T> Maintain<T> {
impl<T> PollType<T> {
/// Construct a [`Self::Wait`] variant
#[must_use]
pub fn wait() -> Self {
@ -4018,7 +4018,7 @@ impl<T> Maintain<T> {
Self::WaitForSubmissionIndex(submission_index)
}
/// This maintain represents a wait of some kind.
/// This `PollType` represents a wait of some kind.
#[must_use]
pub fn is_wait(&self) -> bool {
match *self {
@ -4029,39 +4029,57 @@ impl<T> Maintain<T> {
/// Map on the wait index type.
#[must_use]
pub fn map_index<U, F>(self, func: F) -> Maintain<U>
pub fn map_index<U, F>(self, func: F) -> PollType<U>
where
F: FnOnce(T) -> U,
{
match self {
Self::WaitForSubmissionIndex(i) => Maintain::WaitForSubmissionIndex(func(i)),
Self::Wait => Maintain::Wait,
Self::Poll => Maintain::Poll,
Self::WaitForSubmissionIndex(i) => PollType::WaitForSubmissionIndex(func(i)),
Self::Wait => PollType::Wait,
Self::Poll => PollType::Poll,
}
}
}
/// Result of a maintain operation.
pub enum MaintainResult {
/// There are no active submissions in flight as of the beginning of the poll call.
/// Other submissions may have been queued on other threads at the same time.
///
/// This implies that the given poll is complete.
SubmissionQueueEmpty,
/// More information coming soon <https://github.com/gfx-rs/wgpu/pull/5012>
Ok,
/// Error states after a device poll
#[derive(Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum PollError {
/// The requested Wait timed out before the submission was completed.
#[cfg_attr(
feature = "std",
error("The requested Wait timed out before the submission was completed.")
)]
Timeout,
}
impl MaintainResult {
/// Returns true if the result is [`Self::SubmissionQueueEmpty`].
/// Status of device poll operation.
#[derive(Debug, PartialEq, Eq)]
pub enum PollStatus {
/// There are no active submissions in flight as of the beginning of the poll call.
/// Other submissions may have been queued on other threads during the call.
///
/// This implies that the given Wait was satisfied before the timeout.
QueueEmpty,
/// The requested Wait was satisfied before the timeout.
WaitSucceeded,
/// This was a poll.
Poll,
}
impl PollStatus {
/// Returns true if the result is [`Self::QueueEmpty`].
#[must_use]
pub fn is_queue_empty(&self) -> bool {
matches!(self, Self::SubmissionQueueEmpty)
matches!(self, Self::QueueEmpty)
}
/// Panics if the [`MaintainResult`] is not Ok.
pub fn panic_on_timeout(self) {
let _ = self;
/// Returns true if the result is either [`Self::WaitSucceeded`] or [`Self::QueueEmpty`].
#[must_use]
pub fn wait_finished(&self) -> bool {
matches!(self, Self::WaitSucceeded | Self::QueueEmpty)
}
}

View File

@ -33,7 +33,7 @@ pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);
impl Device {
/// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed.
/// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
///
/// Return `true` if the queue is empty, or `false` if there are more queue
/// submissions still in flight. (Note that, unless access to the [`Queue`] is
@ -42,8 +42,8 @@ impl Device {
/// other threads could submit new work at any time.)
///
/// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
pub fn poll(&self, maintain: Maintain) -> MaintainResult {
self.inner.poll(maintain)
pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
self.inner.poll(poll_type)
}
/// The features which can be used on this device.

View File

@ -39,11 +39,11 @@ pub struct SubmissionIndex {
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);
pub use wgt::Maintain as MaintainBase;
pub use wgt::PollType as MaintainBase;
/// Passed to [`Device::poll`] to control how and if it should block.
pub type Maintain = wgt::Maintain<SubmissionIndex>;
pub type PollType = wgt::PollType<SubmissionIndex>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Maintain: Send, Sync);
static_assertions::assert_impl_all!(PollType: Send, Sync);
/// A write-only view into a staging buffer.
///

View File

@ -75,6 +75,13 @@ impl Surface<'_> {
/// Initializes [`Surface`] for presentation.
///
/// If the surface is already configured, this will wait for the GPU to come idle
/// before recreating the swapchain to prevent race conditions.
///
/// # Validation Errors
/// - Submissions that happen _during_ the configure may cause the
/// internal wait-for-idle to fail, raising a validation error.
///
/// # Panics
///
/// - A old [`SurfaceTexture`] is still alive referencing an old surface.

View File

@ -2414,9 +2414,9 @@ impl dispatch::DeviceInterface for WebDevice {
// No capturing api in webgpu
}
fn poll(&self, _maintain: crate::Maintain) -> crate::MaintainResult {
fn poll(&self, _poll_type: crate::PollType) -> Result<crate::PollStatus, crate::PollError> {
// Device is polled automatically
crate::MaintainResult::SubmissionQueueEmpty
Ok(crate::PollStatus::QueueEmpty)
}
fn get_internal_counters(&self) -> crate::InternalCounters {

View File

@ -1645,14 +1645,17 @@ impl dispatch::DeviceInterface for CoreDevice {
self.context.0.device_stop_capture(self.id);
}
fn poll(&self, maintain: crate::Maintain) -> crate::MaintainResult {
let maintain_inner = maintain.map_index(|i| i.index);
fn poll(&self, poll_type: crate::PollType) -> Result<crate::PollStatus, crate::PollError> {
let maintain_inner = poll_type.map_index(|i| i.index);
match self.context.0.device_poll(self.id, maintain_inner) {
Ok(done) => match done {
true => wgt::MaintainResult::SubmissionQueueEmpty,
false => wgt::MaintainResult::Ok,
},
Err(err) => self.context.handle_error_fatal(err, "Device::poll"),
Ok(status) => Ok(status),
Err(err) => {
if let Some(poll_error) = err.to_poll_error() {
return Err(poll_error);
}
self.context.handle_error_fatal(err, "Device::poll")
}
}
}

View File

@ -192,7 +192,7 @@ pub trait DeviceInterface: CommonTraits {
fn start_capture(&self);
fn stop_capture(&self);
fn poll(&self, maintain: crate::Maintain) -> crate::MaintainResult;
fn poll(&self, poll_type: crate::PollType) -> Result<crate::PollStatus, crate::PollError>;
fn get_internal_counters(&self) -> crate::InternalCounters;
fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport>;

View File

@ -65,18 +65,19 @@ pub use wgt::{
CompositeAlphaMode, CopyExternalImageDestInfo, CoreCounters, DepthBiasState, DepthStencilState,
DeviceLostReason, DeviceType, DownlevelCapabilities, DownlevelFlags, DownlevelLimits,
Dx12BackendOptions, Dx12Compiler, DynamicOffset, Extent3d, Face, Features, FeaturesWGPU,
FeaturesWebGPU, FilterMode, FrontFace, GlBackendOptions, Gles3MinorVersion, HalCounters,
ImageSubresourceRange, IndexFormat, InstanceDescriptor, InstanceFlags, InternalCounters,
Limits, MaintainResult, MemoryHints, MultisampleState, NoopBackendOptions, Origin2d, Origin3d,
PipelineStatisticsTypes, PolygonMode, PowerPreference, PredefinedColorSpace, PresentMode,
PresentationTimestamp, PrimitiveState, PrimitiveTopology, PushConstantRange, QueryType,
RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor, ShaderLocation, ShaderModel,
ShaderRuntimeChecks, ShaderStages, StencilFaceState, StencilOperation, StencilState,
StorageTextureAccess, SurfaceCapabilities, SurfaceStatus, TexelCopyBufferLayout, TextureAspect,
TextureDimension, TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures,
TextureSampleType, TextureTransition, TextureUsages, TextureUses, TextureViewDimension,
VertexAttribute, VertexFormat, VertexStepMode, WasmNotSend, WasmNotSendSync, WasmNotSync,
COPY_BUFFER_ALIGNMENT, COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT,
FeaturesWebGPU, FilterMode, FrontFace, GlBackendOptions, GlFenceBehavior, Gles3MinorVersion,
HalCounters, ImageSubresourceRange, IndexFormat, InstanceDescriptor, InstanceFlags,
InternalCounters, Limits, MemoryHints, MultisampleState, NoopBackendOptions, Origin2d,
Origin3d, PipelineStatisticsTypes, PollError, PollStatus, PolygonMode, PowerPreference,
PredefinedColorSpace, PresentMode, PresentationTimestamp, PrimitiveState, PrimitiveTopology,
PushConstantRange, QueryType, RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor,
ShaderLocation, ShaderModel, ShaderRuntimeChecks, ShaderStages, StencilFaceState,
StencilOperation, StencilState, StorageTextureAccess, SurfaceCapabilities, SurfaceStatus,
TexelCopyBufferLayout, TextureAspect, TextureDimension, TextureFormat,
TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType, TextureTransition,
TextureUsages, TextureUses, TextureViewDimension, VertexAttribute, VertexFormat,
VertexStepMode, WasmNotSend, WasmNotSendSync, WasmNotSync, COPY_BUFFER_ALIGNMENT,
COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT,
QUERY_RESOLVE_BUFFER_ALIGNMENT, QUERY_SET_MAX_QUERIES, QUERY_SIZE, VERTEX_STRIDE_ALIGNMENT,
};
#[expect(deprecated)]