GPU Test Framework (#3873)

Connor Fitzgerald 2023-10-19 12:06:42 -04:00 committed by GitHub
parent 75989192a9
commit 543f921639
93 changed files with 3508 additions and 2959 deletions


@ -6,7 +6,8 @@ skip-tree = [
{ name = "wgpu-info" },
]
skip = [
{ name = "wgpu" }
{ name = "wgpu" },
{ name = "fastrand" }
]
wildcards = "deny"


@ -270,17 +270,14 @@ jobs:
# Windows
- name: Windows x86_64
os: windows-2022
backends: dx12
# Mac
- name: Mac aarch64
os: [self-hosted, macOS]
backends: vulkan metal
# Linux
- name: Linux x86_64
os: ubuntu-22.04
backends: vulkan gl
name: Test ${{ matrix.name }}
runs-on: ${{ matrix.os }}
@ -294,16 +291,10 @@ jobs:
with:
tool: cargo-nextest,cargo-llvm-cov
- name: install swiftshader
if: matrix.os == 'ubuntu-22.04'
shell: bash
- name: Install Repo MSRV toolchain
run: |
set -e
mkdir -p swiftshader
curl -LsSf https://github.com/gfx-rs/ci-build/releases/latest/download/swiftshader-linux-x86_64.tar.xz | tar -xf - -C swiftshader
echo "VK_ICD_FILENAMES=$PWD/swiftshader/vk_swiftshader_icd.json" >> $GITHUB_ENV
rustup toolchain install ${{ env.REPO_MSRV }} --no-self-update --profile=minimal
cargo -V
- name: install llvmpipe, vulkan sdk
if: matrix.os == 'ubuntu-22.04'
@ -317,8 +308,10 @@ jobs:
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
sudo add-apt-repository ppa:kisak/kisak-mesa
sudo apt-get update
sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev vulkan-sdk
sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev vulkan-sdk mesa-vulkan-drivers
- name: disable debug
shell: bash
@ -334,22 +327,12 @@ jobs:
with:
key: test-${{ matrix.os }}-${{ env.CACHE_SUFFIX }}
- name: run wgpu-info
shell: bash
run: |
set -e
cargo llvm-cov --no-cfg-coverage run --bin wgpu-info --no-report --features vulkan-portability
- name: run tests
shell: bash
run: |
set -e
for backend in ${{ matrix.backends }}; do
echo "======= NATIVE TESTS $backend ======";
WGPU_BACKEND=$backend cargo llvm-cov --no-cfg-coverage nextest --no-fail-fast --no-report --features vulkan-portability
done
cargo xtask test --llvm-cov
- uses: actions/upload-artifact@v3
if: always() # We want artifacts even if the tests fail.
@ -421,6 +404,7 @@ jobs:
- name: run rustfmt
run: |
cargo fmt -- --check
cargo fmt --manifest-path xtask/Cargo.toml -- --check
check-cts-runner:
name: Clippy cts_runner

.gitignore

@ -28,3 +28,6 @@ cts/
# Readme says to put angle in working directory
*.dll
# Cached GPU config
.gpuconfig

Cargo.lock (diff suppressed because it is too large)


@ -9,6 +9,7 @@ members = [
"wgpu-core",
"wgpu-hal",
"wgpu-info",
"wgpu-macros",
"wgpu-types",
"tests",
]
@ -20,6 +21,7 @@ default-members = [
"wgpu-core",
"wgpu-hal",
"wgpu-info",
"wgpu-macros",
"wgpu-types",
"tests"
]
@ -64,21 +66,27 @@ bytemuck = { version = "1.14", features = ["derive"] }
cfg_aliases = "0.1"
cfg-if = "1"
codespan-reporting = "0.11"
ctor = "0.2"
ddsfile = "0.5"
env_logger = "0.10"
flume = "0.11"
futures-lite = "1"
futures-intrusive = "0.5"
rustc-hash = "1.1.0"
glam = "0.24.2"
heck = "0.4.0"
image = { version = "0.24", default-features = false, features = ["png"] }
# libloading 0.8 switches from `winapi` to `windows-sys`; permit either
libloading = ">=0.7, <0.9"
libc = "0.2"
libtest-mimic = "0.6"
log = "0.4"
nanorand = { version = "0.7", default-features = false, features = ["wyrand"] }
nv-flip = "0.1"
num-traits = { version = "0.2" }
noise = "0.8"
obj = "0.10"
once_cell = "1"
# parking_lot 0.12 switches from `winapi` to `windows`; permit either
parking_lot = ">=0.11,<0.13"
pico-args = { version = "0.5.0", features = ["eq-separator", "short-space-opt", "combined-flags"] }
@ -96,10 +104,17 @@ thiserror = "1"
wgpu = { version = "0.17.0", path = "./wgpu" }
wgpu-core = { version = "0.17.0", path = "./wgpu-core" }
wgpu-example = { version = "0.17.0", path = "./examples/common" }
wgpu-macros = { version = "0.17.0", path = "./wgpu-macros" }
wgpu-test = { version = "0.17", path = "./tests"}
wgpu-types = { version = "0.17.0", path = "./wgpu-types" }
winit = { version = "0.28.7", features = [ "android-native-activity" ] }
# Metal dependencies
block = "0.1"
metal = "0.25.0"
objc = "0.2.5"
core-graphics-types = "0.1"
# Vulkan dependencies
ash = "0.37.3"
gpu-alloc = "0.6"


@ -160,7 +160,7 @@ To install it, run `cargo install cargo-nextest`.
To run the test suite:
```
cargo nextest run --no-fail-fast
cargo xtask test
```
To run the test suite on WebGL (currently incomplete):


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "boids"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -326,16 +326,16 @@ impl wgpu_example::framework::Example for Example {
}
/// run example
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("boids");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn boids() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "boids",
// Generated on 1080ti on Vk/Windows
image_path: "examples/boids/screenshot.png",
width: 1024,
@ -347,5 +347,8 @@ fn boids() {
// Lots of validation errors, maybe related to https://github.com/gfx-rs/wgpu/issues/3160
.expect_fail(wgpu_test::FailureCase::molten_vk()),
comparisons: &[wgpu_test::ComparisonType::Mean(0.005)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "bunnymark"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -353,16 +353,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("bunnymark");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn bunnymark() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "bunnymark",
image_path: "/examples/bunnymark/screenshot.png",
width: 1024,
height: 768,
@ -376,5 +376,8 @@ fn bunnymark() {
threshold: 0.05,
},
],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -7,6 +7,8 @@ use std::time::Instant;
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use wgpu::{WasmNotSend, WasmNotSync};
use wgpu_test::GpuTestConfiguration;
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
@ -493,7 +495,9 @@ pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
#[derive(Clone)]
pub struct ExampleTestParams<E> {
pub name: &'static str,
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
@ -502,21 +506,21 @@ pub struct FrameworkRefTest {
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
pub _phantom: std::marker::PhantomData<E>,
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
impl<E: Example + WasmNotSend + WasmNotSync> From<ExampleTestParams<E>> for GpuTestConfiguration {
fn from(params: ExampleTestParams<E>) -> Self {
GpuTestConfiguration::new()
.name(params.name)
.parameters({
assert_eq!(params.width % 64, 0, "width needs to be aligned 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
params.base_test_parameters.clone().features(features)
})
.run_async(move |ctx| async move {
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
@ -556,6 +560,8 @@ pub fn test<E: Example>(mut params: FrameworkRefTest) {
&ctx.queue,
);
{
let spawner = Spawner::new();
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
// Handle specific case for bunnymark
@ -578,6 +584,7 @@ pub fn test<E: Example>(mut params: FrameworkRefTest) {
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
}
}
}
let mut cmd_buf = ctx
.device
@ -619,12 +626,8 @@ pub fn test<E: Example>(mut params: FrameworkRefTest) {
params.height,
&bytes,
params.comparisons,
);
},
);
)
.await;
})
}
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {}


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "conservative-raster"
path = "src/main.rs"
harness = false
[dependencies]
wasm-bindgen-test.workspace = true


@ -312,21 +312,24 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("conservative-raster");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn conservative_raster() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "conservative-raster",
image_path: "/examples/conservative-raster/screenshot.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::default(),
base_test_parameters: wgpu_test::TestParameters::default(),
comparisons: &[wgpu_test::ComparisonType::Mean(0.0)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "cube"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -405,16 +405,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("cube");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn cube() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "cube",
// Generated on 1080ti on Vk/Windows
image_path: "/examples/cube/screenshot.png",
width: 1024,
@ -424,13 +424,14 @@ fn cube() {
comparisons: &[
wgpu_test::ComparisonType::Mean(0.04), // Bounded by Intel 630 on Vk/Windows
],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn cube_lines() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_LINES: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "cube-lines",
// Generated on 1080ti on Vk/Windows
image_path: "/examples/cube/screenshot-lines.png",
width: 1024,
@ -445,5 +446,8 @@ fn cube_lines() {
threshold: 0.36,
}, // Bounded by 1080ti on DX12
],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,11 +9,12 @@ publish = false
[[bin]]
name = "hello-compute"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true
env_logger.workspace = true
futures-intrusive.workspace = true
flume.workspace = true
pollster.workspace = true
wgpu.workspace = true
winit.workspace = true
@ -27,4 +28,3 @@ wasm-bindgen-futures.workspace = true
[dev-dependencies]
wasm-bindgen-test.workspace = true
wgpu-test.workspace = true


@ -4,6 +4,7 @@ use wgpu::util::DeviceExt;
// Indicates a u32 overflow in an intermediate Collatz value
const OVERFLOW: u32 = 0xffffffff;
#[cfg_attr(test, allow(dead_code))]
async fn run() {
let numbers = if std::env::args().len() <= 1 {
let default = vec![1, 2, 3, 4];
@ -31,6 +32,7 @@ async fn run() {
log::info!("Steps: [{}]", disp_steps.join(", "));
}
#[cfg_attr(test, allow(dead_code))]
async fn execute_gpu(numbers: &[u32]) -> Option<Vec<u32>> {
// Instantiates instance of WebGPU
let instance = wgpu::Instance::default();
@ -54,12 +56,6 @@ async fn execute_gpu(numbers: &[u32]) -> Option<Vec<u32>> {
.await
.unwrap();
let info = adapter.get_info();
// skip this on LavaPipe temporarily
if info.vendor == 0x10005 {
return None;
}
execute_gpu_inner(&device, &queue, numbers).await
}
@ -150,7 +146,7 @@ async fn execute_gpu_inner(
// Note that we're not calling `.await` here.
let buffer_slice = staging_buffer.slice(..);
// Sets the buffer up for mapping, sending over the result of the mapping back to us when it is finished.
let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());
// Poll the device in a blocking manner so that our future resolves.
@ -159,7 +155,7 @@ async fn execute_gpu_inner(
device.poll(wgpu::Maintain::Wait);
// Awaits until `buffer_future` can be read from
if let Some(Ok(())) = receiver.receive().await {
if let Ok(Ok(())) = receiver.recv_async().await {
// Gets contents of buffer
let data = buffer_slice.get_mapped_range();
// Since contents are got in bytes, this converts these bytes back to u32
@ -181,6 +177,7 @@ async fn execute_gpu_inner(
}
}
#[cfg(not(test))]
fn main() {
#[cfg(not(target_arch = "wasm32"))]
{
@ -195,5 +192,8 @@ fn main() {
}
}
#[cfg(all(test, not(target_arch = "wasm32")))]
#[cfg(test)]
mod tests;
#[cfg(test)]
wgpu_test::gpu_test_main!();
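For reference, the flume-based readback pattern that replaces `futures_intrusive` above can be factored into a small helper. This is only a sketch under the same assumptions as the example (a mappable staging buffer and a blocking `device.poll`), not code from this commit:

```
/// Map a staging buffer for reading and wait for the result over a bounded
/// flume channel. Sketch only; mirrors the pattern used in the example above.
async fn read_staging_buffer(device: &wgpu::Device, buffer: &wgpu::Buffer) -> Vec<u8> {
    let slice = buffer.slice(..);
    let (sender, receiver) = flume::bounded(1);
    slice.map_async(wgpu::MapMode::Read, move |result| {
        sender.send(result).unwrap()
    });
    // Poll the device in a blocking manner so the map request completes.
    device.poll(wgpu::Maintain::Wait);
    receiver
        .recv_async()
        .await
        .expect("sender dropped")
        .expect("failed to map buffer");
    // Copy the mapped bytes out before unmapping the buffer.
    let data = slice.get_mapped_range().to_vec();
    buffer.unmap();
    data
}
```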


@ -1,94 +1,70 @@
use std::sync::Arc;
use super::*;
use wgpu_test::{initialize_test, FailureCase, TestParameters};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn test_compute_1() {
initialize_test(
#[gpu_test]
static COMPUTE_1: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.skip(FailureCase::adapter("V3D")),
|ctx| {
)
.run_async(|ctx| {
let input = &[1, 2, 3, 4];
pollster::block_on(assert_execute_gpu(
&ctx.device,
&ctx.queue,
input,
&[0, 1, 7, 2],
));
},
);
}
async move { assert_execute_gpu(&ctx.device, &ctx.queue, input, &[0, 1, 7, 2]).await }
});
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn test_compute_2() {
initialize_test(
#[gpu_test]
static COMPUTE_2: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.skip(FailureCase::adapter("V3D")),
|ctx| {
)
.run_async(|ctx| {
let input = &[5, 23, 10, 9];
pollster::block_on(assert_execute_gpu(
&ctx.device,
&ctx.queue,
input,
&[5, 15, 6, 19],
));
},
);
}
async move { assert_execute_gpu(&ctx.device, &ctx.queue, input, &[5, 15, 6, 19]).await }
});
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn test_compute_overflow() {
initialize_test(
#[gpu_test]
static COMPUTE_OVERFLOW: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.skip(FailureCase::adapter("V3D")),
|ctx| {
)
.run_async(|ctx| {
let input = &[77031, 837799, 8400511, 63728127];
pollster::block_on(assert_execute_gpu(
async move {
assert_execute_gpu(
&ctx.device,
&ctx.queue,
input,
&[350, 524, OVERFLOW, OVERFLOW],
));
},
);
)
.await
}
});
#[test]
// Wasm doesn't support threads
fn test_multithreaded_compute() {
initialize_test(
#[cfg(not(target_arch = "wasm32"))]
#[gpu_test]
static MULTITHREADED_COMPUTE: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.skip(FailureCase::adapter("V3D"))
// https://github.com/gfx-rs/wgpu/issues/3944
.skip(FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"swiftshader",
))
// https://github.com/gfx-rs/wgpu/issues/3250
.skip(FailureCase::backend_adapter(wgpu::Backends::GL, "llvmpipe"))
.skip(FailureCase::molten_vk()),
|ctx| {
use std::{sync::mpsc, thread, time::Duration};
.skip(FailureCase::adapter("V3D")),
)
.run_sync(|ctx| {
use std::{sync::mpsc, sync::Arc, thread, time::Duration};
let ctx = Arc::new(ctx);
@ -114,9 +90,7 @@ fn test_multithreaded_compute() {
rx.recv_timeout(Duration::from_secs(10))
.expect("A thread never completed.");
}
},
);
}
});
async fn assert_execute_gpu(
device: &wgpu::Device,


@ -13,7 +13,7 @@ path = "src/main.rs"
[dependencies]
bytemuck.workspace = true
env_logger.workspace = true
futures-intrusive.workspace = true
flume.workspace = true
log.workspace = true
pollster.workspace = true
wgpu.workspace = true


@ -179,10 +179,10 @@ async fn get_data<T: bytemuck::Pod>(
);
queue.submit(Some(command_encoder.finish()));
let buffer_slice = staging_buffer.slice(..);
let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
let (sender, receiver) = flume::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
device.poll(wgpu::Maintain::Wait);
receiver.receive().await.unwrap().unwrap();
receiver.recv_async().await.unwrap().unwrap();
output.copy_from_slice(bytemuck::cast_slice(&buffer_slice.get_mapped_range()[..]));
staging_buffer.unmap();
}


@ -1,23 +1,18 @@
use super::*;
use pollster::FutureExt;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn hello_synchronization_test_results() {
initialize_test(
#[gpu_test]
static SYNC: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
// Taken from hello-compute tests.
TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults()),
|ctx| {
)
.run_async(|ctx| async move {
let ExecuteResults {
patient_workgroup_results,
hasty_workgroup_results: _,
} = execute(&ctx.device, &ctx.queue, ARR_SIZE).block_on();
} = execute(&ctx.device, &ctx.queue, ARR_SIZE).await;
assert_eq!(patient_workgroup_results, [16_u32; ARR_SIZE]);
},
);
}
});


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "mipmap"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -506,16 +506,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("mipmap");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn mipmap() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "mipmap",
image_path: "/examples/mipmap/screenshot.png",
width: 1024,
height: 768,
@ -523,19 +523,23 @@ fn mipmap() {
base_test_parameters: wgpu_test::TestParameters::default()
.expect_fail(wgpu_test::FailureCase::backend(wgpu::Backends::GL)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn mipmap_query() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_QUERY: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "mipmap-query",
image_path: "/examples/mipmap/screenshot-query.png",
width: 1024,
height: 768,
optional_features: QUERY_FEATURES,
base_test_parameters: wgpu_test::TestParameters::default()
.expect_fail(wgpu_test::FailureCase::backend(wgpu::Backends::GL)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}
comparisons: &[wgpu_test::ComparisonType::Mean(0.025)],
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "msaa-line"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -313,16 +313,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("msaa-line");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn msaa_line() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "msaa-line",
image_path: "/examples/msaa-line/screenshot.png",
width: 1024,
height: 768,
@ -343,5 +343,8 @@ fn msaa_line() {
threshold: 0.29,
},
],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "shadow"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -1,4 +1,4 @@
use std::{borrow::Cow, f32::consts, iter, mem, ops::Range, rc::Rc};
use std::{borrow::Cow, f32::consts, iter, mem, ops::Range, sync::Arc};
use bytemuck::{Pod, Zeroable};
use wgpu::util::{align_to, DeviceExt};
@ -80,8 +80,8 @@ struct Entity {
mx_world: glam::Mat4,
rotation_speed: f32,
color: wgpu::Color,
vertex_buf: Rc<wgpu::Buffer>,
index_buf: Rc<wgpu::Buffer>,
vertex_buf: Arc<wgpu::Buffer>,
index_buf: Arc<wgpu::Buffer>,
index_format: wgpu::IndexFormat,
index_count: usize,
uniform_offset: wgpu::DynamicOffset,
@ -221,7 +221,7 @@ impl wgpu_example::framework::Example for Example {
// Create the vertex and index buffers
let vertex_size = mem::size_of::<Vertex>();
let (cube_vertex_data, cube_index_data) = create_cube();
let cube_vertex_buf = Rc::new(device.create_buffer_init(
let cube_vertex_buf = Arc::new(device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Cubes Vertex Buffer"),
contents: bytemuck::cast_slice(&cube_vertex_data),
@ -229,7 +229,7 @@ impl wgpu_example::framework::Example for Example {
},
));
let cube_index_buf = Rc::new(device.create_buffer_init(
let cube_index_buf = Arc::new(device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Cubes Index Buffer"),
contents: bytemuck::cast_slice(&cube_index_data),
@ -306,8 +306,8 @@ impl wgpu_example::framework::Example for Example {
mx_world: glam::Mat4::IDENTITY,
rotation_speed: 0.0,
color: wgpu::Color::WHITE,
vertex_buf: Rc::new(plane_vertex_buf),
index_buf: Rc::new(plane_index_buf),
vertex_buf: Arc::new(plane_vertex_buf),
index_buf: Arc::new(plane_index_buf),
index_format,
index_count: plane_index_data.len(),
uniform_offset: 0,
@ -327,8 +327,8 @@ impl wgpu_example::framework::Example for Example {
mx_world,
rotation_speed: cube.rotation,
color: wgpu::Color::GREEN,
vertex_buf: Rc::clone(&cube_vertex_buf),
index_buf: Rc::clone(&cube_index_buf),
vertex_buf: Arc::clone(&cube_vertex_buf),
index_buf: Arc::clone(&cube_index_buf),
index_format,
index_count: cube_index_data.len(),
uniform_offset: ((i + 1) * uniform_alignment as usize) as _,
@ -840,16 +840,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("shadow");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn shadow() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "shadow",
image_path: "/examples/shadow/screenshot.png",
width: 1024,
height: 768,
@ -860,12 +860,10 @@ fn shadow() {
.expect_fail(wgpu_test::FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"V3D",
))
// llvmpipe versions in CI are flaky: https://github.com/gfx-rs/wgpu/issues/2594
.skip(wgpu_test::FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"llvmpipe",
)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "skybox"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -52,7 +52,7 @@ impl Camera {
}
}
pub struct Skybox {
pub struct Example {
camera: Camera,
sky_pipeline: wgpu::RenderPipeline,
entity_pipeline: wgpu::RenderPipeline,
@ -63,7 +63,7 @@ pub struct Skybox {
staging_belt: wgpu::util::StagingBelt,
}
impl Skybox {
impl Example {
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24Plus;
fn create_depth_texture(
@ -89,7 +89,7 @@ impl Skybox {
}
}
impl wgpu_example::framework::Example for Skybox {
impl wgpu_example::framework::Example for Example {
fn optional_features() -> wgpu::Features {
wgpu::Features::TEXTURE_COMPRESSION_ASTC
| wgpu::Features::TEXTURE_COMPRESSION_ETC2
@ -356,7 +356,7 @@ impl wgpu_example::framework::Example for Skybox {
let depth_view = Self::create_depth_texture(config, device);
Skybox {
Example {
camera,
sky_pipeline,
entity_pipeline,
@ -461,16 +461,16 @@ impl wgpu_example::framework::Example for Skybox {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Skybox>("skybox");
wgpu_example::framework::run::<Example>("skybox");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn skybox() {
wgpu_example::framework::test::<Skybox>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "skybox",
image_path: "/examples/skybox/screenshot.png",
width: 1024,
height: 768,
@ -479,44 +479,50 @@ fn skybox() {
wgpu_test::FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"),
),
comparisons: &[wgpu_test::ComparisonType::Mean(0.015)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn skybox_bc1() {
wgpu_example::framework::test::<Skybox>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_BCN: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "skybox-bc1",
image_path: "/examples/skybox/screenshot-bc1.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::TEXTURE_COMPRESSION_BC,
base_test_parameters: wgpu_test::TestParameters::default(), // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn skybox_etc2() {
wgpu_example::framework::test::<Skybox>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_ETC2: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "skybox-etc2",
image_path: "/examples/skybox/screenshot-etc2.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::TEXTURE_COMPRESSION_ETC2,
base_test_parameters: wgpu_test::TestParameters::default(), // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
comparisons: &[wgpu_test::ComparisonType::Mean(0.015)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn skybox_astc() {
wgpu_example::framework::test::<Skybox>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_ASTC: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "skybox-astc",
image_path: "/examples/skybox/screenshot-astc.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::TEXTURE_COMPRESSION_ASTC,
base_test_parameters: wgpu_test::TestParameters::default(), // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
comparisons: &[wgpu_test::ComparisonType::Mean(0.016)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "stencil-triangles"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -15,7 +15,7 @@ fn vertex(x: f32, y: f32) -> Vertex {
}
}
struct Triangles {
struct Example {
outer_vertex_buffer: wgpu::Buffer,
mask_vertex_buffer: wgpu::Buffer,
outer_pipeline: wgpu::RenderPipeline,
@ -23,7 +23,7 @@ struct Triangles {
stencil_buffer: wgpu::Texture,
}
impl wgpu_example::framework::Example for Triangles {
impl wgpu_example::framework::Example for Example {
fn init(
config: &wgpu::SurfaceConfiguration,
_adapter: &wgpu::Adapter,
@ -155,7 +155,7 @@ impl wgpu_example::framework::Example for Triangles {
});
// Done
Triangles {
Example {
outer_vertex_buffer,
mask_vertex_buffer,
outer_pipeline,
@ -230,21 +230,24 @@ impl wgpu_example::framework::Example for Triangles {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Triangles>("stencil-triangles");
wgpu_example::framework::run::<Example>("stencil-triangles");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn stencil_triangles() {
wgpu_example::framework::test::<Triangles>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "stencil-triangles",
image_path: "/examples/stencil-triangles/screenshot.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::default(),
base_test_parameters: wgpu_test::TestParameters::default(),
comparisons: &[wgpu_test::ComparisonType::Mean(0.03)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "texture-arrays"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -406,29 +406,44 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("texture-arrays");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn texture_arrays_uniform() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "texture-arrays",
image_path: "/examples/texture-arrays/screenshot.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::empty(),
base_test_parameters: wgpu_test::TestParameters::default(),
comparisons: &[wgpu_test::ComparisonType::Mean(0.0)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn texture_arrays_non_uniform() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_UNIFORM: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "texture-arrays-uniform",
image_path: "/examples/texture-arrays/screenshot.png",
width: 1024,
height: 768,
optional_features: wgpu::Features::empty(),
base_test_parameters: wgpu_test::TestParameters::default(),
comparisons: &[wgpu_test::ComparisonType::Mean(0.0)],
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST_NON_UNIFORM: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "texture-arrays-non-uniform",
image_path: "/examples/texture-arrays/screenshot.png",
width: 1024,
height: 768,
@ -436,5 +451,8 @@ fn texture_arrays_non_uniform() {
wgpu::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
base_test_parameters: wgpu_test::TestParameters::default(),
comparisons: &[wgpu_test::ComparisonType::Mean(0.0)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -419,35 +419,29 @@ fn main() {
#[cfg(test)]
mod tests {
use wgpu_test::{gpu_test, GpuTestConfiguration};
use crate::{submit_render_and_compute_pass_with_queries, QueryResults};
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn test_timestamps_encoder() {
wgpu_test::initialize_test(
#[gpu_test]
static TIMESTAMPS_ENCODER: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
wgpu_test::TestParameters::default()
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY),
|ctx| {
test_timestamps(ctx, false);
},
);
}
)
.run_sync(|ctx| test_timestamps(ctx, false));
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn test_timestamps_passes() {
wgpu_test::initialize_test(
#[gpu_test]
static TIMESTAMPS_PASSES: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
wgpu_test::TestParameters::default()
.limits(wgpu::Limits::downlevel_defaults())
.features(
wgpu::Features::TIMESTAMP_QUERY | wgpu::Features::TIMESTAMP_QUERY_INSIDE_PASSES,
),
|ctx| {
test_timestamps(ctx, true);
},
);
}
)
.run_sync(|ctx| test_timestamps(ctx, true));
fn test_timestamps(ctx: wgpu_test::TestingContext, timestamps_inside_passes: bool) {
let queries = submit_render_and_compute_pass_with_queries(&ctx.device, &ctx.queue);


@ -9,6 +9,7 @@ publish = false
[[bin]]
name = "water"
path = "src/main.rs"
harness = false
[dependencies]
bytemuck.workspace = true


@ -819,16 +819,16 @@ impl wgpu_example::framework::Example for Example {
}
}
#[cfg(not(test))]
fn main() {
wgpu_example::framework::run::<Example>("water");
}
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
#[wasm_bindgen_test::wasm_bindgen_test]
fn water() {
wgpu_example::framework::test::<Example>(wgpu_example::framework::FrameworkRefTest {
#[cfg(test)]
#[wgpu_test::gpu_test]
static TEST: wgpu_example::framework::ExampleTestParams =
wgpu_example::framework::ExampleTestParams {
name: "water",
image_path: "/examples/water/screenshot.png",
width: 1024,
height: 768,
@ -836,5 +836,8 @@ fn water() {
base_test_parameters: wgpu_test::TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::READ_ONLY_DEPTH_STENCIL),
comparisons: &[wgpu_test::ComparisonType::Mean(0.01)],
});
}
_phantom: std::marker::PhantomData::<Example>,
};
#[cfg(test)]
wgpu_test::gpu_test_main!();


@ -12,23 +12,37 @@ autotests = false
publish = false
[[test]]
name = "wgpu-tests"
path = "tests/root.rs"
name = "gpu-tests"
path = "tests/gpu.rs"
harness = false
[[test]]
name = "cpu-tests"
path = "tests/cpu.rs"
[features]
webgl = ["wgpu/webgl"]
[dependencies]
anyhow.workspace = true
arrayvec.workspace = true
bitflags.workspace = true
bytemuck.workspace = true
cfg-if.workspace = true
ctor.workspace = true
env_logger.workspace = true
futures-lite.workspace = true
heck.workspace = true
libtest-mimic.workspace = true
log.workspace = true
parking_lot.workspace = true
png.workspace = true
pollster.workspace = true
serde_json.workspace = true
serde.workspace = true
wgpu.workspace = true
wgt.workspace = true
wgpu-macros.workspace = true
wgt = { workspace = true, features = ["replay"] }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
nv-flip.workspace = true

tests/src/config.rs (new file)

@ -0,0 +1,113 @@
use std::{future::Future, pin::Pin, sync::Arc};
use crate::{TestParameters, TestingContext};
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
pub type RunTestAsync = Arc<dyn Fn(TestingContext) -> Pin<Box<dyn Future<Output = ()>>>>;
// We can't use WasmNotSend and WasmNotSync here, as we need these to not require Send/Sync
// even with the `fragile-send-sync-non-atomic-wasm` enabled.
pub trait RunTestSendSync {}
impl<T> RunTestSendSync for T {}
} else {
pub type RunTestAsync = Arc<dyn Fn(TestingContext) -> Pin<Box<dyn Future<Output = ()> + Send + Sync>> + Send + Sync>;
pub trait RunTestSendSync: Send + Sync {}
impl<T> RunTestSendSync for T where T: Send + Sync {}
}
}
/// Configuration for a GPU test.
#[derive(Clone)]
pub struct GpuTestConfiguration {
pub(crate) name: String,
pub(crate) params: TestParameters,
pub(crate) test: Option<RunTestAsync>,
}
impl GpuTestConfiguration {
pub fn new() -> Self {
Self {
name: String::new(),
params: TestParameters::default(),
test: None,
}
}
/// Set the name of the test. Must be unique across all tests in the binary.
pub fn name(self, name: &str) -> Self {
Self {
name: String::from(name),
..self
}
}
#[doc(hidden)]
/// Derives the name from a `struct S` in the function initializing the test.
///
/// Does not overwrite a given name if a name has already been set
pub fn name_from_init_function_typename<S>(self, name: &'static str) -> Self {
if !self.name.is_empty() {
return self;
}
let type_name = std::any::type_name::<S>();
// We end up with a string like:
//
// module::path::we::want::test_name_initializer::S
//
// So we reverse search for the 4th colon from the end, and take everything before that.
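//
// For instance (illustrative only): with a type name of
// `examples::boids::test_name_initializer::S` and a `name` of "boids",
// the resulting full test name is `examples::boids::boids`.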
let mut colons = 0;
let mut colon_4_index = type_name.len();
for i in (0..type_name.len()).rev() {
if type_name.as_bytes()[i] == b':' {
colons += 1;
}
if colons == 4 {
colon_4_index = i;
break;
}
}
let full = format!("{}::{}", &type_name[..colon_4_index], name);
Self { name: full, ..self }
}
/// Set the parameters that the test needs to succeed.
pub fn parameters(self, parameters: TestParameters) -> Self {
Self {
params: parameters,
..self
}
}
/// Make the test function a synchronous function.
pub fn run_sync(
self,
test: impl Fn(TestingContext) + Copy + RunTestSendSync + 'static,
) -> Self {
Self {
test: Some(Arc::new(move |ctx| Box::pin(async move { test(ctx) }))),
..self
}
}
/// Make the test function an asynchronous function/future.
pub fn run_async<F, R>(self, test: F) -> Self
where
F: Fn(TestingContext) -> R + RunTestSendSync + 'static,
R: Future<Output = ()> + RunTestSendSync + 'static,
{
Self {
test: Some(Arc::new(move |ctx| Box::pin(test(ctx)))),
..self
}
}
}
impl Default for GpuTestConfiguration {
fn default() -> Self {
Self::new()
}
}
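Taken together with the `#[gpu_test]` attribute from `wgpu-macros` and `gpu_test_main!`, tests in this commit declare a configured static. A minimal sketch follows; the static name and closure body are placeholders, while the builder calls mirror the hello-compute tests above:

```
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};

// `#[gpu_test]` registers this configuration with the harness set up by `gpu_test_main!`.
#[gpu_test]
static EXAMPLE_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
    .parameters(
        TestParameters::default()
            .downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
            .limits(wgpu::Limits::downlevel_defaults()),
    )
    .run_async(|ctx| async move {
        // `ctx` is a `TestingContext` with adapter, device and queue already set up.
        assert!(ctx.device_limits.max_bind_groups >= 1);
    });
```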


@ -1,9 +1,12 @@
use std::{borrow::Cow, ffi::OsStr, io, path::Path};
//! Image comparison utilities
use std::{borrow::Cow, ffi::OsStr, path::Path};
use wgpu::util::{align_to, DeviceExt};
use wgpu::*;
fn read_png(path: impl AsRef<Path>, width: u32, height: u32) -> Option<Vec<u8>> {
#[cfg(not(target_arch = "wasm32"))]
async fn read_png(path: impl AsRef<Path>, width: u32, height: u32) -> Option<Vec<u8>> {
let data = match std::fs::read(&path) {
Ok(f) => f,
Err(e) => {
@ -15,7 +18,7 @@ fn read_png(path: impl AsRef<Path>, width: u32, height: u32) -> Option<Vec<u8>>
return None;
}
};
let decoder = png::Decoder::new(io::Cursor::new(data));
let decoder = png::Decoder::new(std::io::Cursor::new(data));
let mut reader = decoder.read_info().ok()?;
let mut buffer = vec![0; reader.output_buffer_size()];
@ -40,17 +43,15 @@ fn read_png(path: impl AsRef<Path>, width: u32, height: u32) -> Option<Vec<u8>>
Some(buffer)
}
#[allow(unused_variables)]
fn write_png(
#[cfg(not(target_arch = "wasm32"))]
async fn write_png(
path: impl AsRef<Path>,
width: u32,
height: u32,
data: &[u8],
compression: png::Compression,
) {
#[cfg(not(target_arch = "wasm32"))]
{
let file = io::BufWriter::new(std::fs::File::create(path).unwrap());
let file = std::io::BufWriter::new(std::fs::File::create(path).unwrap());
let mut encoder = png::Encoder::new(file, width, height);
encoder.set_color(png::ColorType::Rgba);
@ -60,12 +61,8 @@ fn write_png(
writer.write_image_data(data).unwrap();
}
}
pub fn calc_difference(lhs: u8, rhs: u8) -> u8 {
(lhs as i16 - rhs as i16).unsigned_abs() as u8
}
#[cfg_attr(target_arch = "wasm32", allow(unused))]
fn add_alpha(input: &[u8]) -> Vec<u8> {
input
.chunks_exact(3)
@ -73,6 +70,7 @@ fn add_alpha(input: &[u8]) -> Vec<u8> {
.collect()
}
#[cfg_attr(target_arch = "wasm32", allow(unused))]
fn remove_alpha(input: &[u8]) -> Vec<u8> {
input
.chunks_exact(4)
@ -148,7 +146,8 @@ impl ComparisonType {
}
}
pub fn compare_image_output(
#[cfg(not(target_arch = "wasm32"))]
pub async fn compare_image_output(
path: impl AsRef<Path> + AsRef<OsStr>,
adapter_info: &wgt::AdapterInfo,
width: u32,
@ -156,11 +155,10 @@ pub fn compare_image_output(
test_with_alpha: &[u8],
checks: &[ComparisonType],
) {
#[cfg(not(target_arch = "wasm32"))]
{
use std::{ffi::OsString, str::FromStr};
let reference_with_alpha = read_png(&path, width, height);
let reference_path = Path::new(&path);
let reference_with_alpha = read_png(&path, width, height).await;
let reference = match reference_with_alpha {
Some(v) => remove_alpha(&v),
@ -171,7 +169,8 @@ pub fn compare_image_output(
height,
test_with_alpha,
png::Compression::Best,
);
)
.await;
return;
}
};
@ -179,31 +178,6 @@ pub fn compare_image_output(
assert_eq!(reference.len(), test.len());
let reference_flip = nv_flip::FlipImageRgb8::with_data(width, height, &reference);
let test_flip = nv_flip::FlipImageRgb8::with_data(width, height, &test);
let error_map_flip = nv_flip::flip(
reference_flip,
test_flip,
nv_flip::DEFAULT_PIXELS_PER_DEGREE,
);
let mut pool = nv_flip::FlipPool::from_image(&error_map_flip);
let reference_path = Path::new(&path);
println!(
"Starting image comparison test with reference image \"{}\"",
reference_path.display()
);
print_flip(&mut pool);
// If there are no checks, we want to fail the test.
let mut all_passed = !checks.is_empty();
// We always iterate all of these, as the call to check prints
for check in checks {
all_passed &= check.check(&mut pool);
}
let file_stem = reference_path.file_stem().unwrap().to_string_lossy();
let renderer = format!(
"{}-{}-{}",
@ -219,11 +193,39 @@ pub fn compare_image_output(
OsString::from_str(&format!("{}-{}-difference.png", file_stem, renderer,)).unwrap(),
);
let mut all_passed;
let magma_image_with_alpha;
{
let reference_flip = nv_flip::FlipImageRgb8::with_data(width, height, &reference);
let test_flip = nv_flip::FlipImageRgb8::with_data(width, height, &test);
let error_map_flip = nv_flip::flip(
reference_flip,
test_flip,
nv_flip::DEFAULT_PIXELS_PER_DEGREE,
);
let mut pool = nv_flip::FlipPool::from_image(&error_map_flip);
println!(
"Starting image comparison test with reference image \"{}\"",
reference_path.display()
);
print_flip(&mut pool);
// If there are no checks, we want to fail the test.
all_passed = !checks.is_empty();
// We always iterate all of these, as the call to check prints
for check in checks {
all_passed &= check.check(&mut pool);
}
// Convert the error values to a false color representation
let magma_image = error_map_flip
.apply_color_lut(&nv_flip::magma_lut())
.to_vec();
let magma_image_with_alpha = add_alpha(&magma_image);
magma_image_with_alpha = add_alpha(&magma_image);
}
write_png(
actual_path,
@ -231,26 +233,38 @@ pub fn compare_image_output(
height,
test_with_alpha,
png::Compression::Fast,
);
)
.await;
write_png(
difference_path,
width,
height,
&magma_image_with_alpha,
png::Compression::Fast,
);
)
.await;
if !all_passed {
panic!("Image data mismatch!")
}
}
#[cfg(target_arch = "wasm32")]
pub async fn compare_image_output(
path: impl AsRef<Path> + AsRef<OsStr>,
adapter_info: &wgt::AdapterInfo,
width: u32,
height: u32,
test_with_alpha: &[u8],
checks: &[ComparisonType],
) {
#[cfg(target_arch = "wasm32")]
{
let _ = (path, adapter_info, width, height, test_with_alpha, checks);
}
}
#[cfg_attr(target_arch = "wasm32", allow(unused))]
fn sanitize_for_path(s: &str) -> String {
s.chars()
.map(|ch| if ch.is_ascii_alphanumeric() { ch } else { '_' })

tests/src/init.rs (new file)

@ -0,0 +1,129 @@
use wgpu::{Adapter, Device, Instance, Queue};
use wgt::{Backends, Features, Limits};
/// Initialize a wgpu instance with the options from the environment.
pub fn initialize_instance() -> Instance {
// We ignore `WGPU_BACKEND` for now, merely using test filtering to only run a single backend's tests.
//
// We can potentially work support back into the test runner in the future, but as the adapters are matched up
// based on adapter index, removing some backends messes up the indexes in annoying ways.
let backends = Backends::all();
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
Instance::new(wgpu::InstanceDescriptor {
backends,
flags: wgpu::InstanceFlags::debugging().with_env(),
dx12_shader_compiler,
gles_minor_version,
})
}
/// Initialize a wgpu adapter, taking the `n`th adapter from the instance.
pub async fn initialize_adapter(adapter_index: usize) -> (Adapter, Option<SurfaceGuard>) {
let instance = initialize_instance();
#[allow(unused_variables)]
let _surface: wgpu::Surface;
let surface_guard: Option<SurfaceGuard>;
// Create a canvas iff we need a WebGL2RenderingContext to have a working device.
#[cfg(not(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
)))]
{
surface_guard = None;
}
#[cfg(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
))]
{
// On wasm, append a canvas to the document body for initializing the adapter
let canvas = initialize_html_canvas();
_surface = instance
.create_surface_from_canvas(canvas.clone())
.expect("could not create surface from canvas");
surface_guard = Some(SurfaceGuard { canvas });
}
cfg_if::cfg_if! {
if #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))] {
let adapter_iter = instance.enumerate_adapters(wgpu::Backends::all());
let adapter_count = adapter_iter.len();
let adapter = adapter_iter.into_iter()
.nth(adapter_index)
.unwrap_or_else(|| panic!("Tried to get index {adapter_index} adapter, but adapter list was only {adapter_count} long. Is .gpuconfig out of date?"));
} else {
assert_eq!(adapter_index, 0);
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions::default()).await.unwrap();
}
}
log::info!("Testing using adapter: {:#?}", adapter.get_info());
(adapter, surface_guard)
}
/// Initialize a wgpu device from a given adapter.
pub async fn initialize_device(
adapter: &Adapter,
features: Features,
limits: Limits,
) -> (Device, Queue) {
let bundle = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features,
limits,
},
None,
)
.await;
match bundle {
Ok(b) => b,
Err(e) => panic!("Failed to initialize device: {e}"),
}
}
/// Create a canvas for testing.
#[cfg(target_arch = "wasm32")]
pub fn initialize_html_canvas() -> web_sys::HtmlCanvasElement {
use wasm_bindgen::JsCast;
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| {
let canvas = doc.create_element("Canvas").unwrap();
canvas.dyn_into::<web_sys::HtmlCanvasElement>().ok()
})
.expect("couldn't create canvas")
}
pub struct SurfaceGuard {
#[cfg(target_arch = "wasm32")]
#[allow(unused)]
canvas: web_sys::HtmlCanvasElement,
}
impl SurfaceGuard {
#[cfg(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
))]
pub(crate) fn check_for_unreported_errors(&self) -> bool {
use wasm_bindgen::JsCast;
self.canvas
.get_context("webgl2")
.unwrap()
.unwrap()
.dyn_into::<web_sys::WebGl2RenderingContext>()
.unwrap()
.get_error()
!= web_sys::WebGl2RenderingContext::NO_ERROR
}
}
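A rough sketch of how these helpers compose; the actual per-test wiring lives in the runner modules (`run`/`native`), which are not shown in this diff, so the function below is illustrative only:

```
// Illustrative only: take the first enumerated adapter and create a device with
// the given requirements, roughly what the test runner does per test case.
async fn setup(
    features: wgpu::Features,
    limits: wgpu::Limits,
) -> (wgpu::Adapter, wgpu::Device, wgpu::Queue) {
    let (adapter, _surface_guard) = initialize_adapter(0).await;
    let (device, queue) = initialize_device(&adapter, features, limits).await;
    (adapter, device, queue)
}
```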


@ -1,595 +1,27 @@
//! This module contains common test-only code that needs to be shared between the examples and the tests.
#![allow(dead_code)] // This module is used in a lot of contexts and only parts of it will be used
use std::panic::{catch_unwind, AssertUnwindSafe};
use wgpu::{Adapter, Device, DownlevelFlags, Instance, Queue, Surface};
use wgt::{Backends, DeviceDescriptor, DownlevelCapabilities, Features, Limits};
//! Test utilities for the wgpu repository.
mod config;
pub mod image;
mod init;
mod isolation;
pub mod native;
mod params;
mod report;
mod run;
#[cfg(target_arch = "wasm32")]
pub use init::initialize_html_canvas;
pub use self::image::ComparisonType;
const CANVAS_ID: &str = "test-canvas";
async fn initialize_device(
adapter: &Adapter,
features: Features,
limits: Limits,
) -> (Device, Queue) {
let bundle = adapter
.request_device(
&DeviceDescriptor {
label: None,
features,
limits,
},
None,
)
.await;
match bundle {
Ok(b) => b,
Err(e) => panic!("Failed to initialize device: {e}"),
}
}
pub struct TestingContext {
pub adapter: Adapter,
pub adapter_info: wgt::AdapterInfo,
pub adapter_downlevel_capabilities: wgt::DownlevelCapabilities,
pub device: Device,
pub device_features: wgt::Features,
pub device_limits: wgt::Limits,
pub queue: Queue,
}
fn lowest_downlevel_properties() -> DownlevelCapabilities {
DownlevelCapabilities {
flags: wgt::DownlevelFlags::empty(),
limits: wgt::DownlevelLimits {},
shader_model: wgt::ShaderModel::Sm2,
}
}
/// Conditions under which a test should fail or be skipped.
///
/// By passing a `FailureCase` to [`TestParameters::expect_fail`], you can
/// mark a test as expected to fail under the indicated conditions. By
/// passing it to [`TestParameters::skip`], you can request that the
/// test be skipped altogether.
///
/// If a field is `None`, then that field does not restrict matches. For
/// example:
///
/// ```
/// # use wgpu_test::FailureCase;
/// FailureCase {
/// backends: Some(wgpu::Backends::DX11 | wgpu::Backends::DX12),
/// vendor: None,
/// adapter: Some("RTX"),
/// driver: None,
/// }
/// # ;
/// ```
///
/// This applies to all cards with `"RTX"` in their name on either
/// Direct3D backend, no matter the vendor ID or driver name.
///
/// The strings given here need only appear as a substring in the
/// corresponding [`AdapterInfo`] fields. The comparison is
/// case-insensitive.
///
/// The default value of `FailureCase` applies to any test case. That
/// is, there are no criteria to constrain the match.
///
/// [`AdapterInfo`]: wgt::AdapterInfo
#[derive(Default)]
pub struct FailureCase {
/// Backends expected to fail, or `None` for any backend.
///
/// If this is `None`, or if the test is using one of the backends
/// in `backends`, then this `FailureCase` applies.
pub backends: Option<wgpu::Backends>,
/// Vendor expected to fail, or `None` for any vendor.
///
/// If `Some`, this must match [`AdapterInfo::vendor`], which is
/// usually the PCI vendor id. Otherwise, this `FailureCase`
/// applies regardless of vendor.
///
/// [`AdapterInfo::vendor`]: wgt::AdapterInfo::vendor
pub vendor: Option<u32>,
/// Name of adapter expected to fail, or `None` for any adapter name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::name`], then this `FailureCase` applies. If
/// this is `None`, the adapter name isn't considered.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub adapter: Option<&'static str>,
/// Name of driver expected to fail, or `None` for any driver name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::driver`], then this `FailureCase` applies. If
/// this is `None`, the driver name isn't considered.
///
/// [`AdapterInfo::driver`]: wgt::AdapterInfo::driver
pub driver: Option<&'static str>,
}
impl FailureCase {
/// This case applies to all tests.
pub fn always() -> Self {
FailureCase::default()
}
/// This case applies to no tests.
pub fn never() -> Self {
FailureCase {
backends: Some(wgpu::Backends::empty()),
..FailureCase::default()
}
}
/// Tests running on any of the given backends.
pub fn backend(backends: wgpu::Backends) -> Self {
FailureCase {
backends: Some(backends),
..FailureCase::default()
}
}
/// Tests running on `adapter`.
///
/// For this case to apply, the `adapter` string must appear as a substring
/// of the adapter's [`AdapterInfo::name`]. The comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn adapter(adapter: &'static str) -> Self {
FailureCase {
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running on `backend` and `adapter`.
///
/// For this case to apply, the test must be using an adapter for one of the
/// given `backend` bits, and `adapter` string must appear as a substring of
/// the adapter's [`AdapterInfo::name`]. The string comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn backend_adapter(backends: wgpu::Backends, adapter: &'static str) -> Self {
FailureCase {
backends: Some(backends),
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running under WebGL.
///
/// Because of wasm's limited ability to recover from errors, we
/// usually need to skip the test altogether if it's not
/// supported, so this should be usually used with
/// [`TestParameters::skip`].
pub fn webgl2() -> Self {
#[cfg(target_arch = "wasm32")]
let case = FailureCase::backend(wgpu::Backends::GL);
#[cfg(not(target_arch = "wasm32"))]
let case = FailureCase::never();
case
}
/// Tests running on the MoltenVK Vulkan driver on macOS.
pub fn molten_vk() -> Self {
FailureCase {
backends: Some(wgpu::Backends::VULKAN),
driver: Some("MoltenVK"),
..FailureCase::default()
}
}
/// Test whether `self` applies to `info`.
///
/// If it does, return a `FailureReasons` whose set bits indicate
/// why. If it doesn't, return `None`.
///
/// The caller is responsible for converting the string-valued
/// fields of `info` to lower case, to ensure case-insensitive
/// matching.
fn applies_to(&self, info: &wgt::AdapterInfo) -> Option<FailureReasons> {
let mut reasons = FailureReasons::empty();
if let Some(backends) = self.backends {
if !backends.contains(wgpu::Backends::from(info.backend)) {
return None;
}
reasons.set(FailureReasons::BACKEND, true);
}
if let Some(vendor) = self.vendor {
if vendor != info.vendor {
return None;
}
reasons.set(FailureReasons::VENDOR, true);
}
if let Some(adapter) = self.adapter {
let adapter = adapter.to_lowercase();
if !info.name.contains(&adapter) {
return None;
}
reasons.set(FailureReasons::ADAPTER, true);
}
if let Some(driver) = self.driver {
let driver = driver.to_lowercase();
if !info.driver.contains(&driver) {
return None;
}
reasons.set(FailureReasons::DRIVER, true);
}
// If we got this far but no specific reasons were triggered, then this
// must be a wildcard.
if reasons.is_empty() {
Some(FailureReasons::ALWAYS)
} else {
Some(reasons)
}
}
}
// This information determines if a test should run.
pub struct TestParameters {
pub required_features: Features,
pub required_downlevel_properties: DownlevelCapabilities,
pub required_limits: Limits,
/// Conditions under which this test should be skipped.
pub skips: Vec<FailureCase>,
/// Conditions under which this test should be run, but is expected to fail.
pub failures: Vec<FailureCase>,
}
impl Default for TestParameters {
fn default() -> Self {
Self {
required_features: Features::empty(),
required_downlevel_properties: lowest_downlevel_properties(),
required_limits: Limits::downlevel_webgl2_defaults(),
skips: Vec::new(),
failures: Vec::new(),
}
}
}
bitflags::bitflags! {
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct FailureReasons: u8 {
const BACKEND = 1 << 0;
const VENDOR = 1 << 1;
const ADAPTER = 1 << 2;
const DRIVER = 1 << 3;
const ALWAYS = 1 << 4;
}
}
// Builder pattern to make it easier
impl TestParameters {
/// Set of common features that most internal tests require for readback.
pub fn test_features_limits(self) -> Self {
self.features(Features::MAPPABLE_PRIMARY_BUFFERS | Features::VERTEX_WRITABLE_STORAGE)
.limits(wgpu::Limits::downlevel_defaults())
}
/// Set the list of features this test requires.
pub fn features(mut self, features: Features) -> Self {
self.required_features |= features;
self
}
pub fn downlevel_flags(mut self, downlevel_flags: DownlevelFlags) -> Self {
self.required_downlevel_properties.flags |= downlevel_flags;
self
}
/// Set the limits needed for the test.
pub fn limits(mut self, limits: Limits) -> Self {
self.required_limits = limits;
self
}
/// Mark the test as expected to fail under the given conditions, but still run.
pub fn expect_fail(mut self, when: FailureCase) -> Self {
self.failures.push(when);
self
}
/// Mark the test as needing to be skipped under the given conditions.
pub fn skip(mut self, when: FailureCase) -> Self {
self.skips.push(when);
self
}
}
pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(TestingContext)) {
// We don't actually care if it fails
#[cfg(not(target_arch = "wasm32"))]
let _ = env_logger::try_init();
#[cfg(target_arch = "wasm32")]
let _ = console_log::init_with_level(log::Level::Info);
let _test_guard = isolation::OneTestPerProcessGuard::new();
let (adapter, _surface_guard) = initialize_adapter();
let adapter_info = adapter.get_info();
// Produce a lower-case version of the adapter info, for comparison against
// `parameters.skips` and `parameters.failures`.
let adapter_lowercase_info = wgt::AdapterInfo {
name: adapter_info.name.to_lowercase(),
driver: adapter_info.driver.to_lowercase(),
..adapter_info.clone()
};
let adapter_features = adapter.features();
let adapter_limits = adapter.limits();
let adapter_downlevel_capabilities = adapter.get_downlevel_capabilities();
let missing_features = parameters.required_features - adapter_features;
if !missing_features.is_empty() {
log::info!("TEST SKIPPED: MISSING FEATURES {:?}", missing_features);
return;
}
if !parameters.required_limits.check_limits(&adapter_limits) {
log::info!("TEST SKIPPED: LIMIT TOO LOW");
return;
}
let missing_downlevel_flags =
parameters.required_downlevel_properties.flags - adapter_downlevel_capabilities.flags;
if !missing_downlevel_flags.is_empty() {
log::info!(
"TEST SKIPPED: MISSING DOWNLEVEL FLAGS {:?}",
missing_downlevel_flags
);
return;
}
if adapter_downlevel_capabilities.shader_model
< parameters.required_downlevel_properties.shader_model
{
log::info!(
"TEST SKIPPED: LOW SHADER MODEL {:?}",
adapter_downlevel_capabilities.shader_model
);
return;
}
let (device, queue) = pollster::block_on(initialize_device(
&adapter,
parameters.required_features,
parameters.required_limits.clone(),
));
let context = TestingContext {
adapter,
adapter_info,
adapter_downlevel_capabilities,
device,
device_features: parameters.required_features,
device_limits: parameters.required_limits,
queue,
};
// Check if we should skip the test altogether.
if let Some(skip_reason) = parameters
.skips
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info))
{
log::info!("EXPECTED TEST FAILURE SKIPPED: {:?}", skip_reason);
return;
}
// Determine if we expect this test to fail, and if so, why.
let expected_failure_reason = parameters
.failures
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info));
// Run the test, and catch panics (possibly due to failed assertions).
let panicked = catch_unwind(AssertUnwindSafe(|| test_function(context))).is_err();
// Check whether any validation errors were reported during the test run.
cfg_if::cfg_if!(
if #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] {
let canary_set = wgpu::hal::VALIDATION_CANARY.get_and_reset();
} else {
let canary_set = _surface_guard.unwrap().check_for_unreported_errors();
}
);
// Summarize reasons for actual failure, if any.
let failure_cause = match (panicked, canary_set) {
(true, true) => Some("PANIC AND VALIDATION ERROR"),
(true, false) => Some("PANIC"),
(false, true) => Some("VALIDATION ERROR"),
(false, false) => None,
};
// Compare actual results against expectations.
match (failure_cause, expected_failure_reason) {
// The test passed, as expected.
(None, None) => {}
// The test failed unexpectedly.
(Some(cause), None) => {
panic!("UNEXPECTED TEST FAILURE DUE TO {cause}")
}
// The test passed unexpectedly.
(None, Some(reason)) => {
panic!("UNEXPECTED TEST PASS: {reason:?}");
}
// The test failed, as expected.
(Some(cause), Some(reason_expected)) => {
log::info!(
"EXPECTED FAILURE DUE TO {} (expected because of {:?})",
cause,
reason_expected
);
}
}
}
pub fn initialize_adapter() -> (Adapter, Option<SurfaceGuard>) {
let instance = initialize_instance();
let surface_guard: Option<SurfaceGuard>;
let compatible_surface;
// Create a canvas iff we need a WebGL2RenderingContext to have a working device.
#[cfg(not(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
)))]
{
surface_guard = None;
compatible_surface = None;
}
#[cfg(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
))]
{
// On wasm, append a canvas to the document body for initializing the adapter
let canvas = create_html_canvas();
// We use raw_window_handle here, as create_surface_from_canvas is not implemented on emscripten.
struct WindowHandle;
unsafe impl raw_window_handle::HasRawWindowHandle for WindowHandle {
fn raw_window_handle(&self) -> raw_window_handle::RawWindowHandle {
raw_window_handle::RawWindowHandle::Web({
let mut handle = raw_window_handle::WebWindowHandle::empty();
handle.id = 1;
handle
})
}
}
unsafe impl raw_window_handle::HasRawDisplayHandle for WindowHandle {
fn raw_display_handle(&self) -> raw_window_handle::RawDisplayHandle {
raw_window_handle::RawDisplayHandle::Web(
raw_window_handle::WebDisplayHandle::empty(),
)
}
}
let surface = unsafe {
instance
.create_surface(&WindowHandle)
.expect("could not create surface from canvas")
};
surface_guard = Some(SurfaceGuard { canvas });
compatible_surface = Some(surface);
}
let compatible_surface: Option<&Surface> = compatible_surface.as_ref();
let adapter = pollster::block_on(wgpu::util::initialize_adapter_from_env_or_default(
&instance,
compatible_surface,
))
.expect("could not find suitable adapter on the system");
(adapter, surface_guard)
}
pub fn initialize_instance() -> Instance {
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
Instance::new(wgpu::InstanceDescriptor {
backends,
flags: wgpu::InstanceFlags::debugging().with_env(),
dx12_shader_compiler,
gles_minor_version,
})
}
// Public because it is used by tests that interact with the canvas
pub struct SurfaceGuard {
#[cfg(target_arch = "wasm32")]
pub canvas: web_sys::HtmlCanvasElement,
}
impl SurfaceGuard {
fn check_for_unreported_errors(&self) -> bool {
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "wasm32", any(target_os = "emscripten", feature = "webgl")))] {
use wasm_bindgen::JsCast;
self.canvas
.get_context("webgl2")
.unwrap()
.unwrap()
.dyn_into::<web_sys::WebGl2RenderingContext>()
.unwrap()
.get_error()
!= web_sys::WebGl2RenderingContext::NO_ERROR
} else {
false
}
}
}
}
#[cfg(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
))]
impl Drop for SurfaceGuard {
fn drop(&mut self) {
delete_html_canvas();
}
}
#[cfg(target_arch = "wasm32")]
pub fn create_html_canvas() -> web_sys::HtmlCanvasElement {
use wasm_bindgen::JsCast;
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| {
let body = doc.body().unwrap();
let canvas = doc.create_element("Canvas").unwrap();
canvas.set_attribute("data-raw-handle", "1").unwrap();
canvas.set_id(CANVAS_ID);
body.append_child(&canvas).unwrap();
canvas.dyn_into::<web_sys::HtmlCanvasElement>().ok()
})
.expect("couldn't append canvas to document body")
}
#[cfg(all(
target_arch = "wasm32",
any(target_os = "emscripten", feature = "webgl")
))]
fn delete_html_canvas() {
if let Some(document) = web_sys::window().and_then(|win| win.document()) {
if let Some(element) = document.get_element_by_id(CANVAS_ID) {
element.remove();
}
};
}
// Run some code in an error scope and assert that validation fails.
pub use config::GpuTestConfiguration;
#[doc(hidden)]
pub use ctor::ctor;
pub use init::{initialize_adapter, initialize_device, initialize_instance};
pub use params::{FailureCase, FailureReasons, TestParameters};
pub use run::{execute_test, TestingContext};
pub use wgpu_macros::gpu_test;
/// Run some code in an error scope and assert that validation fails.
pub fn fail<T>(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T {
device.push_error_scope(wgpu::ErrorFilter::Validation);
let result = callback();
@ -598,7 +30,7 @@ pub fn fail<T>(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T {
result
}
// Run some code in an error scope and assert that validation succeeds.
/// Run some code in an error scope and assert that validation succeeds.
pub fn valid<T>(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T {
device.push_error_scope(wgpu::ErrorFilter::Validation);
let result = callback();
@ -607,8 +39,8 @@ pub fn valid<T>(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T {
result
}
// Run some code in an error scope and assert that validation succeeds or fails depending on the
// provided `should_fail` boolean.
/// Run some code in an error scope and assert that validation succeeds or fails depending on the
/// provided `should_fail` boolean.
pub fn fail_if<T>(device: &wgpu::Device, should_fail: bool, callback: impl FnOnce() -> T) -> T {
if should_fail {
fail(device, callback)
@ -616,3 +48,19 @@ pub fn fail_if<T>(device: &wgpu::Device, should_fail: bool, callback: impl FnOnc
valid(device, callback)
}
}
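The converted tests below use these helpers directly; as a condensed sketch of the intended pattern (the specific buffer and layout details are illustrative, drawn from the tests in this patch):

// Empty bind group layouts are accepted, so this must not raise a validation error.
valid(&ctx.device, || {
    ctx.device
        .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[],
        })
});
// A buffer with no usage flags is invalid, so this must raise one.
fail(&ctx.device, || {
    ctx.device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size: 16,
        usage: wgpu::BufferUsages::empty(),
        mapped_at_creation: false,
    })
});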
/// Adds the necessary main function for our GPU test harness.
#[macro_export]
macro_rules! gpu_test_main {
() => {
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[cfg(target_arch = "wasm32")]
fn main() {}
#[cfg(not(target_arch = "wasm32"))]
fn main() -> $crate::native::MainResult {
$crate::native::main()
}
};
}
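For illustration, a test crate built on this harness declares each test as a `#[gpu_test]` static and invokes the macro once at crate root, in the style of the tests converted later in this patch (the names here are placeholders):

use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};

#[gpu_test]
static EXAMPLE: GpuTestConfiguration = GpuTestConfiguration::new()
    .parameters(TestParameters::default())
    .run_sync(|ctx| {
        // The harness hands us an initialized adapter, device and queue.
        let _encoder = ctx
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    });

wgpu_test::gpu_test_main!();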

104
tests/src/native.rs Normal file
View File

@ -0,0 +1,104 @@
#![cfg(not(target_arch = "wasm32"))]
//! Infrastructure for the native, `cargo-nextest` based harness.
//!
//! This is largely used by [`gpu_test_main`](crate::gpu_test_main) and [`gpu_test`](crate::gpu_test).
use std::{future::Future, pin::Pin};
use parking_lot::Mutex;
use crate::{
config::GpuTestConfiguration, params::TestInfo, report::AdapterReport, run::execute_test,
};
type NativeTestFuture = Pin<Box<dyn Future<Output = ()> + Send + Sync>>;
struct NativeTest {
name: String,
future: NativeTestFuture,
}
impl NativeTest {
fn from_configuration(
config: GpuTestConfiguration,
adapter: &AdapterReport,
adapter_index: usize,
) -> Self {
let backend = adapter.info.backend;
let device_name = &adapter.info.name;
let test_info = TestInfo::from_configuration(&config, adapter);
let full_name = format!(
"[{running_msg}] [{backend:?}/{device_name}/{adapter_index}] {base_name}",
running_msg = test_info.running_msg,
base_name = config.name,
);
Self {
name: full_name,
future: Box::pin(async move {
// Enable metal validation layers if we're running on metal.
//
// This is a process-wide setting as it's via environment variable, but all
// tests are run in separate processes.
//
// We don't do this in the instance initializer as we don't want to enable
// validation layers for the entire process, or other instances.
//
// We do not enable metal validation when running on moltenvk.
let metal_validation = backend == wgpu::Backend::Metal;
let env_value = if metal_validation { "1" } else { "0" };
std::env::set_var("MTL_DEBUG_LAYER", env_value);
std::env::set_var("MTL_SHADER_VALIDATION", env_value);
execute_test(config, Some(test_info), adapter_index).await;
}),
}
}
pub fn into_trial(self) -> libtest_mimic::Trial {
libtest_mimic::Trial::test(self.name, || {
pollster::block_on(self.future);
Ok(())
})
}
}
#[doc(hidden)]
pub static TEST_LIST: Mutex<Vec<crate::GpuTestConfiguration>> = Mutex::new(Vec::new());
/// Return value for the main function.
pub type MainResult = anyhow::Result<()>;
/// Main function that runs every GPU test once for every adapter on the system.
pub fn main() -> MainResult {
use anyhow::Context;
use crate::report::GpuReport;
let config_text =
&std::fs::read_to_string(format!("{}/../.gpuconfig", env!("CARGO_MANIFEST_DIR")))
.context("Failed to read .gpuconfig, did you run the tests via `cargo xtask test`?")?;
let report = GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?;
let mut test_guard = TEST_LIST.lock();
execute_native(test_guard.drain(..).flat_map(|test| {
report
.devices
.iter()
.enumerate()
.map(move |(adapter_index, adapter)| {
NativeTest::from_configuration(test.clone(), adapter, adapter_index)
})
}));
Ok(())
}
fn execute_native(tests: impl IntoIterator<Item = NativeTest>) {
let args = libtest_mimic::Arguments::from_args();
let trials = tests.into_iter().map(NativeTest::into_trial).collect();
libtest_mimic::run(&args, trials).exit_if_failed();
}
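For orientation only: the `#[gpu_test]` attribute (generated by `wgpu-macros`, not shown in this file) is what feeds `TEST_LIST`. Conceptually it expands to something like the constructor below, using the re-exported `ctor`; the real expansion also records the test name and handles wasm targets differently, so treat this purely as a sketch.

// Hypothetical expansion sketch; the actual code is emitted by wgpu_macros::gpu_test.
#[wgpu_test::ctor]
fn register_example() {
    // Push the configuration into the global list that `native::main` drains.
    wgpu_test::native::TEST_LIST.lock().push(EXAMPLE.clone());
}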

363
tests/src/params.rs Normal file
View File

@ -0,0 +1,363 @@
use arrayvec::ArrayVec;
use wgt::{DownlevelCapabilities, DownlevelFlags, Features, Limits};
use crate::{report::AdapterReport, GpuTestConfiguration};
/// Conditions under which a test should fail or be skipped.
///
/// By passing a `FailureCase` to [`TestParameters::expect_fail`], you can
/// mark a test as expected to fail under the indicated conditions. By
/// passing it to [`TestParameters::skip`], you can request that the
/// test be skipped altogether.
///
/// If a field is `None`, then that field does not restrict matches. For
/// example:
///
/// ```
/// # use wgpu_test::FailureCase;
/// FailureCase {
/// backends: Some(wgpu::Backends::DX11 | wgpu::Backends::DX12),
/// vendor: None,
/// adapter: Some("RTX"),
/// driver: None,
/// }
/// # ;
/// ```
///
/// This applies to all cards with `"RTX'` in their name on either
/// Direct3D backend, no matter the vendor ID or driver name.
///
/// The strings given here need only appear as a substring in the
/// corresponding [`AdapterInfo`] fields. The comparison is
/// case-insensitive.
///
/// The default value of `FailureCase` applies to any test case. That
/// is, there are no criteria to constrain the match.
///
/// [`AdapterInfo`]: wgt::AdapterInfo
#[derive(Default, Clone)]
pub struct FailureCase {
/// Backends expected to fail, or `None` for any backend.
///
/// If this is `None`, or if the test is using one of the backends
/// in `backends`, then this `FailureCase` applies.
pub backends: Option<wgpu::Backends>,
/// Vendor expected to fail, or `None` for any vendor.
///
/// If `Some`, this must match [`AdapterInfo::vendor`], which is
/// usually the PCI vendor id. Otherwise, this `FailureCase`
/// applies regardless of vendor.
///
/// [`AdapterInfo::vendor`]: wgt::AdapterInfo::vendor
pub vendor: Option<u32>,
/// Name of adapter expected to fail, or `None` for any adapter name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::name`], then this `FailureCase` applies. If
/// this is `None`, the adapter name isn't considered.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub adapter: Option<&'static str>,
/// Name of driver expected to fail, or `None` for any driver name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::driver`], then this `FailureCase` applies. If
/// this is `None`, the driver name isn't considered.
///
/// [`AdapterInfo::driver`]: wgt::AdapterInfo::driver
pub driver: Option<&'static str>,
}
impl FailureCase {
/// This case applies to all tests.
pub fn always() -> Self {
FailureCase::default()
}
/// This case applies to no tests.
pub fn never() -> Self {
FailureCase {
backends: Some(wgpu::Backends::empty()),
..FailureCase::default()
}
}
/// Tests running on any of the given backends.
pub fn backend(backends: wgpu::Backends) -> Self {
FailureCase {
backends: Some(backends),
..FailureCase::default()
}
}
/// Tests running on `adapter`.
///
/// For this case to apply, the `adapter` string must appear as a substring
/// of the adapter's [`AdapterInfo::name`]. The comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn adapter(adapter: &'static str) -> Self {
FailureCase {
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running on `backend` and `adapter`.
///
/// For this case to apply, the test must be using an adapter for one of the
/// given `backend` bits, and the `adapter` string must appear as a substring of
/// the adapter's [`AdapterInfo::name`]. The string comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn backend_adapter(backends: wgpu::Backends, adapter: &'static str) -> Self {
FailureCase {
backends: Some(backends),
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running under WebGL.
///
/// Because of wasm's limited ability to recover from errors, we
/// usually need to skip the test altogether if it's not
/// supported, so this should usually be used with
/// [`TestParameters::skip`].
pub fn webgl2() -> Self {
#[cfg(target_arch = "wasm32")]
let case = FailureCase::backend(wgpu::Backends::GL);
#[cfg(not(target_arch = "wasm32"))]
let case = FailureCase::never();
case
}
/// Tests running on the MoltenVK Vulkan driver on macOS.
pub fn molten_vk() -> Self {
FailureCase {
backends: Some(wgpu::Backends::VULKAN),
driver: Some("MoltenVK"),
..FailureCase::default()
}
}
/// Test whether `self` applies to `info`.
///
/// If it does, return a `FailureReasons` whose set bits indicate
/// why. If it doesn't, return `None`.
///
/// The caller is responsible for converting the string-valued
/// fields of `info` to lower case, to ensure case-insensitive
/// matching.
pub(crate) fn applies_to(&self, info: &wgt::AdapterInfo) -> Option<FailureReasons> {
let mut reasons = FailureReasons::empty();
if let Some(backends) = self.backends {
if !backends.contains(wgpu::Backends::from(info.backend)) {
return None;
}
reasons.set(FailureReasons::BACKEND, true);
}
if let Some(vendor) = self.vendor {
if vendor != info.vendor {
return None;
}
reasons.set(FailureReasons::VENDOR, true);
}
if let Some(adapter) = self.adapter {
let adapter = adapter.to_lowercase();
if !info.name.contains(&adapter) {
return None;
}
reasons.set(FailureReasons::ADAPTER, true);
}
if let Some(driver) = self.driver {
let driver = driver.to_lowercase();
if !info.driver.contains(&driver) {
return None;
}
reasons.set(FailureReasons::DRIVER, true);
}
// If we got this far but no specific reasons were triggered, then this
// must be a wildcard.
if reasons.is_empty() {
Some(FailureReasons::ALWAYS)
} else {
Some(reasons)
}
}
}
const LOWEST_DOWNLEVEL_PROPERTIES: wgpu::DownlevelCapabilities = DownlevelCapabilities {
flags: wgt::DownlevelFlags::empty(),
limits: wgt::DownlevelLimits {},
shader_model: wgt::ShaderModel::Sm2,
};
/// This information determines if a test should run.
#[derive(Clone)]
pub struct TestParameters {
pub required_features: Features,
pub required_downlevel_caps: DownlevelCapabilities,
pub required_limits: Limits,
/// Conditions under which this test should be skipped.
pub skips: Vec<FailureCase>,
/// Conditions under which this test should be run, but is expected to fail.
pub failures: Vec<FailureCase>,
}
impl Default for TestParameters {
fn default() -> Self {
Self {
required_features: Features::empty(),
required_downlevel_caps: LOWEST_DOWNLEVEL_PROPERTIES,
required_limits: Limits::downlevel_webgl2_defaults(),
skips: Vec::new(),
failures: Vec::new(),
}
}
}
bitflags::bitflags! {
/// Ways that a given test can be expected to fail.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct FailureReasons: u8 {
const BACKEND = 1 << 0;
const VENDOR = 1 << 1;
const ADAPTER = 1 << 2;
const DRIVER = 1 << 3;
const ALWAYS = 1 << 4;
}
}
// Builder pattern to make it easier to configure tests
impl TestParameters {
/// Set of common features that most internal tests require for readback.
pub fn test_features_limits(self) -> Self {
self.features(Features::MAPPABLE_PRIMARY_BUFFERS | Features::VERTEX_WRITABLE_STORAGE)
.limits(wgpu::Limits::downlevel_defaults())
}
/// Set the list of features this test requires.
pub fn features(mut self, features: Features) -> Self {
self.required_features |= features;
self
}
pub fn downlevel_flags(mut self, downlevel_flags: DownlevelFlags) -> Self {
self.required_downlevel_caps.flags |= downlevel_flags;
self
}
/// Set the limits needed for the test.
pub fn limits(mut self, limits: Limits) -> Self {
self.required_limits = limits;
self
}
/// Mark the test as expected to fail under the given conditions, but still run.
pub fn expect_fail(mut self, when: FailureCase) -> Self {
self.failures.push(when);
self
}
/// Mark the test as needing to be skipped under the given conditions.
pub fn skip(mut self, when: FailureCase) -> Self {
self.skips.push(when);
self
}
}
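A typical builder chain, mirroring the converted tests later in this patch (the feature and flag choices here are illustrative):

let params = TestParameters::default()
    .test_features_limits()
    .features(wgpu::Features::CLEAR_TEXTURE)
    .downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
    // WebGL cannot recover from the induced errors, so skip it entirely.
    .skip(FailureCase::webgl2())
    // Known ANGLE issue: run the test there but expect it to fail.
    .expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"));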
/// Information about a test, including whether it should be skipped.
pub struct TestInfo {
pub skip: bool,
pub expected_failure_reason: Option<FailureReasons>,
pub running_msg: String,
}
impl TestInfo {
pub(crate) fn from_configuration(test: &GpuTestConfiguration, adapter: &AdapterReport) -> Self {
// Figure out if we should skip the test and if so, why.
let mut skipped_reasons: ArrayVec<_, 4> = ArrayVec::new();
let missing_features = test.params.required_features - adapter.features;
if !missing_features.is_empty() {
skipped_reasons.push("Features");
}
if !test.params.required_limits.check_limits(&adapter.limits) {
skipped_reasons.push("Limits");
}
let missing_downlevel_flags =
test.params.required_downlevel_caps.flags - adapter.downlevel_caps.flags;
if !missing_downlevel_flags.is_empty() {
skipped_reasons.push("Downlevel Flags");
}
if test.params.required_downlevel_caps.shader_model > adapter.downlevel_caps.shader_model {
skipped_reasons.push("Shader Model");
}
// Produce a lower-case version of the adapter info, for comparison against
// `parameters.skips` and `parameters.failures`.
let adapter_lowercase_info = wgt::AdapterInfo {
name: adapter.info.name.to_lowercase(),
driver: adapter.info.driver.to_lowercase(),
..adapter.info.clone()
};
// Check if we should skip the test altogether.
let skip_reason = test
.params
.skips
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info));
let expected_failure_reason = test
.params
.failures
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info));
let mut skip = false;
let running_msg = if let Some(reasons) = skip_reason {
skip = true;
let names: ArrayVec<_, 4> = reasons.iter_names().map(|(name, _)| name).collect();
let names_text = names.join(" | ");
format!("Skipped Failure: {}", names_text)
} else if !skipped_reasons.is_empty() {
skip = true;
format!("Skipped: {}", skipped_reasons.join(" | "))
} else if let Some(failure_reasons) = expected_failure_reason {
if cfg!(target_arch = "wasm32") {
skip = true;
}
let names: ArrayVec<_, 4> = failure_reasons
.iter_names()
.map(|(name, _)| name)
.collect();
let names_text = names.join(" | ");
format!("Executed Failure: {}", names_text)
} else {
String::from("Executed")
};
Self {
skip,
expected_failure_reason,
running_msg,
}
}
}

52
tests/src/report.rs Normal file
View File

@ -0,0 +1,52 @@
use std::collections::HashMap;
use serde::Deserialize;
use wgpu::{
AdapterInfo, DownlevelCapabilities, Features, Limits, TextureFormat, TextureFormatFeatures,
};
/// Report specifying the capabilities of the GPUs on the system.
///
/// Must be kept in sync with the definition in wgpu-info/src/report.rs.
#[derive(Deserialize)]
pub(crate) struct GpuReport {
#[cfg_attr(target_arch = "wasm32", allow(unused))]
pub devices: Vec<AdapterReport>,
}
impl GpuReport {
#[cfg_attr(target_arch = "wasm32", allow(unused))]
pub(crate) fn from_json(file: &str) -> serde_json::Result<Self> {
serde_json::from_str(file)
}
}
/// A single report of the capabilities of an Adapter.
///
/// Must be kept in sync with the definition in wgpu-info/src/report.rs.
#[derive(Deserialize)]
pub(crate) struct AdapterReport {
pub info: AdapterInfo,
pub features: Features,
pub limits: Limits,
pub downlevel_caps: DownlevelCapabilities,
#[allow(unused)]
pub texture_format_features: HashMap<TextureFormat, TextureFormatFeatures>,
}
impl AdapterReport {
pub(crate) fn from_adapter(adapter: &wgpu::Adapter) -> Self {
let info = adapter.get_info();
let features = adapter.features();
let limits = adapter.limits();
let downlevel_caps = adapter.get_downlevel_capabilities();
Self {
info,
features,
limits,
downlevel_caps,
texture_format_features: HashMap::new(), // todo
}
}
}
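For orientation, the `.gpuconfig` consumed by the native harness is a JSON document of this shape, with one entry per adapter. The concrete field contents follow the serde encoding of the wgpu types above and are produced by wgpu-info, so this outline is illustrative only:

{
  "devices": [
    {
      "info": { /* AdapterInfo */ },
      "features": /* Features */,
      "limits": { /* Limits */ },
      "downlevel_caps": { /* DownlevelCapabilities */ },
      "texture_format_features": { /* TextureFormat -> TextureFormatFeatures */ }
    }
  ]
}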

126
tests/src/run.rs Normal file
View File

@ -0,0 +1,126 @@
use std::panic::AssertUnwindSafe;
use futures_lite::FutureExt;
use wgpu::{Adapter, Device, Queue};
use crate::{
init::{initialize_adapter, initialize_device},
isolation,
params::TestInfo,
report::AdapterReport,
GpuTestConfiguration,
};
/// Parameters and resources handed to the test function.
pub struct TestingContext {
pub adapter: Adapter,
pub adapter_info: wgpu::AdapterInfo,
pub adapter_downlevel_capabilities: wgpu::DownlevelCapabilities,
pub device: Device,
pub device_features: wgpu::Features,
pub device_limits: wgpu::Limits,
pub queue: Queue,
}
/// Execute the given test configuration with the given adapter index.
///
/// If `test_info` is provided, it is used to decide whether to skip the test.
/// If it is not, the test info is derived from the adapter itself.
pub async fn execute_test(
config: GpuTestConfiguration,
test_info: Option<TestInfo>,
adapter_index: usize,
) {
// If we get information externally, skip based on that information before we do anything.
if let Some(TestInfo { skip: true, .. }) = test_info {
return;
}
// We don't actually care if it fails
#[cfg(not(target_arch = "wasm32"))]
let _ = env_logger::try_init();
#[cfg(target_arch = "wasm32")]
let _ = console_log::init_with_level(log::Level::Info);
let _test_guard = isolation::OneTestPerProcessGuard::new();
let (adapter, _surface_guard) = initialize_adapter(adapter_index).await;
let adapter_info = adapter.get_info();
let adapter_downlevel_capabilities = adapter.get_downlevel_capabilities();
let test_info = test_info.unwrap_or_else(|| {
let adapter_report = AdapterReport::from_adapter(&adapter);
TestInfo::from_configuration(&config, &adapter_report)
});
// We are now guaranteed to have information about this test, so skip if we need to.
if test_info.skip {
log::info!("TEST RESULT: SKIPPED");
return;
}
let (device, queue) = pollster::block_on(initialize_device(
&adapter,
config.params.required_features,
config.params.required_limits.clone(),
));
let context = TestingContext {
adapter,
adapter_info,
adapter_downlevel_capabilities,
device,
device_features: config.params.required_features,
device_limits: config.params.required_limits.clone(),
queue,
};
// Run the test, and catch panics (possibly due to failed assertions).
let panicked = AssertUnwindSafe((config.test.as_ref().unwrap())(context))
.catch_unwind()
.await
.is_err();
// Check whether any validation errors were reported during the test run.
cfg_if::cfg_if!(
if #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] {
let canary_set = wgpu::hal::VALIDATION_CANARY.get_and_reset();
} else if #[cfg(all(target_arch = "wasm32", feature = "webgl"))] {
let canary_set = _surface_guard.unwrap().check_for_unreported_errors();
} else {
// TODO: WebGPU
let canary_set = false;
}
);
// Summarize reasons for actual failure, if any.
let failure_cause = match (panicked, canary_set) {
(true, true) => Some("PANIC AND VALIDATION ERROR"),
(true, false) => Some("PANIC"),
(false, true) => Some("VALIDATION ERROR"),
(false, false) => None,
};
// Compare actual results against expectations.
match (failure_cause, test_info.expected_failure_reason) {
// The test passed, as expected.
(None, None) => log::info!("TEST RESULT: PASSED"),
// The test failed unexpectedly.
(Some(cause), None) => {
panic!("UNEXPECTED TEST FAILURE DUE TO {cause}")
}
// The test passed unexpectedly.
(None, Some(reason)) => {
panic!("UNEXPECTED TEST PASS: {reason:?}");
}
// The test failed, as expected.
(Some(cause), Some(reason_expected)) => {
log::info!(
"TEST RESULT: EXPECTED FAILURE DUE TO {} (expected because of {:?})",
cause,
reason_expected
);
}
}
}
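Asynchronous tests feed into the same path; `run_async` (used by the converted `IMAGE_BITMAP_IMPORT` test below) takes a closure returning a future, roughly like this sketch:

#[gpu_test]
static EXAMPLE_ASYNC: GpuTestConfiguration =
    GpuTestConfiguration::new().run_async(|ctx| async move {
        // Anything awaitable can run here; the harness drives the future to
        // completion and then applies the same pass/fail bookkeeping as above.
        let _buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: 16,
            usage: wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
    });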

View File

@ -2,8 +2,7 @@
use std::borrow::Cow;
use wasm_bindgen_test::*;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};
const SHADER_SRC: &str = "
@group(0) @binding(0) var tex: texture_storage_2d<bgra8unorm, write>;
@ -14,17 +13,17 @@ fn main(@builtin(workgroup_id) wgid: vec3<u32>) {
}
";
#[test]
#[wasm_bindgen_test]
fn bgra8unorm_storage() {
let parameters = TestParameters::default()
#[gpu_test]
static BGRA8_UNORM_STORAGE: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.limits(wgpu::Limits {
max_storage_textures_per_shader_stage: 1,
..Default::default()
})
.features(wgpu::Features::BGRA8UNORM_STORAGE);
initialize_test(parameters, |ctx| {
.features(wgpu::Features::BGRA8UNORM_STORAGE),
)
.run_sync(|ctx| {
let device = &ctx.device;
let texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,
@ -155,4 +154,3 @@ fn bgra8unorm_storage() {
readback_buffer.unmap();
});
}

View File

@ -1,8 +1,8 @@
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration};
#[test]
fn bind_group_layout_deduplication() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static BIND_GROUP_LAYOUT_DEDUPLICATION: GpuTestConfiguration = GpuTestConfiguration::new()
.run_sync(|ctx| {
let entries_1 = &[];
let entries_2 = &[wgpu::BindGroupLayoutEntry {
@ -135,8 +135,7 @@ fn bind_group_layout_deduplication() {
}
ctx.queue.submit(Some(encoder.finish()));
})
}
});
const SHADER_SRC: &str = "
@vertex fn vs_main() -> @builtin(position) vec4<f32> { return vec4<f32>(1.0); }

View File

@ -1,4 +1,4 @@
use wgpu_test::{initialize_test, TestParameters, TestingContext};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters, TestingContext};
fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) {
let r = wgpu::BufferUsages::MAP_READ;
@ -80,20 +80,16 @@ fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str)
ctx.device.poll(wgpu::MaintainBase::Wait);
}
#[test]
#[ignore]
fn empty_buffer() {
// TODO: Currently wgpu does not accept empty buffer slices, which
// is what test is about.
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static EMPTY_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::always()))
.run_sync(|ctx| {
test_empty_buffer_range(&ctx, 2048, "regular buffer");
test_empty_buffer_range(&ctx, 0, "zero-sized buffer");
})
}
});
#[test]
fn test_map_offset() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static MAP_OFFSET: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
// This test writes 16 bytes at the beginning of a buffer mapped with
// an offset of 32 bytes. Then the buffer is copied into another buffer that
// is read back and we check that the written bytes are correctly placed at
@ -158,4 +154,3 @@ fn test_map_offset() {
assert_eq!(*byte, 0);
}
});
}

View File

@ -1,35 +1,36 @@
//! Tests for buffer copy validation.
use wasm_bindgen_test::wasm_bindgen_test;
use wgt::BufferAddress;
use wgpu_test::{fail_if, initialize_test, TestParameters};
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration};
#[test]
#[wasm_bindgen_test]
fn copy_alignment() {
fn try_copy(offset: BufferAddress, size: BufferAddress, should_fail: bool) {
initialize_test(TestParameters::default(), |ctx| {
fn try_copy(
ctx: &wgpu_test::TestingContext,
offset: BufferAddress,
size: BufferAddress,
should_fail: bool,
) {
let buffer = ctx.device.create_buffer(&BUFFER_DESCRIPTOR);
let data = vec![255; size as usize];
fail_if(&ctx.device, should_fail, || {
ctx.queue.write_buffer(&buffer, offset, &data)
});
}
#[gpu_test]
static COPY_ALIGNMENT: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
try_copy(&ctx, 0, 0, false);
try_copy(&ctx, 4, 16 + 1, true);
try_copy(&ctx, 64, 20 + 2, true);
try_copy(&ctx, 256, 44 + 3, true);
try_copy(&ctx, 1024, 8 + 4, false);
try_copy(&ctx, 0, 4, false);
try_copy(&ctx, 4 + 1, 8, true);
try_copy(&ctx, 64 + 2, 12, true);
try_copy(&ctx, 256 + 3, 16, true);
try_copy(&ctx, 1024 + 4, 4, false);
});
}
try_copy(0, 0, false);
try_copy(4, 16 + 1, true);
try_copy(64, 20 + 2, true);
try_copy(256, 44 + 3, true);
try_copy(1024, 8 + 4, false);
try_copy(0, 4, false);
try_copy(4 + 1, 8, true);
try_copy(64 + 2, 12, true);
try_copy(256 + 3, 16, true);
try_copy(1024 + 4, 4, false);
}
const BUFFER_SIZE: BufferAddress = 1234;

View File

@ -1,25 +1,35 @@
//! Tests for buffer usages validation.
use wasm_bindgen_test::*;
use wgpu_test::{fail_if, initialize_test, TestParameters};
use wgpu::BufferUsages as Bu;
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration, TestParameters};
use wgt::BufferAddress;
const BUFFER_SIZE: BufferAddress = 1234;
#[test]
#[wasm_bindgen_test]
fn buffer_usage() {
fn try_create(enable_mappable_primary_buffers: bool, usages: &[(bool, &[wgpu::BufferUsages])]) {
let mut parameters = TestParameters::default();
if enable_mappable_primary_buffers {
parameters = parameters.features(wgpu::Features::MAPPABLE_PRIMARY_BUFFERS);
}
const ALWAYS_VALID: &[Bu; 4] = &[
Bu::MAP_READ,
Bu::MAP_WRITE,
Bu::MAP_READ.union(Bu::COPY_DST),
Bu::MAP_WRITE.union(Bu::COPY_SRC),
];
// MAP_READ can only be paired with COPY_DST and MAP_WRITE can only be paired with COPY_SRC
// (unless Features::MAPPABLE_PRIMARY_BUFFERS is enabled).
const NEEDS_MAPPABLE_PRIMARY_BUFFERS: &[Bu; 7] = &[
Bu::MAP_READ.union(Bu::COPY_DST.union(Bu::COPY_SRC)),
Bu::MAP_WRITE.union(Bu::COPY_SRC.union(Bu::COPY_DST)),
Bu::MAP_READ.union(Bu::MAP_WRITE),
Bu::MAP_WRITE.union(Bu::MAP_READ),
Bu::MAP_READ.union(Bu::COPY_DST.union(Bu::STORAGE)),
Bu::MAP_WRITE.union(Bu::COPY_SRC.union(Bu::STORAGE)),
Bu::all(),
];
const INVALID_BITS: Bu = Bu::from_bits_retain(0b1111111111111);
const ALWAYS_FAIL: &[Bu; 2] = &[Bu::empty(), INVALID_BITS];
initialize_test(parameters, |ctx| {
for (expect_validation_error, usage) in
usages.iter().flat_map(|&(expect_error, usages)| {
usages.iter().copied().map(move |u| (expect_error, u))
})
fn try_create(ctx: wgpu_test::TestingContext, usages: &[(bool, &[wgpu::BufferUsages])]) {
for (expect_validation_error, usage) in usages
.iter()
.flat_map(|&(expect_error, usages)| usages.iter().copied().map(move |u| (expect_error, u)))
{
fail_if(&ctx.device, expect_validation_error, || {
let _buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
@ -30,45 +40,30 @@ fn buffer_usage() {
});
});
}
}
#[gpu_test]
static BUFFER_USAGE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
try_create(
ctx,
&[
(false, ALWAYS_VALID),
(true, NEEDS_MAPPABLE_PRIMARY_BUFFERS),
(true, ALWAYS_FAIL),
],
);
});
}
use wgpu::BufferUsages as Bu;
let always_valid = &[
Bu::MAP_READ,
Bu::MAP_WRITE,
Bu::MAP_READ | Bu::COPY_DST,
Bu::MAP_WRITE | Bu::COPY_SRC,
];
// MAP_READ can only be paired with COPY_DST and MAP_WRITE can only be paired with COPY_SRC
// (unless Features::MAPPABLE_PRIMARY_BUFFERS is enabled).
let needs_mappable_primary_buffers = &[
Bu::MAP_READ | Bu::COPY_DST | Bu::COPY_SRC,
Bu::MAP_WRITE | Bu::COPY_SRC | Bu::COPY_DST,
Bu::MAP_READ | Bu::MAP_WRITE,
Bu::MAP_WRITE | Bu::MAP_READ,
Bu::MAP_READ | Bu::COPY_DST | Bu::STORAGE,
Bu::MAP_WRITE | Bu::COPY_SRC | Bu::STORAGE,
Bu::all(),
];
let invalid_bits = Bu::from_bits_retain(0b1111111111111);
let always_fail = &[Bu::empty(), invalid_bits];
#[gpu_test]
static BUFFER_USAGE_MAPPABLE_PRIMARY_BUFFERS: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().features(wgpu::Features::MAPPABLE_PRIMARY_BUFFERS))
.run_sync(|ctx| {
try_create(
false,
ctx,
&[
(false, always_valid),
(true, needs_mappable_primary_buffers),
(true, always_fail),
(false, ALWAYS_VALID),
(false, NEEDS_MAPPABLE_PRIMARY_BUFFERS),
(true, ALWAYS_FAIL),
],
);
try_create(
true, // enable Features::MAPPABLE_PRIMARY_BUFFERS
&[
(false, always_valid),
(false, needs_mappable_primary_buffers),
(true, always_fail),
],
);
}
});

View File

@ -1,6 +1,6 @@
use wasm_bindgen_test::*;
use wgpu_test::{
image::ReadbackBuffers, initialize_test, FailureCase, TestParameters, TestingContext,
gpu_test, image::ReadbackBuffers, FailureCase, GpuTestConfiguration, TestParameters,
TestingContext,
};
static TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT: &[wgpu::TextureFormat] = &[
@ -326,81 +326,71 @@ fn clear_texture_tests(ctx: &TestingContext, formats: &[wgpu::TextureFormat]) {
}
}
#[test]
#[wasm_bindgen_test]
fn clear_texture_uncompressed_gles_compat() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_UNCOMPRESSED_GLES: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.skip(FailureCase::webgl2())
.features(wgpu::Features::CLEAR_TEXTURE),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT);
},
.features(wgpu::Features::CLEAR_TEXTURE)
.skip(FailureCase::webgl2()),
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT);
});
#[test]
#[wasm_bindgen_test]
fn clear_texture_uncompressed() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_UNCOMPRESSED: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.skip(FailureCase::webgl2())
.expect_fail(FailureCase::backend(wgpu::Backends::GL))
.features(wgpu::Features::CLEAR_TEXTURE),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED);
});
#[test]
#[wasm_bindgen_test]
fn clear_texture_depth() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_DEPTH: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.skip(FailureCase::webgl2())
.downlevel_flags(
wgpu::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES
| wgpu::DownlevelFlags::COMPUTE_SHADERS,
)
.skip(FailureCase::webgl2())
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::CLEAR_TEXTURE),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_DEPTH);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_DEPTH);
});
#[test]
#[wasm_bindgen_test]
fn clear_texture_d32_s8() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_DEPTH32_STENCIL8: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::DEPTH32FLOAT_STENCIL8),
|ctx| {
clear_texture_tests(&ctx, &[wgpu::TextureFormat::Depth32FloatStencil8]);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, &[wgpu::TextureFormat::Depth32FloatStencil8]);
});
#[test]
fn clear_texture_bc() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_COMPRESSED_BCN: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::TEXTURE_COMPRESSION_BC)
// https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_BC);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_BC);
});
#[test]
fn clear_texture_astc() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_COMPRESSED_ASTC: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::TEXTURE_COMPRESSION_ASTC)
.limits(wgpu::Limits {
@ -411,23 +401,21 @@ fn clear_texture_astc() {
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ASTC);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ASTC);
});
#[test]
fn clear_texture_etc2() {
initialize_test(
#[gpu_test]
static CLEAR_TEXTURE_COMPRESSED_ETC2: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::TEXTURE_COMPRESSION_ETC2)
// https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ETC2);
},
)
}
.run_sync(|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ETC2);
});

1
tests/tests/cpu.rs Normal file
View File

@ -0,0 +1 @@
mod example_wgsl;

View File

@ -5,18 +5,17 @@
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
#[wasm_bindgen_test::wasm_bindgen_test]
fn canvas_get_context_returned_null() {
// Not using initialize_test() because that goes straight to creating the canvas for us.
// Not using the normal testing infrastructure because that goes straight to creating the canvas for us.
let instance = wgpu_test::initialize_instance();
// Create canvas and cleanup on drop
let canvas_g = wgpu_test::SurfaceGuard {
canvas: wgpu_test::create_html_canvas(),
};
// Create canvas
let canvas = wgpu_test::initialize_html_canvas();
// Using a context id that is not "webgl2" or "webgpu" will render the canvas unusable by wgpu.
canvas_g.canvas.get_context("2d").unwrap();
canvas.get_context("2d").unwrap();
#[allow(clippy::redundant_clone)] // false positive — can't and shouldn't move out.
let error = instance
.create_surface_from_canvas(canvas_g.canvas.clone())
.create_surface_from_canvas(canvas.clone())
.unwrap_err();
assert!(

View File

@ -1,22 +1,10 @@
use wasm_bindgen_test::*;
use wgpu_test::{fail, gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
use wgpu_test::{fail, initialize_test, FailureCase, TestParameters};
#[test]
#[wasm_bindgen_test]
fn device_initialization() {
initialize_test(TestParameters::default(), |_ctx| {
// intentionally empty
})
}
#[test]
fn device_mismatch() {
initialize_test(
// https://github.com/gfx-rs/wgpu/issues/3927
TestParameters::default().expect_fail(FailureCase::always()),
|ctx| {
// Create a bind group using a layout from another device. This should be a validation
#[gpu_test]
static CROSS_DEVICE_BIND_GROUP_USAGE: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::always()))
.run_sync(|ctx| {
// Create a bind group using a layout from another device. This should be a validation
// error but currently crashes.
let (device2, _) =
pollster::block_on(ctx.adapter.request_device(&Default::default(), None)).unwrap();
@ -36,9 +24,7 @@ fn device_mismatch() {
}
ctx.device.poll(wgpu::Maintain::Poll);
},
);
}
});
#[cfg(not(all(target_arch = "wasm32", not(target_os = "emscripten"))))]
#[test]
@ -53,7 +39,7 @@ fn request_device_error_on_native() {
async fn request_device_error_message() {
// Not using initialize_test() because that doesn't let us catch the error
// nor .await anything
let (adapter, _surface_guard) = wgpu_test::initialize_adapter();
let (adapter, _surface_guard) = wgpu_test::initialize_adapter(0).await;
let device_error = adapter
.request_device(
@ -91,8 +77,6 @@ async fn request_device_error_message() {
assert!(device_error.contains(expected), "{device_error}");
}
#[test]
fn device_destroy_then_more() {
// This is a test of device behavior after device.destroy. Specifically, all operations
// should trigger errors since the device is lost.
//
@ -103,11 +87,14 @@ fn device_destroy_then_more() {
// open even after they return an error. For now, this test is skipped on DX12.
//
// The DX12 issue may be related to https://github.com/gfx-rs/wgpu/issues/3193.
initialize_test(
#[gpu_test]
static DEVICE_DESTROY_THEN_MORE: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE)
.skip(FailureCase::backend(wgpu::Backends::DX12)),
|ctx| {
.expect_fail(FailureCase::backend(wgpu::Backends::DX12)),
)
.run_sync(|ctx| {
// Create some resources on the device that we will attempt to use *after* losing
// the device.
@ -461,6 +448,4 @@ fn device_destroy_then_more() {
fail(&ctx.device, || {
buffer_for_unmap.unmap();
});
},
)
}
});

View File

@ -1,20 +1,13 @@
use wasm_bindgen_test::*;
use wgpu::RenderPassDescriptor;
use wgpu_test::{fail, initialize_test, FailureCase, TestParameters};
use wgpu_test::{fail, gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
#[test]
#[wasm_bindgen_test]
fn drop_encoder() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static DROP_ENCODER: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let encoder = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
drop(encoder);
})
}
});
#[test]
fn drop_encoder_after_error() {
// This test crashes on DX12 with the exception:
//
// ID3D12CommandAllocator::Reset: The command allocator cannot be reset because a
@ -22,9 +15,10 @@ fn drop_encoder_after_error() {
// #543: COMMAND_ALLOCATOR_CANNOT_RESET]
//
// For now, we mark the test as failing on DX12.
let parameters =
TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12));
initialize_test(parameters, |ctx| {
#[gpu_test]
static DROP_ENCODER_AFTER_ERROR: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12)))
.run_sync(|ctx| {
let mut encoder = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
@ -45,7 +39,7 @@ fn drop_encoder_after_error() {
});
let target_view = target_tex.create_view(&wgpu::TextureViewDescriptor::default());
let mut renderpass = encoder.begin_render_pass(&RenderPassDescriptor {
let mut renderpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("renderpass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
ops: wgpu::Operations::default(),
@ -67,5 +61,4 @@ fn drop_encoder_after_error() {
// a CommandEncoder which errored out when processing a command.
// The encoder is still open!
drop(encoder);
})
}
});

View File

@ -3,7 +3,7 @@ use std::{fs, path::PathBuf};
/// Runs through all example shaders and ensures they are valid wgsl.
#[test]
fn parse_example_wgsl() {
pub fn parse_example_wgsl() {
let read_dir = match PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("examples")
.read_dir()

View File

@ -1,12 +1,12 @@
#![cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
use wasm_bindgen::JsCast;
use wasm_bindgen_test::*;
use wgpu::ExternalImageSource;
use wgpu_test::{fail_if, initialize_test, TestParameters};
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration};
#[wasm_bindgen_test]
async fn image_bitmap_import() {
#[gpu_test]
static IMAGE_BITMAP_IMPORT: GpuTestConfiguration =
GpuTestConfiguration::new().run_async(|ctx| async move {
let image_encoded = include_bytes!("3x3_colors.png");
// Create an array-of-arrays for Blob's constructor
@ -132,7 +132,6 @@ async fn image_bitmap_import() {
TestCase::SecondSliceCopy,
];
initialize_test(TestParameters::default(), |ctx| {
for source in sources {
for case in cases {
// Copy the data, so we can modify it for tests
@ -346,5 +345,4 @@ async fn image_bitmap_import() {
}
}
}
})
}
});

View File

@ -1,5 +1,3 @@
use wasm_bindgen_test::wasm_bindgen_test_configure;
mod regression {
mod issue_3457;
mod issue_4024;
@ -15,7 +13,6 @@ mod clear_texture;
mod create_surface_error;
mod device;
mod encoder;
mod example_wgsl;
mod external_texture;
mod instance;
mod occlusion_query;
@ -36,4 +33,4 @@ mod vertex_indices;
mod write_texture;
mod zero_init_texture_after_discard;
wasm_bindgen_test_configure!(run_in_browser);
wgpu_test::gpu_test_main!();

View File

@ -1,38 +1,4 @@
use wasm_bindgen_test::*;
use wgpu_test::{gpu_test, GpuTestConfiguration};
#[test]
#[wasm_bindgen_test]
fn initialize() {
let _ = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all),
flags: wgpu::InstanceFlags::debugging().with_env(),
dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(),
gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(),
});
}
fn request_adapter_inner(power: wgt::PowerPreference) {
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all),
flags: wgpu::InstanceFlags::debugging().with_env(),
dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(),
gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(),
});
let _adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: power,
force_fallback_adapter: false,
compatible_surface: None,
}))
.unwrap();
}
#[test]
fn request_adapter_low_power() {
request_adapter_inner(wgt::PowerPreference::LowPower);
}
#[test]
fn request_adapter_high_power() {
request_adapter_inner(wgt::PowerPreference::HighPerformance);
}
#[gpu_test]
static INITIALIZE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|_ctx| {});

View File

@ -1,9 +1,10 @@
use std::borrow::Cow;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
#[test]
fn occlusion_query() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static OCCLUSION_QUERY: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::webgl2()))
.run_sync(|ctx| {
// Create depth texture
let depth_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("Depth texture"),
@ -124,5 +125,4 @@ fn occlusion_query() {
assert_ne!(query_data[0], 0);
assert_ne!(query_data[1], 0);
assert_eq!(query_data[2], 0);
})
}
});

View File

@ -1,12 +1,10 @@
use std::{borrow::Cow, num::NonZeroU32};
use wasm_bindgen_test::*;
use wgpu_test::{image::ReadbackBuffers, initialize_test, TestParameters};
use wgpu_test::{gpu_test, image::ReadbackBuffers, GpuTestConfiguration, TestParameters};
#[test]
#[wasm_bindgen_test]
fn partially_bounded_array() {
initialize_test(
#[gpu_test]
static PARTIALLY_BOUNDED_ARRAY: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(
wgpu::Features::TEXTURE_BINDING_ARRAY
@ -15,7 +13,8 @@ fn partially_bounded_array() {
| wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
)
.limits(wgpu::Limits::downlevel_defaults()),
|ctx| {
)
.run_sync(|ctx| {
let device = &ctx.device;
let texture_extent = wgpu::Extent3d {
@ -39,8 +38,7 @@ fn partially_bounded_array() {
let texture_view = storage_texture.create_view(&wgpu::TextureViewDescriptor::default());
let bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("bind group layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
@ -66,8 +64,7 @@ fn partially_bounded_array() {
push_constant_ranges: &[],
});
let compute_pipeline =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
module: &cs_module,
@ -105,6 +102,4 @@ fn partially_bounded_array() {
.check_buffer_contents(device, bytemuck::bytes_of(&[4.0f32, 3.0, 2.0, 1.0])),
"texture storage values are incorrect!"
);
},
)
}
});

View File

@ -1,17 +1,16 @@
use wasm_bindgen_test::*;
use wgpu_test::{fail, initialize_test, FailureCase, TestParameters};
use wgpu_test::{fail, gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
#[test]
#[wasm_bindgen_test]
fn pipeline_default_layout_bad_module() {
// Create an invalid shader and a compute pipeline that uses it
// with a default bindgroup layout, and then ask for that layout.
// Validation should fail, but wgpu should not panic.
let parameters = TestParameters::default()
.skip(FailureCase::webgl2())
#[gpu_test]
static PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
// https://github.com/gfx-rs/wgpu/issues/4167
.expect_fail(FailureCase::always());
initialize_test(parameters, |ctx| {
.expect_fail(FailureCase::always()),
)
.run_sync(|ctx| {
ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
fail(&ctx.device, || {
@ -34,4 +33,3 @@ fn pipeline_default_layout_bad_module() {
pipeline.get_bind_group_layout(0);
});
});
}

View File

@ -1,15 +1,23 @@
use std::num::NonZeroU64;
use wgpu::{
BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry,
BindingResource, BindingType, BufferBindingType, BufferDescriptor, BufferUsages, CommandBuffer,
CommandEncoderDescriptor, ComputePassDescriptor, Maintain, ShaderStages,
BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor,
BindGroupLayoutEntry, BindingResource, BindingType, Buffer, BufferBindingType,
BufferDescriptor, BufferUsages, CommandBuffer, CommandEncoderDescriptor, ComputePassDescriptor,
Maintain, ShaderStages,
};
use wasm_bindgen_test::*;
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestingContext};
fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer {
struct DummyWorkData {
_buffer: Buffer,
_bgl: BindGroupLayout,
_bg: BindGroup,
cmd_buf: CommandBuffer,
}
impl DummyWorkData {
fn new(ctx: &TestingContext) -> Self {
let buffer = ctx.device.create_buffer(&BufferDescriptor {
label: None,
size: 16,
@ -50,81 +58,58 @@ fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer {
cpass.set_bind_group(0, &bind_group, &[]);
drop(cpass);
cmd_buf.finish()
Self {
_buffer: buffer,
_bgl: bind_group_layout,
_bg: bind_group,
cmd_buf: cmd_buf.finish(),
}
}
}
#[test]
#[wasm_bindgen_test]
fn wait() {
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
#[gpu_test]
static WAIT: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let data = DummyWorkData::new(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.queue.submit(Some(data.cmd_buf));
ctx.device.poll(Maintain::Wait);
},
)
}
});
#[test]
#[wasm_bindgen_test]
fn double_wait() {
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
#[gpu_test]
static DOUBLE_WAIT: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let data = DummyWorkData::new(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.queue.submit(Some(data.cmd_buf));
ctx.device.poll(Maintain::Wait);
ctx.device.poll(Maintain::Wait);
},
)
}
});
#[test]
#[wasm_bindgen_test]
fn wait_on_submission() {
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
#[gpu_test]
static WAIT_ON_SUBMISSION: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let data = DummyWorkData::new(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
let index = ctx.queue.submit(Some(data.cmd_buf));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
},
)
}
});
#[test]
#[wasm_bindgen_test]
fn double_wait_on_submission() {
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
#[gpu_test]
static DOUBLE_WAIT_ON_SUBMISSION: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
let data = DummyWorkData::new(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
let index = ctx.queue.submit(Some(data.cmd_buf));
ctx.device
.poll(Maintain::WaitForSubmissionIndex(index.clone()));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
},
)
}
});
#[test]
#[wasm_bindgen_test]
fn wait_out_of_order() {
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf1 = generate_dummy_work(&ctx);
let cmd_buf2 = generate_dummy_work(&ctx);
#[gpu_test]
static WAIT_OUT_OF_ORDER: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let data1 = DummyWorkData::new(&ctx);
let data2 = DummyWorkData::new(&ctx);
let index1 = ctx.queue.submit(Some(cmd_buf1));
let index2 = ctx.queue.submit(Some(cmd_buf2));
let index1 = ctx.queue.submit(Some(data1.cmd_buf));
let index2 = ctx.queue.submit(Some(data2.cmd_buf));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index2));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index1));
},
)
}
});

View File

@ -1,11 +1,13 @@
use wgpu_test::{initialize_test, FailureCase, TestParameters};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
#[test]
fn drop_failed_timestamp_query_set() {
let parameters = TestParameters::default()
#[gpu_test]
static DROP_FAILED_TIMESTAMP_QUERY_SET: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
// https://github.com/gfx-rs/wgpu/issues/4139
.expect_fail(FailureCase::always());
initialize_test(parameters, |ctx| {
.expect_fail(FailureCase::always()),
)
.run_sync(|ctx| {
// Enter an error scope, so the validation catch-all doesn't
// report the error too early.
ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
@ -23,4 +25,3 @@ fn drop_failed_timestamp_query_set() {
assert!(pollster::block_on(ctx.device.pop_error_scope()).is_some());
});
}


@ -1,12 +1,10 @@
//! Tests for buffer copy validation.
use wasm_bindgen_test::*;
use wgpu_test::{fail, initialize_test, TestParameters};
use wgpu_test::{fail, gpu_test, GpuTestConfiguration};
#[test]
#[wasm_bindgen_test]
fn queue_write_texture_overflow() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static QUEUE_WRITE_TEXTURE_OVERFLOW: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
let texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: wgpu::Extent3d {
@ -47,4 +45,3 @@ fn queue_write_texture_overflow() {
);
});
});
}


@ -1,6 +1,5 @@
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration};
use wasm_bindgen_test::wasm_bindgen_test;
use wgpu::*;
/// The core issue here was that we weren't properly disabling vertex attributes on GL
@ -15,10 +14,9 @@ use wgpu::*;
///
/// We use non-consecutive vertex attribute locations (0 and 5) in order to also test
/// that we unset the correct locations (see PR #3706).
#[wasm_bindgen_test]
#[test]
fn pass_reset_vertex_buffer() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static PASS_RESET_VERTEX_BUFFER: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
let module = ctx
.device
.create_shader_module(include_wgsl!("issue_3457.wgsl"));
@ -190,5 +188,4 @@ fn pass_reset_vertex_buffer() {
drop(single_rpass);
ctx.queue.submit(Some(encoder2.finish()));
})
}
});


@ -1,9 +1,8 @@
use std::sync::Arc;
use parking_lot::Mutex;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration};
use wasm_bindgen_test::wasm_bindgen_test;
use wgpu::*;
/// The WebGPU specification has very specific requirements about the ordering of map_async
@ -13,10 +12,9 @@ use wgpu::*;
///
/// We previously immediately invoked on_submitted_work_done callbacks if there was no active submission
/// to add them to. This is incorrect, as we do not immediately invoke map_async callbacks.
#[wasm_bindgen_test]
#[test]
fn queue_submitted_callback_ordering() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static QUEUE_SUBMITTED_CALLBACK_ORDERING: GpuTestConfiguration = GpuTestConfiguration::new()
.run_sync(|ctx| {
// Create a mappable buffer
let buffer = ctx.device.create_buffer(&BufferDescriptor {
label: Some("mappable buffer"),
@ -87,5 +85,4 @@ fn queue_submitted_callback_ordering() {
assert_eq!(ordering.value_read_map_async, Some(0));
// The queue submitted work done callback was invoked second.
assert_eq!(ordering.value_read_queue_submitted, Some(1));
})
}
});


@ -1,7 +1,6 @@
use std::{num::NonZeroU64, ops::Range};
use wasm_bindgen_test::wasm_bindgen_test;
use wgpu_test::{initialize_test, TestParameters, TestingContext};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestingContext};
fn fill_test(ctx: &TestingContext, range: Range<u64>, size: u64) -> bool {
let gpu_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
@ -88,10 +87,9 @@ fn fill_test(ctx: &TestingContext, range: Range<u64>, size: u64) -> bool {
/// certain conditions. See https://github.com/gfx-rs/wgpu/issues/4122 for more information.
///
/// This test will fail on nvidia if the bug is not properly worked around.
#[wasm_bindgen_test]
#[test]
fn clear_buffer_bug() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static CLEAR_BUFFER_RANGE_RESPECTED: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
// This hits most of the cases in nvidia's clear buffer bug
let mut succeeded = true;
for power in 4..14 {
@ -107,4 +105,3 @@ fn clear_buffer_bug() {
}
assert!(succeeded);
});
}


@ -1,11 +1,7 @@
use wasm_bindgen_test::*;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration};
/// Buffer's size and usage can be read back.
#[test]
#[wasm_bindgen_test]
fn buffer_size_and_usage() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static BUFFER_SIZE_AND_USAGE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: 1234,
@ -18,5 +14,4 @@ fn buffer_size_and_usage() {
buffer.usage(),
wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST
);
})
}
});


@ -1,12 +1,9 @@
use wasm_bindgen_test::*;
use wgpu_test::{fail, initialize_test, valid, TestParameters};
use wgpu_test::{fail, gpu_test, valid, GpuTestConfiguration};
#[test]
#[wasm_bindgen_test]
fn bad_buffer() {
#[gpu_test]
static BAD_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
// Create a buffer with bad parameters and call a few methods.
// Validation should fail but there should be no panic.
initialize_test(TestParameters::default(), |ctx| {
let buffer = fail(&ctx.device, || {
ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: None,
@ -23,14 +20,9 @@ fn bad_buffer() {
valid(&ctx.device, || buffer.destroy());
valid(&ctx.device, || buffer.destroy());
});
}
#[test]
#[wasm_bindgen_test]
fn bad_texture() {
// Create a texture with bad parameters and call a few methods.
// Validation should fail but there should be no panic.
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static BAD_TEXTURE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let texture = fail(&ctx.device, || {
ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,
@ -54,4 +46,3 @@ fn bad_texture() {
valid(&ctx.device, || texture.destroy());
valid(&ctx.device, || texture.destroy());
});
}


@ -1,4 +1,4 @@
use wgpu_test::{image, initialize_test, TestParameters, TestingContext};
use wgpu_test::{gpu_test, image, GpuTestConfiguration, TestingContext};
struct Rect {
x: u32,
@ -97,9 +97,8 @@ fn scissor_test_impl(ctx: &TestingContext, scissor_rect: Rect, expected_data: [u
assert!(readback_buffer.check_buffer_contents(&ctx.device, &expected_data));
}
#[test]
fn scissor_test_full_rect() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static SCISSOR_TEST_FULL_RECT: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
scissor_test_impl(
&ctx,
Rect {
@ -110,12 +109,11 @@ fn scissor_test_full_rect() {
},
[255; BUFFER_SIZE],
);
})
}
});
#[test]
fn scissor_test_empty_rect() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static SCISSOR_TEST_EMPTY_RECT: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
scissor_test_impl(
&ctx,
Rect {
@ -126,12 +124,11 @@ fn scissor_test_empty_rect() {
},
[0; BUFFER_SIZE],
);
})
}
});
#[test]
fn scissor_test_empty_rect_with_offset() {
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static SCISSOR_TEST_EMPTY_RECT_WITH_OFFSET: GpuTestConfiguration = GpuTestConfiguration::new()
.run_sync(|ctx| {
scissor_test_impl(
&ctx,
Rect {
@ -142,15 +139,15 @@ fn scissor_test_empty_rect_with_offset() {
},
[0; BUFFER_SIZE],
);
})
}
});
#[test]
fn scissor_test_custom_rect() {
#[gpu_test]
static SCISSOR_TEST_CUSTOM_RECT: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
let mut expected_result = [0; BUFFER_SIZE];
expected_result[((3 * BUFFER_SIZE) / 4)..][..BUFFER_SIZE / 4]
.copy_from_slice(&[255; BUFFER_SIZE / 4]);
initialize_test(TestParameters::default(), |ctx| {
scissor_test_impl(
&ctx,
Rect {
@ -161,5 +158,4 @@ fn scissor_test_custom_rect() {
},
expected_result,
);
})
}
});


@ -15,9 +15,9 @@ use wgpu::{
use wgpu_test::TestingContext;
mod numeric_builtins;
mod struct_layout;
mod zero_init_workgroup_mem;
pub mod numeric_builtins;
pub mod struct_layout;
pub mod zero_init_workgroup_mem;
#[derive(Clone, Copy, PartialEq)]
enum InputStorageType {


@ -1,8 +1,7 @@
use wasm_bindgen_test::*;
use wgpu::{DownlevelFlags, Limits};
use crate::shader::{shader_input_output_test, InputStorageType, ShaderTest};
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};
fn create_numeric_builtin_test() -> Vec<ShaderTest> {
let mut tests = Vec::new();
@ -38,19 +37,17 @@ fn create_numeric_builtin_test() -> Vec<ShaderTest> {
tests
}
#[test]
#[wasm_bindgen_test]
fn numeric_builtins() {
initialize_test(
#[gpu_test]
static NUMERIC_BUILTINS: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
.limits(Limits::downlevel_defaults()),
|ctx| {
)
.run_sync(|ctx| {
shader_input_output_test(
ctx,
InputStorageType::Storage,
create_numeric_builtin_test(),
);
},
);
}
});


@ -1,10 +1,9 @@
use std::fmt::Write;
use wasm_bindgen_test::*;
use wgpu::{Backends, DownlevelFlags, Features, Limits};
use crate::shader::{shader_input_output_test, InputStorageType, ShaderTest, MAX_BUFFER_SIZE};
use wgpu_test::{initialize_test, FailureCase, TestParameters};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
fn create_struct_layout_tests(storage_type: InputStorageType) -> Vec<ShaderTest> {
let input_values: Vec<_> = (0..(MAX_BUFFER_SIZE as u32 / 4)).collect();
@ -175,46 +174,41 @@ fn create_struct_layout_tests(storage_type: InputStorageType) -> Vec<ShaderTest>
tests
}
#[test]
#[wasm_bindgen_test]
fn uniform_input() {
initialize_test(
#[gpu_test]
static UNIFORM_INPUT: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
// Validation errors thrown by the SPIR-V validator https://github.com/gfx-rs/naga/issues/2034
.expect_fail(FailureCase::backend(wgpu::Backends::VULKAN))
.limits(Limits::downlevel_defaults()),
|ctx| {
)
.run_sync(|ctx| {
shader_input_output_test(
ctx,
InputStorageType::Uniform,
create_struct_layout_tests(InputStorageType::Uniform),
);
},
);
}
});
#[test]
#[wasm_bindgen_test]
fn storage_input() {
initialize_test(
#[gpu_test]
static STORAGE_INPUT: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
.limits(Limits::downlevel_defaults()),
|ctx| {
)
.run_sync(|ctx| {
shader_input_output_test(
ctx,
InputStorageType::Storage,
create_struct_layout_tests(InputStorageType::Storage),
);
},
);
}
});
#[test]
#[wasm_bindgen_test]
fn push_constant_input() {
initialize_test(
#[gpu_test]
static PUSH_CONSTANT_INPUT: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.features(Features::PUSH_CONSTANTS)
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
@ -223,12 +217,11 @@ fn push_constant_input() {
..Limits::downlevel_defaults()
})
.expect_fail(FailureCase::backend(Backends::GL)),
|ctx| {
)
.run_sync(|ctx| {
shader_input_output_test(
ctx,
InputStorageType::PushConstant,
create_struct_layout_tests(InputStorageType::PushConstant),
);
},
);
}
});


@ -8,45 +8,24 @@ use wgpu::{
ShaderStages,
};
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
#[test]
fn zero_init_workgroup_mem() {
initialize_test(
#[gpu_test]
static ZERO_INIT_WORKGROUP_MEMORY: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
.limits(Limits::downlevel_defaults())
// remove both of these once we get to https://github.com/gfx-rs/wgpu/issues/3193 or
// https://github.com/gfx-rs/wgpu/issues/3160
// remove once we get to https://github.com/gfx-rs/wgpu/issues/3193
.skip(FailureCase {
backends: Some(Backends::DX12),
vendor: Some(5140),
adapter: Some("Microsoft Basic Render Driver"),
..FailureCase::default()
})
.skip(FailureCase::backend_adapter(
Backends::VULKAN,
"swiftshader",
)),
zero_init_workgroup_mem_impl,
);
}
const DISPATCH_SIZE: (u32, u32, u32) = (64, 64, 64);
const TOTAL_WORK_GROUPS: u32 = DISPATCH_SIZE.0 * DISPATCH_SIZE.1 * DISPATCH_SIZE.2;
/// Number of bytes we use in the shader
const SHADER_WORKGROUP_MEMORY: u32 = 512 * 4 + 4;
// assume we have this much workgroup memory (2GB)
const MAX_DEVICE_WORKGROUP_MEMORY: u32 = i32::MAX as u32;
const NR_OF_DISPATCHES: u32 =
MAX_DEVICE_WORKGROUP_MEMORY / (SHADER_WORKGROUP_MEMORY * TOTAL_WORK_GROUPS) + 1; // TODO: use div_ceil once stabilized
const OUTPUT_ARRAY_SIZE: u32 = TOTAL_WORK_GROUPS * NR_OF_DISPATCHES;
const BUFFER_SIZE: u64 = OUTPUT_ARRAY_SIZE as u64 * 4;
const BUFFER_BINDING_SIZE: u32 = TOTAL_WORK_GROUPS * 4;
fn zero_init_workgroup_mem_impl(ctx: TestingContext) {
.skip(FailureCase::backend_adapter(Backends::VULKAN, "llvmpipe")),
)
.run_sync(|ctx| {
let bgl = ctx
.device
.create_bind_group_layout(&BindGroupLayoutDescriptor {
@ -174,4 +153,18 @@ fn zero_init_workgroup_mem_impl(ctx: TestingContext) {
drop(mapped);
mapping_buffer.unmap();
}
});
const DISPATCH_SIZE: (u32, u32, u32) = (64, 64, 64);
const TOTAL_WORK_GROUPS: u32 = DISPATCH_SIZE.0 * DISPATCH_SIZE.1 * DISPATCH_SIZE.2;
/// Number of bytes we use in the shader
const SHADER_WORKGROUP_MEMORY: u32 = 512 * 4 + 4;
// assume we have this much workgroup memory (2GB)
const MAX_DEVICE_WORKGROUP_MEMORY: u32 = i32::MAX as u32;
const NR_OF_DISPATCHES: u32 =
MAX_DEVICE_WORKGROUP_MEMORY / (SHADER_WORKGROUP_MEMORY * TOTAL_WORK_GROUPS) + 1; // TODO: use div_ceil once stabilized
const OUTPUT_ARRAY_SIZE: u32 = TOTAL_WORK_GROUPS * NR_OF_DISPATCHES;
const BUFFER_SIZE: u64 = OUTPUT_ARRAY_SIZE as u64 * 4;
const BUFFER_BINDING_SIZE: u32 = TOTAL_WORK_GROUPS * 4;
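For reference, a quick evaluation of these constants (a sanity check added here, not part of the diff):

    TOTAL_WORK_GROUPS       = 64 * 64 * 64                        = 262_144
    SHADER_WORKGROUP_MEMORY = 512 * 4 + 4                         = 2_052 bytes
    NR_OF_DISPATCHES        = 2_147_483_647 / 537_919_488 + 1     = 3 + 1 = 4
    OUTPUT_ARRAY_SIZE       = 262_144 * 4                         = 1_048_576
    BUFFER_SIZE             = 1_048_576 * 4                       = 4_194_304 bytes (4 MiB)
    BUFFER_BINDING_SIZE     = 262_144 * 4                         = 1_048_576 bytes (1 MiB)

Note that the `a / b + 1` form overestimates by one whenever `a` is an exact multiple of `b`, which is what the `div_ceil` TODO alludes to.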


@ -1,7 +1,5 @@
use wasm_bindgen_test::*;
use wgpu::util::DeviceExt;
use wgpu_test::{image, initialize_test, TestParameters, TestingContext};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters, TestingContext};
//
// These tests render two triangles to a 2x2 render target. The first triangle
@ -37,9 +35,15 @@ use wgpu_test::{image, initialize_test, TestParameters, TestingContext};
// draw_indexed() draws the triangles in the opposite order, using index
// buffer [3, 4, 5, 0, 1, 2]. This also swaps the resulting pixel colors.
//
#[test]
#[wasm_bindgen_test]
fn draw() {
#[gpu_test]
static DRAW: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.test_features_limits()
.features(wgpu::Features::SHADER_PRIMITIVE_INDEX),
)
.run_sync(|ctx| {
//
// +-----+-----+
// |white|blue |
@ -50,21 +54,19 @@ fn draw() {
let expected = [
255, 255, 255, 255, 0, 0, 255, 255, 255, 0, 0, 255, 255, 255, 255, 255,
];
initialize_test(
TestParameters::default()
.test_features_limits()
.features(wgpu::Features::SHADER_PRIMITIVE_INDEX),
|ctx| {
pulling_common(ctx, &expected, |rpass| {
rpass.draw(0..6, 0..1);
})
},
);
}
});
#[test]
#[wasm_bindgen_test]
fn draw_indexed() {
#[gpu_test]
static DRAW_INDEXED: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.test_features_limits()
.features(wgpu::Features::SHADER_PRIMITIVE_INDEX),
)
.run_sync(|ctx| {
//
// +-----+-----+
// |white| red |
@ -75,17 +77,10 @@ fn draw_indexed() {
let expected = [
255, 255, 255, 255, 255, 0, 0, 255, 0, 0, 255, 255, 255, 255, 255, 255,
];
initialize_test(
TestParameters::default()
.test_features_limits()
.features(wgpu::Features::SHADER_PRIMITIVE_INDEX),
|ctx| {
pulling_common(ctx, &expected, |rpass| {
rpass.draw_indexed(0..6, 0, 0..1);
})
},
);
}
});
fn pulling_common(
ctx: TestingContext,
@ -169,7 +164,7 @@ fn pulling_common(
});
let color_view = color_texture.create_view(&wgpu::TextureViewDescriptor::default());
let readback_buffer = image::ReadbackBuffers::new(&ctx.device, &color_texture);
let readback_buffer = wgpu_test::image::ReadbackBuffers::new(&ctx.device, &color_texture);
let mut encoder = ctx
.device


@ -1,18 +1,14 @@
use wgpu::{util::DeviceExt, DownlevelFlags, Limits, TextureFormat};
use wgpu_test::{
image::calc_difference, initialize_test, FailureCase, TestParameters, TestingContext,
};
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters, TestingContext};
#[test]
fn reinterpret_srgb_ness() {
let parameters = TestParameters::default()
#[gpu_test]
static REINTERPRET_SRGB: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(DownlevelFlags::VIEW_FORMATS)
.limits(Limits::downlevel_defaults())
.skip(FailureCase {
backends: Some(wgpu::Backends::GL),
..FailureCase::default()
});
initialize_test(parameters, |ctx| {
.limits(Limits::downlevel_defaults()),
)
.run_sync(|ctx| {
let unorm_data: [[u8; 4]; 4] = [
[180, 0, 0, 255],
[0, 84, 0, 127],
@ -58,7 +54,6 @@ fn reinterpret_srgb_ness() {
&unorm_data,
);
});
}
fn reinterpret(
ctx: &TestingContext,
@ -193,10 +188,10 @@ fn reinterpret(
let expect = expect_data[(h * size.width + w) as usize];
let tolerance = tolerance_data[(h * size.width + w) as usize];
let index = (w * 4 + offset) as usize;
if calc_difference(expect[0], data[index]) > tolerance[0]
|| calc_difference(expect[1], data[index + 1]) > tolerance[1]
|| calc_difference(expect[2], data[index + 2]) > tolerance[2]
|| calc_difference(expect[3], data[index + 3]) > tolerance[3]
if expect[0].abs_diff(data[index]) > tolerance[0]
|| expect[1].abs_diff(data[index + 1]) > tolerance[1]
|| expect[2].abs_diff(data[index + 2]) > tolerance[2]
|| expect[3].abs_diff(data[index + 3]) > tolerance[3]
{
panic!(
"Reinterpret {:?} as {:?} mismatch! expect {:?} get [{}, {}, {}, {}]",


@ -1,15 +1,10 @@
//! Tests for texture copy bounds checks.
use wasm_bindgen_test::*;
use wgpu_test::{fail_if, initialize_test, TestParameters};
use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration};
#[test]
#[wasm_bindgen_test]
fn bad_copy_origin() {
fn try_origin(origin: wgpu::Origin3d, size: wgpu::Extent3d, should_panic: bool) {
let parameters = TestParameters::default();
initialize_test(parameters, |ctx| {
#[gpu_test]
static BAD_COPY_ORIGIN_TEST: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let try_origin = |origin, size, should_panic| {
let texture = ctx.device.create_texture(&TEXTURE_DESCRIPTOR);
let data = vec![255; BUFFER_SIZE as usize];
@ -26,8 +21,7 @@ fn bad_copy_origin() {
size,
)
});
});
}
};
try_origin(wgpu::Origin3d { x: 0, y: 0, z: 0 }, TEXTURE_SIZE, false);
try_origin(wgpu::Origin3d { x: 1, y: 0, z: 0 }, TEXTURE_SIZE, true);
@ -86,7 +80,7 @@ fn bad_copy_origin() {
},
true,
);
}
});
const TEXTURE_SIZE: wgpu::Extent3d = wgpu::Extent3d {
width: 64,


@ -1,10 +1,7 @@
use wgpu_test::{fail, initialize_test, TestParameters};
use wgpu_test::{fail, gpu_test, GpuTestConfiguration};
#[test]
fn copy_overflow_z() {
// A simple crash test exercising validation that used to happen a bit too
// late, letting an integer overflow slip through.
initialize_test(TestParameters::default(), |ctx| {
#[gpu_test]
static COPY_OVERFLOW_Z: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(|ctx| {
let mut encoder = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
@ -65,5 +62,4 @@ fn copy_overflow_z() {
);
ctx.queue.submit(Some(encoder.finish()));
});
})
}
});


@ -1,9 +1,8 @@
use std::num::NonZeroU64;
use wasm_bindgen_test::*;
use wgpu::util::DeviceExt;
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters, TestingContext};
fn pulling_common(
ctx: TestingContext,
@ -134,54 +133,48 @@ fn pulling_common(
assert_eq!(data, expected);
}
#[test]
#[wasm_bindgen_test]
fn draw() {
initialize_test(TestParameters::default().test_features_limits(), |ctx| {
#[gpu_test]
static DRAW: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().test_features_limits())
.run_sync(|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..6, 0..1);
})
})
}
});
#[test]
#[wasm_bindgen_test]
fn draw_vertex_offset() {
initialize_test(
TestParameters::default()
.test_features_limits()
.expect_fail(FailureCase::backend(wgpu::Backends::DX11)),
|ctx| {
#[gpu_test]
static DRAW_VERTEX: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().test_features_limits())
.run_sync(|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..1);
cmb.draw(3..6, 0..1);
})
},
)
}
});
#[test]
#[wasm_bindgen_test]
fn draw_instanced() {
initialize_test(TestParameters::default().test_features_limits(), |ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..2);
})
})
}
#[test]
#[wasm_bindgen_test]
fn draw_instanced_offset() {
initialize_test(
#[gpu_test]
static DRAW_INSTANCED: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.test_features_limits()
.expect_fail(FailureCase::backend(wgpu::Backends::DX11)),
|ctx| {
)
.run_sync(|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..2);
})
});
#[gpu_test]
static DRAW_INSTANCED_OFFSET: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.test_features_limits()
.expect_fail(FailureCase::backend(wgpu::Backends::DX11)),
)
.run_sync(|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..1);
cmb.draw(0..3, 1..2);
})
},
)
}
});

View File

@ -1,16 +1,13 @@
//! Tests for texture copy.
use wgpu_test::{initialize_test, FailureCase, TestParameters};
use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters};
use wasm_bindgen_test::*;
#[test]
#[wasm_bindgen_test]
fn write_texture_subset_2d() {
#[gpu_test]
static WRITE_TEXTURE_SUBSET_2D: GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12)))
.run_sync(|ctx| {
let size = 256;
let parameters =
TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12));
initialize_test(parameters, |ctx| {
let tex = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,
dimension: wgpu::TextureDimension::D2,
@ -98,15 +95,12 @@ fn write_texture_subset_2d() {
assert_eq!(*byte, 0);
}
});
}
#[test]
#[wasm_bindgen_test]
fn write_texture_subset_3d() {
#[gpu_test]
static WRITE_TEXTURE_SUBSET_3D: GpuTestConfiguration =
GpuTestConfiguration::new().run_sync(|ctx| {
let size = 256;
let depth = 4;
let parameters = TestParameters::default();
initialize_test(parameters, |ctx| {
let tex = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,
dimension: wgpu::TextureDimension::D3,
@ -194,4 +188,3 @@ fn write_texture_subset_3d() {
assert_eq!(*byte, 0);
}
});
}

View File

@ -1,16 +1,15 @@
use wasm_bindgen_test::*;
use wgpu::*;
use wgpu_test::{
image::ReadbackBuffers, initialize_test, FailureCase, TestParameters, TestingContext,
gpu_test, image::ReadbackBuffers, FailureCase, GpuTestConfiguration, TestParameters,
TestingContext,
};
// Checks if discarding a color target resets its init state, causing a zero read of this texture when it is copied to a buffer after the encoder is submitted.
#[test]
#[wasm_bindgen_test]
fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_after_submit() {
initialize_test(
TestParameters::default().skip(FailureCase::webgl2()),
|mut ctx| {
#[gpu_test]
static DISCARDING_COLOR_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_AFTER_SUBMIT:
GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::webgl2()))
.run_sync(|mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
@ -21,17 +20,13 @@ fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_after
case.submit_command_encoder();
case.assert_buffers_are_zero();
},
);
}
});
// Checks if discarding a color target resets its init state, causing a zero read of this texture when it is copied to a buffer in the same encoder.
#[test]
#[wasm_bindgen_test]
fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_in_same_encoder() {
initialize_test(
TestParameters::default().skip(FailureCase::webgl2()),
|mut ctx| {
#[gpu_test]
static DISCARDING_COLOR_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_IN_SAME_ENCODER:
GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(TestParameters::default().expect_fail(FailureCase::webgl2()))
.run_sync(|mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
@ -39,20 +34,19 @@ fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_in_sa
case.submit_command_encoder();
case.assert_buffers_are_zero();
},
);
}
});
#[test]
#[wasm_bindgen_test]
fn discarding_depth_target_resets_texture_init_state_check_visible_on_copy_in_same_encoder() {
initialize_test(
#[gpu_test]
static DISCARDING_DEPTH_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_IN_SAME_ENCODER:
GpuTestConfiguration = GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(
DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES | DownlevelFlags::COMPUTE_SHADERS,
)
.limits(Limits::downlevel_defaults()),
|mut ctx| {
)
.run_sync(|mut ctx| {
for format in [
TextureFormat::Stencil8,
TextureFormat::Depth16Unorm,
@ -68,21 +62,28 @@ fn discarding_depth_target_resets_texture_init_state_check_visible_on_copy_in_sa
case.assert_buffers_are_zero();
}
},
);
}
});
#[test]
#[wasm_bindgen_test]
fn discarding_either_depth_or_stencil_aspect() {
initialize_test(
#[gpu_test]
static DISCARDING_EITHER_DEPTH_OR_STENCIL_ASPECT_TEST: GpuTestConfiguration =
GpuTestConfiguration::new()
.parameters(
TestParameters::default()
.downlevel_flags(
DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES | DownlevelFlags::COMPUTE_SHADERS,
DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES
| DownlevelFlags::COMPUTE_SHADERS,
)
.limits(Limits::downlevel_defaults()),
|mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Depth24PlusStencil8);
)
.run_sync(|mut ctx| {
for format in [
TextureFormat::Stencil8,
TextureFormat::Depth16Unorm,
TextureFormat::Depth24Plus,
TextureFormat::Depth24PlusStencil8,
TextureFormat::Depth32Float,
] {
let mut case = TestCase::new(&mut ctx, format);
case.create_command_encoder();
case.discard_depth();
case.submit_command_encoder();
@ -96,9 +97,8 @@ fn discarding_either_depth_or_stencil_aspect() {
case.submit_command_encoder();
case.assert_buffers_are_zero();
},
);
}
});
struct TestCase<'ctx> {
ctx: &'ctx mut TestingContext,


@ -80,7 +80,8 @@ impl Instance {
impl crate::Instance<Api> for Instance {
unsafe fn init(_desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
//TODO: enable `METAL_DEVICE_WRAPPER_TYPE` environment based on the flags?
// We do not enable Metal validation based on the validation flags, as it affects the entire
// process. Instead, we enable validation inside the test harness itself, in tests/src/native.rs.
Ok(Instance {
managed_metal_layer_delegate: surface::HalManagedMetalLayerDelegate::new(),
})


@ -7,6 +7,9 @@ use wgpu::{
use crate::texture;
/// Report specifying the capabilities of the GPUs on the system.
///
/// Must be synchronized with the definition in tests/src/report.rs.
#[derive(Deserialize, Serialize)]
pub struct GpuReport {
pub devices: Vec<AdapterReport>,
@ -48,6 +51,9 @@ impl GpuReport {
}
}
/// A single report of the capabilities of an Adapter.
///
/// Must be synchronized with the definition in tests/src/report.rs.
#[derive(Deserialize, Serialize)]
pub struct AdapterReport {
pub info: AdapterInfo,

wgpu-macros/Cargo.toml (new file)

@ -0,0 +1,20 @@
[package]
name = "wgpu-macros"
version.workspace = true
authors.workspace = true
edition.workspace = true
description = "Macros for wgpu"
homepage.workspace = true
repository.workspace = true
keywords.workspace = true
license.workspace = true
exclude = ["Cargo.lock"]
publish = false
[lib]
proc-macro = true
[dependencies]
heck = "0.4"
quote = "1"
syn = { version = "2", features = ["full"] }

wgpu-macros/src/lib.rs (new file)

@ -0,0 +1,44 @@
use heck::ToSnakeCase;
use proc_macro::TokenStream;
use quote::quote;
use syn::Ident;
/// Creates a test that will run on all GPUs on a given system.
///
/// Apply this macro to a static variable with a type that can be converted to a `GpuTestConfiguration`.
#[proc_macro_attribute]
pub fn gpu_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
let input_static = syn::parse_macro_input!(item as syn::ItemStatic);
let expr = &input_static.expr;
let ident = &input_static.ident;
let ident_str = ident.to_string();
let ident_lower = ident_str.to_snake_case();
let register_test_name = Ident::new(&format!("{}_initializer", ident_lower), ident.span());
let test_name_webgl = Ident::new(&format!("{}_webgl", ident_lower), ident.span());
quote! {
#[cfg(not(target_arch = "wasm32"))]
#[::wgpu_test::ctor]
fn #register_test_name() {
struct S;
::wgpu_test::native::TEST_LIST.lock().push(
// Allow any type that can be converted to a GpuTestConfiguration
::wgpu_test::GpuTestConfiguration::from(#expr).name_from_init_function_typename::<S>(#ident_lower)
)
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen_test::wasm_bindgen_test]
async fn #test_name_webgl() {
struct S;
// Allow any type that can be converted to a GpuTestConfiguration
let test_config = ::wgpu_test::GpuTestConfiguration::from(#expr).name_from_init_function_typename::<S>(#ident_lower);
::wgpu_test::execute_test(test_config, None, 0).await;
}
}
.into()
}
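For readers unfamiliar with the new harness, here is a minimal sketch of a test written against this macro; the test name and body are hypothetical, but the builder calls mirror the ones used by the migrated tests above:

    use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};

    #[gpu_test]
    static EXAMPLE: GpuTestConfiguration = GpuTestConfiguration::new()
        .parameters(TestParameters::default())
        .run_sync(|ctx| {
            // `ctx` is the TestingContext, giving access to the device and queue.
            let buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("example buffer"),
                size: 16,
                usage: wgpu::BufferUsages::COPY_DST,
                mapped_at_creation: false,
            });
            drop(buffer);
        });

On native targets the attribute expands to a `ctor` registration that pushes the configuration onto `wgpu_test::native::TEST_LIST`; on wasm it expands to a `wasm_bindgen_test` that calls `execute_test`, exactly as in the macro body above.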


@ -95,6 +95,8 @@ optional = true
[dependencies]
arrayvec.workspace = true
cfg-if.workspace = true
flume.workspace = true
log.workspace = true
parking_lot.workspace = true
profiling.workspace = true
@ -102,7 +104,6 @@ raw-window-handle.workspace = true
serde = { workspace = true, features = ["derive"], optional = true }
smallvec.workspace = true
static_assertions.workspace = true
cfg-if.workspace = true
[dependencies.naga]
workspace = true


@ -3,7 +3,7 @@ use crate::{
BufferViewMut, CommandEncoder, Device, MapMode,
};
use std::fmt;
use std::sync::{mpsc, Arc};
use std::sync::Arc;
struct Chunk {
buffer: Arc<Buffer>,
@ -36,9 +36,9 @@ pub struct StagingBelt {
/// into `active_chunks`.
free_chunks: Vec<Chunk>,
/// When closed chunks are mapped again, the map callback sends them here.
sender: mpsc::Sender<Chunk>,
sender: flume::Sender<Chunk>,
/// Free chunks are received here to be put on `self.free_chunks`.
receiver: mpsc::Receiver<Chunk>,
receiver: flume::Receiver<Chunk>,
}
impl StagingBelt {
@ -53,7 +53,7 @@ impl StagingBelt {
/// (per [`StagingBelt::finish()`]); and
/// * bigger is better, within these bounds.
pub fn new(chunk_size: BufferAddress) -> Self {
let (sender, receiver) = mpsc::channel();
let (sender, receiver) = flume::unbounded();
StagingBelt {
chunk_size,
active_chunks: Vec::new(),

xtask/Cargo.lock (generated)

@ -626,6 +626,21 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "xshell"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "962c039b3a7b16cf4e9a4248397c6585c07547412e7d6a6e035389a802dcfe90"
dependencies = [
"xshell-macros",
]
[[package]]
name = "xshell-macros"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dbabb1cbd15a1d6d12d9ed6b35cc6777d4af87ab3ba155ea37215f20beab80c"
[[package]]
name = "xtask"
version = "0.1.0"
@ -635,4 +650,5 @@ dependencies = [
"env_logger",
"log",
"pico-args",
"xshell",
]


@ -12,5 +12,6 @@ cargo-run-wasm = { version = "0.3.2", git = "https://github.com/ErichDonGubler/c
env_logger = "0.10.0"
log = "0.4.18"
pico-args = { version = "0.5.0", features = ["eq-separator", "short-space-opt", "combined-flags"] }
xshell = "0.2.3"
[workspace]


@ -8,6 +8,8 @@ Usage: xtask <COMMAND>
Commands:
run-wasm
test
--llvm-cov Run tests with LLVM code coverage using the llvm-cov tool
Options:
-h, --help Print help
@ -40,6 +42,7 @@ impl Args {
pub(crate) enum Subcommand {
RunWasm { args: Arguments },
Test { args: Arguments },
}
impl Subcommand {
@ -50,6 +53,7 @@ impl Subcommand {
.context("no subcommand specified; see `--help` for more details")?;
match &*subcmd {
"run-wasm" => Ok(Self::RunWasm { args }),
"test" => Ok(Self::Test { args }),
other => {
bail!("unrecognized subcommand {other:?}; see `--help` for more details")
}


@ -5,6 +5,7 @@ use cli::{Args, Subcommand};
use pico_args::Arguments;
mod cli;
mod test;
fn main() -> ExitCode {
env_logger::builder()
@ -29,7 +30,8 @@ fn run(args: Args) -> anyhow::Result<()> {
match subcommand {
Subcommand::RunWasm { mut args } => {
// Use top-level Cargo.toml instead of xtask/Cargo.toml by default
let manifest_path = args.value_from_str("--manifest-path")
let manifest_path = args
.value_from_str("--manifest-path")
.unwrap_or_else(|_| "../Cargo.toml".to_string());
let mut arg_vec = args.finish();
arg_vec.push("--manifest-path".into());
@ -44,5 +46,6 @@ fn run(args: Args) -> anyhow::Result<()> {
);
Ok(())
}
Subcommand::Test { args } => test::run_tests(args),
}
}

xtask/src/test.rs (new file)

@ -0,0 +1,58 @@
use anyhow::Context;
use pico_args::Arguments;
pub fn run_tests(mut args: Arguments) -> anyhow::Result<()> {
let llvm_cov = args.contains("--llvm-cov");
let llvm_cov_flags: &[_] = if llvm_cov {
&["llvm-cov", "--no-cfg-coverage", "--no-report"]
} else {
&[]
};
let llvm_cov_nextest_flags: &[_] = if llvm_cov {
&["llvm-cov", "--no-cfg-coverage", "--no-report", "nextest"]
} else {
&["nextest", "run"]
};
let shell = xshell::Shell::new().context("Couldn't create xshell shell")?;
shell.change_dir(String::from(env!("CARGO_MANIFEST_DIR")) + "/..");
log::info!("Generating .gpuconfig file based on gpus on the system");
xshell::cmd!(
shell,
"cargo {llvm_cov_flags...} run --bin wgpu-info -- --json -o .gpuconfig"
)
.quiet()
.run()
.context("Failed to run wgpu-info to generate .gpuconfig")?;
let gpu_count = shell
.read_file(".gpuconfig")
.unwrap()
.lines()
.filter(|line| line.contains("name"))
.count();
log::info!(
"Found {} gpu{}",
gpu_count,
if gpu_count == 1 { "" } else { "s" }
);
log::info!("Running cargo tests");
xshell::cmd!(
shell,
"cargo {llvm_cov_nextest_flags...} --no-fail-fast --retries 2"
)
.args(args.finish())
.quiet()
.run()
.context("Tests failed")?;
log::info!("Finished tests");
Ok(())
}
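A short usage note: assuming cargo-nextest (and, for coverage, cargo-llvm-cov) is installed, this subcommand is invoked via the repo's `cargo xtask` alias, or directly with `cargo run --manifest-path xtask/Cargo.toml -- test`:

    cargo xtask test              # regenerate .gpuconfig via wgpu-info, then run nextest with retries
    cargo xtask test --llvm-cov   # same, but every cargo invocation is wrapped in cargo-llvm-cov without reporting

Any extra arguments are forwarded to the underlying nextest invocation through `args.finish()` above.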