Mirror of https://github.com/gfx-rs/wgpu.git, synced 2024-11-24 15:54:00 +00:00
Only Initialize a Single Backend in Tests (#6315)
* Only Initialize a Single Backend in Tests
* Update tests/tests/create_surface_error.rs
This commit is contained in:
parent 23fa0ae6d0
commit 859dd8817e
@@ -1,6 +1,8 @@
 use wgpu::{Adapter, Device, Instance, Queue};
 use wgt::{Backends, Features, Limits};
 
+use crate::report::AdapterReport;
+
 /// Initialize the logger for the test runner.
 pub fn init_logger() {
     // We don't actually care if it fails
@@ -11,7 +13,7 @@ pub fn init_logger() {
 }
 
 /// Initialize a wgpu instance with the options from the environment.
-pub fn initialize_instance(force_fxc: bool) -> Instance {
+pub fn initialize_instance(backends: wgpu::Backends, force_fxc: bool) -> Instance {
     // We ignore `WGPU_BACKEND` for now, merely using test filtering to only run a single backend's tests.
     //
     // We can potentially work support back into the test runner in the future, but as the adapters are matched up
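With the new signature, call sites choose the backend set explicitly instead of the helper hard-coding Backends::all(). A minimal sketch of a call site, assuming only the wgpu_test helper changed above (the particular backend set is illustrative, not from this diff):

// Illustrative call site: request only the Vulkan backend and do not force
// FXC shader compilation.
fn example_instance() -> wgpu::Instance {
    wgpu_test::initialize_instance(wgpu::Backends::VULKAN, false)
}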
@@ -23,9 +25,9 @@ pub fn initialize_instance(force_fxc: bool) -> Instance {
     // To "disable" webgpu regardless, we do this by removing the webgpu backend whenever we see
     // the webgl feature.
     let backends = if cfg!(feature = "webgl") {
-        Backends::all() - Backends::BROWSER_WEBGPU
+        backends - wgpu::Backends::BROWSER_WEBGPU
     } else {
-        Backends::all()
+        backends
     };
     // Some tests need to be able to force demote to FXC, to specifically test workarounds for FXC
     // behavior.
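wgpu::Backends is a bitflags set, so the subtraction above simply clears the BROWSER_WEBGPU bit from whatever set the caller passed in. A standalone sketch of that behaviour:

use wgpu::Backends;

fn main() {
    // Removing one flag from a bitflags set leaves every other backend enabled.
    let backends = Backends::all() - Backends::BROWSER_WEBGPU;
    assert!(!backends.contains(Backends::BROWSER_WEBGPU));
    assert!(backends.contains(Backends::VULKAN));
}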
@@ -43,12 +45,16 @@ pub fn initialize_instance(force_fxc: bool) -> Instance {
     })
 }
 
-/// Initialize a wgpu adapter, taking the `n`th adapter from the instance.
+/// Initialize a wgpu adapter, using the given adapter report to match the adapter.
 pub async fn initialize_adapter(
-    adapter_index: usize,
+    adapter_report: Option<&AdapterReport>,
     force_fxc: bool,
 ) -> (Instance, Adapter, Option<SurfaceGuard>) {
-    let instance = initialize_instance(force_fxc);
+    let backends = adapter_report
+        .map(|report| Backends::from(report.info.backend))
+        .unwrap_or_default();
+
+    let instance = initialize_instance(backends, force_fxc);
     #[allow(unused_variables)]
     let surface: Option<wgpu::Surface>;
     let surface_guard: Option<SurfaceGuard>;
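The rewritten body converts the single Backend recorded in the adapter report into a one-bit Backends set, so only that backend is initialized. A small sketch of the conversion the .map above relies on:

use wgpu::{Backend, Backends};

fn main() {
    // From<Backend> for Backends yields a set containing exactly that backend,
    // keeping instance creation and adapter enumeration on a single backend.
    let single = Backends::from(Backend::Vulkan);
    assert_eq!(single, Backends::VULKAN);
}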
@@ -82,13 +88,24 @@ pub async fn initialize_adapter(
 
     cfg_if::cfg_if! {
         if #[cfg(not(target_arch = "wasm32"))] {
-            let adapter_iter = instance.enumerate_adapters(wgpu::Backends::all());
-            let adapter_count = adapter_iter.len();
+            let adapter_iter = instance.enumerate_adapters(backends);
             let adapter = adapter_iter.into_iter()
-                .nth(adapter_index)
-                .unwrap_or_else(|| panic!("Tried to get index {adapter_index} adapter, but adapter list was only {adapter_count} long. Is .gpuconfig out of date?"));
+                // If we have a report, we only want to match the adapter with the same info.
+                //
+                // If we don't have a report, we just take the first adapter.
+                .find(|adapter| if let Some(adapter_report) = adapter_report {
+                    adapter.get_info() == adapter_report.info
+                } else {
+                    true
+                });
+            let Some(adapter) = adapter else {
+                panic!(
+                    "Could not find adapter with info {:#?} in {:#?}",
+                    adapter_report.map(|r| &r.info),
+                    instance.enumerate_adapters(backends).into_iter().map(|a| a.get_info()).collect::<Vec<_>>(),
+                );
+            };
         } else {
-            assert_eq!(adapter_index, 0);
             let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
                 compatible_surface: surface.as_ref(),
                 ..Default::default()
@@ -19,15 +19,16 @@ struct NativeTest {
 }
 
 impl NativeTest {
+    /// Adapter index is only used for naming the test, the adapters are matched based on the adapter info.
     fn from_configuration(
         config: GpuTestConfiguration,
-        adapter: &AdapterReport,
+        adapter_report: AdapterReport,
         adapter_index: usize,
     ) -> Self {
-        let backend = adapter.info.backend;
-        let device_name = &adapter.info.name;
+        let backend = adapter_report.info.backend;
+        let device_name = &adapter_report.info.name;
 
-        let test_info = TestInfo::from_configuration(&config, adapter);
+        let test_info = TestInfo::from_configuration(&config, &adapter_report);
 
         let full_name = format!(
             "[{running_msg}] [{backend:?}/{device_name}/{adapter_index}] {base_name}",
@@ -50,10 +51,12 @@ impl NativeTest {
 
                 let env_value = if metal_validation { "1" } else { "0" };
                 std::env::set_var("MTL_DEBUG_LAYER", env_value);
-                // Metal Shader Validation is entirely broken in the paravirtualized CI environment.
-                // std::env::set_var("MTL_SHADER_VALIDATION", env_value);
+                if std::env::var("GITHUB_ACTIONS").as_deref() != Ok("true") {
+                    // Metal Shader Validation is entirely broken in the paravirtualized CI environment.
+                    std::env::set_var("MTL_SHADER_VALIDATION", env_value);
+                }
 
-                execute_test(config, Some(test_info), adapter_index).await;
+                execute_test(Some(&adapter_report), config, Some(test_info)).await;
             }),
         }
     }
@@ -83,16 +86,24 @@ pub fn main() -> MainResult {
         &std::fs::read_to_string(format!("{}/../.gpuconfig", env!("CARGO_MANIFEST_DIR")))
             .context("Failed to read .gpuconfig, did you run the tests via `cargo xtask test`?")?
     };
-    let report = GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?;
+    let mut report =
+        GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?;
+
+    // Filter out the adapters that are not part of WGPU_BACKEND.
+    let wgpu_backends = wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all());
+    report
+        .devices
+        .retain(|report| wgpu_backends.contains(wgpu::Backends::from(report.info.backend)));
 
     let mut test_guard = TEST_LIST.lock();
     // Iterate through all the tests. Creating a test per adapter.
     execute_native(test_guard.drain(..).flat_map(|test| {
         report
             .devices
             .iter()
             .enumerate()
-            .map(move |(adapter_index, adapter)| {
-                NativeTest::from_configuration(test.clone(), adapter, adapter_index)
+            .map(move |(adapter_index, adapter_report)| {
+                NativeTest::from_configuration(test.clone(), adapter_report.clone(), adapter_index)
             })
     }));
 
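wgpu::util::backend_bits_from_env reads the WGPU_BACKEND environment variable (a comma-separated list such as "vulkan" or "vulkan,dx12") and returns None when it is unset, which is why the code above falls back to Backends::all(). A hedged sketch of the filter condition in isolation (the environment value is illustrative):

fn main() {
    // Assuming WGPU_BACKEND=vulkan, only adapters whose backend is contained
    // in the parsed bitset survive the retain() above.
    std::env::set_var("WGPU_BACKEND", "vulkan");
    let wgpu_backends = wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all());
    assert!(wgpu_backends.contains(wgpu::Backends::VULKAN));
    assert!(!wgpu_backends.contains(wgpu::Backends::METAL));
}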
@@ -25,8 +25,8 @@ impl GpuReport {
 /// A single report of the capabilities of an Adapter.
 ///
 /// Must be synchronized with the definition on wgpu-info/src/report.rs.
-#[derive(Deserialize)]
-pub(crate) struct AdapterReport {
+#[derive(Deserialize, Clone)]
+pub struct AdapterReport {
     pub info: AdapterInfo,
     pub features: Features,
     pub limits: Limits,
@@ -24,14 +24,14 @@ pub struct TestingContext {
     pub queue: Queue,
 }
 
-/// Execute the given test configuration with the given adapter index.
+/// Execute the given test configuration with the given adapter report.
 ///
 /// If test_info is specified, will use the information whether to skip the test.
 /// If it is not, we'll create the test info from the adapter itself.
 pub async fn execute_test(
+    adapter_report: Option<&AdapterReport>,
     config: GpuTestConfiguration,
     test_info: Option<TestInfo>,
-    adapter_index: usize,
 ) {
     // If we get information externally, skip based on that information before we do anything.
     if let Some(TestInfo { skip: true, .. }) = test_info {
@@ -43,7 +43,7 @@ pub async fn execute_test(
     let _test_guard = isolation::OneTestPerProcessGuard::new();
 
     let (instance, adapter, _surface_guard) =
-        initialize_adapter(adapter_index, config.params.force_fxc).await;
+        initialize_adapter(adapter_report, config.params.force_fxc).await;
 
     let adapter_info = adapter.get_info();
     let adapter_downlevel_capabilities = adapter.get_downlevel_capabilities();
@@ -6,7 +6,7 @@
 #[wasm_bindgen_test::wasm_bindgen_test]
 fn canvas_get_context_returned_null() {
     // Not using the normal testing infrastructure because that goes straight to creating the canvas for us.
-    let instance = wgpu_test::initialize_instance(false);
+    let instance = wgpu_test::initialize_instance(wgpu::Backends::all(), false);
     // Create canvas
     let canvas = wgpu_test::initialize_html_canvas();
 
@@ -87,7 +87,7 @@ static REQUEST_DEVICE_ERROR_MESSAGE_NATIVE: GpuTestConfiguration =
 async fn request_device_error_message() {
     // Not using initialize_test() because that doesn't let us catch the error
     // nor .await anything
-    let (_instance, adapter, _surface_guard) = wgpu_test::initialize_adapter(0, false).await;
+    let (_instance, adapter, _surface_guard) = wgpu_test::initialize_adapter(None, false).await;
 
     let device_error = adapter
         .request_device(
@@ -37,7 +37,7 @@ pub fn gpu_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
             // Allow any type that can be converted to a GpuTestConfiguration
             let test_config = ::wgpu_test::GpuTestConfiguration::from(#expr).name_from_init_function_typename::<S>(#ident_lower);
 
-            ::wgpu_test::execute_test(test_config, None, 0).await;
+            ::wgpu_test::execute_test(None, test_config, None).await;
         }
     }
     .into()
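For context, the #[gpu_test] attribute wraps a GpuTestConfiguration static, and the generated runner now forwards None for the adapter report so execute_test derives the test info from the adapter itself. A sketch of typical usage under those assumptions (the test name and body are illustrative, not from this diff):

use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters};

#[gpu_test]
static EXAMPLE_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
    .parameters(TestParameters::default())
    .run_async(|ctx| async move {
        // ctx is the TestingContext created by execute_test for the matched,
        // single-backend adapter.
        let _info = ctx.adapter.get_info();
    });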