Join threads in MULTITHREADED_COMPUTE example. (#5129)

Join all threads before returning from the test case, to ensure that
we don't return from `main` until all open `Device`s have been
dropped.

This avoids a race condition in glibc in which a thread calling
`dlclose` can unmap a shared library's code even while the main thread
is still running its finalization functions. (See #5084 for details.)
Joining all threads before returning from the test ensures that the
Vulkan loader has finished `dlclose`-ing the Vulkan validation layer
shared library before `main` returns.
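
To make the pattern concrete, here is a minimal sketch (not the test itself;
`SharedContext` and the worker body are placeholders standing in for the
shared wgpu device and queue): keeping each `JoinHandle` and joining it
guarantees that no worker thread is still running, or still holding a clone
of the shared context, once `main` begins to return.

    use std::sync::Arc;
    use std::thread;

    // Placeholder for the Arc'd device/queue bundle the real test shares.
    struct SharedContext;

    fn main() {
        let ctx = Arc::new(SharedContext);

        // Keep the JoinHandles instead of letting the threads run detached.
        let workers: Vec<_> = (0..8)
            .map(|_| {
                let ctx = Arc::clone(&ctx);
                thread::spawn(move || {
                    // ... use `ctx`; this thread's clone is dropped when it exits ...
                    drop(ctx);
                })
            })
            .collect();

        // Join every worker so no thread still holds a clone of `ctx` (and
        // hence a `Device`) once `main` starts to return.
        for worker in workers {
            worker.join().unwrap();
        }
    }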

Remove the `skip` for this test on GL/llvmpipe. With this change, the test
has not been observed to crash; without it, it crashes within ten runs or
so.

Fixes #5084.
Fixes #4285.
Jim Blandy, 2024-01-23 19:18:21 -08:00, committed by GitHub
commit 6440af03a6, parent 8d64915b3c
2 changed files with 25 additions and 17 deletions


@@ -75,6 +75,10 @@ Bottom level categories:
 
 - In Surface::configure and Surface::present, fix the current GL context not being unset when releasing the lock that guards access to making the context current. This was causing other threads to panic when trying to make the context current. By @Imberflur in [#5087](https://github.com/gfx-rs/wgpu/pull/5087).
 
+#### Tests
+
+- Fix intermittent crashes on Linux in the `multithreaded_compute` test. By @jimblandy in [#5129](https://github.com/gfx-rs/wgpu/pull/5129).
+
 ## v0.19.0 (2024-01-17)
 
 This release includes:


@@ -57,9 +57,7 @@ static MULTITHREADED_COMPUTE: GpuTestConfiguration = GpuTestConfiguration::new()
         TestParameters::default()
             .downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
             .limits(wgpu::Limits::downlevel_defaults())
-            .skip(FailureCase::adapter("V3D"))
-            // Segfaults on linux CI only https://github.com/gfx-rs/wgpu/issues/4285
-            .skip(FailureCase::backend_adapter(wgpu::Backends::GL, "llvmpipe")),
+            .skip(FailureCase::adapter("V3D")),
     )
     .run_sync(|ctx| {
         use std::{sync::mpsc, sync::Arc, thread, time::Duration};
@ -69,25 +67,31 @@ static MULTITHREADED_COMPUTE: GpuTestConfiguration = GpuTestConfiguration::new()
let thread_count = 8;
let (tx, rx) = mpsc::channel();
for _ in 0..thread_count {
let tx = tx.clone();
let ctx = Arc::clone(&ctx);
thread::spawn(move || {
let input = &[100, 100, 100];
pollster::block_on(assert_execute_gpu(
&ctx.device,
&ctx.queue,
input,
&[25, 25, 25],
));
tx.send(true).unwrap();
});
}
let workers: Vec<_> = (0..thread_count)
.map(move |_| {
let tx = tx.clone();
let ctx = Arc::clone(&ctx);
thread::spawn(move || {
let input = &[100, 100, 100];
pollster::block_on(assert_execute_gpu(
&ctx.device,
&ctx.queue,
input,
&[25, 25, 25],
));
tx.send(true).unwrap();
})
})
.collect();
for _ in 0..thread_count {
rx.recv_timeout(Duration::from_secs(10))
.expect("A thread never completed.");
}
for worker in workers {
worker.join().unwrap();
}
});
async fn assert_execute_gpu(