Allow the "gles" backend to (theoretically) compile on no_std targets.

This entirely consists of conditionally replacing `Mutex` with `RefCell`,
then making sure that the `RefCell` substitution doesn’t accidentally happen
when we are also exposing `Send + Sync`.
This commit is contained in:
Kevin Reid 2025-03-04 20:12:11 -08:00 committed by Andreas Reich
parent 67f1554e77
commit 289d0e5c5a
6 changed files with 62 additions and 25 deletions

View File

@ -104,6 +104,7 @@ gles = [
"naga/glsl-out",
"dep:arrayvec",
"dep:bytemuck",
"dep:cfg-if",
"dep:glow",
"dep:glutin_wgl_sys",
"dep:hashbrown",
@ -217,6 +218,7 @@ rustc-hash = { workspace = true, optional = true }
# Backend: GLES
bytemuck = { workspace = true, optional = true }
glow = { workspace = true, optional = true }
cfg-if = { workspace = true, optional = true }
########################
### Platform: Native ###

View File

@ -9,6 +9,18 @@ fn main() {
Emscripten: { all(target_os = "emscripten", gles) },
dx12: { all(target_os = "windows", feature = "dx12") },
gles: { all(feature = "gles") },
// Within the GL ES backend, use `std` and be `Send + Sync` only when building for a
// target that, among those where the GL ES backend is supported, actually has `std`.
gles_with_std: { all(
feature = "gles",
any(
not(target_arch = "wasm32"),
// Accept wasm32-unknown-unknown, which uniquely has a stub `std`
all(target_vendor = "unknown", target_os = "unknown"),
// Accept wasm32-unknown-emscripten and similar, which have a real `std`
target_os = "emscripten"
)
) },
metal: { all(target_vendor = "apple", feature = "metal") },
vulkan: { all(not(target_arch = "wasm32"), feature = "vulkan") },
// ⚠️ Keep in sync with target.cfg() definition in Cargo.toml and cfg_alias in `wgpu` crate ⚠️

View File

@ -2,13 +2,12 @@ use alloc::{
borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};
use std::sync::Mutex;
use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;
use super::{conv, PrivateCapabilities};
use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;
@ -526,8 +525,8 @@ impl crate::Device for super::Device {
target,
size: desc.size,
map_flags: 0,
data: Some(Arc::new(Mutex::new(vec![0; desc.size as usize]))),
offset_of_current_mapping: Arc::new(Mutex::new(0)),
data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
});
}
@ -614,7 +613,7 @@ impl crate::Device for super::Device {
}
let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
Some(Arc::new(Mutex::new(vec![0; desc.size as usize])))
Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
} else {
None
};
@ -627,7 +626,7 @@ impl crate::Device for super::Device {
size: desc.size,
map_flags,
data,
offset_of_current_mapping: Arc::new(Mutex::new(0)),
offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
})
}
@ -652,7 +651,7 @@ impl crate::Device for super::Device {
let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
let ptr = match buffer.raw {
None => {
let mut vec = buffer.data.as_ref().unwrap().lock().unwrap();
let mut vec = lock(buffer.data.as_ref().unwrap());
let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
slice.as_mut_ptr()
}
@ -660,12 +659,12 @@ impl crate::Device for super::Device {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
let ptr = if let Some(ref map_read_allocation) = buffer.data {
let mut guard = map_read_allocation.lock().unwrap();
let mut guard = lock(map_read_allocation);
let slice = guard.as_mut_slice();
unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
slice.as_mut_ptr()
} else {
*buffer.offset_of_current_mapping.lock().unwrap() = range.start;
*lock(&buffer.offset_of_current_mapping) = range.start;
unsafe {
gl.map_buffer_range(
buffer.target,
@ -691,7 +690,7 @@ impl crate::Device for super::Device {
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
unsafe { gl.unmap_buffer(buffer.target) };
unsafe { gl.bind_buffer(buffer.target, None) };
*buffer.offset_of_current_mapping.lock().unwrap() = 0;
*lock(&buffer.offset_of_current_mapping) = 0;
}
}
}
@ -704,8 +703,7 @@ impl crate::Device for super::Device {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
for range in ranges {
let offset_of_current_mapping =
*buffer.offset_of_current_mapping.lock().unwrap();
let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
unsafe {
gl.flush_mapped_buffer_range(
buffer.target,

View File

@ -346,8 +346,8 @@ pub struct Buffer {
target: BindTarget,
size: wgt::BufferAddress,
map_flags: u32,
data: Option<Arc<std::sync::Mutex<Vec<u8>>>>,
offset_of_current_mapping: Arc<std::sync::Mutex<wgt::BufferAddress>>,
data: Option<Arc<MaybeMutex<Vec<u8>>>>,
offset_of_current_mapping: Arc<MaybeMutex<wgt::BufferAddress>>,
}
#[cfg(send_sync)]
@ -1093,3 +1093,26 @@ fn gl_debug_message_callback(source: u32, gltype: u32, id: u32, severity: u32, m
crate::VALIDATION_CANARY.add(message.to_string());
}
}
// `MaybeMutex<T>` abstracts over the interior-mutability primitive used by this
// backend (e.g. for buffer data and mapping offsets, accessed via `lock()`).
// If we are using `std`, then use `Mutex` to provide `Send` and `Sync`;
// otherwise fall back to `RefCell`, which is single-threaded only.
cfg_if::cfg_if! {
if #[cfg(gles_with_std)] {
type MaybeMutex<T> = std::sync::Mutex<T>;
// Acquire the lock. Panics (via `unwrap`) if the mutex was poisoned by a
// thread that panicked while holding it.
fn lock<T>(mutex: &MaybeMutex<T>) -> std::sync::MutexGuard<'_, T> {
mutex.lock().unwrap()
}
} else {
// It should be impossible for any build configuration to trigger this error.
// It is intended only as a guard against changes elsewhere causing the use of
// `RefCell` here to become unsound: a `RefCell` must never be exposed as
// `Send + Sync`, except under the explicitly "fragile" wasm feature.
#[cfg(all(send_sync, not(feature = "fragile-send-sync-non-atomic-wasm")))]
compile_error!("cannot provide non-fragile Send+Sync without std");
type MaybeMutex<T> = core::cell::RefCell<T>;
// "Lock" by taking the sole runtime-checked mutable borrow. Panics if the
// value is already borrowed, mirroring what a deadlock/poison would be on std.
fn lock<T>(mutex: &MaybeMutex<T>) -> core::cell::RefMut<'_, T> {
mutex.borrow_mut()
}
}
}

View File

@ -1,9 +1,12 @@
use super::{conv::is_layered_target, Command as C, PrivateCapabilities};
use alloc::sync::Arc;
use arrayvec::ArrayVec;
use alloc::vec;
use core::{slice, sync::atomic::Ordering};
use arrayvec::ArrayVec;
use glow::HasContext;
use super::{conv::is_layered_target, lock, Command as C, PrivateCapabilities};
const DEBUG_ID: u32 = 0;
fn extract_marker<'a>(data: &'a [u8], range: &core::ops::Range<u32>) -> &'a str {
@ -340,7 +343,7 @@ impl super::Queue {
}
}
None => {
dst.data.as_ref().unwrap().lock().unwrap().as_mut_slice()
lock(dst.data.as_ref().unwrap()).as_mut_slice()
[range.start as usize..range.end as usize]
.fill(0);
}
@ -382,7 +385,7 @@ impl super::Queue {
};
}
(Some(src), None) => {
let mut data = dst.data.as_ref().unwrap().lock().unwrap();
let mut data = lock(dst.data.as_ref().unwrap());
let dst_data = &mut data.as_mut_slice()
[copy.dst_offset as usize..copy.dst_offset as usize + size];
@ -397,7 +400,7 @@ impl super::Queue {
};
}
(None, Some(dst)) => {
let data = src.data.as_ref().unwrap().lock().unwrap();
let data = lock(src.data.as_ref().unwrap());
let src_data = &data.as_slice()
[copy.src_offset as usize..copy.src_offset as usize + size];
unsafe { gl.bind_buffer(copy_dst_target, Some(dst)) };
@ -738,7 +741,7 @@ impl super::Queue {
glow::PixelUnpackData::BufferOffset(copy.buffer_layout.offset as u32)
}
None => {
buffer_data = src.data.as_ref().unwrap().lock().unwrap();
buffer_data = lock(src.data.as_ref().unwrap());
let src_data =
&buffer_data.as_slice()[copy.buffer_layout.offset as usize..];
glow::PixelUnpackData::Slice(Some(src_data))
@ -802,7 +805,7 @@ impl super::Queue {
)
}
None => {
buffer_data = src.data.as_ref().unwrap().lock().unwrap();
buffer_data = lock(src.data.as_ref().unwrap());
let src_data = &buffer_data.as_slice()
[(offset as usize)..(offset + bytes_in_upload) as usize];
glow::CompressedPixelUnpackData::Slice(src_data)
@ -883,7 +886,7 @@ impl super::Queue {
glow::PixelPackData::BufferOffset(offset as u32)
}
None => {
buffer_data = dst.data.as_ref().unwrap().lock().unwrap();
buffer_data = lock(dst.data.as_ref().unwrap());
let dst_data = &mut buffer_data.as_mut_slice()[offset as usize..];
glow::PixelPackData::Slice(Some(dst_data))
}
@ -1054,7 +1057,7 @@ impl super::Queue {
};
}
None => {
let data = &mut dst.data.as_ref().unwrap().lock().unwrap();
let data = &mut lock(dst.data.as_ref().unwrap());
let len = query_data.len().min(data.len());
data[..len].copy_from_slice(&query_data[..len]);
}

View File

@ -241,8 +241,7 @@
extern crate alloc;
extern crate wgpu_types as wgt;
// Each of these backends needs `std` in some fashion; usually `std::thread` functions.
// TODO(https://github.com/gfx-rs/wgpu/issues/6826): gles-WebGL backend should be made no-std
#[cfg(any(dx12, gles, metal, vulkan))]
#[cfg(any(dx12, gles_with_std, metal, vulkan))]
#[macro_use]
extern crate std;