Rework wgpu-rs Context (#6619)

Connor Fitzgerald 2024-12-04 11:00:14 -06:00 committed by GitHub
parent 41ad18f107
commit 4e139ed199
50 changed files with 6089 additions and 8647 deletions

Cargo.lock (generated)

@ -1320,6 +1320,12 @@ dependencies = [
"bytemuck",
]
[[package]]
name = "glob"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "glow"
version = "0.16.0"
@ -1844,7 +1850,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]
[[package]]
@ -2875,6 +2881,15 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
"serde",
]
[[package]]
name = "serde_v8"
version = "0.181.0"
@ -3108,6 +3123,12 @@ dependencies = [
"syn",
]
[[package]]
name = "target-triple"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078"
[[package]]
name = "termcolor"
version = "1.4.1"
@ -3240,11 +3261,26 @@ dependencies = [
"syn",
]
[[package]]
name = "toml"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
@ -3253,6 +3289,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
@ -3324,7 +3362,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1"
dependencies = [
"cc",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]
[[package]]
name = "trybuild"
version = "1.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4"
dependencies = [
"glob",
"serde",
"serde_derive",
"serde_json",
"target-triple",
"termcolor",
"toml",
]
[[package]]
@ -3926,6 +3979,7 @@ dependencies = [
"serde",
"serde_json",
"strum",
"trybuild",
"wasm-bindgen",
"wasm-bindgen-futures",
"wasm-bindgen-test",
@ -3981,7 +4035,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.48.0",
"windows-sys 0.59.0",
]
[[package]]


@ -124,6 +124,7 @@ serde_json = "1.0.133"
smallvec = "1"
static_assertions = "1.1.0"
strum = { version = "0.25.0", features = ["derive"] }
trybuild = "1"
tracy-client = "0.17"
thiserror = "1.0.69"
wgpu = { version = "23.0.1", path = "./wgpu", default-features = false }


@ -16,6 +16,11 @@ name = "wgpu-test"
path = "tests/root.rs"
harness = false
[[test]]
name = "wgpu-compile-test"
path = "compile_tests/root.rs"
harness = true
[features]
webgl = ["wgpu/webgl"]
@ -27,6 +32,7 @@ bytemuck.workspace = true
cfg-if.workspace = true
ctor.workspace = true
futures-lite.workspace = true
glam.workspace = true
itertools.workspace = true
libtest-mimic.workspace = true
log.workspace = true
@ -37,10 +43,10 @@ profiling.workspace = true
serde_json.workspace = true
serde.workspace = true
strum = { workspace = true, features = ["derive"] }
wgpu-macros.workspace = true
trybuild.workspace = true
wgpu = { workspace = true, features = ["wgsl"] }
wgpu-macros.workspace = true
wgt = { workspace = true, features = ["serde"] }
glam.workspace = true
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
env_logger.workspace = true


@ -0,0 +1,18 @@
// Test to ensure that ComputePass without forget_lifetime does not compile
// when the CommandEncoder is finished while the ComputePass is still alive.
//
// See #6145 for more info.
fn main() {
let instance = wgpu::Instance::new(Default::default());
let adapter = pollster::block_on(instance.request_adapter(&Default::default())).unwrap();
let (device, queue) =
pollster::block_on(adapter.request_device(&Default::default(), None)).unwrap();
let mut encoder = device.create_command_encoder(&Default::default());
let _compute_pass = encoder.begin_compute_pass(&Default::default());
// set up the compute pass...
let cmd_buffer = encoder.finish();
queue.submit([cmd_buffer]);
}
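For contrast, a hedged sketch of the variant that does compile: `forget_lifetime` returns a `ComputePass<'static>`, severing the compile-time borrow of the encoder, so ending the pass before `finish()` becomes a run-time (validation) requirement instead.

fn main() {
    let instance = wgpu::Instance::new(Default::default());
    let adapter = pollster::block_on(instance.request_adapter(&Default::default())).unwrap();
    let (device, queue) =
        pollster::block_on(adapter.request_device(&Default::default(), None)).unwrap();
    let mut encoder = device.create_command_encoder(&Default::default());
    // forget_lifetime() drops the borrow of `encoder`.
    let compute_pass = encoder.begin_compute_pass(&Default::default()).forget_lifetime();
    // set up the compute pass...
    drop(compute_pass); // the pass must still end before the encoder is finished
    let cmd_buffer = encoder.finish();
    queue.submit([cmd_buffer]);
}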


@ -0,0 +1,13 @@
error[E0505]: cannot move out of `encoder` because it is borrowed
--> compile_tests/fail/cpass_lifetime.rs:16:22
|
12 | let mut encoder = device.create_command_encoder(&Default::default());
| ----------- binding `encoder` declared here
13 | let _compute_pass = encoder.begin_compute_pass(&Default::default());
| ------- borrow of `encoder` occurs here
...
16 | let cmd_buffer = encoder.finish();
| ^^^^^^^ move out of `encoder` occurs here
17 | queue.submit([cmd_buffer]);
18 | }
| - borrow might be used here, when `_compute_pass` is dropped and runs the destructor for type `wgpu::ComputePass<'_>`


@ -0,0 +1,18 @@
// Test to ensure that RenderPass without forget_lifetime does not compile
// when the CommandEncoder is finished while the RenderPass is still alive.
//
// See #6145 for more info.
fn main() {
let instance = wgpu::Instance::new(Default::default());
let adapter = pollster::block_on(instance.request_adapter(&Default::default())).unwrap();
let (device, queue) =
pollster::block_on(adapter.request_device(&Default::default(), None)).unwrap();
let mut encoder = device.create_command_encoder(&Default::default());
let _render_pass = encoder.begin_render_pass(&Default::default());
// set up the render pass...
let cmd_buffer = encoder.finish();
queue.submit([cmd_buffer]);
}


@ -0,0 +1,13 @@
error[E0505]: cannot move out of `encoder` because it is borrowed
--> compile_tests/fail/rpass_lifetime.rs:16:22
|
12 | let mut encoder = device.create_command_encoder(&Default::default());
| ----------- binding `encoder` declared here
13 | let _render_pass = encoder.begin_render_pass(&Default::default());
| ------- borrow of `encoder` occurs here
...
16 | let cmd_buffer = encoder.finish();
| ^^^^^^^ move out of `encoder` occurs here
17 | queue.submit([cmd_buffer]);
18 | }
| - borrow might be used here, when `_render_pass` is dropped and runs the destructor for type `wgpu::RenderPass<'_>`


@ -0,0 +1,7 @@
// Tests ensuring that constructs which should not compile do, in fact, fail to compile.
#[test]
fn compile_fail() {
let t = trybuild::TestCases::new();
t.compile_fail("compile_tests/fail/*.rs");
}
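Because the test target above sets `harness = true`, these cases run under the standard test harness and can be invoked on their own with `cargo test --test wgpu-compile-test`. For each file matched by the glob, trybuild compiles it and diffs the compiler output against the adjacent `.stderr` snapshot shown above.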


@ -274,7 +274,7 @@ static IMAGE_BITMAP_IMPORT: GpuTestConfiguration =
origin: src_origin,
flip_y: src_flip_y,
},
wgpu::CopyExternalImageDestInfo {
wgt::CopyExternalImageDestInfo {
texture: &texture,
mip_level: 0,
origin: dest_origin,

View File

@ -1,4 +1,4 @@
use std::sync::Arc;
use std::{fmt, sync::Arc};
use crate::{
hal_api::HalApi,
@ -85,6 +85,12 @@ impl Global {
}
}
impl fmt::Debug for Global {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Global").finish()
}
}
impl Drop for Global {
fn drop(&mut self) {
profiling::scope!("Global::drop");


@ -35,7 +35,7 @@ serde = ["dep:serde"]
counters = []
[dependencies]
bitflags.workspace = true
bitflags = { workspace = true, features = ["serde"] }
serde = { workspace = true, features = ["derive"], optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]


@ -4,7 +4,7 @@ fn main() {
webgl: { all(target_arch = "wasm32", not(target_os = "emscripten"), feature = "webgl") },
webgpu: { all(target_arch = "wasm32", not(target_os = "emscripten"), feature = "webgpu") },
Emscripten: { all(target_arch = "wasm32", target_os = "emscripten") },
wgpu_core: { any(native, webgl, emscripten) },
wgpu_core: { any(native, webgl, Emscripten) },
send_sync: { any(
not(target_arch = "wasm32"),
all(feature = "fragile-send-sync-non-atomic-wasm", not(target_feature = "atomics"))


@ -1,6 +1,5 @@
use std::{future::Future, sync::Arc, thread};
use std::future::Future;
use crate::context::{DeviceRequest, DynContext};
use crate::*;
/// Handle to a physical graphics and/or compute device.
@ -16,19 +15,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUAdapter`](https://gpuweb.github.io/gpuweb/#gpu-adapter).
#[derive(Debug)]
pub struct Adapter {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchAdapter,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Adapter: Send, Sync);
impl Drop for Adapter {
fn drop(&mut self) {
if !thread::panicking() {
self.context.adapter_drop(self.data.as_ref())
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(Adapter => .inner);
pub use wgt::RequestAdapterOptions as RequestAdapterOptionsBase;
/// Additional information required when requesting an adapter.
@ -71,31 +63,11 @@ impl Adapter {
desc: &DeviceDescriptor<'_>,
trace_path: Option<&std::path::Path>,
) -> impl Future<Output = Result<(Device, Queue), RequestDeviceError>> + WasmNotSend {
let context = Arc::clone(&self.context);
let device = DynContext::adapter_request_device(
&*self.context,
self.data.as_ref(),
desc,
trace_path,
);
let device = self.inner.request_device(desc, trace_path);
async move {
device.await.map(
|DeviceRequest {
device_data,
queue_data,
}| {
(
Device {
context: Arc::clone(&context),
data: device_data,
},
Queue {
context,
data: queue_data,
},
)
},
)
device
.await
.map(|(device, queue)| (Device { inner: device }, Queue { inner: queue }))
}
}
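A usage sketch for the new return shape (assumes `pollster` to block on the future, as the tests in this PR do):

let (device, queue) = pollster::block_on(
    adapter.request_device(&wgpu::DeviceDescriptor::default(), None),
)
.expect("failed to acquire device");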
@ -112,33 +84,21 @@ impl Adapter {
desc: &DeviceDescriptor<'_>,
trace_path: Option<&std::path::Path>,
) -> Result<(Device, Queue), RequestDeviceError> {
let context = Arc::clone(&self.context);
unsafe {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
// Part of the safety requirements is that the device was generated from the same adapter.
// Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices.
.unwrap()
.create_device_from_hal(
crate::context::downcast_ref(self.data.as_ref()),
hal_device,
desc,
trace_path,
)
}
.map(|(device, queue)| {
(
Device {
context: Arc::clone(&context),
data: Box::new(device),
},
Queue {
context,
data: Box::new(queue),
},
)
})
let core_adapter = self.inner.as_core();
let (device, queue) = unsafe {
core_adapter
.context
.create_device_from_hal(core_adapter, hal_device, desc, trace_path)
}?;
Ok((
Device {
inner: device.into(),
},
Queue {
inner: queue.into(),
},
))
}
/// Apply a callback to this `Adapter`'s underlying backend adapter.
@ -165,16 +125,11 @@ impl Adapter {
&self,
hal_adapter_callback: F,
) -> R {
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
if let Some(adapter) = self.inner.as_core_opt() {
unsafe {
ctx.adapter_as_hal::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_adapter_callback,
)
adapter
.context
.adapter_as_hal::<A, F, R>(adapter, hal_adapter_callback)
}
} else {
hal_adapter_callback(None)
@ -183,31 +138,27 @@ impl Adapter {
/// Returns whether this adapter may present to the passed surface.
pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool {
DynContext::adapter_is_surface_supported(
&*self.context,
self.data.as_ref(),
surface.surface_data.as_ref(),
)
self.inner.is_surface_supported(&surface.inner)
}
/// The features which can be used to create devices on this adapter.
pub fn features(&self) -> Features {
DynContext::adapter_features(&*self.context, self.data.as_ref())
self.inner.features()
}
/// The best limits which can be used to create devices on this adapter.
pub fn limits(&self) -> Limits {
DynContext::adapter_limits(&*self.context, self.data.as_ref())
self.inner.limits()
}
/// Get info about the adapter itself.
pub fn get_info(&self) -> AdapterInfo {
DynContext::adapter_get_info(&*self.context, self.data.as_ref())
self.inner.get_info()
}
/// Get the downlevel capabilities supported by this adapter.
pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref())
self.inner.downlevel_capabilities()
}
/// Returns the features supported for a given texture format by this adapter.
@ -215,7 +166,7 @@ impl Adapter {
/// Note that the WebGPU spec further restricts the available usages/features.
/// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature.
pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format)
self.inner.get_texture_format_features(format)
}
/// Generates a timestamp using the clock used by the presentation engine.
@ -240,6 +191,6 @@ impl Adapter {
///
/// [Instant]: std::time::Instant
pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref())
self.inner.get_presentation_timestamp()
}
}


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a binding group.
@ -12,21 +10,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUBindGroup`](https://gpuweb.github.io/gpuweb/#gpubindgroup).
#[derive(Debug)]
pub struct BindGroup {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchBindGroup,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroup: Send, Sync);
super::impl_partialeq_eq_hash!(BindGroup);
impl Drop for BindGroup {
fn drop(&mut self) {
if !thread::panicking() {
self.context.bind_group_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(BindGroup => .inner);
/// Resource that can be bound to a pipeline.
///


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a binding group layout.
@ -15,21 +13,12 @@ use crate::*;
/// https://gpuweb.github.io/gpuweb/#gpubindgrouplayout).
#[derive(Debug)]
pub struct BindGroupLayout {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchBindGroupLayout,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
super::impl_partialeq_eq_hash!(BindGroupLayout);
impl Drop for BindGroupLayout {
fn drop(&mut self) {
if !thread::panicking() {
self.context.bind_group_layout_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(BindGroupLayout => .inner);
/// Describes a [`BindGroupLayout`].
///


@ -1,7 +1,6 @@
use crate::context::{Context, DynContext};
use crate::{Buffer, Data, Label, C};
use crate::dispatch;
use crate::{Buffer, Label};
use std::sync::Arc;
use std::thread;
use wgt::WasmNotSendSync;
/// Descriptor for the size defining attributes of a triangle geometry, for a bottom level acceleration structure.
@ -88,22 +87,6 @@ impl TlasInstance {
}
}
pub(crate) struct DynContextTlasInstance<'a> {
pub(crate) blas: &'a Data,
pub(crate) transform: &'a [f32; 12],
pub(crate) custom_index: u32,
pub(crate) mask: u8,
}
/// Context version of [TlasInstance].
#[allow(dead_code)]
pub struct ContextTlasInstance<'a, T: Context> {
pub(crate) blas_data: &'a T::BlasData,
pub(crate) transform: &'a [f32; 12],
pub(crate) custom_index: u32,
pub(crate) mask: u8,
}
#[derive(Debug)]
/// Definition for a triangle geometry for a Bottom Level Acceleration Structure (BLAS).
///
@ -147,8 +130,7 @@ static_assertions::assert_impl_all!(BlasBuildEntry<'_>: WasmNotSendSync);
#[derive(Debug)]
pub(crate) struct BlasShared {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchBlas,
}
static_assertions::assert_impl_all!(BlasShared: WasmNotSendSync);
@ -166,6 +148,8 @@ pub struct Blas {
}
static_assertions::assert_impl_all!(Blas: WasmNotSendSync);
crate::cmp::impl_eq_ord_hash_proxy!(Blas => .shared.inner);
impl Blas {
/// Raw handle to the acceleration structure, used inside raw instance buffers.
pub fn handle(&self) -> Option<u64> {
@ -173,45 +157,17 @@ impl Blas {
}
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::blas_destroy(&*self.shared.context, self.shared.data.as_ref());
self.shared.inner.destroy();
}
}
impl Drop for BlasShared {
fn drop(&mut self) {
if !thread::panicking() {
self.context.blas_drop(self.data.as_ref());
}
}
}
pub(crate) struct DynContextBlasTriangleGeometry<'a> {
pub(crate) size: &'a BlasTriangleGeometrySizeDescriptor,
pub(crate) vertex_buffer: &'a Data,
pub(crate) index_buffer: Option<&'a Data>,
pub(crate) transform_buffer: Option<&'a Data>,
pub(crate) first_vertex: u32,
pub(crate) vertex_stride: wgt::BufferAddress,
pub(crate) index_buffer_offset: Option<wgt::BufferAddress>,
pub(crate) transform_buffer_offset: Option<wgt::BufferAddress>,
}
pub(crate) enum DynContextBlasGeometries<'a> {
TriangleGeometries(Box<dyn Iterator<Item = DynContextBlasTriangleGeometry<'a>> + 'a>),
}
pub(crate) struct DynContextBlasBuildEntry<'a> {
pub(crate) blas_data: &'a Data,
pub(crate) geometries: DynContextBlasGeometries<'a>,
}
/// Context version of [BlasTriangleGeometry].
#[allow(dead_code)]
pub struct ContextBlasTriangleGeometry<'a, T: Context> {
pub struct ContextBlasTriangleGeometry<'a> {
pub(crate) size: &'a BlasTriangleGeometrySizeDescriptor,
pub(crate) vertex_buffer: &'a T::BufferData,
pub(crate) index_buffer: Option<&'a T::BufferData>,
pub(crate) transform_buffer: Option<&'a T::BufferData>,
pub(crate) vertex_buffer: &'a dispatch::DispatchBuffer,
pub(crate) index_buffer: Option<&'a dispatch::DispatchBuffer>,
pub(crate) transform_buffer: Option<&'a dispatch::DispatchBuffer>,
pub(crate) first_vertex: u32,
pub(crate) vertex_stride: wgt::BufferAddress,
pub(crate) index_buffer_offset: Option<wgt::BufferAddress>,
@ -219,14 +175,14 @@ pub struct ContextBlasTriangleGeometry<'a, T: Context> {
}
/// Context version of [BlasGeometries].
pub enum ContextBlasGeometries<'a, T: Context> {
pub enum ContextBlasGeometries<'a> {
/// Triangle geometries.
TriangleGeometries(Box<dyn Iterator<Item = ContextBlasTriangleGeometry<'a, T>> + 'a>),
TriangleGeometries(Box<dyn Iterator<Item = ContextBlasTriangleGeometry<'a>> + 'a>),
}
/// Context version see [BlasBuildEntry].
#[allow(dead_code)]
pub struct ContextBlasBuildEntry<'a, T: Context> {
pub(crate) blas_data: &'a T::BlasData,
pub(crate) geometries: ContextBlasGeometries<'a, T>,
pub struct ContextBlasBuildEntry<'a> {
pub(crate) blas: &'a dispatch::DispatchBlas,
pub(crate) geometries: ContextBlasGeometries<'a>,
}


@ -1,13 +1,10 @@
use std::{
error, fmt,
ops::{Bound, Deref, DerefMut, Range, RangeBounds},
sync::Arc,
thread,
};
use parking_lot::Mutex;
use crate::context::DynContext;
use crate::*;
/// Handle to a GPU-accessible buffer.
@ -172,8 +169,7 @@ use crate::*;
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
#[derive(Debug)]
pub struct Buffer {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchBuffer,
pub(crate) map_context: Mutex<MapContext>,
pub(crate) size: wgt::BufferAddress,
pub(crate) usage: BufferUsages,
@ -182,7 +178,7 @@ pub struct Buffer {
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);
super::impl_partialeq_eq_hash!(Buffer);
crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
impl Buffer {
/// Return the binding view of the entire buffer.
@ -210,16 +206,11 @@ impl Buffer {
&self,
hal_buffer_callback: F,
) -> R {
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
if let Some(buffer) = self.inner.as_core_opt() {
unsafe {
ctx.buffer_as_hal::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_buffer_callback,
)
buffer
.context
.buffer_as_hal::<A, F, R>(buffer, hal_buffer_callback)
}
} else {
hal_buffer_callback(None)
@ -253,12 +244,12 @@ impl Buffer {
/// Flushes any pending write operations and unmaps the buffer from host memory.
pub fn unmap(&self) {
self.map_context.lock().reset();
DynContext::buffer_unmap(&*self.context, self.data.as_ref());
self.inner.unmap();
}
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::buffer_destroy(&*self.context, self.data.as_ref());
self.inner.destroy();
}
/// Returns the length of the buffer allocation in bytes.
@ -347,13 +338,9 @@ impl<'a> BufferSlice<'a> {
};
mc.initial_range = self.offset..end;
DynContext::buffer_map_async(
&*self.buffer.context,
self.buffer.data.as_ref(),
mode,
self.offset..end,
Box::new(callback),
)
self.buffer
.inner
.map_async(mode, self.offset..end, Box::new(callback));
}
/// Gain read-only access to the bytes of a [mapped] [`Buffer`].
@ -372,12 +359,11 @@ impl<'a> BufferSlice<'a> {
/// [mapped]: Buffer#mapping-buffers
pub fn get_mapped_range(&self) -> BufferView<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context,
self.buffer.data.as_ref(),
self.offset..end,
);
BufferView { slice: *self, data }
let range = self.buffer.inner.get_mapped_range(self.offset..end);
BufferView {
slice: *self,
inner: range,
}
}
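Put together, a minimal read-back sketch (assumes a `buffer` created with `BufferUsages::MAP_READ` and a native `device` to poll):

buffer.slice(..).map_async(wgpu::MapMode::Read, |result| {
    result.expect("buffer mapping failed");
});
device.poll(wgpu::Maintain::Wait).panic_on_timeout(); // drive the map callback
{
    let view = buffer.slice(..).get_mapped_range();
    println!("first byte: {:?}", view.first());
} // the BufferView must be dropped before unmapping
buffer.unmap();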
/// Synchronously and immediately map a buffer for reading. If the buffer is not immediately mappable
@ -390,15 +376,11 @@ impl<'a> BufferSlice<'a> {
/// This is only available on WebGPU, on any other backends this will return `None`.
#[cfg(webgpu)]
pub fn get_mapped_range_as_array_buffer(&self) -> Option<js_sys::ArrayBuffer> {
let end = self.buffer.map_context.lock().add(self.offset, self.size);
self.buffer
.context
.as_any()
.downcast_ref::<crate::backend::ContextWebGpu>()
.map(|ctx| {
let buffer_data = crate::context::downcast_ref(self.buffer.data.as_ref());
let end = self.buffer.map_context.lock().add(self.offset, self.size);
ctx.buffer_get_mapped_range_as_array_buffer(buffer_data, self.offset..end)
})
.inner
.get_mapped_range_as_array_buffer(self.offset..end)
}
/// Gain write access to the bytes of a [mapped] [`Buffer`].
@ -417,14 +399,10 @@ impl<'a> BufferSlice<'a> {
/// [mapped]: Buffer#mapping-buffers
pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context,
self.buffer.data.as_ref(),
self.offset..end,
);
let range = self.buffer.inner.get_mapped_range(self.offset..end);
BufferViewMut {
slice: *self,
data,
inner: range,
readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
}
}
@ -577,7 +555,7 @@ static_assertions::assert_impl_all!(MapMode: Send, Sync);
#[derive(Debug)]
pub struct BufferView<'a> {
slice: BufferSlice<'a>,
data: Box<dyn crate::context::BufferMappedRange>,
inner: dispatch::DispatchBufferMappedRange,
}
impl std::ops::Deref for BufferView<'_> {
@ -585,14 +563,14 @@ impl std::ops::Deref for BufferView<'_> {
#[inline]
fn deref(&self) -> &[u8] {
self.data.slice()
self.inner.slice()
}
}
impl AsRef<[u8]> for BufferView<'_> {
#[inline]
fn as_ref(&self) -> &[u8] {
self.data.slice()
self.inner.slice()
}
}
@ -617,14 +595,14 @@ impl AsRef<[u8]> for BufferView<'_> {
#[derive(Debug)]
pub struct BufferViewMut<'a> {
slice: BufferSlice<'a>,
data: Box<dyn crate::context::BufferMappedRange>,
inner: dispatch::DispatchBufferMappedRange,
readable: bool,
}
impl AsMut<[u8]> for BufferViewMut<'_> {
#[inline]
fn as_mut(&mut self) -> &mut [u8] {
self.data.slice_mut()
self.inner.slice_mut()
}
}
@ -636,13 +614,13 @@ impl Deref for BufferViewMut<'_> {
log::warn!("Reading from a BufferViewMut is slow and not recommended.");
}
self.data.slice()
self.inner.slice()
}
}
impl DerefMut for BufferViewMut<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data.slice_mut()
self.inner.slice_mut()
}
}
@ -666,14 +644,6 @@ impl Drop for BufferViewMut<'_> {
}
}
impl Drop for Buffer {
fn drop(&mut self) {
if !thread::panicking() {
self.context.buffer_drop(self.data.as_ref());
}
}
}
fn check_buffer_bounds(
buffer_size: BufferAddress,
offset: BufferAddress,


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a command buffer on the GPU.
@ -11,18 +9,9 @@ use crate::*;
/// Corresponds to [WebGPU `GPUCommandBuffer`](https://gpuweb.github.io/gpuweb/#command-buffer).
#[derive(Debug)]
pub struct CommandBuffer {
pub(crate) context: Arc<C>,
pub(crate) data: Option<Box<Data>>,
pub(crate) inner: Option<dispatch::DispatchCommandBuffer>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
impl Drop for CommandBuffer {
fn drop(&mut self) {
if !thread::panicking() {
if let Some(data) = self.data.take() {
self.context.command_buffer_drop(data.as_ref());
}
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(CommandBuffer => .inner);


@ -1,7 +1,12 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use std::ops::Range;
use crate::context::DynContext;
use crate::*;
use crate::{
api::{
blas::BlasBuildEntry,
tlas::{TlasBuildEntry, TlasPackage},
},
*,
};
/// Encodes a series of GPU operations.
///
@ -14,19 +19,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUCommandEncoder`](https://gpuweb.github.io/gpuweb/#command-encoder).
#[derive(Debug)]
pub struct CommandEncoder {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchCommandEncoder,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
impl Drop for CommandEncoder {
fn drop(&mut self) {
if !thread::panicking() {
self.context.command_encoder_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(CommandEncoder => .inner);
/// Describes a [`CommandEncoder`].
///
@ -55,31 +53,13 @@ pub type TexelCopyTextureInfo<'a> = TexelCopyTextureInfoBase<&'a Texture>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(TexelCopyTextureInfo<'_>: Send, Sync);
use crate::api::blas::{
BlasBuildEntry, BlasGeometries, BlasTriangleGeometry, DynContextBlasBuildEntry,
DynContextBlasGeometries, DynContextBlasTriangleGeometry, DynContextTlasInstance, TlasInstance,
};
use crate::api::tlas::{
DynContextTlasBuildEntry, DynContextTlasPackage, TlasBuildEntry, TlasPackage,
};
pub use wgt::CopyExternalImageDestInfo as CopyExternalImageDestInfoBase;
/// View of a texture which can be used to copy to a texture, including
/// color space and alpha premultiplication information.
///
/// Corresponds to [WebGPU `GPUCopyExternalImageDestInfo`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexturetagged).
pub type CopyExternalImageDestInfo<'a> = CopyExternalImageDestInfoBase<&'a Texture>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(TexelCopyTextureInfo<'_>: Send, Sync);
impl CommandEncoder {
/// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution.
pub fn finish(mut self) -> CommandBuffer {
let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut());
let buffer = self.inner.finish();
CommandBuffer {
context: Arc::clone(&self.context),
data: Some(data),
inner: Some(buffer),
}
}
@ -97,14 +77,10 @@ impl CommandEncoder {
&'encoder mut self,
desc: &RenderPassDescriptor<'_>,
) -> RenderPass<'encoder> {
let data =
DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc);
let rpass = self.inner.begin_render_pass(desc);
RenderPass {
inner: RenderPassInner {
data,
context: self.context.clone(),
},
encoder_guard: PhantomData,
inner: rpass,
_encoder_guard: api::PhantomDrop::default(),
}
}
@ -122,17 +98,10 @@ impl CommandEncoder {
&'encoder mut self,
desc: &ComputePassDescriptor<'_>,
) -> ComputePass<'encoder> {
let data = DynContext::command_encoder_begin_compute_pass(
&*self.context,
self.data.as_ref(),
desc,
);
let cpass = self.inner.begin_compute_pass(desc);
ComputePass {
inner: ComputePassInner {
data,
context: self.context.clone(),
},
encoder_guard: PhantomData,
inner: cpass,
_encoder_guard: api::PhantomDrop::default(),
}
}
@ -151,12 +120,10 @@ impl CommandEncoder {
destination_offset: BufferAddress,
copy_size: BufferAddress,
) {
DynContext::command_encoder_copy_buffer_to_buffer(
&*self.context,
self.data.as_ref(),
source.data.as_ref(),
self.inner.copy_buffer_to_buffer(
&source.inner,
source_offset,
destination.data.as_ref(),
&destination.inner,
destination_offset,
copy_size,
);
@ -169,13 +136,8 @@ impl CommandEncoder {
destination: TexelCopyTextureInfo<'_>,
copy_size: Extent3d,
) {
DynContext::command_encoder_copy_buffer_to_texture(
&*self.context,
self.data.as_ref(),
source,
destination,
copy_size,
);
self.inner
.copy_buffer_to_texture(source, destination, copy_size);
}
/// Copy data from a texture to a buffer.
@ -185,13 +147,8 @@ impl CommandEncoder {
destination: TexelCopyBufferInfo<'_>,
copy_size: Extent3d,
) {
DynContext::command_encoder_copy_texture_to_buffer(
&*self.context,
self.data.as_ref(),
source,
destination,
copy_size,
);
self.inner
.copy_texture_to_buffer(source, destination, copy_size);
}
/// Copy data from one texture to another.
@ -207,13 +164,8 @@ impl CommandEncoder {
destination: TexelCopyTextureInfo<'_>,
copy_size: Extent3d,
) {
DynContext::command_encoder_copy_texture_to_texture(
&*self.context,
self.data.as_ref(),
source,
destination,
copy_size,
);
self.inner
.copy_texture_to_texture(source, destination, copy_size);
}
/// Clears texture to zero.
@ -230,12 +182,7 @@ impl CommandEncoder {
/// - `CLEAR_TEXTURE` extension not enabled
/// - Range is out of bounds
pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
DynContext::command_encoder_clear_texture(
&*self.context,
self.data.as_ref(),
texture.data.as_ref(),
subresource_range,
);
self.inner.clear_texture(&texture.inner, subresource_range);
}
/// Clears buffer to zero.
@ -250,28 +197,22 @@ impl CommandEncoder {
offset: BufferAddress,
size: Option<BufferAddress>,
) {
DynContext::command_encoder_clear_buffer(
&*self.context,
self.data.as_ref(),
buffer.data.as_ref(),
offset,
size,
);
self.inner.clear_buffer(&buffer.inner, offset, size);
}
/// Inserts debug marker.
pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label);
self.inner.insert_debug_marker(label);
}
/// Start recording commands and group them into a debug marker group.
pub fn push_debug_group(&mut self, label: &str) {
DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label);
self.inner.push_debug_group(label);
}
/// Stops command recording and creates debug group.
pub fn pop_debug_group(&mut self) {
DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref());
self.inner.pop_debug_group();
}
/// Resolves a query set, writing the results into the supplied destination buffer.
@ -285,15 +226,13 @@ impl CommandEncoder {
destination: &Buffer,
destination_offset: BufferAddress,
) {
DynContext::command_encoder_resolve_query_set(
&*self.context,
self.data.as_ref(),
query_set.data.as_ref(),
self.inner.resolve_query_set(
&query_set.inner,
query_range.start,
query_range.end - query_range.start,
destination.data.as_ref(),
&destination.inner,
destination_offset,
)
);
}
/// Returns the inner hal CommandEncoder using a callback. The hal command encoder will be `None` if the
@ -312,16 +251,16 @@ impl CommandEncoder {
>(
&mut self,
hal_command_encoder_callback: F,
) -> Option<R> {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.command_encoder_as_hal_mut::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_command_encoder_callback,
)
})
) -> R {
if let Some(encoder) = self.inner.as_core_mut_opt() {
unsafe {
encoder
.context
.command_encoder_as_hal_mut::<A, F, R>(encoder, hal_command_encoder_callback)
}
} else {
hal_command_encoder_callback(None)
}
}
}
@ -340,12 +279,7 @@ impl CommandEncoder {
/// recorded so far and before all commands recorded after.
/// This may depend both on the backend and the driver.
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::command_encoder_write_timestamp(
&*self.context,
self.data.as_mut(),
query_set.data.as_ref(),
query_index,
)
self.inner.write_timestamp(&query_set.inner, query_index);
}
}
@ -387,61 +321,8 @@ impl CommandEncoder {
blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>,
tlas: impl IntoIterator<Item = &'a TlasPackage>,
) {
let mut blas = blas.into_iter().map(|e: &BlasBuildEntry<'_>| {
let geometries = match &e.geometry {
BlasGeometries::TriangleGeometries(triangle_geometries) => {
let iter = triangle_geometries
.iter()
.map(
|tg: &BlasTriangleGeometry<'_>| DynContextBlasTriangleGeometry {
size: tg.size,
vertex_buffer: tg.vertex_buffer.data.as_ref(),
index_buffer: tg
.index_buffer
.map(|index_buffer| index_buffer.data.as_ref()),
transform_buffer: tg
.transform_buffer
.map(|transform_buffer| transform_buffer.data.as_ref()),
first_vertex: tg.first_vertex,
vertex_stride: tg.vertex_stride,
index_buffer_offset: tg.index_buffer_offset,
transform_buffer_offset: tg.transform_buffer_offset,
},
);
DynContextBlasGeometries::TriangleGeometries(Box::new(iter))
}
};
DynContextBlasBuildEntry {
blas_data: e.blas.shared.data.as_ref(),
geometries,
}
});
let mut tlas = tlas.into_iter().map(|e: &TlasPackage| {
let instances = e.instances.iter().map(|instance: &Option<TlasInstance>| {
instance.as_ref().map(|instance| DynContextTlasInstance {
blas: instance.blas.data.as_ref(),
transform: &instance.transform,
custom_index: instance.custom_index,
mask: instance.mask,
})
});
DynContextTlasPackage {
tlas_data: e.tlas.data.as_ref(),
instances: Box::new(instances),
lowest_unmodified: e.lowest_unmodified,
}
});
DynContext::command_encoder_build_acceleration_structures(
&*self.context,
self.data.as_ref(),
&mut blas,
&mut tlas,
);
self.inner
.build_acceleration_structures(&mut blas.into_iter(), &mut tlas.into_iter());
}
/// Build bottom and top level acceleration structures.
@ -460,52 +341,9 @@ impl CommandEncoder {
blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>,
tlas: impl IntoIterator<Item = &'a TlasBuildEntry<'a>>,
) {
let mut blas = blas.into_iter().map(|e: &BlasBuildEntry<'_>| {
let geometries = match &e.geometry {
BlasGeometries::TriangleGeometries(triangle_geometries) => {
let iter = triangle_geometries
.iter()
.map(
|tg: &BlasTriangleGeometry<'_>| DynContextBlasTriangleGeometry {
size: tg.size,
vertex_buffer: tg.vertex_buffer.data.as_ref(),
index_buffer: tg
.index_buffer
.map(|index_buffer| index_buffer.data.as_ref()),
transform_buffer: tg
.transform_buffer
.map(|transform_buffer| transform_buffer.data.as_ref()),
first_vertex: tg.first_vertex,
vertex_stride: tg.vertex_stride,
index_buffer_offset: tg.index_buffer_offset,
transform_buffer_offset: tg.transform_buffer_offset,
},
);
DynContextBlasGeometries::TriangleGeometries(Box::new(iter))
}
};
DynContextBlasBuildEntry {
blas_data: e.blas.shared.data.as_ref(),
geometries,
}
});
let mut tlas = tlas
.into_iter()
.map(|e: &TlasBuildEntry<'_>| DynContextTlasBuildEntry {
tlas_data: e.tlas.data.as_ref(),
instance_buffer_data: e.instance_buffer.data.as_ref(),
instance_count: e.instance_count,
});
DynContext::command_encoder_build_acceleration_structures_unsafe_tlas(
&*self.context,
self.data.as_ref(),
&mut blas,
&mut tlas,
self.inner.build_acceleration_structures_unsafe_tlas(
&mut blas.into_iter(),
&mut tlas.into_iter(),
);
}
}


@ -1,6 +1,3 @@
use std::{marker::PhantomData, sync::Arc, thread};
use crate::context::DynContext;
use crate::*;
/// In-progress recording of a compute pass.
@ -11,14 +8,19 @@ use crate::*;
/// https://gpuweb.github.io/gpuweb/#compute-pass-encoder).
#[derive(Debug)]
pub struct ComputePass<'encoder> {
/// The inner data of the compute pass, separated out so it's easy to replace the lifetime with 'static if desired.
pub(crate) inner: ComputePassInner,
pub(crate) inner: dispatch::DispatchComputePass,
/// This lifetime is used to protect the [`CommandEncoder`] from being used
/// while the pass is alive.
pub(crate) encoder_guard: PhantomData<&'encoder ()>,
/// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime
/// from being shortened.
pub(crate) _encoder_guard: crate::api::PhantomDrop<&'encoder ()>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePass<'_>: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(ComputePass<'_> => .inner);
impl ComputePass<'_> {
/// Drops the lifetime relationship to the parent command encoder, making usage of
/// the encoder while this pass is recorded a run-time error instead.
@ -35,7 +37,7 @@ impl ComputePass<'_> {
pub fn forget_lifetime(self) -> ComputePass<'static> {
ComputePass {
inner: self.inner,
encoder_guard: PhantomData,
_encoder_guard: crate::api::PhantomDrop::default(),
}
}
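A sketch of what this enables, e.g. holding the pass in a struct that outlives the borrow of its encoder (the names here are illustrative, not part of this PR):

struct PassHolder {
    pass: wgpu::ComputePass<'static>,
}

fn start_pass(encoder: &mut wgpu::CommandEncoder) -> PassHolder {
    let pass = encoder.begin_compute_pass(&Default::default());
    // After forget_lifetime(), dropping the pass before encoder.finish()
    // is checked at validation time rather than by the borrow checker.
    PassHolder { pass: pass.forget_lifetime() }
}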
@ -45,65 +47,40 @@ impl ComputePass<'_> {
/// If the bind group has dynamic offsets, provide them in the binding order.
/// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
/// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
pub fn set_bind_group<'a>(
&mut self,
index: u32,
bind_group: impl Into<Option<&'a BindGroup>>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.into().map(|x| x.data.as_ref());
DynContext::compute_pass_set_bind_group(
&*self.inner.context,
self.inner.data.as_mut(),
index,
bg,
offsets,
);
pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset])
where
Option<&'a BindGroup>: From<BG>,
{
let bg: Option<&BindGroup> = bind_group.into();
let bg = bg.map(|bg| &bg.inner);
self.inner.set_bind_group(index, bg, offsets);
}
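Thanks to the blanket `From` impls in `std`, the `Option<&'a BindGroup>: From<BG>` bound accepts both the old and new call styles; a sketch (`pass` and `bind_group` assumed in scope):

pass.set_bind_group(0, &bind_group, &[]);       // plain reference, as before
pass.set_bind_group(0, Some(&bind_group), &[]); // explicit Option
pass.set_bind_group(0, None, &[]);              // unset the slot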
/// Sets the active compute pipeline.
pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
DynContext::compute_pass_set_pipeline(
&*self.inner.context,
self.inner.data.as_mut(),
pipeline.data.as_ref(),
);
self.inner.set_pipeline(&pipeline.inner);
}
/// Inserts debug marker.
pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::compute_pass_insert_debug_marker(
&*self.inner.context,
self.inner.data.as_mut(),
label,
);
self.inner.insert_debug_marker(label);
}
/// Start recording commands and group them into a debug marker group.
pub fn push_debug_group(&mut self, label: &str) {
DynContext::compute_pass_push_debug_group(
&*self.inner.context,
self.inner.data.as_mut(),
label,
);
self.inner.push_debug_group(label);
}
/// Stops command recording and creates debug group.
pub fn pop_debug_group(&mut self) {
DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
self.inner.pop_debug_group();
}
/// Dispatches compute work operations.
///
/// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension.
pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
DynContext::compute_pass_dispatch_workgroups(
&*self.inner.context,
self.inner.data.as_mut(),
x,
y,
z,
);
self.inner.dispatch_workgroups(x, y, z);
}
/// Dispatches compute work operations, based on the contents of the `indirect_buffer`.
@ -114,12 +91,8 @@ impl ComputePass<'_> {
indirect_buffer: &Buffer,
indirect_offset: BufferAddress,
) {
DynContext::compute_pass_dispatch_workgroups_indirect(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
);
self.inner
.dispatch_workgroups_indirect(&indirect_buffer.inner, indirect_offset);
}
}
@ -134,12 +107,7 @@ impl ComputePass<'_> {
/// For example, if `offset` is `4` and `data` is eight bytes long, this
/// call will write `data` to bytes `4..12` of push constant storage.
pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
DynContext::compute_pass_set_push_constants(
&*self.inner.context,
self.inner.data.as_mut(),
offset,
data,
);
self.inner.set_push_constants(offset, data);
}
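Matching the worked example above (a sketch; `cpass` and the workspace's `bytemuck` dependency assumed):

// Writes eight bytes to push-constant storage at byte offset 4, i.e. bytes 4..12.
cpass.set_push_constants(4, bytemuck::cast_slice(&[1.0f32, 2.0f32]));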
}
@ -152,12 +120,7 @@ impl ComputePass<'_> {
/// but timestamps can be subtracted to get the time it takes
/// for a string of operations to complete.
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_write_timestamp(
&*self.inner.context,
self.inner.data.as_mut(),
query_set.data.as_ref(),
query_index,
)
self.inner.write_timestamp(&query_set.inner, query_index);
}
}
@ -166,35 +129,14 @@ impl ComputePass<'_> {
/// Start a pipeline statistics query on this compute pass. It can be ended with
/// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_begin_pipeline_statistics_query(
&*self.inner.context,
self.inner.data.as_mut(),
query_set.data.as_ref(),
query_index,
);
self.inner
.begin_pipeline_statistics_query(&query_set.inner, query_index);
}
/// End the pipeline statistics query on this compute pass. It can be started with
/// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
pub fn end_pipeline_statistics_query(&mut self) {
DynContext::compute_pass_end_pipeline_statistics_query(
&*self.inner.context,
self.inner.data.as_mut(),
);
}
}
#[derive(Debug)]
pub(crate) struct ComputePassInner {
pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>,
}
impl Drop for ComputePassInner {
fn drop(&mut self) {
if !thread::panicking() {
self.context.compute_pass_end(self.data.as_mut());
}
self.inner.end_pipeline_statistics_query();
}
}


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a compute pipeline.
@ -10,13 +8,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUComputePipeline`](https://gpuweb.github.io/gpuweb/#compute-pipeline).
#[derive(Debug)]
pub struct ComputePipeline {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchComputePipeline,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
super::impl_partialeq_eq_hash!(ComputePipeline);
crate::cmp::impl_eq_ord_hash_proxy!(ComputePipeline => .inner);
impl ComputePipeline {
/// Get an object representing the bind group layout at a given index.
@ -27,19 +24,8 @@ impl ComputePipeline {
///
/// This method will raise a validation error if there is no bind group layout at `index`.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context);
let data = self
.context
.compute_pipeline_get_bind_group_layout(self.data.as_ref(), index);
BindGroupLayout { context, data }
}
}
impl Drop for ComputePipeline {
fn drop(&mut self) {
if !thread::panicking() {
self.context.compute_pipeline_drop(self.data.as_ref());
}
let bind_group = self.inner.get_bind_group_layout(index);
BindGroupLayout { inner: bind_group }
}
}


@ -1,10 +1,9 @@
use std::{error, fmt, future::Future, sync::Arc, thread};
use std::{error, fmt, future::Future, sync::Arc};
use parking_lot::Mutex;
use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, BlasShared, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::context::DynContext;
use crate::*;
/// Open connection to a graphics and/or compute device.
@ -17,12 +16,13 @@ use crate::*;
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug)]
pub struct Device {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);
/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
@ -43,7 +43,7 @@ impl Device {
///
/// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
pub fn poll(&self, maintain: Maintain) -> MaintainResult {
DynContext::device_poll(&*self.context, self.data.as_ref(), maintain)
self.inner.poll(maintain)
}
/// The features which can be used on this device.
@ -51,7 +51,7 @@ impl Device {
/// No additional features can be used, even if the underlying adapter can support them.
#[must_use]
pub fn features(&self) -> Features {
DynContext::device_features(&*self.context, self.data.as_ref())
self.inner.features()
}
/// The limits which can be used on this device.
@ -59,7 +59,7 @@ impl Device {
/// No better limits can be used, even if the underlying adapter can support them.
#[must_use]
pub fn limits(&self) -> Limits {
DynContext::device_limits(&*self.context, self.data.as_ref())
self.inner.limits()
}
/// Creates a shader module from either SPIR-V or WGSL source code.
@ -78,16 +78,10 @@ impl Device {
/// </div>
#[must_use]
pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
let data = DynContext::device_create_shader_module(
&*self.context,
self.data.as_ref(),
desc,
wgt::ShaderBoundChecks::new(),
);
ShaderModule {
context: Arc::clone(&self.context),
data,
}
let module = self
.inner
.create_shader_module(desc, wgt::ShaderBoundChecks::new());
ShaderModule { inner: module }
}
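A usage sketch with an inline WGSL source (the shader itself is illustrative):

let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    label: Some("example shader"),
    source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
});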
/// Creates a shader module from either SPIR-V or WGSL source code without runtime checks.
@ -105,16 +99,10 @@ impl Device {
&self,
desc: ShaderModuleDescriptor<'_>,
) -> ShaderModule {
let data = DynContext::device_create_shader_module(
&*self.context,
self.data.as_ref(),
desc,
unsafe { wgt::ShaderBoundChecks::unchecked() },
);
ShaderModule {
context: Arc::clone(&self.context),
data,
}
let module = self
.inner
.create_shader_module(desc, unsafe { wgt::ShaderBoundChecks::unchecked() });
ShaderModule { inner: module }
}
/// Creates a shader module from SPIR-V binary directly.
@ -130,53 +118,35 @@ impl Device {
&self,
desc: &ShaderModuleDescriptorSpirV<'_>,
) -> ShaderModule {
let data = unsafe {
DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc)
};
ShaderModule {
context: Arc::clone(&self.context),
data,
}
let module = unsafe { self.inner.create_shader_module_spirv(desc) };
ShaderModule { inner: module }
}
/// Creates an empty [`CommandEncoder`].
#[must_use]
pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
let data =
DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc);
CommandEncoder {
context: Arc::clone(&self.context),
data,
}
let encoder = self.inner.create_command_encoder(desc);
CommandEncoder { inner: encoder }
}
/// Creates an empty [`RenderBundleEncoder`].
#[must_use]
pub fn create_render_bundle_encoder(
pub fn create_render_bundle_encoder<'a>(
&self,
desc: &RenderBundleEncoderDescriptor<'_>,
) -> RenderBundleEncoder<'_> {
let data = DynContext::device_create_render_bundle_encoder(
&*self.context,
self.data.as_ref(),
desc,
);
) -> RenderBundleEncoder<'a> {
let encoder = self.inner.create_render_bundle_encoder(desc);
RenderBundleEncoder {
context: Arc::clone(&self.context),
data,
parent: self,
_p: Default::default(),
inner: encoder,
_p: std::marker::PhantomData,
}
}
/// Creates a new [`BindGroup`].
#[must_use]
pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc);
BindGroup {
context: Arc::clone(&self.context),
data,
}
let group = self.inner.create_bind_group(desc);
BindGroup { inner: group }
}
/// Creates a [`BindGroupLayout`].
@ -185,45 +155,29 @@ impl Device {
&self,
desc: &BindGroupLayoutDescriptor<'_>,
) -> BindGroupLayout {
let data =
DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc);
BindGroupLayout {
context: Arc::clone(&self.context),
data,
}
let layout = self.inner.create_bind_group_layout(desc);
BindGroupLayout { inner: layout }
}
/// Creates a [`PipelineLayout`].
#[must_use]
pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
let data =
DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc);
PipelineLayout {
context: Arc::clone(&self.context),
data,
}
let layout = self.inner.create_pipeline_layout(desc);
PipelineLayout { inner: layout }
}
/// Creates a [`RenderPipeline`].
#[must_use]
pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
let data =
DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc);
RenderPipeline {
context: Arc::clone(&self.context),
data,
}
let pipeline = self.inner.create_render_pipeline(desc);
RenderPipeline { inner: pipeline }
}
/// Creates a [`ComputePipeline`].
#[must_use]
pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
let data =
DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc);
ComputePipeline {
context: Arc::clone(&self.context),
data,
}
let pipeline = self.inner.create_compute_pipeline(desc);
ComputePipeline { inner: pipeline }
}
/// Creates a [`Buffer`].
@ -234,11 +188,10 @@ impl Device {
map_context.initial_range = 0..desc.size;
}
let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc);
let buffer = self.inner.create_buffer(desc);
Buffer {
context: Arc::clone(&self.context),
data,
inner: buffer,
map_context: Mutex::new(map_context),
size: desc.size,
usage: desc.usage,
@ -250,10 +203,10 @@ impl Device {
/// `desc` specifies the general format of the texture.
#[must_use]
pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc);
let texture = self.inner.create_texture(desc);
Texture {
context: Arc::clone(&self.context),
data,
inner: texture,
descriptor: TextureDescriptor {
label: None,
view_formats: &[],
@ -277,21 +230,13 @@ impl Device {
desc: &TextureDescriptor<'_>,
) -> Texture {
let texture = unsafe {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
// Part of the safety requirements is that the texture was generated from the same hal device.
// Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal textures.
.unwrap()
.create_texture_from_hal::<A>(
hal_texture,
crate::context::downcast_ref(self.data.as_ref()),
desc,
)
let core_device = self.inner.as_core();
core_device
.context
.create_texture_from_hal::<A>(hal_texture, core_device, desc)
};
Texture {
context: Arc::clone(&self.context),
data: Box::new(texture),
inner: texture.into(),
descriptor: TextureDescriptor {
label: None,
view_formats: &[],
@ -320,22 +265,14 @@ impl Device {
}
let buffer = unsafe {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
// Part of the safety requirements is that the buffer was generated from the same hal device.
// Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal buffers.
.unwrap()
.create_buffer_from_hal::<A>(
hal_buffer,
crate::context::downcast_ref(self.data.as_ref()),
desc,
)
let core_device = self.inner.as_core();
core_device
.context
.create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
};
Buffer {
context: Arc::clone(&self.context),
data: Box::new(buffer),
inner: buffer.into(),
map_context: Mutex::new(map_context),
size: desc.size,
usage: desc.usage,
@ -347,48 +284,40 @@ impl Device {
/// `desc` specifies the behavior of the sampler.
#[must_use]
pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc);
Sampler {
context: Arc::clone(&self.context),
data,
}
let sampler = self.inner.create_sampler(desc);
Sampler { inner: sampler }
}
/// Creates a new [`QuerySet`].
#[must_use]
pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc);
QuerySet {
context: Arc::clone(&self.context),
data,
}
let query_set = self.inner.create_query_set(desc);
QuerySet { inner: query_set }
}
/// Set a callback for errors that are not handled in error scopes.
pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
self.context
.device_on_uncaptured_error(self.data.as_ref(), handler);
self.inner.on_uncaptured_error(handler)
}
/// Push an error scope.
pub fn push_error_scope(&self, filter: ErrorFilter) {
self.context
.device_push_error_scope(self.data.as_ref(), filter);
self.inner.push_error_scope(filter)
}
/// Pop an error scope.
pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
self.context.device_pop_error_scope(self.data.as_ref())
self.inner.pop_error_scope()
}
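An error-scope sketch around a fallible creation (`texture_desc` assumed in scope; `pollster` used to block on the returned future):

device.push_error_scope(wgpu::ErrorFilter::Validation);
let texture = device.create_texture(&texture_desc);
if let Some(error) = pollster::block_on(device.pop_error_scope()) {
    eprintln!("validation error while creating texture: {error}");
}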
/// Starts frame capture.
pub fn start_capture(&self) {
DynContext::device_start_capture(&*self.context, self.data.as_ref())
self.inner.start_capture()
}
/// Stops frame capture.
pub fn stop_capture(&self) {
DynContext::device_stop_capture(&*self.context, self.data.as_ref())
self.inner.stop_capture()
}
/// Query internal counters from the native backend for debugging purposes.
@ -399,7 +328,7 @@ impl Device {
/// If a counter is not set, it contains its default value (zero).
#[must_use]
pub fn get_internal_counters(&self) -> wgt::InternalCounters {
DynContext::device_get_internal_counters(&*self.context, self.data.as_ref())
self.inner.get_internal_counters()
}
/// Generate a GPU memory allocation report if the underlying backend supports it.
@ -409,7 +338,7 @@ impl Device {
/// for example as a workaround for driver issues.
#[must_use]
pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
DynContext::generate_allocator_report(&*self.context, self.data.as_ref())
self.inner.generate_allocator_report()
}
/// Apply a callback to this `Device`'s underlying backend device.
@ -435,21 +364,21 @@ impl Device {
pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
&self,
hal_device_callback: F,
) -> Option<R> {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.device_as_hal::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_device_callback,
)
})
) -> R {
if let Some(core_device) = self.inner.as_core_opt() {
unsafe {
core_device
.context
.device_as_hal::<A, F, R>(core_device, hal_device_callback)
}
} else {
hal_device_callback(None)
}
}
/// Destroy this device.
pub fn destroy(&self) {
DynContext::device_destroy(&*self.context, self.data.as_ref())
self.inner.destroy()
}
/// Set a DeviceLostCallback on this device.
@ -457,11 +386,7 @@ impl Device {
&self,
callback: impl Fn(DeviceLostReason, String) + Send + 'static,
) {
DynContext::device_set_device_lost_callback(
&*self.context,
self.data.as_ref(),
Box::new(callback),
)
self.inner.set_device_lost_callback(Box::new(callback))
}
/// Create a [`PipelineCache`] with initial data
@ -506,13 +431,8 @@ impl Device {
&self,
desc: &PipelineCacheDescriptor<'_>,
) -> PipelineCache {
let data = unsafe {
DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc)
};
PipelineCache {
context: Arc::clone(&self.context),
data,
}
let cache = unsafe { self.inner.create_pipeline_cache(desc) };
PipelineCache { inner: cache }
}
}
@ -540,15 +460,10 @@ impl Device {
desc: &CreateBlasDescriptor<'_>,
sizes: BlasGeometrySizeDescriptors,
) -> Blas {
let (handle, data) =
DynContext::device_create_blas(&*self.context, self.data.as_ref(), desc, sizes);
let (handle, blas) = self.inner.create_blas(desc, sizes);
Blas {
#[allow(clippy::arc_with_non_send_sync)]
shared: Arc::new(BlasShared {
context: Arc::clone(&self.context),
data,
}),
shared: Arc::new(BlasShared { inner: blas }),
handle,
}
}
@ -564,24 +479,15 @@ impl Device {
/// [Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
#[must_use]
pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
let data = DynContext::device_create_tlas(&*self.context, self.data.as_ref(), desc);
let tlas = self.inner.create_tlas(desc);
Tlas {
context: Arc::clone(&self.context),
data,
inner: tlas,
max_instances: desc.max_instances,
}
}
}
impl Drop for Device {
fn drop(&mut self) {
if !thread::panicking() {
self.context.device_drop(self.data.as_ref());
}
}
}
/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {


@ -1,8 +1,8 @@
use parking_lot::Mutex;
use crate::*;
use crate::{dispatch::InstanceInterface, *};
use std::{future::Future, sync::Arc};
use std::future::Future;
/// Context for all other wgpu objects. Instance of wgpu.
///
@ -14,11 +14,13 @@ use std::{future::Future, sync::Arc};
/// Corresponds to [WebGPU `GPU`](https://gpuweb.github.io/gpuweb/#gpu-interface).
#[derive(Debug)]
pub struct Instance {
context: Arc<C>,
inner: dispatch::DispatchInstance,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Instance: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(Instance => .inner);
impl Default for Instance {
/// Creates a new instance of wgpu with default options.
///
@ -129,7 +131,7 @@ impl Instance {
if is_only_available_backend || (requested_webgpu && support_webgpu) {
return Self {
context: Arc::from(crate::backend::ContextWebGpu::init(_instance_desc)),
inner: crate::backend::ContextWebGpu::new(_instance_desc).into(),
};
}
}
@ -137,7 +139,7 @@ impl Instance {
#[cfg(wgpu_core)]
{
return Self {
context: Arc::from(crate::backend::ContextWgpuCore::init(_instance_desc)),
inner: crate::backend::ContextWgpuCore::new(_instance_desc).into(),
};
}
@ -158,9 +160,9 @@ impl Instance {
#[cfg(wgpu_core)]
pub unsafe fn from_hal<A: wgc::hal_api::HalApi>(hal_instance: A::Instance) -> Self {
Self {
context: Arc::new(unsafe {
crate::backend::ContextWgpuCore::from_hal_instance::<A>(hal_instance)
}),
inner: unsafe {
crate::backend::ContextWgpuCore::from_hal_instance::<A>(hal_instance).into()
},
}
}
@ -176,10 +178,8 @@ impl Instance {
/// [`Instance`]: hal::Api::Instance
#[cfg(wgpu_core)]
pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(&self) -> Option<&A::Instance> {
self.context
.as_any()
// If we don't have a wgpu-core instance, we don't have a hal instance either.
.downcast_ref::<crate::backend::ContextWgpuCore>()
self.inner
.as_core_opt()
.and_then(|ctx| unsafe { ctx.instance_as_hal::<A>() })
}
@ -195,9 +195,9 @@ impl Instance {
#[cfg(wgpu_core)]
pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self {
Self {
context: Arc::new(unsafe {
crate::backend::ContextWgpuCore::from_core_instance(core_instance)
}),
inner: unsafe {
crate::backend::ContextWgpuCore::from_core_instance(core_instance).into()
},
}
}
@ -208,20 +208,21 @@ impl Instance {
/// - `backends` - Backends from which to enumerate adapters.
#[cfg(native)]
pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> {
let context = Arc::clone(&self.context);
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| {
ctx.enumerate_adapters(backends)
.into_iter()
.map(move |adapter| crate::Adapter {
context: Arc::clone(&context),
data: Box::new(adapter),
})
.collect()
let Some(core_instance) = self.inner.as_core_opt() else {
return Vec::new();
};
core_instance
.enumerate_adapters(backends)
.into_iter()
.map(|adapter| {
let core = backend::wgpu_core::CoreAdapter {
context: core_instance.clone(),
id: adapter,
};
crate::Adapter { inner: core.into() }
})
.unwrap()
.collect()
}
/// Retrieves an [`Adapter`] which matches the given [`RequestAdapterOptions`].
@ -235,9 +236,8 @@ impl Instance {
&self,
options: &RequestAdapterOptions<'_, '_>,
) -> impl Future<Output = Option<Adapter>> + WasmNotSend {
let context = Arc::clone(&self.context);
let adapter = self.context.instance_request_adapter(options);
async move { adapter.await.map(|data| Adapter { context, data }) }
let future = self.inner.request_adapter(options);
async move { future.await.map(|inner| Adapter { inner }) }
}
/// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
@ -250,18 +250,14 @@ impl Instance {
&self,
hal_adapter: hal::ExposedAdapter<A>,
) -> Adapter {
let context = Arc::clone(&self.context);
let adapter = unsafe {
context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.unwrap()
.create_adapter_from_hal(hal_adapter)
let core_instance = self.inner.as_core();
let adapter = unsafe { core_instance.create_adapter_from_hal(hal_adapter) };
let core = backend::wgpu_core::CoreAdapter {
context: core_instance.clone(),
id: adapter,
};
Adapter {
context,
data: Box::new(adapter),
}
Adapter { inner: core.into() }
}
/// Creates a new surface targeting a given window/canvas/surface/etc.
@ -352,12 +348,11 @@ impl Instance {
&self,
target: SurfaceTargetUnsafe,
) -> Result<Surface<'window>, CreateSurfaceError> {
let data = unsafe { self.context.instance_create_surface(target) }?;
let surface = unsafe { self.inner.create_surface(target)? };
Ok(Surface {
context: Arc::clone(&self.context),
_handle_source: None,
surface_data: data,
inner: surface,
config: Mutex::new(None),
})
}
@ -379,7 +374,7 @@ impl Instance {
///
/// [`Queue`s]: Queue
pub fn poll_all(&self, force_wait: bool) -> bool {
self.context.instance_poll_all_devices(force_wait)
self.inner.poll_all_devices(force_wait)
}
/// Generates memory report.
@ -388,9 +383,6 @@ impl Instance {
/// which happens only when WebGPU is pre-selected by the instance creation.
#[cfg(wgpu_core)]
pub fn generate_report(&self) -> Option<wgc::global::GlobalReport> {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| ctx.generate_report())
self.inner.as_core_opt().map(|ctx| ctx.generate_report())
}
}


@ -19,16 +19,15 @@
//! - Avoid having to write out a long list of imports for each module.
//! - Allow docs to be written naturally, without needing to worry about needing dedicated doc imports.
//! - Treat wgpu-types types and wgpu-core types as a single set.
//!
mod adapter;
mod bind_group;
mod bind_group_layout;
mod blas;
mod buffer;
mod command_buffer;
mod command_encoder;
// Not a root type, but common descriptor types for pipelines.
mod blas;
mod common_pipeline;
mod compute_pass;
mod compute_pipeline;
@ -81,34 +80,19 @@ pub use tlas::*;
/// Object debugging label.
pub type Label<'a> = Option<&'a str>;
macro_rules! impl_partialeq_eq_hash {
($ty:ty) => {
impl PartialEq for $ty {
fn eq(&self, other: &Self) -> bool {
std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref())
}
}
impl Eq for $ty {}
/// A cute utility type that works just like PhantomData, but also
/// implements Drop. This forces any lifetimes that are associated
/// with the type to be used until the Drop impl is ran. This prevents
/// lifetimes from being shortened.
#[derive(Debug)]
pub(crate) struct PhantomDrop<T>(std::marker::PhantomData<T>);
impl std::hash::Hash for $ty {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let ptr = self.data.as_ref() as *const Data as *const ();
ptr.hash(state);
}
}
impl PartialOrd for $ty {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for $ty {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let a = self.data.as_ref() as *const Data as *const ();
let b = other.data.as_ref() as *const Data as *const ();
a.cmp(&b)
}
}
};
impl<T> Default for PhantomDrop<T> {
fn default() -> Self {
Self(std::marker::PhantomData)
}
}
impl<T> Drop for PhantomDrop<T> {
fn drop(&mut self) {}
}
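A minimal sketch (hypothetical types, not part of this file) of why the Drop impl matters: a value without drop glue lets the borrow it carries end at the value's last use, while a Drop impl forces that borrow to stay live until the value is dropped at end of scope:

    struct NoDrop<'a>(std::marker::PhantomData<&'a mut u32>);
    struct WithDrop<'a>(PhantomDrop<&'a mut u32>);

    fn hold_no_drop(_x: &mut u32) -> NoDrop<'_> { NoDrop(std::marker::PhantomData) }
    fn hold_with_drop(_x: &mut u32) -> WithDrop<'_> { WithDrop(PhantomDrop::default()) }

    fn demo() {
        let mut v = 0u32;
        let _a = hold_no_drop(&mut v);
        v += 1; // OK: `_a` has no drop glue, so its borrow of `v` has already ended
        let _b = hold_with_drop(&mut v);
        // v += 1; // rejected: `_b`'s Drop keeps `v` borrowed until end of scope
    }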
pub(crate) use impl_partialeq_eq_hash;


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a pipeline cache, which is used to accelerate
@ -66,13 +64,14 @@ use crate::*;
/// [renaming]: std::fs::rename
#[derive(Debug)]
pub struct PipelineCache {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchPipelineCache,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(PipelineCache: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(PipelineCache => .inner);
impl PipelineCache {
/// Get the data associated with this pipeline cache.
/// The data format is an implementation detail of `wgpu`.
@ -81,14 +80,6 @@ impl PipelineCache {
///
/// This function is unique to the Rust API of `wgpu`.
pub fn get_data(&self) -> Option<Vec<u8>> {
self.context.pipeline_cache_get_data(self.data.as_ref())
}
}
impl Drop for PipelineCache {
fn drop(&mut self) {
if !thread::panicking() {
self.context.pipeline_cache_drop(self.data.as_ref());
}
self.inner.get_data()
}
}
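A usage sketch following the advice in the docs above (write the bytes out, then atomically rename into place); the function name and paths are illustrative:

    fn save_pipeline_cache(cache: &wgpu::PipelineCache, path: &std::path::Path) -> std::io::Result<()> {
        if let Some(data) = cache.get_data() {
            let tmp = path.with_extension("tmp");
            std::fs::write(&tmp, &data)?;
            std::fs::rename(tmp, path)?; // atomic replace, per the recommendation above
        }
        Ok(())
    }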


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a pipeline layout.
@ -10,21 +8,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUPipelineLayout`](https://gpuweb.github.io/gpuweb/#gpupipelinelayout).
#[derive(Debug)]
pub struct PipelineLayout {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchPipelineLayout,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
super::impl_partialeq_eq_hash!(PipelineLayout);
impl Drop for PipelineLayout {
fn drop(&mut self) {
if !thread::panicking() {
self.context.pipeline_layout_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(PipelineLayout => .inner);
/// Describes a [`PipelineLayout`].
///


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a query set.
@ -9,22 +7,13 @@ use crate::*;
/// Corresponds to [WebGPU `GPUQuerySet`](https://gpuweb.github.io/gpuweb/#queryset).
#[derive(Debug)]
pub struct QuerySet {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchQuerySet,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QuerySet: Send, Sync);
super::impl_partialeq_eq_hash!(QuerySet);
impl Drop for QuerySet {
fn drop(&mut self) {
if !thread::panicking() {
self.context.query_set_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(QuerySet => .inner);
/// Describes a [`QuerySet`].
///


@ -1,10 +1,5 @@
use std::{
ops::{Deref, DerefMut},
sync::Arc,
thread,
};
use std::ops::{Deref, DerefMut};
use crate::context::{DynContext, QueueWriteBuffer};
use crate::*;
/// Handle to a command queue on a device.
@ -16,19 +11,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
#[derive(Debug)]
pub struct Queue {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchQueue,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Queue: Send, Sync);
impl Drop for Queue {
fn drop(&mut self) {
if !thread::panicking() {
self.context.queue_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(Queue => .inner);
/// Identifier for a particular call to [`Queue::submit`]. Can be used
/// as part of an argument to [`Device::poll`] to block for a particular
@ -39,7 +27,7 @@ impl Drop for Queue {
#[derive(Debug, Clone)]
pub struct SubmissionIndex {
#[cfg_attr(not(native), allow(dead_code))]
pub(crate) data: Arc<crate::Data>,
pub(crate) index: u64,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);
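A sketch of the intended use, assuming `device`, `queue`, and a finished `encoder` are in scope; `Maintain::wait_for` is the wgpu 23-era helper for waiting on one specific submission:

    let index = queue.submit(Some(encoder.finish()));
    // Block until exactly this submission has completed on the GPU.
    device.poll(wgpu::Maintain::wait_for(index));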
@ -59,7 +47,7 @@ pub struct QueueWriteBufferView<'a> {
queue: &'a Queue,
buffer: &'a Buffer,
offset: BufferAddress,
inner: Box<dyn QueueWriteBuffer>,
inner: dispatch::DispatchQueueWriteBuffer,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QueueWriteBufferView<'_>: Send, Sync);
@ -87,13 +75,9 @@ impl AsMut<[u8]> for QueueWriteBufferView<'_> {
impl Drop for QueueWriteBufferView<'_> {
fn drop(&mut self) {
DynContext::queue_write_staging_buffer(
&*self.queue.context,
self.queue.data.as_ref(),
self.buffer.data.as_ref(),
self.offset,
&*self.inner,
);
self.queue
.inner
.write_staging_buffer(&self.buffer.inner, self.offset, &self.inner);
}
}
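Because the staging copy is issued from the Drop impl above, typical use is to write through the view and let it fall out of scope; the `queue` and `buffer` bindings are assumptions:

    if let Some(mut view) = queue.write_buffer_with(&buffer, 0, wgpu::BufferSize::new(4).unwrap()) {
        view.copy_from_slice(&42u32.to_le_bytes());
    } // dropping the view here schedules the staging-buffer copy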
@ -119,13 +103,7 @@ impl Queue {
/// method avoids an intermediate copy and is often able to transfer data
/// more efficiently than this one.
pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
DynContext::queue_write_buffer(
&*self.context,
self.data.as_ref(),
buffer.data.as_ref(),
offset,
data,
)
self.inner.write_buffer(&buffer.inner, offset, data);
}
/// Write to a buffer via a directly mapped staging buffer.
@ -164,15 +142,9 @@ impl Queue {
size: BufferSize,
) -> Option<QueueWriteBufferView<'a>> {
profiling::scope!("Queue::write_buffer_with");
DynContext::queue_validate_write_buffer(
&*self.context,
self.data.as_ref(),
buffer.data.as_ref(),
offset,
size,
)?;
let staging_buffer =
DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?;
self.inner
.validate_write_buffer(&buffer.inner, offset, size)?;
let staging_buffer = self.inner.create_staging_buffer(size)?;
Some(QueueWriteBufferView {
queue: self,
buffer,
@ -212,14 +184,7 @@ impl Queue {
data_layout: TexelCopyBufferLayout,
size: Extent3d,
) {
DynContext::queue_write_texture(
&*self.context,
self.data.as_ref(),
texture,
data,
data_layout,
size,
)
self.inner.write_texture(texture, data, data_layout, size);
}
/// Schedule a copy of data from `image` into `texture`.
@ -227,16 +192,11 @@ impl Queue {
pub fn copy_external_image_to_texture(
&self,
source: &wgt::CopyExternalImageSourceInfo,
dest: crate::CopyExternalImageDestInfo<'_>,
dest: wgt::CopyExternalImageDestInfo<&api::Texture>,
size: Extent3d,
) {
DynContext::queue_copy_external_image_to_texture(
&*self.context,
self.data.as_ref(),
source,
dest,
size,
)
self.inner
.copy_external_image_to_texture(source, dest, size);
}
/// Submits a series of finished command buffers for execution.
@ -246,12 +206,11 @@ impl Queue {
) -> SubmissionIndex {
let mut command_buffers = command_buffers
.into_iter()
.map(|mut comb| comb.data.take().unwrap());
.map(|mut comb| comb.inner.take().unwrap());
let data =
DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers);
let index = self.inner.submit(&mut command_buffers);
SubmissionIndex { data }
SubmissionIndex { index }
}
/// Gets the amount of nanoseconds each tick of a timestamp query represents.
@ -261,7 +220,7 @@ impl Queue {
/// Timestamp values are represented in nanosecond values on WebGPU, see `<https://gpuweb.github.io/gpuweb/#timestamp>`
/// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required.
pub fn get_timestamp_period(&self) -> f32 {
DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref())
self.inner.get_timestamp_period()
}
/// Registers a callback when the previous call to submit finishes running on the gpu. This callback
@ -276,10 +235,6 @@ impl Queue {
/// call to the function will not complete until the callback returns, so prefer keeping callbacks short
/// and used to set flags, send messages, etc.
pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
DynContext::queue_on_submitted_work_done(
&*self.context,
self.data.as_ref(),
Box::new(callback),
)
self.inner.on_submitted_work_done(Box::new(callback));
}
}
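Per the guidance above (keep the callback short; set a flag or send a message), a sketch with an assumed `queue` binding:

    use std::sync::{atomic::{AtomicBool, Ordering}, Arc};

    let done = Arc::new(AtomicBool::new(false));
    let flag = done.clone();
    queue.on_submitted_work_done(move || flag.store(true, Ordering::Release));
    // elsewhere: if done.load(Ordering::Acquire) { /* GPU work finished */ }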


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Pre-prepared reusable bundle of GPU operations.
@ -13,21 +11,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPURenderBundle`](https://gpuweb.github.io/gpuweb/#render-bundle).
#[derive(Debug)]
pub struct RenderBundle {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchRenderBundle,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
super::impl_partialeq_eq_hash!(RenderBundle);
impl Drop for RenderBundle {
fn drop(&mut self) {
if !thread::panicking() {
self.context.render_bundle_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(RenderBundle => .inner);
/// Describes a [`RenderBundle`].
///


@ -1,6 +1,6 @@
use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc};
use std::{marker::PhantomData, num::NonZeroU32, ops::Range};
use crate::context::DynContext;
use crate::dispatch::RenderBundleEncoderInterface;
use crate::*;
/// Encodes a series of GPU operations into a reusable "render bundle".
@ -16,15 +16,15 @@ use crate::*;
/// https://gpuweb.github.io/gpuweb/#gpurenderbundleencoder).
#[derive(Debug)]
pub struct RenderBundleEncoder<'a> {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) parent: &'a Device,
pub(crate) inner: dispatch::DispatchRenderBundleEncoder,
/// This type should be !Send !Sync, because it represents an allocation on this thread's
/// command buffer.
pub(crate) _p: PhantomData<*const u8>,
pub(crate) _p: PhantomData<(*const u8, &'a ())>,
}
static_assertions::assert_not_impl_any!(RenderBundleEncoder<'_>: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(RenderBundleEncoder<'_> => .inner);
/// Describes a [`RenderBundleEncoder`].
///
/// For use with [`Device::create_render_bundle_encoder`].
@ -52,42 +52,34 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn
impl<'a> RenderBundleEncoder<'a> {
/// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes.
pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle {
let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc);
RenderBundle {
context: Arc::clone(&self.context),
data,
}
let bundle = match self.inner {
#[cfg(wgpu_core)]
dispatch::DispatchRenderBundleEncoder::Core(b) => b.finish(desc),
#[cfg(webgpu)]
dispatch::DispatchRenderBundleEncoder::WebGPU(b) => b.finish(desc),
};
RenderBundle { inner: bundle }
}
/// Sets the active bind group for a given bind group index. The bind group layout
/// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
///
/// If the bind group has dynamic offsets, provide them in the binding order.
pub fn set_bind_group<'b>(
&mut self,
index: u32,
bind_group: impl Into<Option<&'a BindGroup>>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.into().map(|x| x.data.as_ref());
DynContext::render_bundle_encoder_set_bind_group(
&*self.parent.context,
self.data.as_mut(),
index,
bg,
offsets,
)
pub fn set_bind_group<'b, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset])
where
Option<&'b BindGroup>: From<BG>,
{
let bg: Option<&'b BindGroup> = bind_group.into();
let bg = bg.map(|x| &x.inner);
self.inner.set_bind_group(index, bg, offsets);
}
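The `Option<&'b BindGroup>: From<BG>` bound exists so that both established call styles keep compiling, via the standard `From<T> for Option<T>` impl; a sketch with assumed `encoder` and `bind_group` bindings:

    encoder.set_bind_group(0, &bind_group, &[]); // BG = &BindGroup
    encoder.set_bind_group(1, None, &[]);        // BG = Option<&BindGroup>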
/// Sets the active render pipeline.
///
/// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
DynContext::render_bundle_encoder_set_pipeline(
&*self.parent.context,
self.data.as_mut(),
pipeline.data.as_ref(),
)
self.inner.set_pipeline(&pipeline.inner);
}
/// Sets the active index buffer.
@ -95,14 +87,12 @@ impl<'a> RenderBundleEncoder<'a> {
/// Subsequent calls to [`draw_indexed`](RenderBundleEncoder::draw_indexed) on this [`RenderBundleEncoder`] will
/// use `buffer` as the source index buffer.
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
DynContext::render_bundle_encoder_set_index_buffer(
&*self.parent.context,
self.data.as_mut(),
buffer_slice.buffer.data.as_ref(),
self.inner.set_index_buffer(
&buffer_slice.buffer.inner,
index_format,
buffer_slice.offset,
buffer_slice.size,
)
);
}
/// Assign a vertex buffer to a slot.
@ -116,14 +106,12 @@ impl<'a> RenderBundleEncoder<'a> {
/// [`draw`]: RenderBundleEncoder::draw
/// [`draw_indexed`]: RenderBundleEncoder::draw_indexed
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
DynContext::render_bundle_encoder_set_vertex_buffer(
&*self.parent.context,
self.data.as_mut(),
self.inner.set_vertex_buffer(
slot,
buffer_slice.buffer.data.as_ref(),
&buffer_slice.buffer.inner,
buffer_slice.offset,
buffer_slice.size,
)
);
}
/// Draws primitives from the active vertex buffer(s).
@ -145,12 +133,7 @@ impl<'a> RenderBundleEncoder<'a> {
/// }
/// ```
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw(
&*self.parent.context,
self.data.as_mut(),
vertices,
instances,
)
self.inner.draw(vertices, instances);
}
/// Draws indexed primitives using the active index buffer and the active vertex buffer(s).
@ -175,13 +158,7 @@ impl<'a> RenderBundleEncoder<'a> {
/// }
/// ```
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw_indexed(
&*self.parent.context,
self.data.as_mut(),
indices,
base_vertex,
instances,
);
self.inner.draw_indexed(indices, base_vertex, instances);
}
/// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
@ -190,12 +167,8 @@ impl<'a> RenderBundleEncoder<'a> {
///
/// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs).
pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
DynContext::render_bundle_encoder_draw_indirect(
&*self.parent.context,
self.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
);
self.inner
.draw_indirect(&indirect_buffer.inner, indirect_offset);
}
/// Draws indexed primitives using the active index buffer and the active vertex buffers,
@ -210,12 +183,8 @@ impl<'a> RenderBundleEncoder<'a> {
indirect_buffer: &'a Buffer,
indirect_offset: BufferAddress,
) {
DynContext::render_bundle_encoder_draw_indexed_indirect(
&*self.parent.context,
self.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
);
self.inner
.draw_indexed_indirect(&indirect_buffer.inner, indirect_offset);
}
}
@ -250,12 +219,6 @@ impl RenderBundleEncoder<'_> {
/// You would need to upload this in three set_push_constants calls. First for the `Vertex` only range 0..4, second
/// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12.
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_bundle_encoder_set_push_constants(
&*self.parent.context,
self.data.as_mut(),
stages,
offset,
data,
);
self.inner.set_push_constants(stages, offset, data);
}
}


@ -1,22 +1,7 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use std::ops::Range;
use crate::context::DynContext;
use crate::*;
#[derive(Debug)]
pub(crate) struct RenderPassInner {
pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>,
}
impl Drop for RenderPassInner {
fn drop(&mut self) {
if !thread::panicking() {
self.context.render_pass_end(self.data.as_mut());
}
}
}
/// In-progress recording of a render pass: a list of render commands in a [`CommandEncoder`].
///
/// It can be created with [`CommandEncoder::begin_render_pass()`], whose [`RenderPassDescriptor`]
@ -37,14 +22,19 @@ impl Drop for RenderPassInner {
/// https://gpuweb.github.io/gpuweb/#render-pass-encoder).
#[derive(Debug)]
pub struct RenderPass<'encoder> {
/// The inner data of the render pass, separated out so it's easy to replace the lifetime with 'static if desired.
pub(crate) inner: RenderPassInner,
pub(crate) inner: dispatch::DispatchRenderPass,
/// This lifetime is used to protect the [`CommandEncoder`] from being used
/// while the pass is alive.
pub(crate) encoder_guard: PhantomData<&'encoder ()>,
/// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime
/// from being shortened.
pub(crate) _encoder_guard: PhantomDrop<&'encoder ()>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderPass<'_>: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(RenderPass<'_> => .inner);
impl RenderPass<'_> {
/// Drops the lifetime relationship to the parent command encoder, making usage of
/// the encoder while this pass is recorded a run-time error instead.
@ -61,7 +51,7 @@ impl RenderPass<'_> {
pub fn forget_lifetime(self) -> RenderPass<'static> {
RenderPass {
inner: self.inner,
encoder_guard: PhantomData,
_encoder_guard: crate::api::PhantomDrop::default(),
}
}
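A sketch of the trade-off, assuming `encoder` is a `CommandEncoder` and `desc` a `RenderPassDescriptor`:

    let pass: wgpu::RenderPass<'static> = encoder
        .begin_render_pass(&desc)
        .forget_lifetime();
    // Touching `encoder` before `pass` is dropped is now caught by validation
    // at runtime rather than by the borrow checker at compile time.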
@ -74,31 +64,21 @@ impl RenderPass<'_> {
/// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
///
/// Subsequent draw calls shader executions will be able to access data in these bind groups.
pub fn set_bind_group<'a>(
&mut self,
index: u32,
bind_group: impl Into<Option<&'a BindGroup>>,
offsets: &[DynamicOffset],
) {
let bg = bind_group.into().map(|x| x.data.as_ref());
DynContext::render_pass_set_bind_group(
&*self.inner.context,
self.inner.data.as_mut(),
index,
bg,
offsets,
)
pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset])
where
Option<&'a BindGroup>: From<BG>,
{
let bg: Option<&'a BindGroup> = bind_group.into();
let bg = bg.map(|bg| &bg.inner);
self.inner.set_bind_group(index, bg, offsets);
}
/// Sets the active render pipeline.
///
/// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) {
DynContext::render_pass_set_pipeline(
&*self.inner.context,
self.inner.data.as_mut(),
pipeline.data.as_ref(),
)
self.inner.set_pipeline(&pipeline.inner);
}
/// Sets the blend color as used by some of the blending modes.
@ -107,11 +87,7 @@ impl RenderPass<'_> {
/// If this method has not been called, the blend constant defaults to [`Color::TRANSPARENT`]
/// (all components zero).
pub fn set_blend_constant(&mut self, color: Color) {
DynContext::render_pass_set_blend_constant(
&*self.inner.context,
self.inner.data.as_mut(),
color,
)
self.inner.set_blend_constant(color);
}
/// Sets the active index buffer.
@ -119,14 +95,12 @@ impl RenderPass<'_> {
/// Subsequent calls to [`draw_indexed`](RenderPass::draw_indexed) on this [`RenderPass`] will
/// use `buffer` as the source index buffer.
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) {
DynContext::render_pass_set_index_buffer(
&*self.inner.context,
self.inner.data.as_mut(),
buffer_slice.buffer.data.as_ref(),
self.inner.set_index_buffer(
&buffer_slice.buffer.inner,
index_format,
buffer_slice.offset,
buffer_slice.size,
)
);
}
/// Assign a vertex buffer to a slot.
@ -140,14 +114,12 @@ impl RenderPass<'_> {
/// [`draw`]: RenderPass::draw
/// [`draw_indexed`]: RenderPass::draw_indexed
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) {
DynContext::render_pass_set_vertex_buffer(
&*self.inner.context,
self.inner.data.as_mut(),
self.inner.set_vertex_buffer(
slot,
buffer_slice.buffer.data.as_ref(),
&buffer_slice.buffer.inner,
buffer_slice.offset,
buffer_slice.size,
)
);
}
/// Sets the scissor rectangle used during the rasterization stage.
@ -160,14 +132,7 @@ impl RenderPass<'_> {
/// The function of the scissor rectangle resembles [`set_viewport()`](Self::set_viewport),
/// but it does not affect the coordinate system, only which fragments are discarded.
pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
DynContext::render_pass_set_scissor_rect(
&*self.inner.context,
self.inner.data.as_mut(),
x,
y,
width,
height,
);
self.inner.set_scissor_rect(x, y, width, height);
}
/// Sets the viewport used during the rasterization stage to linearly map
@ -177,16 +142,7 @@ impl RenderPass<'_> {
/// If this method has not been called, the viewport defaults to the entire bounds of the render
/// targets.
pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
DynContext::render_pass_set_viewport(
&*self.inner.context,
self.inner.data.as_mut(),
x,
y,
w,
h,
min_depth,
max_depth,
);
self.inner.set_viewport(x, y, w, h, min_depth, max_depth);
}
/// Sets the stencil reference.
@ -194,34 +150,22 @@ impl RenderPass<'_> {
/// Subsequent stencil tests will test against this value.
/// If this method has not been called, the stencil reference value defaults to `0`.
pub fn set_stencil_reference(&mut self, reference: u32) {
DynContext::render_pass_set_stencil_reference(
&*self.inner.context,
self.inner.data.as_mut(),
reference,
);
self.inner.set_stencil_reference(reference);
}
/// Inserts a debug marker.
pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::render_pass_insert_debug_marker(
&*self.inner.context,
self.inner.data.as_mut(),
label,
);
self.inner.insert_debug_marker(label);
}
/// Start recording commands and group them into a debug marker group.
pub fn push_debug_group(&mut self, label: &str) {
DynContext::render_pass_push_debug_group(
&*self.inner.context,
self.inner.data.as_mut(),
label,
);
self.inner.push_debug_group(label);
}
/// Stops recording into the current debug marker group and closes it.
pub fn pop_debug_group(&mut self) {
DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
self.inner.pop_debug_group();
}
/// Draws primitives from the active vertex buffer(s).
@ -246,12 +190,7 @@ impl RenderPass<'_> {
/// This drawing command uses the current render state, as set by preceding `set_*()` methods.
/// It is not affected by changes to the state that are performed after it is called.
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_pass_draw(
&*self.inner.context,
self.inner.data.as_mut(),
vertices,
instances,
)
self.inner.draw(vertices, instances);
}
/// Draws indexed primitives using the active index buffer and the active vertex buffers.
@ -279,13 +218,7 @@ impl RenderPass<'_> {
/// This drawing command uses the current render state, as set by preceding `set_*()` methods.
/// It is not affected by changes to the state that are performed after it is called.
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_pass_draw_indexed(
&*self.inner.context,
self.inner.data.as_mut(),
indices,
base_vertex,
instances,
);
self.inner.draw_indexed(indices, base_vertex, instances);
}
/// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
@ -302,12 +235,8 @@ impl RenderPass<'_> {
///
/// See details on the individual flags for more information.
pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) {
DynContext::render_pass_draw_indirect(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
);
self.inner
.draw_indirect(&indirect_buffer.inner, indirect_offset);
}
/// Draws indexed primitives using the active index buffer and the active vertex buffers,
@ -329,12 +258,8 @@ impl RenderPass<'_> {
indirect_buffer: &Buffer,
indirect_offset: BufferAddress,
) {
DynContext::render_pass_draw_indexed_indirect(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
);
self.inner
.draw_indexed_indirect(&indirect_buffer.inner, indirect_offset);
}
/// Execute a [render bundle][RenderBundle], which is a set of pre-recorded commands
@ -346,13 +271,9 @@ impl RenderPass<'_> {
&mut self,
render_bundles: I,
) {
let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref());
let mut render_bundles = render_bundles.into_iter().map(|rb| &rb.inner);
DynContext::render_pass_execute_bundles(
&*self.inner.context,
self.inner.data.as_mut(),
&mut render_bundles,
)
self.inner.execute_bundles(&mut render_bundles);
}
}
@ -374,13 +295,8 @@ impl RenderPass<'_> {
indirect_offset: BufferAddress,
count: u32,
) {
DynContext::render_pass_multi_draw_indirect(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
count,
);
self.inner
.multi_draw_indirect(&indirect_buffer.inner, indirect_offset, count);
}
/// Dispatches multiple draw calls from the active index buffer and the active vertex buffers,
@ -400,13 +316,8 @@ impl RenderPass<'_> {
indirect_offset: BufferAddress,
count: u32,
) {
DynContext::render_pass_multi_draw_indexed_indirect(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
indirect_offset,
count,
);
self.inner
.multi_draw_indexed_indirect(&indirect_buffer.inner, indirect_offset, count);
}
}
@ -442,12 +353,10 @@ impl RenderPass<'_> {
count_offset: BufferAddress,
max_count: u32,
) {
DynContext::render_pass_multi_draw_indirect_count(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
self.inner.multi_draw_indirect_count(
&indirect_buffer.inner,
indirect_offset,
count_buffer.data.as_ref(),
&count_buffer.inner,
count_offset,
max_count,
);
@ -486,12 +395,10 @@ impl RenderPass<'_> {
count_offset: BufferAddress,
max_count: u32,
) {
DynContext::render_pass_multi_draw_indexed_indirect_count(
&*self.inner.context,
self.inner.data.as_mut(),
indirect_buffer.data.as_ref(),
self.inner.multi_draw_indexed_indirect_count(
&indirect_buffer.inner,
indirect_offset,
count_buffer.data.as_ref(),
&count_buffer.inner,
count_offset,
max_count,
);
@ -541,13 +448,7 @@ impl RenderPass<'_> {
///
/// [`PushConstant`]: https://docs.rs/naga/latest/naga/enum.StorageClass.html#variant.PushConstant
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_pass_set_push_constants(
&*self.inner.context,
self.inner.data.as_mut(),
stages,
offset,
data,
);
self.inner.set_push_constants(stages, offset, data);
}
}
@ -561,12 +462,7 @@ impl RenderPass<'_> {
/// but timestamps can be subtracted to get the time it takes
/// for a string of operations to complete.
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_write_timestamp(
&*self.inner.context,
self.inner.data.as_mut(),
query_set.data.as_ref(),
query_index,
)
self.inner.write_timestamp(&query_set.inner, query_index);
}
}
@ -574,17 +470,13 @@ impl RenderPass<'_> {
/// Start an occlusion query on this render pass. It can be ended with
/// `end_occlusion_query`. Occlusion queries may not be nested.
pub fn begin_occlusion_query(&mut self, query_index: u32) {
DynContext::render_pass_begin_occlusion_query(
&*self.inner.context,
self.inner.data.as_mut(),
query_index,
);
self.inner.begin_occlusion_query(query_index);
}
/// End the occlusion query on this render pass. It can be started with
/// `begin_occlusion_query`. Occlusion queries may not be nested.
pub fn end_occlusion_query(&mut self) {
DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut());
self.inner.end_occlusion_query();
}
}
@ -593,21 +485,14 @@ impl RenderPass<'_> {
/// Start a pipeline statistics query on this render pass. It can be ended with
/// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_begin_pipeline_statistics_query(
&*self.inner.context,
self.inner.data.as_mut(),
query_set.data.as_ref(),
query_index,
);
self.inner
.begin_pipeline_statistics_query(&query_set.inner, query_index);
}
/// End the pipeline statistics query on this render pass. It can be started with
/// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
pub fn end_pipeline_statistics_query(&mut self) {
DynContext::render_pass_end_pipeline_statistics_query(
&*self.inner.context,
self.inner.data.as_mut(),
);
self.inner.end_pipeline_statistics_query();
}
}


@ -1,4 +1,4 @@
use std::{num::NonZeroU32, sync::Arc, thread};
use std::num::NonZeroU32;
use crate::*;
@ -10,21 +10,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPURenderPipeline`](https://gpuweb.github.io/gpuweb/#render-pipeline).
#[derive(Debug)]
pub struct RenderPipeline {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchRenderPipeline,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
super::impl_partialeq_eq_hash!(RenderPipeline);
impl Drop for RenderPipeline {
fn drop(&mut self) {
if !thread::panicking() {
self.context.render_pipeline_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(RenderPipeline => .inner);
impl RenderPipeline {
/// Get an object representing the bind group layout at a given index.
@ -34,11 +25,8 @@ impl RenderPipeline {
///
/// This method will raise a validation error if there is no bind group layout at `index`.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context);
let data = self
.context
.render_pipeline_get_bind_group_layout(self.data.as_ref(), index);
BindGroupLayout { context, data }
let inner = self.inner.get_bind_group_layout(index);
BindGroupLayout { inner }
}
}


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a sampler.
@ -13,21 +11,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUSampler`](https://gpuweb.github.io/gpuweb/#sampler-interface).
#[derive(Debug)]
pub struct Sampler {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchSampler,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Sampler: Send, Sync);
super::impl_partialeq_eq_hash!(Sampler);
impl Drop for Sampler {
fn drop(&mut self) {
if !thread::panicking() {
self.context.sampler_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(Sampler => .inner);
/// Describes a [`Sampler`].
///


@ -1,4 +1,4 @@
use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread};
use std::{borrow::Cow, future::Future, marker::PhantomData};
use crate::*;
@ -12,26 +12,17 @@ use crate::*;
/// Corresponds to [WebGPU `GPUShaderModule`](https://gpuweb.github.io/gpuweb/#shader-module).
#[derive(Debug)]
pub struct ShaderModule {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchShaderModule,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
super::impl_partialeq_eq_hash!(ShaderModule);
impl Drop for ShaderModule {
fn drop(&mut self) {
if !thread::panicking() {
self.context.shader_module_drop(self.data.as_ref());
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(ShaderModule => .inner);
impl ShaderModule {
/// Get the compilation info for the shader module.
pub fn get_compilation_info(&self) -> impl Future<Output = CompilationInfo> + WasmNotSend {
self.context.shader_get_compilation_info(self.data.as_ref())
self.inner.get_compilation_info()
}
}


@ -1,9 +1,8 @@
use std::{error, fmt, sync::Arc, thread};
use std::{error, fmt};
use parking_lot::Mutex;
use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
use crate::context::DynContext;
use crate::*;
/// Describes a [`Surface`].
@ -24,8 +23,6 @@ static_assertions::assert_impl_all!(SurfaceConfiguration: Send, Sync);
/// [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context)
/// serves a similar role.
pub struct Surface<'window> {
pub(crate) context: Arc<C>,
/// Optionally, keep the source of the handle used for the surface alive.
///
/// This is useful for platforms where the surface is created from a window and the surface
@ -33,7 +30,7 @@ pub struct Surface<'window> {
pub(crate) _handle_source: Option<Box<dyn WindowHandle + 'window>>,
/// Additional surface data returned by [`DynContext::instance_create_surface`].
pub(crate) surface_data: Box<Data>,
pub(crate) inner: dispatch::DispatchSurface,
// Stores the latest `SurfaceConfiguration` that was set using `Surface::configure`.
// It is required to set the attributes of the `SurfaceTexture` in the
@ -49,11 +46,7 @@ impl Surface<'_> {
///
/// Returns specified values (see [`SurfaceCapabilities`]) if the surface is incompatible with the adapter.
pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
DynContext::surface_get_capabilities(
&*self.context,
self.surface_data.as_ref(),
adapter.data.as_ref(),
)
self.inner.get_capabilities(&adapter.inner)
}
/// Return a default `SurfaceConfiguration` from width and height to use for the [`Surface`] with this adapter.
@ -86,12 +79,7 @@ impl Surface<'_> {
/// - Texture format requested is unsupported on the surface.
/// - `config.width` or `config.height` is zero.
pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
DynContext::surface_configure(
&*self.context,
self.surface_data.as_ref(),
device.data.as_ref(),
config,
);
self.inner.configure(&device.inner, config);
let mut conf = self.config.lock();
*conf = Some(config.clone());
@ -106,8 +94,7 @@ impl Surface<'_> {
/// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated,
/// recreating the swapchain will panic.
pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
let (texture_data, status, detail) =
DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref());
let (texture, status, detail) = self.inner.get_current_texture();
let suboptimal = match status {
SurfaceStatus::Good => false,
@ -137,11 +124,10 @@ impl Surface<'_> {
view_formats: &[],
};
texture_data
.map(|data| SurfaceTexture {
texture
.map(|texture| SurfaceTexture {
texture: Texture {
context: Arc::clone(&self.context),
data,
inner: texture,
descriptor,
},
suboptimal,
@ -161,16 +147,18 @@ impl Surface<'_> {
pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Surface>) -> R, R>(
&self,
hal_surface_callback: F,
) -> Option<R> {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.surface_as_hal::<A, F, R>(
crate::context::downcast_ref(self.surface_data.as_ref()),
hal_surface_callback,
)
})
) -> R {
let core_surface = self.inner.as_core_opt();
if let Some(core_surface) = core_surface {
unsafe {
core_surface
.context
.surface_as_hal::<A, F, R>(core_surface, hal_surface_callback)
}
} else {
hal_surface_callback(None)
}
}
}
@ -179,7 +167,6 @@ impl Surface<'_> {
impl fmt::Debug for Surface<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Surface")
.field("context", &self.context)
.field(
"_handle_source",
&if self._handle_source.is_some() {
@ -188,7 +175,7 @@ impl fmt::Debug for Surface<'_> {
"None"
},
)
.field("data", &self.surface_data)
.field("inner", &self.inner)
.field("config", &self.config)
.finish()
}
@ -197,13 +184,7 @@ impl fmt::Debug for Surface<'_> {
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Surface<'_>: Send, Sync);
impl Drop for Surface<'_> {
fn drop(&mut self) {
if !thread::panicking() {
self.context.surface_drop(self.surface_data.as_ref())
}
}
}
crate::cmp::impl_eq_ord_hash_proxy!(Surface<'_> => .inner);
/// Super trait for window handles as used in [`SurfaceTarget`].
pub trait WindowHandle: HasWindowHandle + HasDisplayHandle + WasmNotSendSync {}


@ -1,6 +1,5 @@
use std::{error, fmt, thread};
use crate::context::DynContext;
use crate::*;
/// Surface texture that can be rendered to.
@ -17,11 +16,13 @@ pub struct SurfaceTexture {
/// but should be recreated for maximum performance.
pub suboptimal: bool,
pub(crate) presented: bool,
pub(crate) detail: Box<dyn AnyWasmNotSendSync>,
pub(crate) detail: dispatch::DispatchSurfaceOutputDetail,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SurfaceTexture: Send, Sync);
crate::cmp::impl_eq_ord_hash_proxy!(SurfaceTexture => .texture.inner);
impl SurfaceTexture {
/// Schedule this texture to be presented on the owning surface.
///
@ -34,24 +35,14 @@ impl SurfaceTexture {
/// or synchronize other double buffered state, then these operations should be done before the call to `present`.
pub fn present(mut self) {
self.presented = true;
DynContext::surface_present(
&*self.texture.context,
// This call to as_ref is essential because we want the DynContext implementation to see the inner
// value of the Box (T::SurfaceOutputDetail), not the Box itself.
self.detail.as_ref(),
);
self.detail.present();
}
}
impl Drop for SurfaceTexture {
fn drop(&mut self) {
if !self.presented && !thread::panicking() {
DynContext::surface_texture_discard(
&*self.texture.context,
// This call to as_ref is essential because we want the DynContext implementation to see the inner
// value of the Box (T::SurfaceOutputDetail), not the Box itself.
self.detail.as_ref(),
);
self.detail.texture_discard();
}
}
}
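Tying `present` and the Drop impl together, a per-frame sketch (assumes a configured `surface`, and that the `?` error is handled by the caller):

    let frame = surface.get_current_texture()?;
    let view = frame.texture.create_view(&wgpu::TextureViewDescriptor::default());
    // ... record and submit commands that render to `view` ...
    frame.present(); // without this, dropping `frame` discards the texture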


@ -1,6 +1,3 @@
use std::{sync::Arc, thread};
use crate::context::DynContext;
use crate::*;
/// Handle to a texture on the GPU.
@ -10,14 +7,13 @@ use crate::*;
/// Corresponds to [WebGPU `GPUTexture`](https://gpuweb.github.io/gpuweb/#texture-interface).
#[derive(Debug)]
pub struct Texture {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchTexture,
pub(crate) descriptor: TextureDescriptor<'static>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Texture: Send, Sync);
super::impl_partialeq_eq_hash!(Texture);
crate::cmp::impl_eq_ord_hash_proxy!(Texture => .inner);
impl Texture {
/// Returns the inner hal Texture using a callback. The hal texture will be `None` if the
@ -31,16 +27,10 @@ impl Texture {
&self,
hal_texture_callback: F,
) -> R {
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
if let Some(tex) = self.inner.as_core_opt() {
unsafe {
ctx.texture_as_hal::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_texture_callback,
)
tex.context
.texture_as_hal::<A, F, R>(tex, hal_texture_callback)
}
} else {
hal_texture_callback(None)
@ -49,16 +39,14 @@ impl Texture {
/// Creates a view of this texture.
pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView {
let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc);
TextureView {
context: Arc::clone(&self.context),
data,
}
let view = self.inner.create_view(desc);
TextureView { inner: view }
}
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::texture_destroy(&*self.context, self.data.as_ref());
self.inner.destroy();
}
/// Make an `TexelCopyTextureInfo` representing the whole texture.
@ -135,14 +123,6 @@ impl Texture {
}
}
impl Drop for Texture {
fn drop(&mut self) {
if !thread::panicking() {
self.context.texture_drop(self.data.as_ref());
}
}
}
/// Describes a [`Texture`].
///
/// For use with [`Device::create_texture`].


@ -1,5 +1,3 @@
use std::{sync::Arc, thread};
use crate::*;
/// Handle to a texture view.
@ -10,13 +8,12 @@ use crate::*;
/// Corresponds to [WebGPU `GPUTextureView`](https://gpuweb.github.io/gpuweb/#gputextureview).
#[derive(Debug)]
pub struct TextureView {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchTextureView,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(TextureView: Send, Sync);
super::impl_partialeq_eq_hash!(TextureView);
crate::cmp::impl_eq_ord_hash_proxy!(TextureView => .inner);
impl TextureView {
/// Returns the inner hal TextureView using a callback. The hal texture view will be `None` if the
@ -30,16 +27,11 @@ impl TextureView {
&self,
hal_texture_view_callback: F,
) -> R {
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
if let Some(core_view) = self.inner.as_core_opt() {
unsafe {
ctx.texture_view_as_hal::<A, F, R>(
crate::context::downcast_ref(self.data.as_ref()),
hal_texture_view_callback,
)
core_view
.context
.texture_view_as_hal::<A, F, R>(core_view, hal_texture_view_callback)
}
} else {
hal_texture_view_callback(None)
@ -47,14 +39,6 @@ impl TextureView {
}
}
impl Drop for TextureView {
fn drop(&mut self) {
if !thread::panicking() {
self.context.texture_view_drop(self.data.as_ref());
}
}
}
/// Describes a [`TextureView`].
///
/// For use with [`Texture::create_view`].


@ -1,9 +1,6 @@
use crate::api::blas::{ContextTlasInstance, DynContextTlasInstance, TlasInstance};
use crate::context::{Context, DynContext};
use crate::{BindingResource, Buffer, Data, Label, C};
use crate::{api::blas::TlasInstance, dispatch};
use crate::{BindingResource, Buffer, Label};
use std::ops::{Index, IndexMut, Range};
use std::sync::Arc;
use std::thread;
use wgt::WasmNotSendSync;
/// Descriptor to create top level acceleration structures.
@ -21,24 +18,17 @@ static_assertions::assert_impl_all!(CreateTlasDescriptor<'_>: Send, Sync);
///
/// [TLAS instances]: TlasInstance
pub struct Tlas {
pub(crate) context: Arc<C>,
pub(crate) data: Box<Data>,
pub(crate) inner: dispatch::DispatchTlas,
pub(crate) max_instances: u32,
}
static_assertions::assert_impl_all!(Tlas: WasmNotSendSync);
crate::cmp::impl_eq_ord_hash_proxy!(Tlas => .inner);
impl Tlas {
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::tlas_destroy(&*self.context, self.data.as_ref());
}
}
impl Drop for Tlas {
fn drop(&mut self) {
if !thread::panicking() {
self.context.tlas_drop(self.data.as_ref());
}
self.inner.destroy();
}
}
@ -168,31 +158,3 @@ impl IndexMut<Range<usize>> for TlasPackage {
idx
}
}
pub(crate) struct DynContextTlasBuildEntry<'a> {
pub(crate) tlas_data: &'a Data,
pub(crate) instance_buffer_data: &'a Data,
pub(crate) instance_count: u32,
}
pub(crate) struct DynContextTlasPackage<'a> {
pub(crate) tlas_data: &'a Data,
pub(crate) instances: Box<dyn Iterator<Item = Option<DynContextTlasInstance<'a>>> + 'a>,
pub(crate) lowest_unmodified: u32,
}
/// Context version see [TlasBuildEntry].
#[allow(dead_code)]
pub struct ContextTlasBuildEntry<'a, T: Context> {
pub(crate) tlas_data: &'a T::TlasData,
pub(crate) instance_buffer_data: &'a T::BufferData,
pub(crate) instance_count: u32,
}
/// Context version see [TlasPackage].
#[allow(dead_code)]
pub struct ContextTlasPackage<'a, T: Context> {
pub(crate) tlas_data: &'a T::TlasData,
pub(crate) instances: Box<dyn Iterator<Item = Option<ContextTlasInstance<'a, T>>> + 'a>,
pub(crate) lowest_unmodified: u32,
}


@ -1,10 +1,10 @@
#[cfg(webgpu)]
mod webgpu;
pub mod webgpu;
#[cfg(webgpu)]
pub(crate) use webgpu::{get_browser_gpu_property, ContextWebGpu};
#[cfg(wgpu_core)]
mod wgpu_core;
pub mod wgpu_core;
#[cfg(wgpu_core)]
pub(crate) use wgpu_core::ContextWgpuCore;

File diff suppressed because it is too large

@ -3,7 +3,7 @@ use std::ops::{Deref, DerefMut};
use wasm_bindgen::JsValue;
/// Derefs to a [`JsValue`] that's known not to be `undefined` or `null`.
#[derive(Debug)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DefinedNonNullJsValue<T>(T);
impl<T> DefinedNonNullJsValue<T>

File diff suppressed because it is too large

wgpu/src/cmp.rs (new file, 107 lines)

@ -0,0 +1,107 @@
//! We need to impl PartialEq, Eq, PartialOrd, Ord, and Hash for all handle types in wgpu.
//!
//! For types that have some already-unique property, we can use that property to implement these traits.
//!
//! For types (like WebGPU) that don't have such a property, we generate an identifier and use that.
use std::{
num::NonZeroU64,
sync::atomic::{AtomicU64, Ordering},
};
static NEXT_ID: AtomicU64 = AtomicU64::new(1);
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Identifier {
inner: NonZeroU64,
}
impl Identifier {
pub fn create() -> Self {
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
// Safety: Will take 7000+ years of constant incrementing to overflow. It's fine.
let inner = unsafe { NonZeroU64::new_unchecked(id) };
Self { inner }
}
}
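A sketch of the intended pattern for backends without a naturally unique field (the `WebShaderModule` type here is hypothetical):

    #[derive(Debug)]
    struct WebShaderModule {
        ident: Identifier,
        // ... the underlying JS object, which has no stable address to compare ...
    }
    // Equality, ordering, and hashing then proxy to the generated id:
    // crate::cmp::impl_eq_ord_hash_proxy!(WebShaderModule => .ident);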
/// Implements PartialEq, Eq, PartialOrd, Ord, and Hash for a type by proxying the operations to a single field.
///
/// ```ignore
/// impl_eq_ord_hash_proxy!(MyType => .field);
/// ```
macro_rules! impl_eq_ord_hash_proxy {
($type:ty => $($access:tt)*) => {
impl PartialEq for $type {
fn eq(&self, other: &Self) -> bool {
self $($access)* == other $($access)*
}
}
impl Eq for $type {}
impl PartialOrd for $type {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for $type {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self $($access)*.cmp(&other $($access)*)
}
}
impl std::hash::Hash for $type {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self $($access)*.hash(state)
}
}
};
}
/// Implements PartialEq, Eq, PartialOrd, Ord, and Hash for a type by comparing the addresses of the Arcs.
///
/// ```ignore
/// impl_eq_ord_hash_arc_address!(MyType => .field);
/// ```
#[cfg_attr(not(wgpu_core), allow(unused_macros))]
macro_rules! impl_eq_ord_hash_arc_address {
($type:ty => $($access:tt)*) => {
impl PartialEq for $type {
fn eq(&self, other: &Self) -> bool {
let address_left = std::sync::Arc::as_ptr(&self $($access)*);
let address_right = std::sync::Arc::as_ptr(&other $($access)*);
address_left == address_right
}
}
impl Eq for $type {}
impl PartialOrd for $type {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for $type {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let address_left = std::sync::Arc::as_ptr(&self $($access)*);
let address_right = std::sync::Arc::as_ptr(&other $($access)*);
address_left.cmp(&address_right)
}
}
impl std::hash::Hash for $type {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let address = std::sync::Arc::as_ptr(&self $($access)*);
address.hash(state)
}
}
};
}
#[cfg_attr(not(wgpu_core), allow(unused_imports))]
pub(crate) use {impl_eq_ord_hash_arc_address, impl_eq_ord_hash_proxy};

File diff suppressed because it is too large

wgpu/src/dispatch.rs (new file, 735 lines)

@ -0,0 +1,735 @@
//! Infrastructure for dispatching calls to the appropriate "backend". The "backends" are:
//!
//! - `wgpu_core`: An implementation of the wgpu api on top of various native graphics APIs.
//! - `webgpu`: An implementation of the wgpu api which calls WebGPU directly.
//!
//! The interface traits are all object safe and listed in the `InterfaceTypes` trait.
//!
//! The method for dispatching should optimize well if only one backend is compiled in,
//! as if there were no dispatching at all.
#![allow(drop_bounds)] // This exists to remind implementors to impl drop.
#![allow(clippy::too_many_arguments)] // It's fine.
use crate::{WasmNotSend, WasmNotSendSync};
use std::{any::Any, fmt::Debug, future::Future, hash::Hash, ops::Range, pin::Pin};
use crate::backend;
/// Create a single trait with the given supertraits and a blanket impl for all types that implement them.
///
/// This is useful for creating a trait alias as a shorthand.
macro_rules! trait_alias {
($name:ident: $($bound:tt)+) => {
pub trait $name: $($bound)+ {}
impl<T: $($bound)+> $name for T {}
};
}
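For example, the first alias below expands to roughly:

    pub trait RequestAdapterFuture:
        Future<Output = Option<DispatchAdapter>> + WasmNotSend + 'static {}
    impl<T: Future<Output = Option<DispatchAdapter>> + WasmNotSend + 'static>
        RequestAdapterFuture for T {}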
// Various return futures in the API.
trait_alias!(RequestAdapterFuture: Future<Output = Option<DispatchAdapter>> + WasmNotSend + 'static);
trait_alias!(RequestDeviceFuture: Future<Output = Result<(DispatchDevice, DispatchQueue), crate::RequestDeviceError>> + WasmNotSend + 'static);
trait_alias!(PopErrorScopeFuture: Future<Output = Option<crate::Error>> + WasmNotSend + 'static);
trait_alias!(ShaderCompilationInfoFuture: Future<Output = crate::CompilationInfo> + WasmNotSend + 'static);
// We can't use trait aliases here, as you can't convert from a dyn Trait to dyn Supertrait _yet_.
#[cfg(send_sync)]
pub type BoxDeviceLostCallback = Box<dyn FnOnce(crate::DeviceLostReason, String) + Send + 'static>;
#[cfg(not(send_sync))]
pub type BoxDeviceLostCallback = Box<dyn FnOnce(crate::DeviceLostReason, String) + 'static>;
#[cfg(send_sync)]
pub type BoxSubmittedWorkDoneCallback = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type BoxSubmittedWorkDoneCallback = Box<dyn FnOnce() + 'static>;
#[cfg(send_sync)]
pub type BufferMapCallback = Box<dyn FnOnce(Result<(), crate::BufferAsyncError>) + Send + 'static>;
#[cfg(not(send_sync))]
pub type BufferMapCallback = Box<dyn FnOnce(Result<(), crate::BufferAsyncError>) + 'static>;
// Common traits on all the interface traits
trait_alias!(CommonTraits: Any + Debug + WasmNotSendSync);
// Non-object-safe traits that are added as a bound on InterfaceTypes.
trait_alias!(ComparisonTraits: PartialEq + Eq + PartialOrd + Ord + Hash);
/// Types that represent a "Backend" for the wgpu API.
pub trait InterfaceTypes {
type Instance: InstanceInterface + ComparisonTraits;
type Adapter: AdapterInterface + ComparisonTraits;
type Device: DeviceInterface + ComparisonTraits;
type Queue: QueueInterface + ComparisonTraits;
type ShaderModule: ShaderModuleInterface + ComparisonTraits;
type BindGroupLayout: BindGroupLayoutInterface + ComparisonTraits;
type BindGroup: BindGroupInterface + ComparisonTraits;
type TextureView: TextureViewInterface + ComparisonTraits;
type Sampler: SamplerInterface + ComparisonTraits;
type Buffer: BufferInterface + ComparisonTraits;
type Texture: TextureInterface + ComparisonTraits;
type Blas: BlasInterface + ComparisonTraits;
type Tlas: TlasInterface + ComparisonTraits;
type QuerySet: QuerySetInterface + ComparisonTraits;
type PipelineLayout: PipelineLayoutInterface + ComparisonTraits;
type RenderPipeline: RenderPipelineInterface + ComparisonTraits;
type ComputePipeline: ComputePipelineInterface + ComparisonTraits;
type PipelineCache: PipelineCacheInterface + ComparisonTraits;
type CommandEncoder: CommandEncoderInterface + ComparisonTraits;
type ComputePass: ComputePassInterface + ComparisonTraits;
type RenderPass: RenderPassInterface + ComparisonTraits;
type CommandBuffer: CommandBufferInterface + ComparisonTraits;
type RenderBundleEncoder: RenderBundleEncoderInterface + ComparisonTraits;
type RenderBundle: RenderBundleInterface + ComparisonTraits;
type Surface: SurfaceInterface + ComparisonTraits;
type SurfaceOutputDetail: SurfaceOutputDetailInterface + ComparisonTraits;
type QueueWriteBuffer: QueueWriteBufferInterface + ComparisonTraits;
type BufferMappedRange: BufferMappedRangeInterface + ComparisonTraits;
}
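The `Dispatch*` types used throughout are enums over these associated types, one variant per compiled-in backend; a rough sketch (`CoreAdapter` appears earlier in this diff, while the `WebAdapter` name is an assumption):

    pub enum DispatchAdapter {
        #[cfg(wgpu_core)]
        Core(backend::wgpu_core::CoreAdapter),
        #[cfg(webgpu)]
        WebGPU(backend::webgpu::WebAdapter),
    }
    // With a single backend enabled every match has one arm, so the
    // dispatch optimizes away, as the module docs note.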
pub trait InstanceInterface: CommonTraits {
fn new(desc: crate::InstanceDescriptor) -> Self
where
Self: Sized;
unsafe fn create_surface(
&self,
target: crate::SurfaceTargetUnsafe,
) -> Result<DispatchSurface, crate::CreateSurfaceError>;
fn request_adapter(
&self,
options: &crate::RequestAdapterOptions<'_, '_>,
) -> Pin<Box<dyn RequestAdapterFuture>>;
fn poll_all_devices(&self, force_wait: bool) -> bool;
}
pub trait AdapterInterface: CommonTraits {
fn request_device(
&self,
desc: &crate::DeviceDescriptor<'_>,
trace_dir: Option<&std::path::Path>,
) -> Pin<Box<dyn RequestDeviceFuture>>;
fn is_surface_supported(&self, surface: &DispatchSurface) -> bool;
fn features(&self) -> crate::Features;
fn limits(&self) -> crate::Limits;
fn downlevel_capabilities(&self) -> crate::DownlevelCapabilities;
fn get_info(&self) -> crate::AdapterInfo;
fn get_texture_format_features(
&self,
format: crate::TextureFormat,
) -> crate::TextureFormatFeatures;
fn get_presentation_timestamp(&self) -> crate::PresentationTimestamp;
}
pub trait DeviceInterface: CommonTraits {
fn features(&self) -> crate::Features;
fn limits(&self) -> crate::Limits;
fn create_shader_module(
&self,
desc: crate::ShaderModuleDescriptor<'_>,
shader_bound_checks: wgt::ShaderBoundChecks,
) -> DispatchShaderModule;
unsafe fn create_shader_module_spirv(
&self,
desc: &crate::ShaderModuleDescriptorSpirV<'_>,
) -> DispatchShaderModule;
fn create_bind_group_layout(
&self,
desc: &crate::BindGroupLayoutDescriptor<'_>,
) -> DispatchBindGroupLayout;
fn create_bind_group(&self, desc: &crate::BindGroupDescriptor<'_>) -> DispatchBindGroup;
fn create_pipeline_layout(
&self,
desc: &crate::PipelineLayoutDescriptor<'_>,
) -> DispatchPipelineLayout;
fn create_render_pipeline(
&self,
desc: &crate::RenderPipelineDescriptor<'_>,
) -> DispatchRenderPipeline;
fn create_compute_pipeline(
&self,
desc: &crate::ComputePipelineDescriptor<'_>,
) -> DispatchComputePipeline;
unsafe fn create_pipeline_cache(
&self,
desc: &crate::PipelineCacheDescriptor<'_>,
) -> DispatchPipelineCache;
fn create_buffer(&self, desc: &crate::BufferDescriptor<'_>) -> DispatchBuffer;
fn create_texture(&self, desc: &crate::TextureDescriptor<'_>) -> DispatchTexture;
fn create_blas(
&self,
desc: &crate::CreateBlasDescriptor<'_>,
sizes: crate::BlasGeometrySizeDescriptors,
) -> (Option<u64>, DispatchBlas);
fn create_tlas(&self, desc: &crate::CreateTlasDescriptor<'_>) -> DispatchTlas;
fn create_sampler(&self, desc: &crate::SamplerDescriptor<'_>) -> DispatchSampler;
fn create_query_set(&self, desc: &crate::QuerySetDescriptor<'_>) -> DispatchQuerySet;
fn create_command_encoder(
&self,
desc: &crate::CommandEncoderDescriptor<'_>,
) -> DispatchCommandEncoder;
fn create_render_bundle_encoder(
&self,
desc: &crate::RenderBundleEncoderDescriptor<'_>,
) -> DispatchRenderBundleEncoder;
fn set_device_lost_callback(&self, device_lost_callback: BoxDeviceLostCallback);
fn on_uncaptured_error(&self, handler: Box<dyn crate::UncapturedErrorHandler>);
fn push_error_scope(&self, filter: crate::ErrorFilter);
fn pop_error_scope(&self) -> Pin<Box<dyn PopErrorScopeFuture>>;
fn start_capture(&self);
fn stop_capture(&self);
fn poll(&self, maintain: crate::Maintain) -> crate::MaintainResult;
fn get_internal_counters(&self) -> crate::InternalCounters;
fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport>;
fn destroy(&self);
}
pub trait QueueInterface: CommonTraits {
fn write_buffer(&self, buffer: &DispatchBuffer, offset: crate::BufferAddress, data: &[u8]);
fn create_staging_buffer(&self, size: crate::BufferSize) -> Option<DispatchQueueWriteBuffer>;
fn validate_write_buffer(
&self,
buffer: &DispatchBuffer,
offset: wgt::BufferAddress,
size: wgt::BufferSize,
) -> Option<()>;
fn write_staging_buffer(
&self,
buffer: &DispatchBuffer,
offset: crate::BufferAddress,
staging_buffer: &DispatchQueueWriteBuffer,
);
fn write_texture(
&self,
texture: crate::TexelCopyTextureInfo<'_>,
data: &[u8],
data_layout: crate::TexelCopyBufferLayout,
size: crate::Extent3d,
);
#[cfg(any(webgpu, webgl))]
fn copy_external_image_to_texture(
&self,
source: &wgt::CopyExternalImageSourceInfo,
dest: wgt::CopyExternalImageDestInfo<&crate::api::Texture>,
size: crate::Extent3d,
);
fn submit(&self, command_buffers: &mut dyn Iterator<Item = DispatchCommandBuffer>) -> u64;
fn get_timestamp_period(&self) -> f32;
fn on_submitted_work_done(&self, callback: BoxSubmittedWorkDoneCallback);
}
pub trait ShaderModuleInterface: CommonTraits {
fn get_compilation_info(&self) -> Pin<Box<dyn ShaderCompilationInfoFuture>>;
}
pub trait BindGroupLayoutInterface: CommonTraits {}
pub trait BindGroupInterface: CommonTraits {}
pub trait TextureViewInterface: CommonTraits {}
pub trait SamplerInterface: CommonTraits {}
pub trait BufferInterface: CommonTraits {
fn map_async(
&self,
mode: crate::MapMode,
range: Range<crate::BufferAddress>,
callback: BufferMapCallback,
);
fn get_mapped_range(&self, sub_range: Range<crate::BufferAddress>)
-> DispatchBufferMappedRange;
#[cfg(webgpu)]
fn get_mapped_range_as_array_buffer(
&self,
sub_range: Range<wgt::BufferAddress>,
) -> Option<js_sys::ArrayBuffer>;
fn unmap(&self);
fn destroy(&self);
}
pub trait TextureInterface: CommonTraits {
fn create_view(&self, desc: &crate::TextureViewDescriptor<'_>) -> DispatchTextureView;
fn destroy(&self);
}
pub trait BlasInterface: CommonTraits {
fn destroy(&self);
}
pub trait TlasInterface: CommonTraits {
fn destroy(&self);
}
pub trait QuerySetInterface: CommonTraits {}
pub trait PipelineLayoutInterface: CommonTraits {}
pub trait RenderPipelineInterface: CommonTraits {
fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout;
}
pub trait ComputePipelineInterface: CommonTraits {
fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout;
}
pub trait PipelineCacheInterface: CommonTraits {
fn get_data(&self) -> Option<Vec<u8>>;
}
pub trait CommandEncoderInterface: CommonTraits {
fn copy_buffer_to_buffer(
&self,
source: &DispatchBuffer,
source_offset: crate::BufferAddress,
destination: &DispatchBuffer,
destination_offset: crate::BufferAddress,
copy_size: crate::BufferAddress,
);
fn copy_buffer_to_texture(
&self,
source: crate::TexelCopyBufferInfo<'_>,
destination: crate::TexelCopyTextureInfo<'_>,
copy_size: crate::Extent3d,
);
fn copy_texture_to_buffer(
&self,
source: crate::TexelCopyTextureInfo<'_>,
destination: crate::TexelCopyBufferInfo<'_>,
copy_size: crate::Extent3d,
);
fn copy_texture_to_texture(
&self,
source: crate::TexelCopyTextureInfo<'_>,
destination: crate::TexelCopyTextureInfo<'_>,
copy_size: crate::Extent3d,
);
fn begin_compute_pass(&self, desc: &crate::ComputePassDescriptor<'_>) -> DispatchComputePass;
fn begin_render_pass(&self, desc: &crate::RenderPassDescriptor<'_>) -> DispatchRenderPass;
fn finish(&mut self) -> DispatchCommandBuffer;
fn clear_texture(
&self,
texture: &DispatchTexture,
subresource_range: &crate::ImageSubresourceRange,
);
fn clear_buffer(
&self,
buffer: &DispatchBuffer,
offset: crate::BufferAddress,
size: Option<crate::BufferAddress>,
);
fn insert_debug_marker(&self, label: &str);
fn push_debug_group(&self, label: &str);
fn pop_debug_group(&self);
fn write_timestamp(&self, query_set: &DispatchQuerySet, query_index: u32);
fn resolve_query_set(
&self,
query_set: &DispatchQuerySet,
first_query: u32,
query_count: u32,
destination: &DispatchBuffer,
destination_offset: crate::BufferAddress,
);
fn build_acceleration_structures_unsafe_tlas<'a>(
&self,
blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>,
tlas: &mut dyn Iterator<Item = &'a crate::TlasBuildEntry<'a>>,
);
fn build_acceleration_structures<'a>(
&self,
blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>,
tlas: &mut dyn Iterator<Item = &'a crate::TlasPackage>,
);
}
pub trait ComputePassInterface: CommonTraits {
fn set_pipeline(&mut self, pipeline: &DispatchComputePipeline);
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&DispatchBindGroup>,
offsets: &[crate::DynamicOffset],
);
fn set_push_constants(&mut self, offset: u32, data: &[u8]);
fn insert_debug_marker(&mut self, label: &str);
fn push_debug_group(&mut self, group_label: &str);
fn pop_debug_group(&mut self);
fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32);
fn begin_pipeline_statistics_query(&mut self, query_set: &DispatchQuerySet, query_index: u32);
fn end_pipeline_statistics_query(&mut self);
fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32);
fn dispatch_workgroups_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
);
fn end(&mut self);
}
pub trait RenderPassInterface: CommonTraits {
fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline);
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&DispatchBindGroup>,
offsets: &[crate::DynamicOffset],
);
fn set_index_buffer(
&mut self,
buffer: &DispatchBuffer,
index_format: crate::IndexFormat,
offset: crate::BufferAddress,
size: Option<crate::BufferSize>,
);
fn set_vertex_buffer(
&mut self,
slot: u32,
buffer: &DispatchBuffer,
offset: crate::BufferAddress,
size: Option<crate::BufferSize>,
);
fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]);
fn set_blend_constant(&mut self, color: crate::Color);
fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32);
fn set_viewport(
&mut self,
x: f32,
y: f32,
width: f32,
height: f32,
min_depth: f32,
max_depth: f32,
);
fn set_stencil_reference(&mut self, reference: u32);
fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
fn draw_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
);
fn draw_indexed_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
);
fn multi_draw_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
count: u32,
);
fn multi_draw_indexed_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
count: u32,
);
fn multi_draw_indirect_count(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
count_buffer: &DispatchBuffer,
count_buffer_offset: crate::BufferAddress,
max_count: u32,
);
fn multi_draw_indexed_indirect_count(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
count_buffer: &DispatchBuffer,
count_buffer_offset: crate::BufferAddress,
max_count: u32,
);
fn insert_debug_marker(&mut self, label: &str);
fn push_debug_group(&mut self, group_label: &str);
fn pop_debug_group(&mut self);
fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32);
fn begin_occlusion_query(&mut self, query_index: u32);
fn end_occlusion_query(&mut self);
fn begin_pipeline_statistics_query(&mut self, query_set: &DispatchQuerySet, query_index: u32);
fn end_pipeline_statistics_query(&mut self);
fn execute_bundles(&mut self, render_bundles: &mut dyn Iterator<Item = &DispatchRenderBundle>);
fn end(&mut self);
}
pub trait RenderBundleEncoderInterface: CommonTraits {
fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline);
fn set_bind_group(
&mut self,
index: u32,
bind_group: Option<&DispatchBindGroup>,
offsets: &[crate::DynamicOffset],
);
fn set_index_buffer(
&mut self,
buffer: &DispatchBuffer,
index_format: crate::IndexFormat,
offset: crate::BufferAddress,
size: Option<crate::BufferSize>,
);
fn set_vertex_buffer(
&mut self,
slot: u32,
buffer: &DispatchBuffer,
offset: crate::BufferAddress,
size: Option<crate::BufferSize>,
);
fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]);
fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
fn draw_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
);
fn draw_indexed_indirect(
&mut self,
indirect_buffer: &DispatchBuffer,
indirect_offset: crate::BufferAddress,
);
fn finish(self, desc: &crate::RenderBundleDescriptor<'_>) -> DispatchRenderBundle
where
Self: Sized;
}
pub trait CommandBufferInterface: CommonTraits {}
pub trait RenderBundleInterface: CommonTraits {}
pub trait SurfaceInterface: CommonTraits {
fn get_capabilities(&self, adapter: &DispatchAdapter) -> wgt::SurfaceCapabilities;
fn configure(&self, device: &DispatchDevice, config: &crate::SurfaceConfiguration);
fn get_current_texture(
&self,
) -> (
Option<DispatchTexture>,
crate::SurfaceStatus,
DispatchSurfaceOutputDetail,
);
}
pub trait SurfaceOutputDetailInterface: CommonTraits {
fn present(&self);
fn texture_discard(&self);
}
pub trait QueueWriteBufferInterface: CommonTraits {
fn slice(&self) -> &[u8];
fn slice_mut(&mut self) -> &mut [u8];
}
pub trait BufferMappedRangeInterface: CommonTraits {
fn slice(&self) -> &[u8];
fn slice_mut(&mut self) -> &mut [u8];
}
/// Generates Dispatch types for each of the interfaces. Each type is a wrapper around the
/// wgpu_core and webgpu types, and derefs to the appropriate interface trait-object.
///
/// When there is only one backend, devirtualization fires and all dispatches should turn into
/// direct calls. If there are multiple, some dispatching will occur.
///
/// This also provides `as_*` methods so that the backend implementations can downcast other
/// arguments to their concrete types. These are similarly free when there is only one backend.
///
/// In the future, we may want a truly generic backend, which could be extended from this enum.
macro_rules! dispatch_types {
(
wgpu_core = $wgpu_core_context:ty;
webgpu = $webgpu_context:ty;
{$(
type $name:ident = InterfaceTypes::$subtype:ident: $trait:ident;
)*}
) => {
$(
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum $name {
#[cfg(wgpu_core)]
Core(<$wgpu_core_context as InterfaceTypes>::$subtype),
#[cfg(webgpu)]
WebGPU(<$webgpu_context as InterfaceTypes>::$subtype),
}
impl $name {
#[cfg(wgpu_core)]
#[inline]
#[allow(unused)]
pub fn as_core(&self) -> &<$wgpu_core_context as InterfaceTypes>::$subtype {
match self {
Self::Core(value) => value,
_ => panic!(concat!(stringify!($name), " is not core")),
}
}
#[cfg(wgpu_core)]
#[inline]
#[allow(unused)]
pub fn as_core_mut(&mut self) -> &mut <$wgpu_core_context as InterfaceTypes>::$subtype {
match self {
Self::Core(value) => value,
_ => panic!(concat!(stringify!($name), " is not core")),
}
}
#[cfg(wgpu_core)]
#[inline]
#[allow(unused)]
pub fn as_core_opt(&self) -> Option<&<$wgpu_core_context as InterfaceTypes>::$subtype> {
match self {
Self::Core(value) => Some(value),
_ => None,
}
}
#[cfg(wgpu_core)]
#[inline]
#[allow(unused)]
pub fn as_core_mut_opt(&mut self) -> Option<&mut <$wgpu_core_context as InterfaceTypes>::$subtype> {
match self {
Self::Core(value) => Some(value),
_ => None,
}
}
#[cfg(webgpu)]
#[inline]
#[allow(unused)]
pub fn as_webgpu(&self) -> &<$webgpu_context as InterfaceTypes>::$subtype {
match self {
Self::WebGPU(value) => value,
_ => panic!(concat!(stringify!($name), " is not webgpu")),
}
}
#[cfg(webgpu)]
#[inline]
#[allow(unused)]
pub fn as_webgpu_mut(&mut self) -> &mut <$webgpu_context as InterfaceTypes>::$subtype {
match self {
Self::WebGPU(value) => value,
_ => panic!(concat!(stringify!($name), " is not webgpu")),
}
}
#[cfg(webgpu)]
#[inline]
#[allow(unused)]
pub fn as_webgpu_opt(&self) -> Option<&<$webgpu_context as InterfaceTypes>::$subtype> {
match self {
Self::WebGPU(value) => Some(value),
_ => None,
}
}
#[cfg(webgpu)]
#[inline]
#[allow(unused)]
pub fn as_webgpu_mut_opt(&mut self) -> Option<&mut <$webgpu_context as InterfaceTypes>::$subtype> {
match self {
Self::WebGPU(value) => Some(value),
_ => None,
}
}
}
#[cfg(wgpu_core)]
impl From<<$wgpu_core_context as InterfaceTypes>::$subtype> for $name {
#[inline]
fn from(value: <$wgpu_core_context as InterfaceTypes>::$subtype) -> Self {
Self::Core(value)
}
}
#[cfg(webgpu)]
impl From<<$webgpu_context as InterfaceTypes>::$subtype> for $name {
#[inline]
fn from(value: <$webgpu_context as InterfaceTypes>::$subtype) -> Self {
Self::WebGPU(value)
}
}
impl std::ops::Deref for $name {
type Target = dyn $trait;
#[inline]
fn deref(&self) -> &Self::Target {
match self {
#[cfg(wgpu_core)]
Self::Core(value) => value,
#[cfg(webgpu)]
Self::WebGPU(value) => value,
}
}
}
impl std::ops::DerefMut for $name {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
match self {
#[cfg(wgpu_core)]
Self::Core(value) => value,
#[cfg(webgpu)]
Self::WebGPU(value) => value,
}
}
}
)*
};
}
dispatch_types! {
wgpu_core = backend::ContextWgpuCore;
webgpu = backend::ContextWebGpu;
{
type DispatchInstance = InterfaceTypes::Instance: InstanceInterface;
type DispatchAdapter = InterfaceTypes::Adapter: AdapterInterface;
type DispatchDevice = InterfaceTypes::Device: DeviceInterface;
type DispatchQueue = InterfaceTypes::Queue: QueueInterface;
type DispatchShaderModule = InterfaceTypes::ShaderModule: ShaderModuleInterface;
type DispatchBindGroupLayout = InterfaceTypes::BindGroupLayout: BindGroupLayoutInterface;
type DispatchBindGroup = InterfaceTypes::BindGroup: BindGroupInterface;
type DispatchTextureView = InterfaceTypes::TextureView: TextureViewInterface;
type DispatchSampler = InterfaceTypes::Sampler: SamplerInterface;
type DispatchBuffer = InterfaceTypes::Buffer: BufferInterface;
type DispatchTexture = InterfaceTypes::Texture: TextureInterface;
type DispatchBlas = InterfaceTypes::Blas: BlasInterface;
type DispatchTlas = InterfaceTypes::Tlas: TlasInterface;
type DispatchQuerySet = InterfaceTypes::QuerySet: QuerySetInterface;
type DispatchPipelineLayout = InterfaceTypes::PipelineLayout: PipelineLayoutInterface;
type DispatchRenderPipeline = InterfaceTypes::RenderPipeline: RenderPipelineInterface;
type DispatchComputePipeline = InterfaceTypes::ComputePipeline: ComputePipelineInterface;
type DispatchPipelineCache = InterfaceTypes::PipelineCache: PipelineCacheInterface;
type DispatchCommandEncoder = InterfaceTypes::CommandEncoder: CommandEncoderInterface;
type DispatchComputePass = InterfaceTypes::ComputePass: ComputePassInterface;
type DispatchRenderPass = InterfaceTypes::RenderPass: RenderPassInterface;
type DispatchCommandBuffer = InterfaceTypes::CommandBuffer: CommandBufferInterface;
type DispatchRenderBundleEncoder = InterfaceTypes::RenderBundleEncoder: RenderBundleEncoderInterface;
type DispatchRenderBundle = InterfaceTypes::RenderBundle: RenderBundleInterface;
type DispatchSurface = InterfaceTypes::Surface: SurfaceInterface;
type DispatchSurfaceOutputDetail = InterfaceTypes::SurfaceOutputDetail: SurfaceOutputDetailInterface;
type DispatchQueueWriteBuffer = InterfaceTypes::QueueWriteBuffer: QueueWriteBufferInterface;
type DispatchBufferMappedRange = InterfaceTypes::BufferMappedRange: BufferMappedRangeInterface;
}
}
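// An abridged sketch (assuming both backends are compiled in) of what the
// macro invocation above generates for a single entry:
//
//     pub enum DispatchBuffer {
//         Core(<backend::ContextWgpuCore as InterfaceTypes>::Buffer),
//         WebGPU(<backend::ContextWebGpu as InterfaceTypes>::Buffer),
//     }
//
//     // `Deref<Target = dyn BufferInterface>` routes each call to the active
//     // backend; with a single enabled backend the match is single-armed and
//     // devirtualizes into a direct call.
//     fn example(buffer: &DispatchBuffer) {
//         buffer.destroy();
//     }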

View File

@@ -17,6 +17,7 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/trunk/logo.png")]
#![warn(missing_docs, rust_2018_idioms, unsafe_op_in_unsafe_fn)]
#![allow(clippy::arc_with_non_send_sync)]
//
//
@@ -26,9 +27,9 @@
mod api;
mod backend;
mod context;
mod cmp;
mod dispatch;
mod macros;
mod send_sync;
pub mod util;
//
@@ -37,16 +38,6 @@ pub mod util;
//
//
#[allow(unused_imports)] // WebGPU needs this
use context::Context;
use send_sync::*;
type C = dyn context::DynContext;
#[cfg(send_sync)]
type Data = dyn std::any::Any + Send + Sync;
#[cfg(not(send_sync))]
type Data = dyn std::any::Any;
//
//
// Public re-exports

View File

@@ -1,27 +0,0 @@
use std::any::Any;
use std::fmt;
use wgt::WasmNotSendSync;
pub trait AnyWasmNotSendSync: Any + WasmNotSendSync {
fn upcast_any_ref(&self) -> &dyn Any;
}
impl<T: Any + WasmNotSendSync> AnyWasmNotSendSync for T {
#[inline]
fn upcast_any_ref(&self) -> &dyn Any {
self
}
}
impl dyn AnyWasmNotSendSync + 'static {
#[inline]
pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
self.upcast_any_ref().downcast_ref::<T>()
}
}
impl fmt::Debug for dyn AnyWasmNotSendSync {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Any").finish_non_exhaustive()
}
}

View File

@@ -117,7 +117,6 @@ impl StagingBelt {
} else {
let size = self.chunk_size.max(size.get());
Chunk {
#[allow(clippy::arc_with_non_send_sync)] // False positive on emscripten
buffer: Arc::new(device.create_buffer(&BufferDescriptor {
label: Some("(wgpu internal) StagingBelt staging buffer"),
size,

View File

@@ -23,6 +23,8 @@ pub use wgt::{
math::*, DispatchIndirectArgs, DrawIndexedIndirectArgs, DrawIndirectArgs, TextureDataOrder,
};
use crate::dispatch;
/// Treat the given byte slice as a SPIR-V module.
///
/// # Panic
@@ -86,7 +88,7 @@ pub fn make_spirv_raw(data: &[u8]) -> Cow<'_, [u32]> {
/// CPU accessible buffer used to download data back from the GPU.
pub struct DownloadBuffer {
_gpu_buffer: Arc<super::Buffer>,
mapped_range: Box<dyn crate::context::BufferMappedRange>,
mapped_range: dispatch::DispatchBufferMappedRange,
}
impl DownloadBuffer {
@@ -102,7 +104,6 @@ impl DownloadBuffer {
None => buffer.buffer.map_context.lock().total_size - buffer.offset,
};
#[allow(clippy::arc_with_non_send_sync)] // False positive on emscripten
let download = Arc::new(device.create_buffer(&super::BufferDescriptor {
size,
usage: super::BufferUsages::COPY_DST | super::BufferUsages::MAP_READ,
@@ -125,11 +126,7 @@ impl DownloadBuffer {
return;
}
let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
&*download.context,
download.data.as_ref(),
0..size,
);
let mapped_range = download.inner.get_mapped_range(0..size);
callback(Ok(Self {
_gpu_buffer: download,
mapped_range,