From 338678ad5f66b74d1df1daee6afb028f964244b8 Mon Sep 17 00:00:00 2001 From: Teodor Tanasoaia <28601907+teoxoy@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:00:19 +0200 Subject: [PATCH] Remove IDs from wgpu traits (#6134) Remove `wgpu`'s `.global_id()` getters. Implement `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord` for wgpu resources. --- CHANGELOG.md | 10 + tests/tests/bind_group_layout_dedup.rs | 156 +- wgpu/src/api/adapter.rs | 53 +- wgpu/src/api/bind_group.rs | 14 +- wgpu/src/api/bind_group_layout.rs | 15 +- wgpu/src/api/buffer.rs | 38 +- wgpu/src/api/command_buffer.rs | 7 +- wgpu/src/api/command_encoder.rs | 64 +- wgpu/src/api/compute_pass.rs | 27 +- wgpu/src/api/compute_pipeline.rs | 25 +- wgpu/src/api/device.rs | 150 +- wgpu/src/api/id.rs | 67 - wgpu/src/api/instance.rs | 22 +- wgpu/src/api/mod.rs | 34 +- wgpu/src/api/pipeline_cache.rs | 8 +- wgpu/src/api/pipeline_layout.rs | 15 +- wgpu/src/api/query_set.rs | 14 +- wgpu/src/api/queue.rs | 34 +- wgpu/src/api/render_bundle.rs | 15 +- wgpu/src/api/render_bundle_encoder.rs | 22 +- wgpu/src/api/render_pass.rs | 60 +- wgpu/src/api/render_pipeline.rs | 23 +- wgpu/src/api/sampler.rs | 14 +- wgpu/src/api/shader_module.rs | 18 +- wgpu/src/api/surface.rs | 36 +- wgpu/src/api/texture.rs | 30 +- wgpu/src/api/texture_view.rs | 23 +- wgpu/src/backend/webgpu.rs | 573 ++----- wgpu/src/backend/wgpu_core.rs | 1252 ++++++--------- wgpu/src/context.rs | 2035 +++++------------------- wgpu/src/util/mod.rs | 1 - 31 files changed, 1268 insertions(+), 3587 deletions(-) delete mode 100644 wgpu/src/api/id.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 21ec6bde0..c089dd192 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,16 @@ which we're hoping to build performance improvements upon in the future. By @wumpf in [#6069](https://github.com/gfx-rs/wgpu/pull/6069), [#6099](https://github.com/gfx-rs/wgpu/pull/6099), [#6100](https://github.com/gfx-rs/wgpu/pull/6100). +#### `wgpu`'s resources no longer have `.global_id()` getters + +`wgpu-core`'s internals no longer use nor need IDs and we are moving towards removing IDs +completely. This is a step in that direction. + +Current users of `.global_id()` are encouraged to make use of the `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord` +traits that have now been implemented for `wgpu` resources. + +By @teoxoy [#6134](https://github.com/gfx-rs/wgpu/pull/6134). + ### New Features #### Naga diff --git a/tests/tests/bind_group_layout_dedup.rs b/tests/tests/bind_group_layout_dedup.rs index 32f71b89d..b322b019b 100644 --- a/tests/tests/bind_group_layout_dedup.rs +++ b/tests/tests/bind_group_layout_dedup.rs @@ -31,129 +31,77 @@ static BIND_GROUP_LAYOUT_DEDUPLICATION: GpuTestConfiguration = GpuTestConfigurat .run_async(bgl_dedupe); async fn bgl_dedupe(ctx: TestingContext) { - let entries_1 = &[]; + let entries = &[]; - let entries_2 = &[ENTRY]; - - // Block so we can force all resource to die. 
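Illustrative migration sketch for the CHANGELOG note above, not taken from the patch itself: `count_distinct_layouts` is a hypothetical helper showing that, with `.global_id()` removed, resource handles (or references to them) can be compared directly or used as hash-set/map keys, relying only on the `PartialEq`/`Eq`/`Hash` impls this PR adds.

```rust
use std::collections::HashSet;

// Hypothetical helper sketching the migration away from `.global_id()`:
// `wgpu::BindGroupLayout` now implements `Eq` + `Hash`, so references to the
// handles themselves can serve as set/map keys.
fn count_distinct_layouts<'a>(
    layouts: impl IntoIterator<Item = &'a wgpu::BindGroupLayout>,
) -> usize {
    let unique: HashSet<&wgpu::BindGroupLayout> = layouts.into_iter().collect();
    unique.len()
}

// Direct comparison also works now, e.g. `layout_a == layout_b`,
// replacing the old `layout_a.global_id() == layout_b.global_id()`.
```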
- { - let bgl_1a = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); - - let bgl_2 = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_2, - }); - - let bgl_1b = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); - - let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + let bgl_1a = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: None, - layout: &bgl_1a, - entries: &[], + entries, }); - let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + let bgl_1b = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: None, - layout: &bgl_1b, - entries: &[], + entries, }); - let pipeline_layout = ctx - .device - .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: None, - bind_group_layouts: &[&bgl_1b], - push_constant_ranges: &[], - }); + let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bgl_1a, + entries: &[], + }); - let module = ctx - .device - .create_shader_module(wgpu::ShaderModuleDescriptor { - label: None, - source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), - }); + let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bgl_1b, + entries: &[], + }); - let desc = wgpu::ComputePipelineDescriptor { + let pipeline_layout = ctx + .device + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: None, - layout: Some(&pipeline_layout), - module: &module, - entry_point: Some("no_resources"), - compilation_options: Default::default(), - cache: None, - }; - - let pipeline = ctx.device.create_compute_pipeline(&desc); - - let mut encoder = ctx.device.create_command_encoder(&Default::default()); - - let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { - label: None, - timestamp_writes: None, + bind_group_layouts: &[&bgl_1b], + push_constant_ranges: &[], }); - pass.set_bind_group(0, &bg_1b, &[]); - pass.set_pipeline(&pipeline); - pass.dispatch_workgroups(1, 1, 1); + let module = ctx + .device + .create_shader_module(wgpu::ShaderModuleDescriptor { + label: None, + source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), + }); - pass.set_bind_group(0, &bg_1a, &[]); - pass.dispatch_workgroups(1, 1, 1); + let desc = wgpu::ComputePipelineDescriptor { + label: None, + layout: Some(&pipeline_layout), + module: &module, + entry_point: Some("no_resources"), + compilation_options: Default::default(), + cache: None, + }; - drop(pass); + let pipeline = ctx.device.create_compute_pipeline(&desc); - ctx.queue.submit(Some(encoder.finish())); + let mut encoder = ctx.device.create_command_encoder(&Default::default()); - // Abuse the fact that global_id is really just the bitpacked ids when targeting wgpu-core. 
- if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { - let bgl_1a_idx = bgl_1a.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_1a_idx, 0); - let bgl_2_idx = bgl_2.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_2_idx, 1); - let bgl_1b_idx = bgl_1b.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_1b_idx, 2); - } - } + let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { + label: None, + timestamp_writes: None, + }); - ctx.async_poll(wgpu::Maintain::wait()) - .await - .panic_on_timeout(); + pass.set_bind_group(0, &bg_1b, &[]); + pass.set_pipeline(&pipeline); + pass.dispatch_workgroups(1, 1, 1); - if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { - // Indices are made reusable as soon as the handle is dropped so we keep them around - // for the duration of the loop. - let mut bgls = Vec::new(); - let mut indices = Vec::new(); - // Now all of the BGL ids should be dead, so we should get the same ids again. - for _ in 0..=2 { - let test_bgl = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); + pass.set_bind_group(0, &bg_1a, &[]); + pass.dispatch_workgroups(1, 1, 1); - let test_bgl_idx = test_bgl.global_id().inner() & 0xFFFF_FFFF; - bgls.push(test_bgl); - indices.push(test_bgl_idx); - } - // We don't guarantee that the IDs will appear in the same order. Sort them - // and check that they all appear exactly once. - indices.sort(); - for (i, index) in indices.iter().enumerate() { - assert_eq!(*index, i as u64); - } - } + drop(pass); + + ctx.queue.submit(Some(encoder.finish())); } #[gpu_test] diff --git a/wgpu/src/api/adapter.rs b/wgpu/src/api/adapter.rs index 5f43a461f..d4250a6c6 100644 --- a/wgpu/src/api/adapter.rs +++ b/wgpu/src/api/adapter.rs @@ -1,6 +1,6 @@ use std::{future::Future, sync::Arc, thread}; -use crate::context::{DeviceRequest, DynContext, ObjectId}; +use crate::context::{DeviceRequest, DynContext}; use crate::*; /// Handle to a physical graphics and/or compute device. @@ -14,7 +14,6 @@ use crate::*; #[derive(Debug)] pub struct Adapter { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -23,7 +22,7 @@ static_assertions::assert_impl_all!(Adapter: Send, Sync); impl Drop for Adapter { fn drop(&mut self) { if !thread::panicking() { - self.context.adapter_drop(&self.id, self.data.as_ref()) + self.context.adapter_drop(self.data.as_ref()) } } } @@ -40,14 +39,6 @@ pub type RequestAdapterOptions<'a, 'b> = RequestAdapterOptionsBase<&'a Surface<' static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync); impl Adapter { - /// Returns a globally-unique identifier for this `Adapter`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Requests a connection to a physical device, creating a logical device. /// /// Returns the [`Device`] together with a [`Queue`] that executes command buffers. 
@@ -80,7 +71,6 @@ impl Adapter { let context = Arc::clone(&self.context); let device = DynContext::adapter_request_device( &*self.context, - &self.id, self.data.as_ref(), desc, trace_path, @@ -88,20 +78,16 @@ impl Adapter { async move { device.await.map( |DeviceRequest { - device_id, device_data, - queue_id, queue_data, }| { ( Device { context: Arc::clone(&context), - id: device_id, data: device_data, }, Queue { context, - id: queue_id, data: queue_data, }, ) @@ -131,18 +117,21 @@ impl Adapter { // Part of the safety requirements is that the device was generated from the same adapter. // Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices. .unwrap() - .create_device_from_hal(&self.id.into(), hal_device, desc, trace_path) + .create_device_from_hal( + crate::context::downcast_ref(&self.data), + hal_device, + desc, + trace_path, + ) } .map(|(device, queue)| { ( Device { context: Arc::clone(&context), - id: device.id().into(), data: Box::new(device), }, Queue { context, - id: queue.id().into(), data: Box::new(queue), }, ) @@ -178,7 +167,12 @@ impl Adapter { .as_any() .downcast_ref::() { - unsafe { ctx.adapter_as_hal::(self.id.into(), hal_adapter_callback) } + unsafe { + ctx.adapter_as_hal::( + crate::context::downcast_ref(&self.data), + hal_adapter_callback, + ) + } } else { hal_adapter_callback(None) } @@ -188,31 +182,29 @@ impl Adapter { pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool { DynContext::adapter_is_surface_supported( &*self.context, - &self.id, self.data.as_ref(), - &surface.id, surface.surface_data.as_ref(), ) } /// The features which can be used to create devices on this adapter. pub fn features(&self) -> Features { - DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_features(&*self.context, self.data.as_ref()) } /// The best limits which can be used to create devices on this adapter. pub fn limits(&self) -> Limits { - DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_limits(&*self.context, self.data.as_ref()) } /// Get info about the adapter itself. pub fn get_info(&self) -> AdapterInfo { - DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_get_info(&*self.context, self.data.as_ref()) } /// Get info about the adapter itself. pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities { - DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref()) } /// Returns the features supported for a given texture format by this adapter. @@ -220,12 +212,7 @@ impl Adapter { /// Note that the WebGPU spec further restricts the available usages/features. /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature. pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures { - DynContext::adapter_get_texture_format_features( - &*self.context, - &self.id, - self.data.as_ref(), - format, - ) + DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format) } /// Generates a timestamp using the clock used by the presentation engine. 
@@ -250,6 +237,6 @@ impl Adapter { // /// [Instant]: std::time::Instant pub fn get_presentation_timestamp(&self) -> PresentationTimestamp { - DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref()) } } diff --git a/wgpu/src/api/bind_group.rs b/wgpu/src/api/bind_group.rs index 51c1efac7..42a774b29 100644 --- a/wgpu/src/api/bind_group.rs +++ b/wgpu/src/api/bind_group.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a binding group. @@ -14,26 +13,17 @@ use crate::*; #[derive(Debug)] pub struct BindGroup { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroup: Send, Sync); -impl BindGroup { - /// Returns a globally-unique identifier for this `BindGroup`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(BindGroup); impl Drop for BindGroup { fn drop(&mut self) { if !thread::panicking() { - self.context.bind_group_drop(&self.id, self.data.as_ref()); + self.context.bind_group_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/bind_group_layout.rs b/wgpu/src/api/bind_group_layout.rs index 1268c664f..db335689c 100644 --- a/wgpu/src/api/bind_group_layout.rs +++ b/wgpu/src/api/bind_group_layout.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a binding group layout. @@ -17,27 +16,17 @@ use crate::*; #[derive(Debug)] pub struct BindGroupLayout { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync); -impl BindGroupLayout { - /// Returns a globally-unique identifier for this `BindGroupLayout`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(BindGroupLayout); impl Drop for BindGroupLayout { fn drop(&mut self) { if !thread::panicking() { - self.context - .bind_group_layout_drop(&self.id, self.data.as_ref()); + self.context.bind_group_layout_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/buffer.rs b/wgpu/src/api/buffer.rs index 6f5463799..d5687a78d 100644 --- a/wgpu/src/api/buffer.rs +++ b/wgpu/src/api/buffer.rs @@ -7,7 +7,7 @@ use std::{ use parking_lot::Mutex; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Handle to a GPU-accessible buffer. @@ -173,7 +173,6 @@ use crate::*; #[derive(Debug)] pub struct Buffer { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) map_context: Mutex, pub(crate) size: wgt::BufferAddress, @@ -183,15 +182,9 @@ pub struct Buffer { #[cfg(send_sync)] static_assertions::assert_impl_all!(Buffer: Send, Sync); -impl Buffer { - /// Returns a globally-unique identifier for this `Buffer`. - /// - /// Calling this method multiple times on the same object will always return the same value. 
- /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(Buffer); +impl Buffer { /// Return the binding view of the entire buffer. pub fn as_entire_binding(&self) -> BindingResource<'_> { BindingResource::Buffer(self.as_entire_buffer_binding()) @@ -217,14 +210,17 @@ impl Buffer { &self, hal_buffer_callback: F, ) -> R { - let id = self.id; - if let Some(ctx) = self .context .as_any() .downcast_ref::() { - unsafe { ctx.buffer_as_hal::(id.into(), hal_buffer_callback) } + unsafe { + ctx.buffer_as_hal::( + crate::context::downcast_ref(&self.data), + hal_buffer_callback, + ) + } } else { hal_buffer_callback(None) } @@ -256,12 +252,12 @@ impl Buffer { /// Flushes any pending write operations and unmaps the buffer from host memory. pub fn unmap(&self) { self.map_context.lock().reset(); - DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref()); + DynContext::buffer_unmap(&*self.context, self.data.as_ref()); } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref()); + DynContext::buffer_destroy(&*self.context, self.data.as_ref()); } /// Returns the length of the buffer allocation in bytes. @@ -343,12 +339,7 @@ impl<'a> BufferSlice<'a> { callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static, ) { let mut mc = self.buffer.map_context.lock(); - assert_eq!( - mc.initial_range, - 0..0, - "Buffer {:?} is already mapped", - self.buffer.id - ); + assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped"); let end = match self.size { Some(s) => self.offset + s.get(), None => mc.total_size, @@ -357,7 +348,6 @@ impl<'a> BufferSlice<'a> { DynContext::buffer_map_async( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), mode, self.offset..end, @@ -383,7 +373,6 @@ impl<'a> BufferSlice<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); let data = DynContext::buffer_get_mapped_range( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), self.offset..end, ); @@ -429,7 +418,6 @@ impl<'a> BufferSlice<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); let data = DynContext::buffer_get_mapped_range( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), self.offset..end, ); @@ -680,7 +668,7 @@ impl Drop for BufferViewMut<'_> { impl Drop for Buffer { fn drop(&mut self) { if !thread::panicking() { - self.context.buffer_drop(&self.id, self.data.as_ref()); + self.context.buffer_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/command_buffer.rs b/wgpu/src/api/command_buffer.rs index 4d56fe9b2..6c519ed65 100644 --- a/wgpu/src/api/command_buffer.rs +++ b/wgpu/src/api/command_buffer.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a command buffer on the GPU. 
@@ -13,7 +12,6 @@ use crate::*; #[derive(Debug)] pub struct CommandBuffer { pub(crate) context: Arc, - pub(crate) id: Option, pub(crate) data: Option>, } #[cfg(send_sync)] @@ -22,9 +20,8 @@ static_assertions::assert_impl_all!(CommandBuffer: Send, Sync); impl Drop for CommandBuffer { fn drop(&mut self) { if !thread::panicking() { - if let Some(id) = self.id.take() { - self.context - .command_buffer_drop(&id, self.data.take().unwrap().as_ref()); + if let Some(data) = self.data.take() { + self.context.command_buffer_drop(data.as_ref()); } } } diff --git a/wgpu/src/api/command_encoder.rs b/wgpu/src/api/command_encoder.rs index d8e8594a8..949efc809 100644 --- a/wgpu/src/api/command_encoder.rs +++ b/wgpu/src/api/command_encoder.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Encodes a series of GPU operations. @@ -15,7 +15,6 @@ use crate::*; #[derive(Debug)] pub struct CommandEncoder { pub(crate) context: Arc, - pub(crate) id: Option, pub(crate) data: Box, } #[cfg(send_sync)] @@ -24,9 +23,7 @@ static_assertions::assert_impl_all!(CommandEncoder: Send, Sync); impl Drop for CommandEncoder { fn drop(&mut self) { if !thread::panicking() { - if let Some(id) = self.id.take() { - self.context.command_encoder_drop(&id, self.data.as_ref()); - } + self.context.command_encoder_drop(self.data.as_ref()); } } } @@ -71,14 +68,9 @@ static_assertions::assert_impl_all!(ImageCopyTexture<'_>: Send, Sync); impl CommandEncoder { /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution. pub fn finish(mut self) -> CommandBuffer { - let (id, data) = DynContext::command_encoder_finish( - &*self.context, - self.id.take().unwrap(), - self.data.as_mut(), - ); + let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut()); CommandBuffer { context: Arc::clone(&self.context), - id: Some(id), data: Some(data), } } @@ -97,16 +89,10 @@ impl CommandEncoder { &'encoder mut self, desc: &RenderPassDescriptor<'_>, ) -> RenderPass<'encoder> { - let id = self.id.as_ref().unwrap(); - let (id, data) = DynContext::command_encoder_begin_render_pass( - &*self.context, - id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc); RenderPass { inner: RenderPassInner { - id, data, context: self.context.clone(), }, @@ -128,16 +114,13 @@ impl CommandEncoder { &'encoder mut self, desc: &ComputePassDescriptor<'_>, ) -> ComputePass<'encoder> { - let id = self.id.as_ref().unwrap(); - let (id, data) = DynContext::command_encoder_begin_compute_pass( + let data = DynContext::command_encoder_begin_compute_pass( &*self.context, - id, self.data.as_ref(), desc, ); ComputePass { inner: ComputePassInner { - id, data, context: self.context.clone(), }, @@ -162,12 +145,9 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_buffer_to_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - &source.id, source.data.as_ref(), source_offset, - &destination.id, destination.data.as_ref(), destination_offset, copy_size, @@ -183,7 +163,6 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_buffer_to_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, @@ -200,7 +179,6 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_texture_to_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, 
@@ -223,7 +201,6 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_texture_to_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, @@ -247,9 +224,8 @@ impl CommandEncoder { pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) { DynContext::command_encoder_clear_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - texture, + texture.data.as_ref(), subresource_range, ); } @@ -268,9 +244,8 @@ impl CommandEncoder { ) { DynContext::command_encoder_clear_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - buffer, + buffer.data.as_ref(), offset, size, ); @@ -278,25 +253,17 @@ impl CommandEncoder { /// Inserts debug marker. pub fn insert_debug_marker(&mut self, label: &str) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_insert_debug_marker( - &*self.context, - id, - self.data.as_ref(), - label, - ); + DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label); } /// Start record commands and group it into debug marker group. pub fn push_debug_group(&mut self, label: &str) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label); + DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label); } /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref()); + DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref()); } /// Resolves a query set, writing the results into the supplied destination buffer. @@ -312,13 +279,10 @@ impl CommandEncoder { ) { DynContext::command_encoder_resolve_query_set( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - &query_set.id, query_set.data.as_ref(), query_range.start, query_range.end - query_range.start, - &destination.id, destination.data.as_ref(), destination_offset, ) @@ -341,14 +305,12 @@ impl CommandEncoder { &mut self, hal_command_encoder_callback: F, ) -> Option { - use wgc::id::CommandEncoderId; - self.context .as_any() .downcast_ref::() .map(|ctx| unsafe { ctx.command_encoder_as_hal_mut::( - CommandEncoderId::from(self.id.unwrap()), + crate::context::downcast_ref(&self.data), hal_command_encoder_callback, ) }) @@ -372,9 +334,7 @@ impl CommandEncoder { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::command_encoder_write_timestamp( &*self.context, - self.id.as_ref().unwrap(), self.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) diff --git a/wgpu/src/api/compute_pass.rs b/wgpu/src/api/compute_pass.rs index 30123b805..bd0575bbd 100644 --- a/wgpu/src/api/compute_pass.rs +++ b/wgpu/src/api/compute_pass.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// In-progress recording of a compute pass. 
@@ -53,10 +53,8 @@ impl<'encoder> ComputePass<'encoder> { ) { DynContext::compute_pass_set_bind_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), index, - &bind_group.id, bind_group.data.as_ref(), offsets, ); @@ -66,9 +64,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) { DynContext::compute_pass_set_pipeline( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ); } @@ -77,7 +73,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn insert_debug_marker(&mut self, label: &str) { DynContext::compute_pass_insert_debug_marker( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -87,7 +82,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn push_debug_group(&mut self, label: &str) { DynContext::compute_pass_push_debug_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -95,11 +89,7 @@ impl<'encoder> ComputePass<'encoder> { /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - DynContext::compute_pass_pop_debug_group( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); } /// Dispatches compute work operations. @@ -108,7 +98,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { DynContext::compute_pass_dispatch_workgroups( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), x, y, @@ -126,9 +115,7 @@ impl<'encoder> ComputePass<'encoder> { ) { DynContext::compute_pass_dispatch_workgroups_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -148,7 +135,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) { DynContext::compute_pass_set_push_constants( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), offset, data, @@ -167,9 +153,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::compute_pass_write_timestamp( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) @@ -183,9 +167,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::compute_pass_begin_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ); @@ -196,7 +178,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn end_pipeline_statistics_query(&mut self) { DynContext::compute_pass_end_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), ); } @@ -204,7 +185,6 @@ impl<'encoder> ComputePass<'encoder> { #[derive(Debug)] pub(crate) struct ComputePassInner { - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) context: Arc, } @@ -212,8 +192,7 @@ pub(crate) struct ComputePassInner { impl Drop for ComputePassInner { fn drop(&mut self) { if !thread::panicking() { - self.context - .compute_pass_end(&mut self.id, self.data.as_mut()); + self.context.compute_pass_end(self.data.as_mut()); } } } diff --git a/wgpu/src/api/compute_pipeline.rs b/wgpu/src/api/compute_pipeline.rs 
index ea2de4b8b..50f17122e 100644 --- a/wgpu/src/api/compute_pipeline.rs +++ b/wgpu/src/api/compute_pipeline.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a compute pipeline. @@ -12,38 +11,28 @@ use crate::*; #[derive(Debug)] pub struct ComputePipeline { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ComputePipeline: Send, Sync); -impl ComputePipeline { - /// Returns a globally-unique identifier for this `ComputePipeline`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(ComputePipeline); +impl ComputePipeline { /// Get an object representing the bind group layout at a given index. pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { let context = Arc::clone(&self.context); - let (id, data) = self.context.compute_pipeline_get_bind_group_layout( - &self.id, - self.data.as_ref(), - index, - ); - BindGroupLayout { context, id, data } + let data = self + .context + .compute_pipeline_get_bind_group_layout(self.data.as_ref(), index); + BindGroupLayout { context, data } } } impl Drop for ComputePipeline { fn drop(&mut self) { if !thread::panicking() { - self.context - .compute_pipeline_drop(&self.id, self.data.as_ref()); + self.context.compute_pipeline_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/device.rs b/wgpu/src/api/device.rs index 6d40f4f86..8d1705ab4 100644 --- a/wgpu/src/api/device.rs +++ b/wgpu/src/api/device.rs @@ -2,7 +2,7 @@ use std::{error, fmt, future::Future, sync::Arc, thread}; use parking_lot::Mutex; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Open connection to a graphics and/or compute device. @@ -16,7 +16,6 @@ use crate::*; #[derive(Debug)] pub struct Device { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -32,14 +31,6 @@ pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor>; static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync); impl Device { - /// Returns a globally-unique identifier for this `Device`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed. /// /// Return `true` if the queue is empty, or `false` if there are more queue @@ -50,7 +41,7 @@ impl Device { /// /// When running on WebGPU, this is a no-op. `Device`s are automatically polled. pub fn poll(&self, maintain: Maintain) -> MaintainResult { - DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain) + DynContext::device_poll(&*self.context, self.data.as_ref(), maintain) } /// The features which can be used on this device. @@ -58,7 +49,7 @@ impl Device { /// No additional features can be used, even if the underlying adapter can support them. 
#[must_use] pub fn features(&self) -> Features { - DynContext::device_features(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_features(&*self.context, self.data.as_ref()) } /// The limits which can be used on this device. @@ -66,7 +57,7 @@ impl Device { /// No better limits can be used, even if the underlying adapter can support them. #[must_use] pub fn limits(&self) -> Limits { - DynContext::device_limits(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_limits(&*self.context, self.data.as_ref()) } /// Creates a shader module from either SPIR-V or WGSL source code. @@ -85,16 +76,14 @@ impl Device { /// #[must_use] pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule { - let (id, data) = DynContext::device_create_shader_module( + let data = DynContext::device_create_shader_module( &*self.context, - &self.id, self.data.as_ref(), desc, wgt::ShaderBoundChecks::new(), ); ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -114,16 +103,14 @@ impl Device { &self, desc: ShaderModuleDescriptor<'_>, ) -> ShaderModule { - let (id, data) = DynContext::device_create_shader_module( + let data = DynContext::device_create_shader_module( &*self.context, - &self.id, self.data.as_ref(), desc, unsafe { wgt::ShaderBoundChecks::unchecked() }, ); ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -141,17 +128,11 @@ impl Device { &self, desc: &ShaderModuleDescriptorSpirV<'_>, ) -> ShaderModule { - let (id, data) = unsafe { - DynContext::device_create_shader_module_spirv( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ) + let data = unsafe { + DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc) }; ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -159,15 +140,10 @@ impl Device { /// Creates an empty [`CommandEncoder`]. #[must_use] pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder { - let (id, data) = DynContext::device_create_command_encoder( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc); CommandEncoder { context: Arc::clone(&self.context), - id: Some(id), data, } } @@ -178,15 +154,13 @@ impl Device { &self, desc: &RenderBundleEncoderDescriptor<'_>, ) -> RenderBundleEncoder<'_> { - let (id, data) = DynContext::device_create_render_bundle_encoder( + let data = DynContext::device_create_render_bundle_encoder( &*self.context, - &self.id, self.data.as_ref(), desc, ); RenderBundleEncoder { context: Arc::clone(&self.context), - id, data, parent: self, _p: Default::default(), @@ -196,15 +170,9 @@ impl Device { /// Creates a new [`BindGroup`]. 
#[must_use] pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup { - let (id, data) = DynContext::device_create_bind_group( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc); BindGroup { context: Arc::clone(&self.context), - id, data, } } @@ -215,15 +183,10 @@ impl Device { &self, desc: &BindGroupLayoutDescriptor<'_>, ) -> BindGroupLayout { - let (id, data) = DynContext::device_create_bind_group_layout( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc); BindGroupLayout { context: Arc::clone(&self.context), - id, data, } } @@ -231,15 +194,10 @@ impl Device { /// Creates a [`PipelineLayout`]. #[must_use] pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout { - let (id, data) = DynContext::device_create_pipeline_layout( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc); PipelineLayout { context: Arc::clone(&self.context), - id, data, } } @@ -247,15 +205,10 @@ impl Device { /// Creates a [`RenderPipeline`]. #[must_use] pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline { - let (id, data) = DynContext::device_create_render_pipeline( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc); RenderPipeline { context: Arc::clone(&self.context), - id, data, } } @@ -263,15 +216,10 @@ impl Device { /// Creates a [`ComputePipeline`]. #[must_use] pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline { - let (id, data) = DynContext::device_create_compute_pipeline( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc); ComputePipeline { context: Arc::clone(&self.context), - id, data, } } @@ -284,12 +232,10 @@ impl Device { map_context.initial_range = 0..desc.size; } - let (id, data) = - DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc); Buffer { context: Arc::clone(&self.context), - id, data, map_context: Mutex::new(map_context), size: desc.size, @@ -302,11 +248,9 @@ impl Device { /// `desc` specifies the general format of the texture. 
#[must_use] pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture { - let (id, data) = - DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc); Texture { context: Arc::clone(&self.context), - id, data, owned: true, descriptor: TextureDescriptor { @@ -340,13 +284,12 @@ impl Device { .unwrap() .create_texture_from_hal::( hal_texture, - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(&self.data), desc, ) }; Texture { context: Arc::clone(&self.context), - id: ObjectId::from(texture.id()), data: Box::new(texture), owned: true, descriptor: TextureDescriptor { @@ -376,7 +319,7 @@ impl Device { map_context.initial_range = 0..desc.size; } - let (id, buffer) = unsafe { + let buffer = unsafe { self.context .as_any() .downcast_ref::() @@ -385,14 +328,13 @@ impl Device { .unwrap() .create_buffer_from_hal::( hal_buffer, - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(&self.data), desc, ) }; Buffer { context: Arc::clone(&self.context), - id: ObjectId::from(id), data: Box::new(buffer), map_context: Mutex::new(map_context), size: desc.size, @@ -405,11 +347,9 @@ impl Device { /// `desc` specifies the behavior of the sampler. #[must_use] pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler { - let (id, data) = - DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc); Sampler { context: Arc::clone(&self.context), - id, data, } } @@ -417,11 +357,9 @@ impl Device { /// Creates a new [`QuerySet`]. #[must_use] pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet { - let (id, data) = - DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc); QuerySet { context: Arc::clone(&self.context), - id, data, } } @@ -429,29 +367,28 @@ impl Device { /// Set a callback for errors that are not handled in error scopes. pub fn on_uncaptured_error(&self, handler: Box) { self.context - .device_on_uncaptured_error(&self.id, self.data.as_ref(), handler); + .device_on_uncaptured_error(self.data.as_ref(), handler); } /// Push an error scope. pub fn push_error_scope(&self, filter: ErrorFilter) { self.context - .device_push_error_scope(&self.id, self.data.as_ref(), filter); + .device_push_error_scope(self.data.as_ref(), filter); } /// Pop an error scope. pub fn pop_error_scope(&self) -> impl Future> + WasmNotSend { - self.context - .device_pop_error_scope(&self.id, self.data.as_ref()) + self.context.device_pop_error_scope(self.data.as_ref()) } /// Starts frame capture. pub fn start_capture(&self) { - DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_start_capture(&*self.context, self.data.as_ref()) } /// Stops frame capture. pub fn stop_capture(&self) { - DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_stop_capture(&*self.context, self.data.as_ref()) } /// Query internal counters from the native backend for debugging purposes. @@ -462,7 +399,7 @@ impl Device { /// If a counter is not set, its contains its default value (zero). 
#[must_use] pub fn get_internal_counters(&self) -> wgt::InternalCounters { - DynContext::device_get_internal_counters(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_get_internal_counters(&*self.context, self.data.as_ref()) } /// Generate an GPU memory allocation report if the underlying backend supports it. @@ -472,7 +409,7 @@ impl Device { /// for example as a workaround for driver issues. #[must_use] pub fn generate_allocator_report(&self) -> Option { - DynContext::generate_allocator_report(&*self.context, &self.id, self.data.as_ref()) + DynContext::generate_allocator_report(&*self.context, self.data.as_ref()) } /// Apply a callback to this `Device`'s underlying backend device. @@ -504,7 +441,7 @@ impl Device { .downcast_ref::() .map(|ctx| unsafe { ctx.device_as_hal::( - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(&self.data), hal_device_callback, ) }) @@ -512,7 +449,7 @@ impl Device { /// Destroy this device. pub fn destroy(&self) { - DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_destroy(&*self.context, self.data.as_ref()) } /// Set a DeviceLostCallback on this device. @@ -522,7 +459,6 @@ impl Device { ) { DynContext::device_set_device_lost_callback( &*self.context, - &self.id, self.data.as_ref(), Box::new(callback), ) @@ -531,7 +467,7 @@ impl Device { /// Test-only function to make this device invalid. #[doc(hidden)] pub fn make_invalid(&self) { - DynContext::device_make_invalid(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_make_invalid(&*self.context, self.data.as_ref()) } /// Create a [`PipelineCache`] with initial data @@ -576,17 +512,11 @@ impl Device { &self, desc: &PipelineCacheDescriptor<'_>, ) -> PipelineCache { - let (id, data) = unsafe { - DynContext::device_create_pipeline_cache( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ) + let data = unsafe { + DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc) }; PipelineCache { context: Arc::clone(&self.context), - id, data, } } @@ -595,7 +525,7 @@ impl Device { impl Drop for Device { fn drop(&mut self) { if !thread::panicking() { - self.context.device_drop(&self.id, self.data.as_ref()); + self.context.device_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/id.rs b/wgpu/src/api/id.rs deleted file mode 100644 index d9041883b..000000000 --- a/wgpu/src/api/id.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64}; - -use crate::context::ObjectId; - -/// Opaque globally-unique identifier -#[repr(transparent)] -pub struct Id(NonZeroU64, PhantomData<*mut T>); - -impl Id { - /// Create a new `Id` from a ObjectID. - pub(crate) fn new(id: ObjectId) -> Self { - Id(id.global_id(), PhantomData) - } - - /// For testing use only. We provide no guarantees about the actual value of the ids. - #[doc(hidden)] - pub fn inner(&self) -> u64 { - self.0.get() - } -} - -// SAFETY: `Id` is a bare `NonZeroU64`, the type parameter is a marker purely to avoid confusing Ids -// returned for different types , so `Id` can safely implement Send and Sync. -unsafe impl Send for Id {} - -// SAFETY: See the implementation for `Send`. 
-unsafe impl Sync for Id {} - -impl Clone for Id { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for Id {} - -impl fmt::Debug for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Id").field(&self.0).finish() - } -} - -impl PartialEq for Id { - fn eq(&self, other: &Id) -> bool { - self.0 == other.0 - } -} - -impl Eq for Id {} - -impl PartialOrd for Id { - fn partial_cmp(&self, other: &Id) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Id { - fn cmp(&self, other: &Id) -> Ordering { - self.0.cmp(&other.0) - } -} - -impl std::hash::Hash for Id { - fn hash(&self, state: &mut H) { - self.0.hash(state) - } -} diff --git a/wgpu/src/api/instance.rs b/wgpu/src/api/instance.rs index 26d8b863b..33a7b0f64 100644 --- a/wgpu/src/api/instance.rs +++ b/wgpu/src/api/instance.rs @@ -202,8 +202,6 @@ impl Instance { /// - `backends` - Backends from which to enumerate adapters. #[cfg(native)] pub fn enumerate_adapters(&self, backends: Backends) -> Vec { - use crate::context::ObjectId; - let context = Arc::clone(&self.context); self.context .as_any() @@ -211,10 +209,9 @@ impl Instance { .map(|ctx| { ctx.enumerate_adapters(backends) .into_iter() - .map(move |id| crate::Adapter { + .map(move |adapter| crate::Adapter { context: Arc::clone(&context), - id: ObjectId::from(id), - data: Box::new(()), + data: Box::new(adapter), }) .collect() }) @@ -234,11 +231,7 @@ impl Instance { ) -> impl Future> + WasmNotSend { let context = Arc::clone(&self.context); let adapter = self.context.instance_request_adapter(options); - async move { - adapter - .await - .map(|(id, data)| Adapter { context, id, data }) - } + async move { adapter.await.map(|data| Adapter { context, data }) } } /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`]. @@ -252,18 +245,16 @@ impl Instance { hal_adapter: hal::ExposedAdapter, ) -> Adapter { let context = Arc::clone(&self.context); - let id = unsafe { + let adapter = unsafe { context .as_any() .downcast_ref::() .unwrap() .create_adapter_from_hal(hal_adapter) - .into() }; Adapter { context, - id, - data: Box::new(()), + data: Box::new(adapter), } } @@ -355,12 +346,11 @@ impl Instance { &self, target: SurfaceTargetUnsafe, ) -> Result, CreateSurfaceError> { - let (id, data) = unsafe { self.context.instance_create_surface(target) }?; + let data = unsafe { self.context.instance_create_surface(target) }?; Ok(Surface { context: Arc::clone(&self.context), _handle_source: None, - id, surface_data: data, config: Mutex::new(None), }) diff --git a/wgpu/src/api/mod.rs b/wgpu/src/api/mod.rs index 819f6847c..52b9ec160 100644 --- a/wgpu/src/api/mod.rs +++ b/wgpu/src/api/mod.rs @@ -32,7 +32,6 @@ mod common_pipeline; mod compute_pass; mod compute_pipeline; mod device; -mod id; mod instance; mod pipeline_cache; mod pipeline_layout; @@ -59,7 +58,6 @@ pub use common_pipeline::*; pub use compute_pass::*; pub use compute_pipeline::*; pub use device::*; -pub use id::*; pub use instance::*; pub use pipeline_cache::*; pub use pipeline_layout::*; @@ -78,3 +76,35 @@ pub use texture_view::*; /// Object debugging label. pub type Label<'a> = Option<&'a str>; + +macro_rules! 
impl_partialeq_eq_hash { + ($ty:ty) => { + impl PartialEq for $ty { + fn eq(&self, other: &Self) -> bool { + std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref()) + } + } + impl Eq for $ty {} + + impl std::hash::Hash for $ty { + fn hash(&self, state: &mut H) { + let ptr = self.data.as_ref() as *const Data as *const (); + ptr.hash(state); + } + } + + impl PartialOrd for $ty { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + impl Ord for $ty { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let a = self.data.as_ref() as *const Data as *const (); + let b = other.data.as_ref() as *const Data as *const (); + a.cmp(&b) + } + } + }; +} +pub(crate) use impl_partialeq_eq_hash; diff --git a/wgpu/src/api/pipeline_cache.rs b/wgpu/src/api/pipeline_cache.rs index 42ab15b8b..800e786ca 100644 --- a/wgpu/src/api/pipeline_cache.rs +++ b/wgpu/src/api/pipeline_cache.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a pipeline cache, which is used to accelerate @@ -68,7 +67,6 @@ use crate::*; #[derive(Debug)] pub struct PipelineCache { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } @@ -83,16 +81,14 @@ impl PipelineCache { /// /// This function is unique to the Rust API of `wgpu`. pub fn get_data(&self) -> Option> { - self.context - .pipeline_cache_get_data(&self.id, self.data.as_ref()) + self.context.pipeline_cache_get_data(self.data.as_ref()) } } impl Drop for PipelineCache { fn drop(&mut self) { if !thread::panicking() { - self.context - .pipeline_cache_drop(&self.id, self.data.as_ref()); + self.context.pipeline_cache_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/pipeline_layout.rs b/wgpu/src/api/pipeline_layout.rs index f47ea1a17..2b89d2b7a 100644 --- a/wgpu/src/api/pipeline_layout.rs +++ b/wgpu/src/api/pipeline_layout.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a pipeline layout. @@ -12,27 +11,17 @@ use crate::*; #[derive(Debug)] pub struct PipelineLayout { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(PipelineLayout: Send, Sync); -impl PipelineLayout { - /// Returns a globally-unique identifier for this `PipelineLayout`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(PipelineLayout); impl Drop for PipelineLayout { fn drop(&mut self) { if !thread::panicking() { - self.context - .pipeline_layout_drop(&self.id, self.data.as_ref()); + self.context.pipeline_layout_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/query_set.rs b/wgpu/src/api/query_set.rs index 41c262bd9..a0cac6847 100644 --- a/wgpu/src/api/query_set.rs +++ b/wgpu/src/api/query_set.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a query set. @@ -11,27 +10,18 @@ use crate::*; #[derive(Debug)] pub struct QuerySet { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] #[cfg(send_sync)] static_assertions::assert_impl_all!(QuerySet: Send, Sync); -impl QuerySet { - /// Returns a globally-unique identifier for this `QuerySet`. 
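A standalone sketch of the identity semantics that `impl_partialeq_eq_hash!` (defined earlier in `wgpu/src/api/mod.rs`) gives every resource wrapper, using a hypothetical `Handle` type as a simplified analogue: equality and hashing go through the address of the boxed `data` payload, so a handle compares equal only to itself and hashing stays consistent with that.

```rust
use std::any::Any;
use std::hash::{Hash, Hasher};

// Hypothetical stand-in for a wgpu resource wrapper: identity lives in the
// address of the boxed backend data, mirroring `impl_partialeq_eq_hash!`.
struct Handle {
    data: Box<dyn Any>,
}

impl PartialEq for Handle {
    fn eq(&self, other: &Self) -> bool {
        // Compare the payload addresses, ignoring the vtable part of the fat pointer.
        std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref())
    }
}
impl Eq for Handle {}

impl Hash for Handle {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the thin data pointer, consistent with the equality above.
        (self.data.as_ref() as *const dyn Any as *const ()).hash(state);
    }
}

fn main() {
    let a = Handle { data: Box::new(0u32) };
    let b = Handle { data: Box::new(0u32) };
    assert!(a == a && a != b); // distinct boxes are never equal
}
```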
- /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(QuerySet); impl Drop for QuerySet { fn drop(&mut self) { if !thread::panicking() { - self.context.query_set_drop(&self.id, self.data.as_ref()); + self.context.query_set_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/queue.rs b/wgpu/src/api/queue.rs index c675f9f92..65e6dd1ce 100644 --- a/wgpu/src/api/queue.rs +++ b/wgpu/src/api/queue.rs @@ -4,7 +4,7 @@ use std::{ thread, }; -use crate::context::{DynContext, ObjectId, QueueWriteBuffer}; +use crate::context::{DynContext, QueueWriteBuffer}; use crate::*; /// Handle to a command queue on a device. @@ -17,7 +17,6 @@ use crate::*; #[derive(Debug)] pub struct Queue { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -26,7 +25,7 @@ static_assertions::assert_impl_all!(Queue: Send, Sync); impl Drop for Queue { fn drop(&mut self) { if !thread::panicking() { - self.context.queue_drop(&self.id, self.data.as_ref()); + self.context.queue_drop(self.data.as_ref()); } } } @@ -87,9 +86,7 @@ impl<'a> Drop for QueueWriteBufferView<'a> { fn drop(&mut self) { DynContext::queue_write_staging_buffer( &*self.queue.context, - &self.queue.id, self.queue.data.as_ref(), - &self.buffer.id, self.buffer.data.as_ref(), self.offset, &*self.inner, @@ -121,9 +118,7 @@ impl Queue { pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) { DynContext::queue_write_buffer( &*self.context, - &self.id, self.data.as_ref(), - &buffer.id, buffer.data.as_ref(), offset, data, @@ -168,19 +163,13 @@ impl Queue { profiling::scope!("Queue::write_buffer_with"); DynContext::queue_validate_write_buffer( &*self.context, - &self.id, self.data.as_ref(), - &buffer.id, buffer.data.as_ref(), offset, size, )?; - let staging_buffer = DynContext::queue_create_staging_buffer( - &*self.context, - &self.id, - self.data.as_ref(), - size, - )?; + let staging_buffer = + DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?; Some(QueueWriteBufferView { queue: self, buffer, @@ -222,7 +211,6 @@ impl Queue { ) { DynContext::queue_write_texture( &*self.context, - &self.id, self.data.as_ref(), texture, data, @@ -241,7 +229,6 @@ impl Queue { ) { DynContext::queue_copy_external_image_to_texture( &*self.context, - &self.id, self.data.as_ref(), source, dest, @@ -256,14 +243,10 @@ impl Queue { ) -> SubmissionIndex { let mut command_buffers = command_buffers .into_iter() - .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())); + .map(|mut comb| comb.data.take().unwrap()); - let data = DynContext::queue_submit( - &*self.context, - &self.id, - self.data.as_ref(), - &mut command_buffers, - ); + let data = + DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers); SubmissionIndex(data) } @@ -275,7 +258,7 @@ impl Queue { /// Timestamp values are represented in nanosecond values on WebGPU, see `` /// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required. 
pub fn get_timestamp_period(&self) -> f32 { - DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref()) + DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref()) } /// Registers a callback when the previous call to submit finishes running on the gpu. This callback @@ -292,7 +275,6 @@ impl Queue { pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) { DynContext::queue_on_submitted_work_done( &*self.context, - &self.id, self.data.as_ref(), Box::new(callback), ) diff --git a/wgpu/src/api/render_bundle.rs b/wgpu/src/api/render_bundle.rs index e80da93e2..5932458ae 100644 --- a/wgpu/src/api/render_bundle.rs +++ b/wgpu/src/api/render_bundle.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Pre-prepared reusable bundle of GPU operations. @@ -15,27 +14,17 @@ use crate::*; #[derive(Debug)] pub struct RenderBundle { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderBundle: Send, Sync); -impl RenderBundle { - /// Returns a globally-unique identifier for this `RenderBundle`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(RenderBundle); impl Drop for RenderBundle { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_bundle_drop(&self.id, self.data.as_ref()); + self.context.render_bundle_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/render_bundle_encoder.rs b/wgpu/src/api/render_bundle_encoder.rs index ae5829bee..446b4cbf8 100644 --- a/wgpu/src/api/render_bundle_encoder.rs +++ b/wgpu/src/api/render_bundle_encoder.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Encodes a series of GPU operations into a reusable "render bundle". @@ -17,7 +17,6 @@ use crate::*; #[derive(Debug)] pub struct RenderBundleEncoder<'a> { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) parent: &'a Device, /// This type should be !Send !Sync, because it represents an allocation on this thread's @@ -53,11 +52,9 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn impl<'a> RenderBundleEncoder<'a> { /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes. 
pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle { - let (id, data) = - DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc); + let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc); RenderBundle { context: Arc::clone(&self.context), - id, data, } } @@ -74,10 +71,8 @@ impl<'a> RenderBundleEncoder<'a> { ) { DynContext::render_bundle_encoder_set_bind_group( &*self.parent.context, - &mut self.id, self.data.as_mut(), index, - &bind_group.id, bind_group.data.as_ref(), offsets, ) @@ -89,9 +84,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) { DynContext::render_bundle_encoder_set_pipeline( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ) } @@ -103,9 +96,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) { DynContext::render_bundle_encoder_set_index_buffer( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), index_format, buffer_slice.offset, @@ -126,10 +117,8 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) { DynContext::render_bundle_encoder_set_vertex_buffer( &*self.parent.context, - &mut self.id, self.data.as_mut(), slot, - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), buffer_slice.offset, buffer_slice.size, @@ -157,7 +146,6 @@ impl<'a> RenderBundleEncoder<'a> { pub fn draw(&mut self, vertices: Range, instances: Range) { DynContext::render_bundle_encoder_draw( &*self.parent.context, - &mut self.id, self.data.as_mut(), vertices, instances, @@ -188,7 +176,6 @@ impl<'a> RenderBundleEncoder<'a> { pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { DynContext::render_bundle_encoder_draw_indexed( &*self.parent.context, - &mut self.id, self.data.as_mut(), indices, base_vertex, @@ -204,9 +191,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) { DynContext::render_bundle_encoder_draw_indirect( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -226,9 +211,7 @@ impl<'a> RenderBundleEncoder<'a> { ) { DynContext::render_bundle_encoder_draw_indexed_indirect( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -268,7 +251,6 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { DynContext::render_bundle_encoder_set_push_constants( &*self.parent.context, - &mut self.id, self.data.as_mut(), stages, offset, diff --git a/wgpu/src/api/render_pass.rs b/wgpu/src/api/render_pass.rs index bdb8ebe37..8fddfd009 100644 --- a/wgpu/src/api/render_pass.rs +++ b/wgpu/src/api/render_pass.rs @@ -1,11 +1,10 @@ use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; #[derive(Debug)] pub(crate) struct RenderPassInner { - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) context: Arc, } @@ -13,8 +12,7 @@ pub(crate) struct RenderPassInner { impl Drop for RenderPassInner { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_pass_end(&mut self.id, 
self.data.as_mut()); + self.context.render_pass_end(self.data.as_mut()); } } } @@ -84,10 +82,8 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_set_bind_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), index, - &bind_group.id, bind_group.data.as_ref(), offsets, ) @@ -99,9 +95,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) { DynContext::render_pass_set_pipeline( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ) } @@ -114,7 +108,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_blend_constant(&mut self, color: Color) { DynContext::render_pass_set_blend_constant( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), color, ) @@ -127,9 +120,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) { DynContext::render_pass_set_index_buffer( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), index_format, buffer_slice.offset, @@ -150,10 +141,8 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) { DynContext::render_pass_set_vertex_buffer( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), slot, - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), buffer_slice.offset, buffer_slice.size, @@ -172,7 +161,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { DynContext::render_pass_set_scissor_rect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), x, y, @@ -190,7 +178,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) { DynContext::render_pass_set_viewport( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), x, y, @@ -208,7 +195,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_stencil_reference(&mut self, reference: u32) { DynContext::render_pass_set_stencil_reference( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), reference, ); @@ -218,7 +204,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn insert_debug_marker(&mut self, label: &str) { DynContext::render_pass_insert_debug_marker( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -228,7 +213,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn push_debug_group(&mut self, label: &str) { DynContext::render_pass_push_debug_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -236,11 +220,7 @@ impl<'encoder> RenderPass<'encoder> { /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - DynContext::render_pass_pop_debug_group( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); } /// Draws primitives from the active vertex buffer(s). 
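// A minimal sketch of what the `impl_partialeq_eq_hash!` invocations shown in this
// patch enable for calling code: resources such as `Sampler`, `Texture` and
// `TextureView` can now be compared and used as hash-map keys directly, rather than
// through an identifier getter. The names below (`same_sampler`, `ViewCache`) are
// hypothetical and only illustrate the trait bounds; they are not part of the patch.

use std::collections::HashMap;

/// Compares two samplers using the `PartialEq` impl on the resource handle itself.
fn same_sampler(a: &wgpu::Sampler, b: &wgpu::Sampler) -> bool {
    a == b
}

/// Caches per-view data, relying on `TextureView: Hash + Eq`.
struct ViewCache {
    views: HashMap<wgpu::TextureView, u32>,
}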
@@ -267,7 +247,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw(&mut self, vertices: Range, instances: Range) { DynContext::render_pass_draw( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), vertices, instances, @@ -301,7 +280,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { DynContext::render_pass_draw_indexed( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), indices, base_vertex, @@ -325,9 +303,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) { DynContext::render_pass_draw_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -354,9 +330,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_draw_indexed_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -371,13 +345,10 @@ impl<'encoder> RenderPass<'encoder> { &mut self, render_bundles: I, ) { - let mut render_bundles = render_bundles - .into_iter() - .map(|rb| (&rb.id, rb.data.as_ref())); + let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref()); DynContext::render_pass_execute_bundles( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), &mut render_bundles, ) @@ -404,9 +375,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, count, @@ -432,9 +401,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indexed_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, count, @@ -476,12 +443,9 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indirect_count( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, - &count_buffer.id, count_buffer.data.as_ref(), count_offset, max_count, @@ -523,12 +487,9 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indexed_indirect_count( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, - &count_buffer.id, count_buffer.data.as_ref(), count_offset, max_count, @@ -581,7 +542,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { DynContext::render_pass_set_push_constants( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), stages, offset, @@ -602,9 +562,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::render_pass_write_timestamp( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) @@ -617,7 +575,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn begin_occlusion_query(&mut self, query_index: u32) { DynContext::render_pass_begin_occlusion_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), query_index, ); @@ -626,11 +583,7 @@ impl<'encoder> 
RenderPass<'encoder> { /// End the occlusion query on this render pass. It can be started with /// `begin_occlusion_query`. Occlusion queries may not be nested. pub fn end_occlusion_query(&mut self) { - DynContext::render_pass_end_occlusion_query( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut()); } } @@ -641,9 +594,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::render_pass_begin_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ); @@ -654,7 +605,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn end_pipeline_statistics_query(&mut self) { DynContext::render_pass_end_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), ); } diff --git a/wgpu/src/api/render_pipeline.rs b/wgpu/src/api/render_pipeline.rs index 7e7412716..1893f7c7b 100644 --- a/wgpu/src/api/render_pipeline.rs +++ b/wgpu/src/api/render_pipeline.rs @@ -1,6 +1,5 @@ use std::{num::NonZeroU32, sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a rendering (graphics) pipeline. @@ -12,37 +11,29 @@ use crate::*; #[derive(Debug)] pub struct RenderPipeline { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderPipeline: Send, Sync); +super::impl_partialeq_eq_hash!(RenderPipeline); + impl Drop for RenderPipeline { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_pipeline_drop(&self.id, self.data.as_ref()); + self.context.render_pipeline_drop(self.data.as_ref()); } } } impl RenderPipeline { - /// Returns a globally-unique identifier for this `RenderPipeline`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Get an object representing the bind group layout at a given index. pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { let context = Arc::clone(&self.context); - let (id, data) = - self.context - .render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index); - BindGroupLayout { context, id, data } + let data = self + .context + .render_pipeline_get_bind_group_layout(self.data.as_ref(), index); + BindGroupLayout { context, data } } } diff --git a/wgpu/src/api/sampler.rs b/wgpu/src/api/sampler.rs index 63267ded5..d60bcccd2 100644 --- a/wgpu/src/api/sampler.rs +++ b/wgpu/src/api/sampler.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a sampler. @@ -15,26 +14,17 @@ use crate::*; #[derive(Debug)] pub struct Sampler { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Sampler: Send, Sync); -impl Sampler { - /// Returns a globally-unique identifier for this `Sampler`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
- pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(Sampler); impl Drop for Sampler { fn drop(&mut self) { if !thread::panicking() { - self.context.sampler_drop(&self.id, self.data.as_ref()); + self.context.sampler_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/shader_module.rs b/wgpu/src/api/shader_module.rs index d81562e93..20334a75a 100644 --- a/wgpu/src/api/shader_module.rs +++ b/wgpu/src/api/shader_module.rs @@ -1,6 +1,5 @@ use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a compiled shader module. @@ -14,34 +13,25 @@ use crate::*; #[derive(Debug)] pub struct ShaderModule { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ShaderModule: Send, Sync); +super::impl_partialeq_eq_hash!(ShaderModule); + impl Drop for ShaderModule { fn drop(&mut self) { if !thread::panicking() { - self.context - .shader_module_drop(&self.id, self.data.as_ref()); + self.context.shader_module_drop(self.data.as_ref()); } } } impl ShaderModule { - /// Returns a globally-unique identifier for this `ShaderModule`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Get the compilation info for the shader module. pub fn get_compilation_info(&self) -> impl Future + WasmNotSend { - self.context - .shader_get_compilation_info(&self.id, self.data.as_ref()) + self.context.shader_get_compilation_info(self.data.as_ref()) } } diff --git a/wgpu/src/api/surface.rs b/wgpu/src/api/surface.rs index de140a9dc..2b5252a21 100644 --- a/wgpu/src/api/surface.rs +++ b/wgpu/src/api/surface.rs @@ -3,7 +3,7 @@ use std::{error, fmt, sync::Arc, thread}; use parking_lot::Mutex; use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Describes a [`Surface`]. @@ -32,9 +32,6 @@ pub struct Surface<'window> { /// would become invalid when the window is dropped. pub(crate) _handle_source: Option>, - /// Wgpu-core surface id. - pub(crate) id: ObjectId, - /// Additional surface data returned by [`DynContext::instance_create_surface`]. pub(crate) surface_data: Box, @@ -48,23 +45,13 @@ pub struct Surface<'window> { } impl Surface<'_> { - /// Returns a globally-unique identifier for this `Surface`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id> { - Id::new(self.id) - } - /// Returns the capabilities of the surface when used with the given adapter. /// /// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter. 
pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities { DynContext::surface_get_capabilities( &*self.context, - &self.id, self.surface_data.as_ref(), - &adapter.id, adapter.data.as_ref(), ) } @@ -101,9 +88,7 @@ impl Surface<'_> { pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) { DynContext::surface_configure( &*self.context, - &self.id, self.surface_data.as_ref(), - &device.id, device.data.as_ref(), config, ); @@ -121,11 +106,8 @@ impl Surface<'_> { /// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated, /// recreating the swapchain will panic. pub fn get_current_texture(&self) -> Result { - let (texture_id, texture_data, status, detail) = DynContext::surface_get_current_texture( - &*self.context, - &self.id, - self.surface_data.as_ref(), - ); + let (texture_data, status, detail) = + DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref()); let suboptimal = match status { SurfaceStatus::Good => false, @@ -155,12 +137,10 @@ impl Surface<'_> { view_formats: &[], }; - texture_id - .zip(texture_data) - .map(|(id, data)| SurfaceTexture { + texture_data + .map(|data| SurfaceTexture { texture: Texture { context: Arc::clone(&self.context), - id, data, owned: false, descriptor, @@ -188,7 +168,7 @@ impl Surface<'_> { .downcast_ref::() .map(|ctx| unsafe { ctx.surface_as_hal::( - self.surface_data.downcast_ref().unwrap(), + crate::context::downcast_ref(&self.surface_data), hal_surface_callback, ) }) @@ -209,7 +189,6 @@ impl<'window> fmt::Debug for Surface<'window> { "None" }, ) - .field("id", &self.id) .field("data", &self.surface_data) .field("config", &self.config) .finish() @@ -222,8 +201,7 @@ static_assertions::assert_impl_all!(Surface<'_>: Send, Sync); impl Drop for Surface<'_> { fn drop(&mut self) { if !thread::panicking() { - self.context - .surface_drop(&self.id, self.surface_data.as_ref()) + self.context.surface_drop(self.surface_data.as_ref()) } } } diff --git a/wgpu/src/api/texture.rs b/wgpu/src/api/texture.rs index 98295b939..9f4f6ad4c 100644 --- a/wgpu/src/api/texture.rs +++ b/wgpu/src/api/texture.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Handle to a texture on the GPU. @@ -11,7 +11,6 @@ use crate::*; #[derive(Debug)] pub struct Texture { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) owned: bool, pub(crate) descriptor: TextureDescriptor<'static>, @@ -19,15 +18,9 @@ pub struct Texture { #[cfg(send_sync)] static_assertions::assert_impl_all!(Texture: Send, Sync); -impl Texture { - /// Returns a globally-unique identifier for this `Texture`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(Texture); +impl Texture { /// Returns the inner hal Texture using a callback. 
The hal texture will be `None` if the /// backend type argument does not match with this wgpu Texture /// @@ -39,14 +32,17 @@ impl Texture { &self, hal_texture_callback: F, ) -> R { - let texture = self.data.as_ref().downcast_ref().unwrap(); - if let Some(ctx) = self .context .as_any() .downcast_ref::() { - unsafe { ctx.texture_as_hal::(texture, hal_texture_callback) } + unsafe { + ctx.texture_as_hal::( + crate::context::downcast_ref(&self.data), + hal_texture_callback, + ) + } } else { hal_texture_callback(None) } @@ -54,18 +50,16 @@ impl Texture { /// Creates a view of this texture. pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView { - let (id, data) = - DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc); TextureView { context: Arc::clone(&self.context), - id, data, } } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref()); + DynContext::texture_destroy(&*self.context, self.data.as_ref()); } /// Make an `ImageCopyTexture` representing the whole texture. @@ -145,7 +139,7 @@ impl Texture { impl Drop for Texture { fn drop(&mut self) { if self.owned && !thread::panicking() { - self.context.texture_drop(&self.id, self.data.as_ref()); + self.context.texture_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/texture_view.rs b/wgpu/src/api/texture_view.rs index b6e60a3c6..bba82c745 100644 --- a/wgpu/src/api/texture_view.rs +++ b/wgpu/src/api/texture_view.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a texture view. @@ -12,21 +11,14 @@ use crate::*; #[derive(Debug)] pub struct TextureView { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(TextureView: Send, Sync); -impl TextureView { - /// Returns a globally-unique identifier for this `TextureView`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(TextureView); +impl TextureView { /// Returns the inner hal TextureView using a callback. 
The hal texture will be `None` if the /// backend type argument does not match with this wgpu Texture /// @@ -38,17 +30,16 @@ impl TextureView { &self, hal_texture_view_callback: F, ) -> R { - use wgc::id::TextureViewId; - - let texture_view_id = TextureViewId::from(self.id); - if let Some(ctx) = self .context .as_any() .downcast_ref::() { unsafe { - ctx.texture_view_as_hal::(texture_view_id, hal_texture_view_callback) + ctx.texture_view_as_hal::( + crate::context::downcast_ref(&self.data), + hal_texture_view_callback, + ) } } else { hal_texture_view_callback(None) @@ -59,7 +50,7 @@ impl TextureView { impl Drop for TextureView { fn drop(&mut self) { if !thread::panicking() { - self.context.texture_view_drop(&self.id, self.data.as_ref()); + self.context.texture_view_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs index 702f17083..5a9a3b988 100644 --- a/wgpu/src/backend/webgpu.rs +++ b/wgpu/src/backend/webgpu.rs @@ -10,30 +10,18 @@ use std::{ collections::HashMap, fmt, future::Future, - marker::PhantomData, - num::NonZeroU64, ops::Range, pin::Pin, rc::Rc, - sync::atomic::{AtomicU64, Ordering}, task::{self, Poll}, }; use wasm_bindgen::{prelude::*, JsCast}; use crate::{ - context::{downcast_ref, ObjectId, QueueWriteBuffer, Unused}, + context::{downcast_ref, QueueWriteBuffer}, CompilationInfo, SurfaceTargetUnsafe, UncapturedErrorHandler, }; -fn create_identified(value: T) -> (Identified, Sendable) { - static NEXT_ID: AtomicU64 = AtomicU64::new(1); - let id = NEXT_ID.fetch_add(1, Ordering::Relaxed); - ( - Identified(NonZeroU64::new(id).unwrap(), PhantomData), - Sendable(value), - ) -} - // We need to make a wrapper for some of the handle types returned by the web backend to make them // implement `Send` and `Sync` to match native. // @@ -42,23 +30,6 @@ fn create_identified(value: T) -> (Identified, Sendable) { // type is (for now) harmless. Eventually wasm32 will support threading, and depending on how this // is integrated (or not integrated) with values like those in webgpu, this may become unsound. -impl From for Identified { - fn from(object_id: ObjectId) -> Self { - Self(object_id.global_id(), PhantomData) - } -} - -impl From> for ObjectId { - fn from(identified: Identified) -> Self { - Self::new( - // TODO: the ID isn't used, so we hardcode it to 1 for now until we rework this - // API. 
- NonZeroU64::new(1).unwrap(), - identified.0, - ) - } -} - #[derive(Clone, Debug)] pub(crate) struct Sendable(T); #[cfg(send_sync)] @@ -66,13 +37,6 @@ unsafe impl Send for Sendable {} #[cfg(send_sync)] unsafe impl Sync for Sendable {} -#[derive(Clone, Debug)] -pub(crate) struct Identified(std::num::NonZeroU64, PhantomData); -#[cfg(send_sync)] -unsafe impl Send for Identified {} -#[cfg(send_sync)] -unsafe impl Sync for Identified {} - pub(crate) struct ContextWebGpu(webgpu_sys::Gpu); #[cfg(send_sync)] unsafe impl Send for ContextWebGpu {} @@ -888,14 +852,9 @@ fn map_js_sys_limits(limits: &wgt::Limits) -> js_sys::Object { type JsFutureResult = Result; -fn future_request_adapter( - result: JsFutureResult, -) -> Option<( - Identified, - Sendable, -)> { +fn future_request_adapter(result: JsFutureResult) -> Option> { match result.and_then(wasm_bindgen::JsCast::dyn_into) { - Ok(adapter) => Some(create_identified(adapter)), + Ok(adapter) => Some(Sendable(adapter)), Err(_) => None, } } @@ -904,19 +863,17 @@ fn future_request_device( result: JsFutureResult, ) -> Result< ( - Identified, Sendable, - Identified, Sendable, ), crate::RequestDeviceError, > { result .map(|js_value| { - let (device_id, device_data) = create_identified(webgpu_sys::GpuDevice::from(js_value)); - let (queue_id, queue_data) = create_identified(device_data.0.queue()); + let device_data = Sendable(webgpu_sys::GpuDevice::from(js_value)); + let queue_data = Sendable(device_data.0.queue()); - (device_id, device_data, queue_id, queue_data) + (device_data, queue_data) }) .map_err(|error_value| crate::RequestDeviceError { inner: crate::RequestDeviceErrorKind::WebGpu(error_value), @@ -1019,13 +976,7 @@ impl ContextWebGpu { &self, canvas: Canvas, context_result: Result, wasm_bindgen::JsValue>, - ) -> Result< - ( - ::SurfaceId, - ::SurfaceData, - ), - crate::CreateSurfaceError, - > { + ) -> Result<::SurfaceData, crate::CreateSurfaceError> { let context: js_sys::Object = match context_result { Ok(Some(context)) => context, Ok(None) => { @@ -1060,7 +1011,7 @@ impl ContextWebGpu { .dyn_into() .expect("canvas context is not a GPUCanvasContext"); - Ok(create_identified((canvas, context))) + Ok(Sendable((canvas, context))) } /// Get mapped buffer range directly as a `js_sys::ArrayBuffer`. 
@@ -1118,71 +1069,41 @@ pub fn get_browser_gpu_property() -> Option { } impl crate::context::Context for ContextWebGpu { - type AdapterId = Identified; type AdapterData = Sendable; - type DeviceId = Identified; type DeviceData = Sendable; - type QueueId = Identified; type QueueData = Sendable; - type ShaderModuleId = Identified; type ShaderModuleData = Sendable; - type BindGroupLayoutId = Identified; type BindGroupLayoutData = Sendable; - type BindGroupId = Identified; type BindGroupData = Sendable; - type TextureViewId = Identified; type TextureViewData = Sendable; - type SamplerId = Identified; type SamplerData = Sendable; - type BufferId = Identified; type BufferData = Sendable; - type TextureId = Identified; type TextureData = Sendable; - type QuerySetId = Identified; type QuerySetData = Sendable; - type PipelineLayoutId = Identified; type PipelineLayoutData = Sendable; - type RenderPipelineId = Identified; type RenderPipelineData = Sendable; - type ComputePipelineId = Identified; type ComputePipelineData = Sendable; - type CommandEncoderId = Identified; type CommandEncoderData = Sendable; - type ComputePassId = Identified; type ComputePassData = Sendable; - type RenderPassId = Identified; type RenderPassData = Sendable; - type CommandBufferId = Identified; type CommandBufferData = Sendable; - type RenderBundleEncoderId = Identified; type RenderBundleEncoderData = Sendable; - type RenderBundleId = Identified; type RenderBundleData = Sendable; - type SurfaceId = Identified<(Canvas, webgpu_sys::GpuCanvasContext)>; type SurfaceData = Sendable<(Canvas, webgpu_sys::GpuCanvasContext)>; type SurfaceOutputDetail = SurfaceOutputDetail; type SubmissionIndexData = (); - type PipelineCacheId = Unused; type PipelineCacheData = (); type RequestAdapterFuture = MakeSendFuture< wasm_bindgen_futures::JsFuture, - fn(JsFutureResult) -> Option<(Self::AdapterId, Self::AdapterData)>, + fn(JsFutureResult) -> Option, >; type RequestDeviceFuture = MakeSendFuture< wasm_bindgen_futures::JsFuture, fn( JsFutureResult, - ) -> Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - crate::RequestDeviceError, - >, + ) -> Result<(Self::DeviceData, Self::QueueData), crate::RequestDeviceError>, >; type PopErrorScopeFuture = MakeSendFuture Option>; @@ -1204,7 +1125,7 @@ impl crate::context::Context for ContextWebGpu { unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError> { + ) -> Result { match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle: _, @@ -1278,7 +1199,6 @@ impl crate::context::Context for ContextWebGpu { fn adapter_request_device( &self, - _adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -1332,44 +1252,29 @@ impl crate::context::Context for ContextWebGpu { fn adapter_is_surface_supported( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, - _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData, ) -> bool { true } - fn adapter_features( - &self, - _adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> wgt::Features { + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> wgt::Features { map_wgt_features(adapter_data.0.features()) } - fn adapter_limits( - &self, - _adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> wgt::Limits { + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> wgt::Limits { 
map_wgt_limits(adapter_data.0.limits()) } fn adapter_downlevel_capabilities( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::DownlevelCapabilities { // WebGPU is assumed to be fully compliant wgt::DownlevelCapabilities::default() } - fn adapter_get_info( - &self, - _adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> wgt::AdapterInfo { + fn adapter_get_info(&self, _adapter_data: &Self::AdapterData) -> wgt::AdapterInfo { // TODO: web-sys has no way of getting information on adapters wgt::AdapterInfo { name: String::new(), @@ -1384,16 +1289,14 @@ impl crate::context::Context for ContextWebGpu { fn adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, format: wgt::TextureFormat, ) -> wgt::TextureFormatFeatures { - format.guaranteed_format_features(self.adapter_features(adapter, adapter_data)) + format.guaranteed_format_features(self.adapter_features(adapter_data)) } fn adapter_get_presentation_timestamp( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp { wgt::PresentationTimestamp::INVALID_TIMESTAMP @@ -1401,9 +1304,7 @@ impl crate::context::Context for ContextWebGpu { fn surface_get_capabilities( &self, - _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities { let mut formats = vec![ @@ -1432,9 +1333,7 @@ impl crate::context::Context for ContextWebGpu { fn surface_configure( &self, - _surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - _device: &Self::DeviceId, device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ) { @@ -1478,21 +1377,14 @@ impl crate::context::Context for ContextWebGpu { fn surface_get_current_texture( &self, - _surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, ) -> ( - Option, Option, wgt::SurfaceStatus, Self::SurfaceOutputDetail, ) { - let (surface_id, surface_data) = create_identified(surface_data.0 .1.get_current_texture()); - ( - Some(surface_id), - Some(surface_data), - wgt::SurfaceStatus::Good, - (), - ) + let surface_data = Sendable(surface_data.0 .1.get_current_texture()); + (Some(surface_data), wgt::SurfaceStatus::Good, ()) } fn surface_present(&self, _detail: &Self::SurfaceOutputDetail) { @@ -1503,25 +1395,16 @@ impl crate::context::Context for ContextWebGpu { // Can't really discard this on the Web } - fn device_features( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> wgt::Features { + fn device_features(&self, device_data: &Self::DeviceData) -> wgt::Features { map_wgt_features(device_data.0.features()) } - fn device_limits( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> wgt::Limits { + fn device_limits(&self, device_data: &Self::DeviceData) -> wgt::Limits { map_wgt_limits(device_data.0.limits()) } fn device_downlevel_properties( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> wgt::DownlevelCapabilities { // WebGPU is assumed to be fully compliant @@ -1539,11 +1422,10 @@ impl crate::context::Context for ContextWebGpu { )] fn device_create_shader_module( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: crate::ShaderModuleDescriptor<'_>, _shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let shader_module_result = match desc.source { #[cfg(feature = "spirv")] 
crate::ShaderSource::SpirV(ref spv) => { @@ -1672,25 +1554,22 @@ impl crate::context::Context for ContextWebGpu { module: device_data.0.create_shader_module(&descriptor), compilation_info, }; - let (id, data) = create_identified(shader_module); - (id, data) + Sendable(shader_module) } unsafe fn device_create_shader_module_spirv( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, _desc: &crate::ShaderModuleDescriptorSpirV<'_>, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { unreachable!("SPIRV_SHADER_PASSTHROUGH is not enabled for this backend") } fn device_create_bind_group_layout( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { + ) -> Self::BindGroupLayoutData { let mapped_bindings = desc .entries .iter() @@ -1782,15 +1661,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_bind_group_layout(&mapped_desc)) + Sendable(device_data.0.create_bind_group_layout(&mapped_desc)) } fn device_create_bind_group( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData) { + ) -> Self::BindGroupData { let mapped_entries = desc .entries .iter() @@ -1842,15 +1720,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_bind_group(&mapped_desc)) + Sendable(device_data.0.create_bind_group(&mapped_desc)) } fn device_create_pipeline_layout( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData) { + ) -> Self::PipelineLayoutData { let temp_layouts = desc .bind_group_layouts .iter() @@ -1864,15 +1741,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_pipeline_layout(&mapped_desc)) + Sendable(device_data.0.create_pipeline_layout(&mapped_desc)) } fn device_create_render_pipeline( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData) { + ) -> Self::RenderPipelineData { let module: &::ShaderModuleData = downcast_ref(desc.vertex.module.data.as_ref()); let mut mapped_vertex_state = webgpu_sys::GpuVertexState::new(&module.0.module); @@ -1974,15 +1850,14 @@ impl crate::context::Context for ContextWebGpu { let mapped_primitive = map_primitive_state(&desc.primitive); mapped_desc.primitive(&mapped_primitive); - create_identified(device_data.0.create_render_pipeline(&mapped_desc)) + Sendable(device_data.0.create_render_pipeline(&mapped_desc)) } fn device_create_compute_pipeline( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData) { + ) -> Self::ComputePipelineData { let shader_module: &::ShaderModuleData = downcast_ref(desc.module.data.as_ref()); let mut mapped_compute_stage = @@ -2007,32 +1882,29 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.label(label); } - create_identified(device_data.0.create_compute_pipeline(&mapped_desc)) + 
Sendable(device_data.0.create_compute_pipeline(&mapped_desc)) } unsafe fn device_create_pipeline_cache( &self, - _: &Self::DeviceId, _: &Self::DeviceData, _: &crate::PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData) { - (Unused, ()) + ) -> Self::PipelineCacheData { } - fn pipeline_cache_drop(&self, _: &Self::PipelineCacheId, _: &Self::PipelineCacheData) {} + fn pipeline_cache_drop(&self, _: &Self::PipelineCacheData) {} fn device_create_buffer( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData) { + ) -> Self::BufferData { let mut mapped_desc = webgpu_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); mapped_desc.mapped_at_creation(desc.mapped_at_creation); if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(WebBuffer::new( + Sendable(WebBuffer::new( device_data.0.create_buffer(&mapped_desc), desc, )) @@ -2040,10 +1912,9 @@ impl crate::context::Context for ContextWebGpu { fn device_create_texture( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData) { + ) -> Self::TextureData { let mut mapped_desc = webgpu_sys::GpuTextureDescriptor::new( map_texture_format(desc.format), &map_extent_3d(desc.size), @@ -2061,15 +1932,14 @@ impl crate::context::Context for ContextWebGpu { .map(|format| JsValue::from(map_texture_format(*format))) .collect::(); mapped_desc.view_formats(&mapped_view_formats); - create_identified(device_data.0.create_texture(&mapped_desc)) + Sendable(device_data.0.create_texture(&mapped_desc)) } fn device_create_sampler( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData) { + ) -> Self::SamplerData { let mut mapped_desc = webgpu_sys::GpuSamplerDescriptor::new(); mapped_desc.address_mode_u(map_address_mode(desc.address_mode_u)); mapped_desc.address_mode_v(map_address_mode(desc.address_mode_v)); @@ -2087,15 +1957,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_sampler_with_descriptor(&mapped_desc)) + Sendable(device_data.0.create_sampler_with_descriptor(&mapped_desc)) } fn device_create_query_set( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &wgt::QuerySetDescriptor>, - ) -> (Self::QuerySetId, Self::QuerySetData) { + ) -> Self::QuerySetData { let ty = match desc.ty { wgt::QueryType::Occlusion => webgpu_sys::GpuQueryType::Occlusion, wgt::QueryType::Timestamp => webgpu_sys::GpuQueryType::Timestamp, @@ -2105,20 +1974,19 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_query_set(&mapped_desc)) + Sendable(device_data.0.create_query_set(&mapped_desc)) } fn device_create_command_encoder( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData) { + ) -> Self::CommandEncoderData { let mut mapped_desc = webgpu_sys::GpuCommandEncoderDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified( + Sendable( device_data .0 .create_command_encoder_with_descriptor(&mapped_desc), @@ -2127,10 +1995,9 @@ impl crate::context::Context for ContextWebGpu { fn 
device_create_render_bundle_encoder( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::RenderBundleEncoderDescriptor<'_>, - ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData) { + ) -> Self::RenderBundleEncoderData { let mapped_color_formats = desc .color_formats .iter() @@ -2150,40 +2017,34 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.stencil_read_only(ds.stencil_read_only); } mapped_desc.sample_count(desc.sample_count); - create_identified(device_data.0.create_render_bundle_encoder(&mapped_desc)) + Sendable(device_data.0.create_render_bundle_encoder(&mapped_desc)) } #[doc(hidden)] - fn device_make_invalid(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { + fn device_make_invalid(&self, _device_data: &Self::DeviceData) { // Unimplemented } - fn device_drop(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { + fn device_drop(&self, _device_data: &Self::DeviceData) { // Device is dropped automatically } - fn device_destroy(&self, _buffer: &Self::DeviceId, device_data: &Self::DeviceData) { + fn device_destroy(&self, device_data: &Self::DeviceData) { device_data.0.destroy(); } - fn device_mark_lost( - &self, - _device: &Self::DeviceId, - _device_data: &Self::DeviceData, - _message: &str, - ) { + fn device_mark_lost(&self, _device_data: &Self::DeviceData, _message: &str) { // TODO: figure out the GPUDevice implementation of this, including resolving // the device.lost promise, which will require a different invocation pattern // with a callback. } - fn queue_drop(&self, _queue: &Self::QueueId, _queue_data: &Self::QueueData) { + fn queue_drop(&self, _queue_data: &Self::QueueData) { // Queue is dropped automatically } fn device_set_device_lost_callback( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, device_lost_callback: crate::context::DeviceLostCallback, ) { @@ -2205,7 +2066,6 @@ impl crate::context::Context for ContextWebGpu { fn device_poll( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, _maintain: crate::Maintain, ) -> crate::MaintainResult { @@ -2215,7 +2075,6 @@ impl crate::context::Context for ContextWebGpu { fn device_on_uncaptured_error( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ) { @@ -2230,12 +2089,7 @@ impl crate::context::Context for ContextWebGpu { f.forget(); } - fn device_push_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: crate::ErrorFilter, - ) { + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { device_data.0.push_error_scope(match filter { crate::ErrorFilter::OutOfMemory => webgpu_sys::GpuErrorFilter::OutOfMemory, crate::ErrorFilter::Validation => webgpu_sys::GpuErrorFilter::Validation, @@ -2243,11 +2097,7 @@ impl crate::context::Context for ContextWebGpu { }); } - fn device_pop_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture { + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture { let error_promise = device_data.0.pop_error_scope(); MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(error_promise), @@ -2257,7 +2107,6 @@ impl crate::context::Context for ContextWebGpu { fn buffer_map_async( &self, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: crate::MapMode, range: Range, @@ -2276,7 +2125,6 @@ impl crate::context::Context for ContextWebGpu { fn buffer_get_mapped_range( 
&self, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, sub_range: Range, ) -> Box { @@ -2288,14 +2136,13 @@ impl crate::context::Context for ContextWebGpu { }) } - fn buffer_unmap(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + fn buffer_unmap(&self, buffer_data: &Self::BufferData) { buffer_data.0.buffer.unmap(); buffer_data.0.mapping.borrow_mut().mapped_buffer = None; } fn shader_get_compilation_info( &self, - _shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture { let compilation_info_promise = shader_data.0.module.get_compilation_info(); @@ -2311,10 +2158,9 @@ impl crate::context::Context for ContextWebGpu { fn texture_create_view( &self, - _texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &crate::TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData) { + ) -> Self::TextureViewData { let mut mapped = webgpu_sys::GpuTextureViewDescriptor::new(); if let Some(dim) = desc.dimension { mapped.dimension(map_texture_view_dimension(dim)); @@ -2334,147 +2180,102 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped.label(label); } - create_identified(texture_data.0.create_view_with_descriptor(&mapped)) + Sendable(texture_data.0.create_view_with_descriptor(&mapped)) } - fn surface_drop(&self, _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData) { + fn surface_drop(&self, _surface_data: &Self::SurfaceData) { // Dropped automatically } - fn adapter_drop(&self, _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData) { + fn adapter_drop(&self, _adapter_data: &Self::AdapterData) { // Dropped automatically } - fn buffer_destroy(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + fn buffer_destroy(&self, buffer_data: &Self::BufferData) { buffer_data.0.buffer.destroy(); } - fn buffer_drop(&self, _buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { + fn buffer_drop(&self, _buffer_data: &Self::BufferData) { // Dropped automatically } - fn texture_destroy(&self, _texture: &Self::TextureId, texture_data: &Self::TextureData) { + fn texture_destroy(&self, texture_data: &Self::TextureData) { texture_data.0.destroy(); } - fn texture_drop(&self, _texture: &Self::TextureId, _texture_data: &Self::TextureData) { + fn texture_drop(&self, _texture_data: &Self::TextureData) { // Dropped automatically } - fn texture_view_drop( - &self, - _texture_view: &Self::TextureViewId, - _texture_view_data: &Self::TextureViewData, - ) { + fn texture_view_drop(&self, _texture_view_data: &Self::TextureViewData) { // Dropped automatically } - fn sampler_drop(&self, _sampler: &Self::SamplerId, _sampler_data: &Self::SamplerData) { + fn sampler_drop(&self, _sampler_data: &Self::SamplerData) { // Dropped automatically } - fn query_set_drop(&self, _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData) { + fn query_set_drop(&self, _query_set_data: &Self::QuerySetData) { // Dropped automatically } - fn bind_group_drop( - &self, - _bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, - ) { + fn bind_group_drop(&self, _bind_group_data: &Self::BindGroupData) { // Dropped automatically } - fn bind_group_layout_drop( - &self, - _bind_group_layout: &Self::BindGroupLayoutId, - _bind_group_layout_data: &Self::BindGroupLayoutData, - ) { + fn bind_group_layout_drop(&self, _bind_group_layout_data: &Self::BindGroupLayoutData) { // Dropped automatically } - fn pipeline_layout_drop( - &self, - _pipeline_layout: 
&Self::PipelineLayoutId, - _pipeline_layout_data: &Self::PipelineLayoutData, - ) { + fn pipeline_layout_drop(&self, _pipeline_layout_data: &Self::PipelineLayoutData) { // Dropped automatically } - fn shader_module_drop( - &self, - _shader_module: &Self::ShaderModuleId, - _shader_module_data: &Self::ShaderModuleData, - ) { + fn shader_module_drop(&self, _shader_module_data: &Self::ShaderModuleData) { // Dropped automatically } - fn command_encoder_drop( - &self, - _command_encoder: &Self::CommandEncoderId, - _command_encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_drop(&self, _command_encoder_data: &Self::CommandEncoderData) { // Dropped automatically } - fn command_buffer_drop( - &self, - _command_buffer: &Self::CommandBufferId, - _command_buffer_data: &Self::CommandBufferData, - ) { + fn command_buffer_drop(&self, _command_buffer_data: &Self::CommandBufferData) { // Dropped automatically } - fn render_bundle_drop( - &self, - _render_bundle: &Self::RenderBundleId, - _render_bundle_data: &Self::RenderBundleData, - ) { + fn render_bundle_drop(&self, _render_bundle_data: &Self::RenderBundleData) { // Dropped automatically } - fn compute_pipeline_drop( - &self, - _pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, - ) { + fn compute_pipeline_drop(&self, _pipeline_data: &Self::ComputePipelineData) { // Dropped automatically } - fn render_pipeline_drop( - &self, - _pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, - ) { + fn render_pipeline_drop(&self, _pipeline_data: &Self::RenderPipelineData) { // Dropped automatically } fn compute_pipeline_get_bind_group_layout( &self, - _pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - create_identified(pipeline_data.0.get_bind_group_layout(index)) + ) -> Self::BindGroupLayoutData { + Sendable(pipeline_data.0.get_bind_group_layout(index)) } fn render_pipeline_get_bind_group_layout( &self, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - create_identified(pipeline_data.0.get_bind_group_layout(index)) + ) -> Self::BindGroupLayoutData { + Sendable(pipeline_data.0.get_bind_group_layout(index)) } fn command_encoder_copy_buffer_to_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - _source: &Self::BufferId, source_data: &Self::BufferData, source_offset: wgt::BufferAddress, - _destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, copy_size: wgt::BufferAddress, @@ -2492,7 +2293,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_buffer_to_texture( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyBuffer<'_>, destination: crate::ImageCopyTexture<'_>, @@ -2509,7 +2309,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_texture_to_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyBuffer<'_>, @@ -2526,7 +2325,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_texture_to_texture( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyTexture<'_>, @@ 
-2543,10 +2341,9 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_begin_compute_pass( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::ComputePassDescriptor<'_>, - ) -> (Self::ComputePassId, Self::ComputePassData) { + ) -> Self::ComputePassData { let mut mapped_desc = webgpu_sys::GpuComputePassDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); @@ -2565,7 +2362,7 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.timestamp_writes(&writes); } - create_identified( + Sendable( encoder_data .0 .begin_compute_pass_with_descriptor(&mapped_desc), @@ -2574,10 +2371,9 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_begin_render_pass( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> (Self::RenderPassId, Self::RenderPassData) { + ) -> Self::RenderPassData { let mapped_color_attachments = desc .color_attachments .iter() @@ -2667,16 +2463,15 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.timestamp_writes(&writes); } - create_identified(encoder_data.0.begin_render_pass(&mapped_desc)) + Sendable(encoder_data.0.begin_render_pass(&mapped_desc)) } fn command_encoder_finish( &self, - _encoder: Self::CommandEncoderId, encoder_data: &mut Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData) { + ) -> Self::CommandBufferData { let label = encoder_data.0.label(); - create_identified(if label.is_empty() { + Sendable(if label.is_empty() { encoder_data.0.finish() } else { let mut mapped_desc = webgpu_sys::GpuCommandBufferDescriptor::new(); @@ -2687,9 +2482,8 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_clear_texture( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, - _texture: &crate::Texture, + _texture_data: &Self::TextureData, _subresource_range: &wgt::ImageSubresourceRange, ) { //TODO @@ -2697,29 +2491,25 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_clear_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &crate::Buffer, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - let buffer: &::BufferData = - downcast_ref(buffer.data.as_ref()); match size { Some(size) => encoder_data.0.clear_buffer_with_f64_and_f64( - &buffer.0.buffer, + &buffer_data.0.buffer, offset as f64, size as f64, ), None => encoder_data .0 - .clear_buffer_with_f64(&buffer.0.buffer, offset as f64), + .clear_buffer_with_f64(&buffer_data.0.buffer, offset as f64), } } fn command_encoder_insert_debug_marker( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, _label: &str, ) { @@ -2729,7 +2519,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_push_debug_group( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, _label: &str, ) { @@ -2737,20 +2526,14 @@ impl crate::context::Context for ContextWebGpu { // encoder.push_debug_group(label); } - fn command_encoder_pop_debug_group( - &self, - _encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_pop_debug_group(&self, _encoder_data: &Self::CommandEncoderData) { // Not available in gecko yet // encoder.pop_debug_group(); } fn command_encoder_write_timestamp( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, 
- _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -2761,13 +2544,10 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_resolve_query_set( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - _query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - _destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, ) { @@ -2782,11 +2562,10 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_finish( &self, - _encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &crate::RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData) { - create_identified(match desc.label { + ) -> Self::RenderBundleData { + Sendable(match desc.label { Some(label) => { let mut mapped_desc = webgpu_sys::GpuRenderBundleDescriptor::new(); mapped_desc.label(label); @@ -2798,9 +2577,7 @@ impl crate::context::Context for ContextWebGpu { fn queue_write_buffer( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, data: &[u8], @@ -2827,9 +2604,7 @@ impl crate::context::Context for ContextWebGpu { fn queue_validate_write_buffer( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, @@ -2864,7 +2639,6 @@ impl crate::context::Context for ContextWebGpu { fn queue_create_staging_buffer( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, size: wgt::BufferSize, ) -> Option> { @@ -2875,9 +2649,7 @@ impl crate::context::Context for ContextWebGpu { fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, staging_buffer: &dyn QueueWriteBuffer, @@ -2887,19 +2659,11 @@ impl crate::context::Context for ContextWebGpu { .downcast_ref::() .unwrap() .slice(); - self.queue_write_buffer( - queue, - queue_data, - buffer, - buffer_data, - offset, - staging_buffer, - ) + self.queue_write_buffer(queue_data, buffer_data, offset, staging_buffer) } fn queue_write_texture( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, texture: crate::ImageCopyTexture<'_>, data: &[u8], @@ -2935,7 +2699,6 @@ impl crate::context::Context for ContextWebGpu { fn queue_copy_external_image_to_texture( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, @@ -2950,43 +2713,36 @@ impl crate::context::Context for ContextWebGpu { ); } - fn queue_submit>( + fn queue_submit>( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData { let temp_command_buffers = command_buffers - .map(|(_, data)| data.0) + .map(|data| data.0) .collect::(); queue_data.0.submit(&temp_command_buffers); } - fn queue_get_timestamp_period( - &self, - _queue: &Self::QueueId, - _queue_data: &Self::QueueData, - ) -> f32 { + fn queue_get_timestamp_period(&self, _queue_data: &Self::QueueData) -> f32 { // Timestamp values are always in nanoseconds, see https://gpuweb.github.io/gpuweb/#timestamp 1.0 } fn queue_on_submitted_work_done( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, _callback: 
crate::context::SubmittedWorkDoneCallback, ) { unimplemented!() } - fn device_start_capture(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) {} - fn device_stop_capture(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) {} + fn device_start_capture(&self, _device_data: &Self::DeviceData) {} + fn device_stop_capture(&self, _device_data: &Self::DeviceData) {} fn device_get_internal_counters( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> wgt::InternalCounters { Default::default() @@ -2994,25 +2750,18 @@ impl crate::context::Context for ContextWebGpu { fn device_generate_allocator_report( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> Option { None } - fn pipeline_cache_get_data( - &self, - _: &Self::PipelineCacheId, - _: &Self::PipelineCacheData, - ) -> Option> { + fn pipeline_cache_get_data(&self, _: &Self::PipelineCacheData) -> Option> { None } fn compute_pass_set_pipeline( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - _pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, ) { pass_data.0.set_pipeline(&pipeline_data.0) @@ -3020,10 +2769,8 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_set_bind_group( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - _bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { @@ -3044,7 +2791,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_set_push_constants( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _offset: u32, _data: &[u8], @@ -3054,7 +2800,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_insert_debug_marker( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _label: &str, ) { @@ -3064,7 +2809,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_push_debug_group( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _group_label: &str, ) { @@ -3072,20 +2816,14 @@ impl crate::context::Context for ContextWebGpu { // self.0.push_debug_group(group_label); } - fn compute_pass_pop_debug_group( - &self, - _pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_pop_debug_group(&self, _pass_data: &mut Self::ComputePassData) { // Not available in gecko yet // self.0.pop_debug_group(); } fn compute_pass_write_timestamp( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -3094,26 +2832,19 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { // Not available in gecko yet } - fn compute_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::ComputePassData) { // Not available in gecko yet } fn compute_pass_dispatch_workgroups( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, x: u32, y: u32, @@ -3126,9 +2857,7 @@ impl crate::context::Context for ContextWebGpu { fn 
compute_pass_dispatch_workgroups_indirect( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3138,19 +2867,13 @@ impl crate::context::Context for ContextWebGpu { ); } - fn compute_pass_end( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { pass_data.0.end(); } fn render_bundle_encoder_set_pipeline( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ) { encoder_data.0.set_pipeline(&pipeline_data.0); @@ -3158,10 +2881,8 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_bind_group( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - _bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { @@ -3184,9 +2905,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_index_buffer( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, @@ -3213,10 +2932,8 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_vertex_buffer( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, @@ -3242,7 +2959,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_push_constants( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, _stages: wgt::ShaderStages, _offset: u32, @@ -3253,7 +2969,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, @@ -3270,7 +2985,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indexed( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -3289,9 +3003,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3302,9 +3014,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indexed_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3315,9 +3025,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_multi_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: 
wgt::BufferAddress, _count: u32, @@ -3327,9 +3035,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_multi_draw_indexed_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -3339,12 +3045,9 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_multi_draw_indirect_count( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -3356,12 +3059,9 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_multi_draw_indexed_indirect_count( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -3371,9 +3071,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_pipeline( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ) { pass_data.0.set_pipeline(&pipeline_data.0); @@ -3381,10 +3079,8 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_bind_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - _bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { @@ -3405,9 +3101,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_index_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, @@ -3434,10 +3128,8 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_vertex_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, @@ -3463,7 +3155,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_push_constants( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _stages: wgt::ShaderStages, _offset: u32, @@ -3474,7 +3165,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, @@ -3491,7 +3181,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw_indexed( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, @@ -3510,9 +3199,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3523,9 +3210,7 @@ impl 
crate::context::Context for ContextWebGpu { fn render_pass_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3536,9 +3221,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indirect( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -3548,9 +3231,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -3560,12 +3241,9 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indirect_count( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -3577,12 +3255,9 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indexed_indirect_count( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -3592,7 +3267,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_blend_constant( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, color: wgt::Color, ) { @@ -3603,7 +3277,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_scissor_rect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -3615,7 +3288,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_viewport( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -3631,26 +3303,19 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_stencil_reference( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ) { pass_data.0.set_stencil_reference(reference); } - fn render_pass_insert_debug_marker( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - _label: &str, - ) { + fn render_pass_insert_debug_marker(&self, _pass_data: &mut Self::RenderPassData, _label: &str) { // Not available in gecko yet // self.0.insert_debug_marker(label); } fn render_pass_push_debug_group( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _group_label: &str, ) { @@ -3658,20 +3323,14 @@ impl crate::context::Context for ContextWebGpu { // self.0.push_debug_group(group_label); } - fn render_pass_pop_debug_group( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_pop_debug_group(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet // self.0.pop_debug_group(); } fn render_pass_write_timestamp( &self, - 
_pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -3680,57 +3339,41 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_begin_occlusion_query( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _query_index: u32, ) { // Not available in gecko yet } - fn render_pass_end_occlusion_query( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_occlusion_query(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet } fn render_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { // Not available in gecko yet } - fn render_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet } fn render_pass_execute_bundles( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ) { let mapped = render_bundles - .map(|(_, bundle_data)| &bundle_data.0) + .map(|bundle_data| &bundle_data.0) .collect::(); pass_data.0.execute_bundles(&mapped); } - fn render_pass_end( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end(&self, pass_data: &mut Self::RenderPassData) { pass_data.0.end(); } } diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs index 24eb086c1..8ed6acdd8 100644 --- a/wgpu/src/backend/wgpu_core.rs +++ b/wgpu/src/backend/wgpu_core.rs @@ -1,8 +1,7 @@ use crate::{ - context::{ObjectId, Unused}, - AdapterInfo, BindGroupDescriptor, BindGroupLayoutDescriptor, BindingResource, BufferBinding, - BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, CompilationMessage, - CompilationMessageType, ComputePassDescriptor, ComputePipelineDescriptor, + context::downcast_ref, AdapterInfo, BindGroupDescriptor, BindGroupLayoutDescriptor, + BindingResource, BufferBinding, BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, + CompilationMessage, CompilationMessageType, ComputePassDescriptor, ComputePipelineDescriptor, DownlevelCapabilities, ErrorSource, Features, Label, Limits, LoadOp, MapMode, Operations, PipelineCacheDescriptor, PipelineLayoutDescriptor, RenderBundleEncoderDescriptor, RenderPipelineDescriptor, SamplerDescriptor, ShaderModuleDescriptor, @@ -15,7 +14,7 @@ use parking_lot::Mutex; use smallvec::SmallVec; use std::{ any::Any, - borrow::Cow::{Borrowed, Owned}, + borrow::Cow::Borrowed, error::Error, fmt, future::{ready, Ready}, @@ -25,10 +24,7 @@ use std::{ sync::Arc, }; use wgc::error::ContextErrorSource; -use wgc::{ - command::bundle_ffi::*, device::DeviceLostClosure, id::CommandEncoderId, id::TextureViewId, - pipeline::CreateShaderModuleError, -}; +use wgc::{command::bundle_ffi::*, device::DeviceLostClosure, pipeline::CreateShaderModuleError}; use wgt::WasmNotSendSync; pub struct ContextWgpuCore(wgc::global::Global); @@ -82,21 +78,24 @@ impl ContextWgpuCore { R, >( &self, - adapter: wgc::id::AdapterId, + adapter: &wgc::id::AdapterId, hal_adapter_callback: F, ) -> R { unsafe { self.0 - .adapter_as_hal::(adapter, hal_adapter_callback) + 
.adapter_as_hal::(*adapter, hal_adapter_callback) } } pub unsafe fn buffer_as_hal) -> R, R>( &self, - id: wgc::id::BufferId, + buffer: &Buffer, hal_buffer_callback: F, ) -> R { - unsafe { self.0.buffer_as_hal::(id, hal_buffer_callback) } + unsafe { + self.0 + .buffer_as_hal::(buffer.id, hal_buffer_callback) + } } pub unsafe fn create_device_from_hal( @@ -165,7 +164,7 @@ impl ContextWgpuCore { hal_buffer: A::Buffer, device: &Device, desc: &BufferDescriptor<'_>, - ) -> (wgc::id::BufferId, Buffer) { + ) -> Buffer { let (id, error) = unsafe { self.0.create_buffer_from_hal::( hal_buffer, @@ -182,12 +181,10 @@ impl ContextWgpuCore { "Device::create_buffer_from_hal", ); } - ( + Buffer { id, - Buffer { - error_sink: Arc::clone(&device.error_sink), - }, - ) + error_sink: Arc::clone(&device.error_sink), + } } pub unsafe fn device_as_hal) -> R, R>( @@ -237,12 +234,12 @@ impl ContextWgpuCore { R, >( &self, - texture_view_id: TextureViewId, + texture_view_data: &wgc::id::TextureViewId, hal_texture_view_callback: F, ) -> R { unsafe { self.0 - .texture_view_as_hal::(texture_view_id, hal_texture_view_callback) + .texture_view_as_hal::(*texture_view_data, hal_texture_view_callback) } } @@ -253,12 +250,12 @@ impl ContextWgpuCore { R, >( &self, - command_encoder_id: CommandEncoderId, + command_encoder: &CommandEncoder, hal_command_encoder_callback: F, ) -> R { unsafe { self.0.command_encoder_as_hal_mut::( - command_encoder_id, + command_encoder.id, hal_command_encoder_callback, ) } @@ -372,14 +369,14 @@ impl ContextWgpuCore { fn map_buffer_copy_view(view: crate::ImageCopyBuffer<'_>) -> wgc::command::ImageCopyBuffer { wgc::command::ImageCopyBuffer { - buffer: view.buffer.id.into(), + buffer: downcast_buffer(view.buffer).id, layout: view.layout, } } fn map_texture_copy_view(view: crate::ImageCopyTexture<'_>) -> wgc::command::ImageCopyTexture { wgc::command::ImageCopyTexture { - texture: view.texture.id.into(), + texture: downcast_texture(view.texture).id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -394,7 +391,7 @@ fn map_texture_tagged_copy_view( view: crate::ImageCopyTextureTagged<'_>, ) -> wgc::command::ImageCopyTextureTagged { wgc::command::ImageCopyTextureTagged { - texture: view.texture.id.into(), + texture: downcast_texture(view.texture).id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -449,14 +446,6 @@ pub struct Surface { configured_device: Mutex>, } -impl Surface { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::SurfaceId { - self.id - } -} - #[derive(Debug)] pub struct Device { id: wgc::id::DeviceId, @@ -464,21 +453,15 @@ pub struct Device { features: Features, } -impl Device { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::DeviceId { - self.id - } -} - #[derive(Debug)] pub struct Buffer { + id: wgc::id::BufferId, error_sink: ErrorSink, } #[derive(Debug)] pub struct ShaderModule { + id: wgc::id::ShaderModuleId, compilation_info: CompilationInfo, } @@ -488,28 +471,12 @@ pub struct Texture { error_sink: ErrorSink, } -impl Texture { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::TextureId { - self.id - } -} - #[derive(Debug)] pub struct Queue { id: wgc::id::QueueId, error_sink: ErrorSink, } -impl Queue { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::QueueId { - self.id - } -} - #[derive(Debug)] pub struct ComputePass { pass: wgc::command::ComputePass, @@ -524,73 +491,43 @@ pub struct RenderPass { 
#[derive(Debug)] pub struct CommandEncoder { + id: wgc::id::CommandEncoderId, error_sink: ErrorSink, open: bool, } impl crate::Context for ContextWgpuCore { - type AdapterId = wgc::id::AdapterId; - type AdapterData = (); - type DeviceId = wgc::id::DeviceId; + type AdapterData = wgc::id::AdapterId; type DeviceData = Device; - type QueueId = wgc::id::QueueId; type QueueData = Queue; - type ShaderModuleId = wgc::id::ShaderModuleId; type ShaderModuleData = ShaderModule; - type BindGroupLayoutId = wgc::id::BindGroupLayoutId; - type BindGroupLayoutData = (); - type BindGroupId = wgc::id::BindGroupId; - type BindGroupData = (); - type TextureViewId = wgc::id::TextureViewId; - type TextureViewData = (); - type SamplerId = wgc::id::SamplerId; - type SamplerData = (); - type BufferId = wgc::id::BufferId; + type BindGroupLayoutData = wgc::id::BindGroupLayoutId; + type BindGroupData = wgc::id::BindGroupId; + type TextureViewData = wgc::id::TextureViewId; + type SamplerData = wgc::id::SamplerId; type BufferData = Buffer; - type TextureId = wgc::id::TextureId; type TextureData = Texture; - type QuerySetId = wgc::id::QuerySetId; - type QuerySetData = (); - type PipelineLayoutId = wgc::id::PipelineLayoutId; - type PipelineLayoutData = (); - type RenderPipelineId = wgc::id::RenderPipelineId; - type RenderPipelineData = (); - type ComputePipelineId = wgc::id::ComputePipelineId; - type ComputePipelineData = (); - type PipelineCacheId = wgc::id::PipelineCacheId; - type PipelineCacheData = (); - type CommandEncoderId = wgc::id::CommandEncoderId; + type QuerySetData = wgc::id::QuerySetId; + type PipelineLayoutData = wgc::id::PipelineLayoutId; + type RenderPipelineData = wgc::id::RenderPipelineId; + type ComputePipelineData = wgc::id::ComputePipelineId; + type PipelineCacheData = wgc::id::PipelineCacheId; type CommandEncoderData = CommandEncoder; - type ComputePassId = Unused; type ComputePassData = ComputePass; - type RenderPassId = Unused; type RenderPassData = RenderPass; - type CommandBufferId = wgc::id::CommandBufferId; - type CommandBufferData = (); - type RenderBundleEncoderId = Unused; + type CommandBufferData = wgc::id::CommandBufferId; type RenderBundleEncoderData = wgc::command::RenderBundleEncoder; - type RenderBundleId = wgc::id::RenderBundleId; - type RenderBundleData = (); + type RenderBundleData = wgc::id::RenderBundleId; - type SurfaceId = wgc::id::SurfaceId; type SurfaceData = Surface; type SurfaceOutputDetail = SurfaceOutputDetail; type SubmissionIndexData = wgc::SubmissionIndex; - type RequestAdapterFuture = Ready>; + type RequestAdapterFuture = Ready>; #[allow(clippy::type_complexity)] - type RequestDeviceFuture = Ready< - Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - crate::RequestDeviceError, - >, - >; + type RequestDeviceFuture = + Ready>; type PopErrorScopeFuture = Ready>; type CompilationInfoFuture = Ready; @@ -602,7 +539,7 @@ impl crate::Context for ContextWgpuCore { unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError> { + ) -> Result { let id = match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle, @@ -635,13 +572,10 @@ impl crate::Context for ContextWgpuCore { }, }?; - Ok(( + Ok(Surface { id, - Surface { - id, - configured_device: Mutex::default(), - }, - )) + configured_device: Mutex::default(), + }) } fn instance_request_adapter( @@ -652,17 +586,20 @@ impl crate::Context for ContextWgpuCore { 
&wgc::instance::RequestAdapterOptions { power_preference: options.power_preference, force_fallback_adapter: options.force_fallback_adapter, - compatible_surface: options.compatible_surface.map(|surface| surface.id.into()), + compatible_surface: options.compatible_surface.map(|surface| { + let surface: &::SurfaceData = + downcast_ref(surface.surface_data.as_ref()); + surface.id + }), }, wgc::instance::AdapterInputs::Mask(wgt::Backends::all(), |_| None), ); - ready(id.ok().map(|id| (id, ()))) + ready(id.ok()) } fn adapter_request_device( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, ) -> Self::RequestDeviceFuture { @@ -670,7 +607,7 @@ impl crate::Context for ContextWgpuCore { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } let (device_id, queue_id, error) = self.0.adapter_request_device( - *adapter, + *adapter_data, &desc.map_label(|l| l.map(Borrowed)), None, None, @@ -689,7 +626,7 @@ impl crate::Context for ContextWgpuCore { id: queue_id, error_sink, }; - ready(Ok((device_id, device, queue_id, queue))) + ready(Ok((device, queue))) } fn instance_poll_all_devices(&self, force_wait: bool) -> bool { @@ -701,34 +638,27 @@ impl crate::Context for ContextWgpuCore { fn adapter_is_surface_supported( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, + adapter_data: &Self::AdapterData, + surface_data: &Self::SurfaceData, ) -> bool { - match self.0.adapter_is_surface_supported(*adapter, *surface) { + match self + .0 + .adapter_is_surface_supported(*adapter_data, surface_data.id) + { Ok(result) => result, Err(err) => self.handle_error_fatal(err, "Adapter::is_surface_supported"), } } - fn adapter_features( - &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> Features { - match self.0.adapter_features(*adapter) { + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features { + match self.0.adapter_features(*adapter_data) { Ok(features) => features, Err(err) => self.handle_error_fatal(err, "Adapter::features"), } } - fn adapter_limits( - &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> Limits { - match self.0.adapter_limits(*adapter) { + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits { + match self.0.adapter_limits(*adapter_data) { Ok(limits) => limits, Err(err) => self.handle_error_fatal(err, "Adapter::limits"), } @@ -736,21 +666,16 @@ impl crate::Context for ContextWgpuCore { fn adapter_downlevel_capabilities( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, ) -> DownlevelCapabilities { - match self.0.adapter_downlevel_capabilities(*adapter) { + match self.0.adapter_downlevel_capabilities(*adapter_data) { Ok(downlevel) => downlevel, Err(err) => self.handle_error_fatal(err, "Adapter::downlevel_properties"), } } - fn adapter_get_info( - &self, - adapter: &wgc::id::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> AdapterInfo { - match self.0.adapter_get_info(*adapter) { + fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo { + match self.0.adapter_get_info(*adapter_data) { Ok(info) => info, Err(err) => self.handle_error_fatal(err, "Adapter::get_info"), } @@ -758,11 +683,13 @@ impl crate::Context for ContextWgpuCore { fn 
adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, format: wgt::TextureFormat, ) -> wgt::TextureFormatFeatures { - match self.0.adapter_get_texture_format_features(*adapter, format) { + match self + .0 + .adapter_get_texture_format_features(*adapter_data, format) + { Ok(info) => info, Err(err) => self.handle_error_fatal(err, "Adapter::get_texture_format_features"), } @@ -770,10 +697,9 @@ impl crate::Context for ContextWgpuCore { fn adapter_get_presentation_timestamp( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp { - match self.0.adapter_get_presentation_timestamp(*adapter) { + match self.0.adapter_get_presentation_timestamp(*adapter_data) { Ok(timestamp) => timestamp, Err(err) => self.handle_error_fatal(err, "Adapter::correlate_presentation_timestamp"), } @@ -781,12 +707,13 @@ impl crate::Context for ContextWgpuCore { fn surface_get_capabilities( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + surface_data: &Self::SurfaceData, + adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities { - match self.0.surface_get_capabilities(*surface, *adapter) { + match self + .0 + .surface_get_capabilities(surface_data.id, *adapter_data) + { Ok(caps) => caps, Err(wgc::instance::GetSurfaceSupportError::Unsupported) => { wgt::SurfaceCapabilities::default() @@ -797,48 +724,40 @@ impl crate::Context for ContextWgpuCore { fn surface_configure( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ) { - let error = self.0.surface_configure(*surface, *device, config); + let error = self + .0 + .surface_configure(surface_data.id, device_data.id, config); if let Some(e) = error { self.handle_error_fatal(e, "Surface::configure"); } else { - *surface_data.configured_device.lock() = Some(*device); + *surface_data.configured_device.lock() = Some(device_data.id); } } fn surface_get_current_texture( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, + surface_data: &Self::SurfaceData, ) -> ( - Option, Option, SurfaceStatus, Self::SurfaceOutputDetail, ) { - match self.0.surface_get_current_texture(*surface, None) { + match self.0.surface_get_current_texture(surface_data.id, None) { Ok(wgc::present::SurfaceOutput { status, texture_id }) => { - let (id, data) = { - ( - texture_id, - texture_id.map(|id| Texture { - id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), - }), - ) - }; + let data = texture_id.map(|id| Texture { + id, + error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + }); ( - id, data, status, SurfaceOutputDetail { - surface_id: *surface, + surface_id: surface_data.id, }, ) } @@ -860,30 +779,22 @@ impl crate::Context for ContextWgpuCore { } } - fn device_features( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - ) -> Features { - match self.0.device_features(*device) { + fn device_features(&self, device_data: &Self::DeviceData) -> Features { + match self.0.device_features(device_data.id) { Ok(features) => features, Err(err) => self.handle_error_fatal(err, "Device::features"), } } - fn device_limits(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) -> Limits { - match self.0.device_limits(*device) { + fn 
device_limits(&self, device_data: &Self::DeviceData) -> Limits { + match self.0.device_limits(device_data.id) { Ok(limits) => limits, Err(err) => self.handle_error_fatal(err, "Device::limits"), } } - fn device_downlevel_properties( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - ) -> DownlevelCapabilities { - match self.0.device_downlevel_properties(*device) { + fn device_downlevel_properties(&self, device_data: &Self::DeviceData) -> DownlevelCapabilities { + match self.0.device_downlevel_properties(device_data.id) { Ok(limits) => limits, Err(err) => self.handle_error_fatal(err, "Device::downlevel_properties"), } @@ -900,11 +811,10 @@ impl crate::Context for ContextWgpuCore { )] fn device_create_shader_module( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), shader_bound_checks, @@ -935,9 +845,9 @@ impl crate::Context for ContextWgpuCore { ShaderSource::Naga(module) => wgc::pipeline::ShaderModuleSource::Naga(module), ShaderSource::Dummy(_) => panic!("found `ShaderSource::Dummy`"), }; - let (id, error) = self - .0 - .device_create_shader_module(*device, &descriptor, source, None); + let (id, error) = + self.0 + .device_create_shader_module(device_data.id, &descriptor, source, None); let compilation_info = match error { Some(cause) => { self.handle_error( @@ -951,15 +861,17 @@ impl crate::Context for ContextWgpuCore { None => CompilationInfo { messages: vec![] }, }; - (id, ShaderModule { compilation_info }) + ShaderModule { + id, + compilation_info, + } } unsafe fn device_create_shader_module_spirv( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), // Doesn't matter the value since spirv shaders aren't mutated to include @@ -968,7 +880,7 @@ impl crate::Context for ContextWgpuCore { }; let (id, error) = unsafe { self.0.device_create_shader_module_spirv( - *device, + device_data.id, &descriptor, Borrowed(&desc.source), None, @@ -986,22 +898,24 @@ impl crate::Context for ContextWgpuCore { } None => CompilationInfo { messages: vec![] }, }; - (id, ShaderModule { compilation_info }) + ShaderModule { + id, + compilation_info, + } } fn device_create_bind_group_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { + ) -> Self::BindGroupLayoutData { let descriptor = wgc::binding_model::BindGroupLayoutDescriptor { label: desc.label.map(Borrowed), entries: Borrowed(desc.entries), }; let (id, error) = self .0 - .device_create_bind_group_layout(*device, &descriptor, None); + .device_create_bind_group_layout(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1010,18 +924,17 @@ impl crate::Context for ContextWgpuCore { "Device::create_bind_group_layout", ); } - (id, ()) + id } fn device_create_bind_group( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData) { + ) -> Self::BindGroupData { use wgc::binding_model as 
bm; - let mut arrayed_texture_views = Vec::::new(); - let mut arrayed_samplers = Vec::::new(); + let mut arrayed_texture_views = Vec::new(); + let mut arrayed_samplers = Vec::new(); if device_data .features .contains(Features::TEXTURE_BINDING_ARRAY) @@ -1029,10 +942,11 @@ impl crate::Context for ContextWgpuCore { // gather all the array view IDs first for entry in desc.entries.iter() { if let BindingResource::TextureViewArray(array) = entry.resource { - arrayed_texture_views.extend(array.iter().map(|view| &view.id)); + arrayed_texture_views + .extend(array.iter().map(|view| *downcast_texture_view(view))); } if let BindingResource::SamplerArray(array) = entry.resource { - arrayed_samplers.extend(array.iter().map(|sampler| &sampler.id)); + arrayed_samplers.extend(array.iter().map(|sampler| *downcast_sampler(sampler))); } } } @@ -1048,7 +962,7 @@ impl crate::Context for ContextWgpuCore { for entry in desc.entries.iter() { if let BindingResource::BufferArray(array) = entry.resource { arrayed_buffer_bindings.extend(array.iter().map(|binding| bm::BufferBinding { - buffer_id: binding.buffer.id.into(), + buffer_id: downcast_buffer(binding.buffer).id, offset: binding.offset, size: binding.size, })); @@ -1068,7 +982,7 @@ impl crate::Context for ContextWgpuCore { offset, size, }) => bm::BindingResource::Buffer(bm::BufferBinding { - buffer_id: buffer.id.into(), + buffer_id: downcast_buffer(buffer).id, offset, size, }), @@ -1079,38 +993,34 @@ impl crate::Context for ContextWgpuCore { bm::BindingResource::BufferArray(Borrowed(slice)) } BindingResource::Sampler(sampler) => { - bm::BindingResource::Sampler(sampler.id.into()) + bm::BindingResource::Sampler(*downcast_sampler(sampler)) } BindingResource::SamplerArray(array) => { - let samplers = remaining_arrayed_samplers[..array.len()] - .iter() - .map(|id| ::from(*id)) - .collect::>(); + let slice = &remaining_arrayed_samplers[..array.len()]; remaining_arrayed_samplers = &remaining_arrayed_samplers[array.len()..]; - bm::BindingResource::SamplerArray(Owned(samplers)) + bm::BindingResource::SamplerArray(Borrowed(slice)) } BindingResource::TextureView(texture_view) => { - bm::BindingResource::TextureView(texture_view.id.into()) + bm::BindingResource::TextureView(*downcast_texture_view(texture_view)) } BindingResource::TextureViewArray(array) => { - let views = remaining_arrayed_texture_views[..array.len()] - .iter() - .map(|id| ::from(*id)) - .collect::>(); + let slice = &remaining_arrayed_texture_views[..array.len()]; remaining_arrayed_texture_views = &remaining_arrayed_texture_views[array.len()..]; - bm::BindingResource::TextureViewArray(Owned(views)) + bm::BindingResource::TextureViewArray(Borrowed(slice)) } }, }) .collect::>(); let descriptor = bm::BindGroupDescriptor { label: desc.label.as_ref().map(|label| Borrowed(&label[..])), - layout: desc.layout.id.into(), + layout: *downcast_bind_group_layout(desc.layout), entries: Borrowed(&entries), }; - let (id, error) = self.0.device_create_bind_group(*device, &descriptor, None); + let (id, error) = self + .0 + .device_create_bind_group(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1119,14 +1029,13 @@ impl crate::Context for ContextWgpuCore { "Device::create_bind_group", ); } - (id, ()) + id } fn device_create_pipeline_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData) { + ) -> Self::PipelineLayoutData { // Limit is always less 
or equal to hal::MAX_BIND_GROUPS, so this is always right // Guards following ArrayVec assert!( @@ -1139,7 +1048,7 @@ impl crate::Context for ContextWgpuCore { let temp_layouts = desc .bind_group_layouts .iter() - .map(|bgl| bgl.id.into()) + .map(|bgl| *downcast_bind_group_layout(bgl)) .collect::>(); let descriptor = wgc::binding_model::PipelineLayoutDescriptor { label: desc.label.map(Borrowed), @@ -1149,7 +1058,7 @@ impl crate::Context for ContextWgpuCore { let (id, error) = self .0 - .device_create_pipeline_layout(*device, &descriptor, None); + .device_create_pipeline_layout(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1158,14 +1067,13 @@ impl crate::Context for ContextWgpuCore { "Device::create_pipeline_layout", ); } - (id, ()) + id } fn device_create_render_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData) { + ) -> Self::RenderPipelineData { use wgc::pipeline as pipe; let vertex_buffers: ArrayVec<_, { wgc::MAX_VERTEX_BUFFERS }> = desc @@ -1181,10 +1089,10 @@ impl crate::Context for ContextWgpuCore { let descriptor = pipe::RenderPipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(|l| l.id.into()), + layout: desc.layout.map(downcast_pipeline_layout).copied(), vertex: pipe::VertexState { stage: pipe::ProgrammableStageDescriptor { - module: desc.vertex.module.id.into(), + module: downcast_shader_module(desc.vertex.module).id, entry_point: desc.vertex.entry_point.map(Borrowed), constants: Borrowed(desc.vertex.compilation_options.constants), zero_initialize_workgroup_memory: desc @@ -1199,7 +1107,7 @@ impl crate::Context for ContextWgpuCore { multisample: desc.multisample, fragment: desc.fragment.as_ref().map(|frag| pipe::FragmentState { stage: pipe::ProgrammableStageDescriptor { - module: frag.module.id.into(), + module: downcast_shader_module(frag.module).id, entry_point: frag.entry_point.map(Borrowed), constants: Borrowed(frag.compilation_options.constants), zero_initialize_workgroup_memory: frag @@ -1209,12 +1117,12 @@ impl crate::Context for ContextWgpuCore { targets: Borrowed(frag.targets), }), multiview: desc.multiview, - cache: desc.cache.map(|c| c.id.into()), + cache: desc.cache.map(downcast_pipeline_cache).copied(), }; - let (id, error) = self - .0 - .device_create_render_pipeline(*device, &descriptor, None, None); + let (id, error) = + self.0 + .device_create_render_pipeline(device_data.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateRenderPipelineError::Internal { stage, ref error } = cause { log::error!("Shader translation error for stage {:?}: {}", stage, error); @@ -1227,33 +1135,32 @@ impl crate::Context for ContextWgpuCore { "Device::create_render_pipeline", ); } - (id, ()) + id } fn device_create_compute_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData) { + ) -> Self::ComputePipelineData { use wgc::pipeline as pipe; let descriptor = pipe::ComputePipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(|l| l.id.into()), + layout: desc.layout.map(downcast_pipeline_layout).copied(), stage: pipe::ProgrammableStageDescriptor { - module: desc.module.id.into(), + module: downcast_shader_module(desc.module).id, entry_point: desc.entry_point.map(Borrowed), constants: 
Borrowed(desc.compilation_options.constants), zero_initialize_workgroup_memory: desc .compilation_options .zero_initialize_workgroup_memory, }, - cache: desc.cache.map(|c| c.id.into()), + cache: desc.cache.map(downcast_pipeline_cache).copied(), }; - let (id, error) = self - .0 - .device_create_compute_pipeline(*device, &descriptor, None, None); + let (id, error) = + self.0 + .device_create_compute_pipeline(device_data.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateComputePipelineError::Internal(ref error) = cause { log::error!( @@ -1270,15 +1177,14 @@ impl crate::Context for ContextWgpuCore { "Device::create_compute_pipeline", ); } - (id, ()) + id } unsafe fn device_create_pipeline_cache( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData) { + ) -> Self::PipelineCacheData { use wgc::pipeline as pipe; let descriptor = pipe::PipelineCacheDescriptor { @@ -1288,7 +1194,7 @@ impl crate::Context for ContextWgpuCore { }; let (id, error) = unsafe { self.0 - .device_create_pipeline_cache(*device, &descriptor, None) + .device_create_pipeline_cache(device_data.id, &descriptor, None) }; if let Some(cause) = error { self.handle_error( @@ -1298,18 +1204,17 @@ impl crate::Context for ContextWgpuCore { "Device::device_create_pipeline_cache_init", ); } - (id, ()) + id } fn device_create_buffer( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData) { + ) -> Self::BufferData { let (id, error) = self.0 - .device_create_buffer(*device, &desc.map_label(|l| l.map(Borrowed)), None); + .device_create_buffer(device_data.id, &desc.map_label(|l| l.map(Borrowed)), None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1318,21 +1223,21 @@ impl crate::Context for ContextWgpuCore { "Device::create_buffer", ); } - ( + + Buffer { id, - Buffer { - error_sink: Arc::clone(&device_data.error_sink), - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + } } fn device_create_texture( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData) { + ) -> Self::TextureData { let wgt_desc = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec()); - let (id, error) = self.0.device_create_texture(*device, &wgt_desc, None); + let (id, error) = self + .0 + .device_create_texture(device_data.id, &wgt_desc, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1341,20 +1246,17 @@ impl crate::Context for ContextWgpuCore { "Device::create_texture", ); } - ( + + Texture { id, - Texture { - id, - error_sink: Arc::clone(&device_data.error_sink), - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + } } fn device_create_sampler( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData) { + ) -> Self::SamplerData { let descriptor = wgc::resource::SamplerDescriptor { label: desc.label.map(Borrowed), address_modes: [ @@ -1372,7 +1274,9 @@ impl crate::Context for ContextWgpuCore { border_color: desc.border_color, }; - let (id, error) = self.0.device_create_sampler(*device, &descriptor, None); + let (id, error) = self + .0 + .device_create_sampler(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1381,30 +1285,30 
@@ impl crate::Context for ContextWgpuCore { "Device::create_sampler", ); } - (id, ()) + id } fn device_create_query_set( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &wgt::QuerySetDescriptor>, - ) -> (Self::QuerySetId, Self::QuerySetData) { - let (id, error) = - self.0 - .device_create_query_set(*device, &desc.map_label(|l| l.map(Borrowed)), None); + ) -> Self::QuerySetData { + let (id, error) = self.0.device_create_query_set( + device_data.id, + &desc.map_label(|l| l.map(Borrowed)), + None, + ); if let Some(cause) = error { self.handle_error_nolabel(&device_data.error_sink, cause, "Device::create_query_set"); } - (id, ()) + id } fn device_create_command_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData) { + ) -> Self::CommandEncoderData { let (id, error) = self.0.device_create_command_encoder( - *device, + device_data.id, &desc.map_label(|l| l.map(Borrowed)), None, ); @@ -1416,20 +1320,18 @@ impl crate::Context for ContextWgpuCore { "Device::create_command_encoder", ); } - ( + + CommandEncoder { id, - CommandEncoder { - error_sink: Arc::clone(&device_data.error_sink), - open: true, - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + open: true, + } } fn device_create_render_bundle_encoder( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData) { + ) -> Self::RenderBundleEncoderData { let descriptor = wgc::command::RenderBundleEncoderDescriptor { label: desc.label.map(Borrowed), color_formats: Borrowed(desc.color_formats), @@ -1437,60 +1339,53 @@ impl crate::Context for ContextWgpuCore { sample_count: desc.sample_count, multiview: desc.multiview, }; - match wgc::command::RenderBundleEncoder::new(&descriptor, *device, None) { - Ok(encoder) => (Unused, encoder), + match wgc::command::RenderBundleEncoder::new(&descriptor, device_data.id, None) { + Ok(encoder) => encoder, Err(e) => panic!("Error in Device::create_render_bundle_encoder: {e}"), } } #[doc(hidden)] - fn device_make_invalid(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_make_invalid(*device); + fn device_make_invalid(&self, device_data: &Self::DeviceData) { + self.0.device_make_invalid(device_data.id); } #[cfg_attr(not(any(native, Emscripten)), allow(unused))] - fn device_drop(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { + fn device_drop(&self, device_data: &Self::DeviceData) { #[cfg(any(native, Emscripten))] { // Call device_poll, but don't check for errors. We have to use its // return value, but we just drop it. 
- let _ = self.0.device_poll(*device, wgt::Maintain::wait()); - self.0.device_drop(*device); + let _ = self.0.device_poll(device_data.id, wgt::Maintain::wait()); + self.0.device_drop(device_data.id); } } #[cfg_attr(target_arch = "wasm32", allow(unused))] - fn queue_drop(&self, queue: &Self::QueueId, _device_data: &Self::QueueData) { - self.0.queue_drop(*queue); + fn queue_drop(&self, queue_data: &Self::QueueData) { + self.0.queue_drop(queue_data.id); } fn device_set_device_lost_callback( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, device_lost_callback: crate::context::DeviceLostCallback, ) { let device_lost_closure = DeviceLostClosure::from_rust(device_lost_callback); self.0 - .device_set_device_lost_closure(*device, device_lost_closure); + .device_set_device_lost_closure(device_data.id, device_lost_closure); } - fn device_destroy(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_destroy(*device); + fn device_destroy(&self, device_data: &Self::DeviceData) { + self.0.device_destroy(device_data.id); } - fn device_mark_lost( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - message: &str, - ) { + fn device_mark_lost(&self, device_data: &Self::DeviceData, message: &str) { // We do not provide a reason to device_lose, because all reasons other than // destroyed (which this is not) are "unknown". - self.0.device_mark_lost(*device, message); + self.0.device_mark_lost(device_data.id, message); } fn device_poll( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, maintain: crate::Maintain, ) -> wgt::MaintainResult { let maintain_inner = maintain.map_index(|i| *i.0.as_ref().downcast_ref().unwrap()); - match self.0.device_poll(*device, maintain_inner) { + match self.0.device_poll(device_data.id, maintain_inner) { Ok(done) => match done { true => wgt::MaintainResult::SubmissionQueueEmpty, false => wgt::MaintainResult::Ok, @@ -1500,30 +1395,20 @@ impl crate::Context for ContextWgpuCore { } fn device_on_uncaptured_error( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ) { let mut error_sink = device_data.error_sink.lock(); error_sink.uncaptured_handler = Some(handler); } - fn device_push_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: crate::ErrorFilter, - ) { + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { let mut error_sink = device_data.error_sink.lock(); error_sink.scopes.push(ErrorScope { error: None, filter, }); } - fn device_pop_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture { + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture { let mut error_sink = device_data.error_sink.lock(); let scope = error_sink.scopes.pop().unwrap(); ready(scope.error) @@ -1531,7 +1416,6 @@ impl crate::Context for ContextWgpuCore { fn buffer_map_async( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: MapMode, range: Range, @@ -1551,7 +1435,7 @@ impl crate::Context for ContextWgpuCore { }; match self.0.buffer_map_async( - *buffer, + buffer_data.id, range.start, Some(range.end - range.start), operation, @@ -1564,14 +1448,13 @@ impl crate::Context for ContextWgpuCore { } fn buffer_get_mapped_range( &self, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, 
sub_range: Range, ) -> Box { let size = sub_range.end - sub_range.start; match self .0 - .buffer_get_mapped_range(*buffer, sub_range.start, Some(size)) + .buffer_get_mapped_range(buffer_data.id, sub_range.start, Some(size)) { Ok((ptr, size)) => Box::new(BufferMappedRange { ptr, @@ -1581,8 +1464,8 @@ impl crate::Context for ContextWgpuCore { } } - fn buffer_unmap(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData) { - match self.0.buffer_unmap(*buffer) { + fn buffer_unmap(&self, buffer_data: &Self::BufferData) { + match self.0.buffer_unmap(buffer_data.id) { Ok(()) => (), Err(cause) => { self.handle_error_nolabel(&buffer_data.error_sink, cause, "Buffer::buffer_unmap") @@ -1592,7 +1475,6 @@ impl crate::Context for ContextWgpuCore { fn shader_get_compilation_info( &self, - _shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture { ready(shader_data.compilation_info.clone()) @@ -1600,10 +1482,9 @@ impl crate::Context for ContextWgpuCore { fn texture_create_view( &self, - texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData) { + ) -> Self::TextureViewData { let descriptor = wgc::resource::TextureViewDescriptor { label: desc.label.map(Borrowed), format: desc.format, @@ -1616,7 +1497,9 @@ impl crate::Context for ContextWgpuCore { array_layer_count: desc.array_layer_count, }, }; - let (id, error) = self.0.texture_create_view(*texture, &descriptor, None); + let (id, error) = self + .0 + .texture_create_view(texture_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &texture_data.error_sink, @@ -1625,178 +1508,129 @@ impl crate::Context for ContextWgpuCore { "Texture::create_view", ); } - (id, ()) + id } - fn surface_drop(&self, surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData) { - self.0.surface_drop(*surface) + fn surface_drop(&self, surface_data: &Self::SurfaceData) { + self.0.surface_drop(surface_data.id) } - fn adapter_drop(&self, adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData) { - self.0.adapter_drop(*adapter) + fn adapter_drop(&self, adapter_data: &Self::AdapterData) { + self.0.adapter_drop(*adapter_data) } - fn buffer_destroy(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { + fn buffer_destroy(&self, buffer_data: &Self::BufferData) { // Per spec, no error to report. Even calling destroy multiple times is valid. - let _ = self.0.buffer_destroy(*buffer); + let _ = self.0.buffer_destroy(buffer_data.id); } - fn buffer_drop(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { - self.0.buffer_drop(*buffer) + fn buffer_drop(&self, buffer_data: &Self::BufferData) { + self.0.buffer_drop(buffer_data.id) } - fn texture_destroy(&self, texture: &Self::TextureId, _texture_data: &Self::TextureData) { + fn texture_destroy(&self, texture_data: &Self::TextureData) { // Per spec, no error to report. Even calling destroy multiple times is valid. 
- let _ = self.0.texture_destroy(*texture); + let _ = self.0.texture_destroy(texture_data.id); } - fn texture_drop(&self, texture: &Self::TextureId, _texture_data: &Self::TextureData) { - self.0.texture_drop(*texture) + fn texture_drop(&self, texture_data: &Self::TextureData) { + self.0.texture_drop(texture_data.id) } - fn texture_view_drop( - &self, - texture_view: &Self::TextureViewId, - __texture_view_data: &Self::TextureViewData, - ) { - let _ = self.0.texture_view_drop(*texture_view); + fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData) { + let _ = self.0.texture_view_drop(*texture_view_data); } - fn sampler_drop(&self, sampler: &Self::SamplerId, _sampler_data: &Self::SamplerData) { - self.0.sampler_drop(*sampler) + fn sampler_drop(&self, sampler_data: &Self::SamplerData) { + self.0.sampler_drop(*sampler_data) } - fn query_set_drop(&self, query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData) { - self.0.query_set_drop(*query_set) + fn query_set_drop(&self, query_set_data: &Self::QuerySetData) { + self.0.query_set_drop(*query_set_data) } - fn bind_group_drop( - &self, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, - ) { - self.0.bind_group_drop(*bind_group) + fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData) { + self.0.bind_group_drop(*bind_group_data) } - fn bind_group_layout_drop( - &self, - bind_group_layout: &Self::BindGroupLayoutId, - _bind_group_layout_data: &Self::BindGroupLayoutData, - ) { - self.0.bind_group_layout_drop(*bind_group_layout) + fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData) { + self.0.bind_group_layout_drop(*bind_group_layout_data) } - fn pipeline_layout_drop( - &self, - pipeline_layout: &Self::PipelineLayoutId, - _pipeline_layout_data: &Self::PipelineLayoutData, - ) { - self.0.pipeline_layout_drop(*pipeline_layout) + fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData) { + self.0.pipeline_layout_drop(*pipeline_layout_data) } - fn shader_module_drop( - &self, - shader_module: &Self::ShaderModuleId, - _shader_module_data: &Self::ShaderModuleData, - ) { - self.0.shader_module_drop(*shader_module) + fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData) { + self.0.shader_module_drop(shader_module_data.id) } - fn command_encoder_drop( - &self, - command_encoder: &Self::CommandEncoderId, - command_encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData) { if command_encoder_data.open { - self.0.command_encoder_drop(*command_encoder) + self.0.command_encoder_drop(command_encoder_data.id) } } - fn command_buffer_drop( - &self, - command_buffer: &Self::CommandBufferId, - _command_buffer_data: &Self::CommandBufferData, - ) { - self.0.command_buffer_drop(*command_buffer) + fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData) { + self.0.command_buffer_drop(*command_buffer_data) } - fn render_bundle_drop( - &self, - render_bundle: &Self::RenderBundleId, - _render_bundle_data: &Self::RenderBundleData, - ) { - self.0.render_bundle_drop(*render_bundle) + fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData) { + self.0.render_bundle_drop(*render_bundle_data) } - fn compute_pipeline_drop( - &self, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, - ) { - self.0.compute_pipeline_drop(*pipeline) + fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData) { + 
self.0.compute_pipeline_drop(*pipeline_data) } - fn render_pipeline_drop( - &self, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, - ) { - self.0.render_pipeline_drop(*pipeline) + fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData) { + self.0.render_pipeline_drop(*pipeline_data) } - fn pipeline_cache_drop( - &self, - cache: &Self::PipelineCacheId, - _cache_data: &Self::PipelineCacheData, - ) { - self.0.pipeline_cache_drop(*cache) + fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData) { + self.0.pipeline_cache_drop(*cache_data) } fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - let (id, error) = self - .0 - .compute_pipeline_get_bind_group_layout(*pipeline, index, None); + ) -> Self::BindGroupLayoutData { + let (id, error) = + self.0 + .compute_pipeline_get_bind_group_layout(*pipeline_data, index, None); if let Some(err) = error { panic!("Error reflecting bind group {index}: {err}"); } - (id, ()) + id } fn render_pipeline_get_bind_group_layout( &self, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { + ) -> Self::BindGroupLayoutData { let (id, error) = self .0 - .render_pipeline_get_bind_group_layout(*pipeline, index, None); + .render_pipeline_get_bind_group_layout(*pipeline_data, index, None); if let Some(err) = error { panic!("Error reflecting bind group {index}: {err}"); } - (id, ()) + id } fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - source: &Self::BufferId, - _source_data: &Self::BufferData, + source_data: &Self::BufferData, source_offset: wgt::BufferAddress, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, copy_size: wgt::BufferAddress, ) { if let Err(cause) = self.0.command_encoder_copy_buffer_to_buffer( - *encoder, - *source, + encoder_data.id, + source_data.id, source_offset, - *destination, + destination_data.id, destination_offset, copy_size, ) { @@ -1810,14 +1644,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_buffer_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyBuffer<'_>, destination: crate::ImageCopyTexture<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_buffer_to_texture( - *encoder, + encoder_data.id, &map_buffer_copy_view(source), &map_texture_copy_view(destination), ©_size, @@ -1832,14 +1665,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_texture_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyBuffer<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_texture_to_buffer( - *encoder, + encoder_data.id, &map_texture_copy_view(source), &map_buffer_copy_view(destination), ©_size, @@ -1854,14 +1686,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_texture_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: 
crate::ImageCopyTexture<'_>, destination: crate::ImageCopyTexture<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_texture_to_texture( - *encoder, + encoder_data.id, &map_texture_copy_view(source), &map_texture_copy_view(destination), ©_size, @@ -1876,21 +1707,20 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_begin_compute_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &ComputePassDescriptor<'_>, - ) -> (Self::ComputePassId, Self::ComputePassData) { + ) -> Self::ComputePassData { let timestamp_writes = desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: tw.query_set.id.into(), + query_set: *downcast_query_set(tw.query_set), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); let (pass, err) = self.0.command_encoder_create_compute_pass( - *encoder, + encoder_data.id, &wgc::command::ComputePassDescriptor { label: desc.label.map(Borrowed), timestamp_writes: timestamp_writes.as_ref(), @@ -1906,29 +1736,25 @@ impl crate::Context for ContextWgpuCore { ); } - ( - Unused, - Self::ComputePassData { - pass, - error_sink: encoder_data.error_sink.clone(), - }, - ) + Self::ComputePassData { + pass, + error_sink: encoder_data.error_sink.clone(), + } } fn command_encoder_begin_render_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> (Self::RenderPassId, Self::RenderPassData) { + ) -> Self::RenderPassData { let colors = desc .color_attachments .iter() .map(|ca| { ca.as_ref() .map(|at| wgc::command::RenderPassColorAttachment { - view: at.view.id.into(), - resolve_target: at.resolve_target.map(|rt| rt.id.into()), + view: *downcast_texture_view(at.view), + resolve_target: at.resolve_target.map(downcast_texture_view).copied(), channel: map_pass_channel(Some(&at.ops)), }) }) @@ -1936,7 +1762,7 @@ impl crate::Context for ContextWgpuCore { let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| { wgc::command::RenderPassDepthStencilAttachment { - view: dsa.view.id.into(), + view: *downcast_texture_view(dsa.view), depth: map_pass_channel(dsa.depth_ops.as_ref()), stencil: map_pass_channel(dsa.stencil_ops.as_ref()), } @@ -1946,21 +1772,19 @@ impl crate::Context for ContextWgpuCore { desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: tw.query_set.id.into(), + query_set: *downcast_query_set(tw.query_set), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); let (pass, err) = self.0.command_encoder_create_render_pass( - *encoder, + encoder_data.id, &wgc::command::RenderPassDescriptor { label: desc.label.map(Borrowed), timestamp_writes: timestamp_writes.as_ref(), color_attachments: std::borrow::Cow::Borrowed(&colors), depth_stencil_attachment: depth_stencil.as_ref(), - occlusion_query_set: desc - .occlusion_query_set - .map(|query_set| query_set.id.into()), + occlusion_query_set: desc.occlusion_query_set.map(downcast_query_set).copied(), }, ); @@ -1973,40 +1797,36 @@ impl crate::Context for ContextWgpuCore { ); } - ( - Unused, - Self::RenderPassData { - pass, - error_sink: encoder_data.error_sink.clone(), - }, - ) + Self::RenderPassData { + pass, + error_sink: encoder_data.error_sink.clone(), + } } fn command_encoder_finish( &self, - encoder: Self::CommandEncoderId, encoder_data: &mut 
Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData) { + ) -> Self::CommandBufferData { let descriptor = wgt::CommandBufferDescriptor::default(); encoder_data.open = false; // prevent the drop - let (id, error) = self.0.command_encoder_finish(encoder, &descriptor); + let (id, error) = self.0.command_encoder_finish(encoder_data.id, &descriptor); if let Some(cause) = error { self.handle_error_nolabel(&encoder_data.error_sink, cause, "a CommandEncoder"); } - (id, ()) + id } fn command_encoder_clear_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - texture: &crate::Texture, + texture_data: &Self::TextureData, subresource_range: &wgt::ImageSubresourceRange, ) { - if let Err(cause) = - self.0 - .command_encoder_clear_texture(*encoder, texture.id.into(), subresource_range) - { + if let Err(cause) = self.0.command_encoder_clear_texture( + encoder_data.id, + texture_data.id, + subresource_range, + ) { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2017,15 +1837,14 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_clear_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &crate::Buffer, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { if let Err(cause) = self.0 - .command_encoder_clear_buffer(*encoder, buffer.id.into(), offset, size) + .command_encoder_clear_buffer(encoder_data.id, buffer_data.id, offset, size) { self.handle_error_nolabel( &encoder_data.error_sink, @@ -2037,11 +1856,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_insert_debug_marker( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ) { - if let Err(cause) = self.0.command_encoder_insert_debug_marker(*encoder, label) { + if let Err(cause) = self + .0 + .command_encoder_insert_debug_marker(encoder_data.id, label) + { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2052,11 +1873,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_push_debug_group( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ) { - if let Err(cause) = self.0.command_encoder_push_debug_group(*encoder, label) { + if let Err(cause) = self + .0 + .command_encoder_push_debug_group(encoder_data.id, label) + { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2065,12 +1888,8 @@ impl crate::Context for ContextWgpuCore { } } - fn command_encoder_pop_debug_group( - &self, - encoder: &Self::CommandEncoderId, - encoder_data: &Self::CommandEncoderData, - ) { - if let Err(cause) = self.0.command_encoder_pop_debug_group(*encoder) { + fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData) { + if let Err(cause) = self.0.command_encoder_pop_debug_group(encoder_data.id) { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2081,15 +1900,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_write_timestamp( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .command_encoder_write_timestamp(*encoder, *query_set, query_index) + .command_encoder_write_timestamp(encoder_data.id, *query_set_data, query_index) { self.handle_error_nolabel( &encoder_data.error_sink, @@ -2101,22 +1918,19 @@ impl crate::Context 
for ContextWgpuCore { fn command_encoder_resolve_query_set( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.command_encoder_resolve_query_set( - *encoder, - *query_set, + encoder_data.id, + *query_set_data, first_query, query_count, - *destination, + destination_data.id, destination_offset, ) { self.handle_error_nolabel( @@ -2129,10 +1943,9 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_finish( &self, - _encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &crate::RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData) { + ) -> Self::RenderBundleData { let (id, error) = self.0.render_bundle_encoder_finish( encoder_data, &desc.map_label(|l| l.map(Borrowed)), @@ -2141,19 +1954,20 @@ impl crate::Context for ContextWgpuCore { if let Some(err) = error { self.handle_error_fatal(err, "RenderBundleEncoder::finish"); } - (id, ()) + id } fn queue_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, data: &[u8], ) { - match self.0.queue_write_buffer(*queue, *buffer, offset, data) { + match self + .0 + .queue_write_buffer(queue_data.id, buffer_data.id, offset, data) + { Ok(()) => (), Err(err) => { self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer") @@ -2163,16 +1977,14 @@ impl crate::Context for ContextWgpuCore { fn queue_validate_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()> { match self .0 - .queue_validate_write_buffer(*queue, *buffer, offset, size) + .queue_validate_write_buffer(queue_data.id, buffer_data.id, offset, size) { Ok(()) => Some(()), Err(err) => { @@ -2184,11 +1996,13 @@ impl crate::Context for ContextWgpuCore { fn queue_create_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, size: wgt::BufferSize, ) -> Option> { - match self.0.queue_create_staging_buffer(*queue, size, None) { + match self + .0 + .queue_create_staging_buffer(queue_data.id, size, None) + { Ok((buffer_id, ptr)) => Some(Box::new(QueueWriteBuffer { buffer_id, mapping: BufferMappedRange { @@ -2205,10 +2019,8 @@ impl crate::Context for ContextWgpuCore { fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, staging_buffer: &dyn crate::context::QueueWriteBuffer, ) { @@ -2216,10 +2028,12 @@ impl crate::Context for ContextWgpuCore { .as_any() .downcast_ref::() .unwrap(); - match self - .0 - .queue_write_staging_buffer(*queue, *buffer, offset, staging_buffer.buffer_id) - { + match self.0.queue_write_staging_buffer( + queue_data.id, + buffer_data.id, + offset, + staging_buffer.buffer_id, + ) { Ok(()) => (), Err(err) => { self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer_with"); @@ -2229,7 +2043,6 @@ impl 
crate::Context for ContextWgpuCore { fn queue_write_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, texture: crate::ImageCopyTexture<'_>, data: &[u8], @@ -2237,7 +2050,7 @@ impl crate::Context for ContextWgpuCore { size: wgt::Extent3d, ) { match self.0.queue_write_texture( - *queue, + queue_data.id, &map_texture_copy_view(texture), data, &data_layout, @@ -2253,14 +2066,13 @@ impl crate::Context for ContextWgpuCore { #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ) { match self.0.queue_copy_external_image_to_texture( - *queue, + queue_data.id, source, map_texture_tagged_copy_view(dest), size, @@ -2274,17 +2086,14 @@ impl crate::Context for ContextWgpuCore { } } - fn queue_submit>( + fn queue_submit>( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData { - let temp_command_buffers = command_buffers - .map(|(i, _)| i) - .collect::>(); + let temp_command_buffers = command_buffers.collect::>(); - let index = match self.0.queue_submit(*queue, &temp_command_buffers) { + let index = match self.0.queue_submit(queue_data.id, &temp_command_buffers) { Ok(index) => index, Err(err) => self.handle_error_fatal(err, "Queue::submit"), }; @@ -2296,12 +2105,8 @@ impl crate::Context for ContextWgpuCore { index } - fn queue_get_timestamp_period( - &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, - ) -> f32 { - let res = self.0.queue_get_timestamp_period(*queue); + fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32 { + let res = self.0.queue_get_timestamp_period(queue_data.id); match res { Ok(v) => v, Err(cause) => { @@ -2312,61 +2117,55 @@ impl crate::Context for ContextWgpuCore { fn queue_on_submitted_work_done( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + queue_data: &Self::QueueData, callback: crate::context::SubmittedWorkDoneCallback, ) { let closure = wgc::device::queue::SubmittedWorkDoneClosure::from_rust(callback); - let res = self.0.queue_on_submitted_work_done(*queue, closure); + let res = self.0.queue_on_submitted_work_done(queue_data.id, closure); if let Err(cause) = res { self.handle_error_fatal(cause, "Queue::on_submitted_work_done"); } } - fn device_start_capture(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_start_capture(*device); + fn device_start_capture(&self, device_data: &Self::DeviceData) { + self.0.device_start_capture(device_data.id); } - fn device_stop_capture(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_stop_capture(*device); + fn device_stop_capture(&self, device_data: &Self::DeviceData) { + self.0.device_stop_capture(device_data.id); } fn device_get_internal_counters( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, ) -> wgt::InternalCounters { - self.0.device_get_internal_counters(*device) + self.0.device_get_internal_counters(device_data.id) } fn device_generate_allocator_report( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, ) -> Option { - self.0.device_generate_allocator_report(*device) + self.0.device_generate_allocator_report(device_data.id) } fn pipeline_cache_get_data( &self, - cache: &Self::PipelineCacheId, // TODO: Used for error 
handling? - _cache_data: &Self::PipelineCacheData, + cache_data: &Self::PipelineCacheData, ) -> Option> { - self.0.pipeline_cache_get_data(*cache) + self.0.pipeline_cache_get_data(*cache_data) } fn compute_pass_set_pipeline( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + pipeline_data: &Self::ComputePipelineData, ) { if let Err(cause) = self .0 - .compute_pass_set_pipeline(&mut pass_data.pass, *pipeline) + .compute_pass_set_pipeline(&mut pass_data.pass, *pipeline_data) { self.handle_error( &pass_data.error_sink, @@ -2379,17 +2178,17 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_set_bind_group( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { - if let Err(cause) = - self.0 - .compute_pass_set_bind_group(&mut pass_data.pass, index, *bind_group, offsets) - { + if let Err(cause) = self.0.compute_pass_set_bind_group( + &mut pass_data.pass, + index, + *bind_group_data, + offsets, + ) { self.handle_error( &pass_data.error_sink, cause, @@ -2401,7 +2200,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_set_push_constants( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, offset: u32, data: &[u8], @@ -2419,12 +2217,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_insert_debug_marker( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - label: &str, - ) { + fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str) { if let Err(cause) = self .0 .compute_pass_insert_debug_marker(&mut pass_data.pass, label, 0) @@ -2440,7 +2233,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_push_debug_group( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, group_label: &str, ) { @@ -2457,11 +2249,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_pop_debug_group( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self.0.compute_pass_pop_debug_group(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -2474,15 +2262,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_write_timestamp( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .compute_pass_write_timestamp(&mut pass_data.pass, *query_set, query_index) + .compute_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) { self.handle_error( &pass_data.error_sink, @@ -2495,15 +2281,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0.compute_pass_begin_pipeline_statistics_query( &mut pass_data.pass, - *query_set, + *query_set_data, query_index, ) { self.handle_error( @@ -2515,11 +2299,7 @@ impl 
crate::Context for ContextWgpuCore { } } - fn compute_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self .0 .compute_pass_end_pipeline_statistics_query(&mut pass_data.pass) @@ -2535,7 +2315,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_dispatch_workgroups( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, x: u32, y: u32, @@ -2556,15 +2335,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_dispatch_workgroups_indirect( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.compute_pass_dispatch_workgroups_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, ) { self.handle_error( @@ -2576,11 +2353,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_end( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self.0.compute_pass_end(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -2593,28 +2366,24 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_set_pipeline( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, ) { - wgpu_render_bundle_set_pipeline(encoder_data, *pipeline) + wgpu_render_bundle_set_pipeline(encoder_data, *pipeline_data) } fn render_bundle_encoder_set_bind_group( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - bind_group: &Self::BindGroupId, - __bind_group_data: &Self::BindGroupData, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { unsafe { wgpu_render_bundle_set_bind_group( encoder_data, index, - *bind_group, + *bind_group_data, offsets.as_ptr(), offsets.len(), ) @@ -2623,33 +2392,28 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_set_index_buffer( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - buffer: &Self::BufferId, - __buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { - encoder_data.set_index_buffer(*buffer, index_format, offset, size) + encoder_data.set_index_buffer(buffer_data.id, index_format, offset, size) } fn render_bundle_encoder_set_vertex_buffer( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - buffer: &Self::BufferId, - __buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - wgpu_render_bundle_set_vertex_buffer(encoder_data, slot, *buffer, offset, size) + wgpu_render_bundle_set_vertex_buffer(encoder_data, slot, buffer_data.id, offset, size) } fn render_bundle_encoder_set_push_constants( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, stages: 
wgt::ShaderStages, offset: u32, @@ -2668,7 +2432,6 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, @@ -2684,7 +2447,6 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw_indexed( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -2702,31 +2464,29 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - wgpu_render_bundle_draw_indirect(encoder_data, *indirect_buffer, indirect_offset) + wgpu_render_bundle_draw_indirect(encoder_data, indirect_buffer_data.id, indirect_offset) } fn render_bundle_encoder_draw_indexed_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - wgpu_render_bundle_draw_indexed_indirect(encoder_data, *indirect_buffer, indirect_offset) + wgpu_render_bundle_draw_indexed_indirect( + encoder_data, + indirect_buffer_data.id, + indirect_offset, + ) } fn render_bundle_encoder_multi_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -2736,9 +2496,7 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_multi_draw_indexed_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -2748,12 +2506,9 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_multi_draw_indirect_count( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -2763,12 +2518,9 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_multi_draw_indexed_indirect_count( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -2778,14 +2530,12 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_pipeline( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, ) { if let Err(cause) = self .0 - .render_pass_set_pipeline(&mut pass_data.pass, *pipeline) + 
.render_pass_set_pipeline(&mut pass_data.pass, *pipeline_data) { self.handle_error( &pass_data.error_sink, @@ -2798,16 +2548,14 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_bind_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { if let Err(cause) = self.0 - .render_pass_set_bind_group(&mut pass_data.pass, index, *bind_group, offsets) + .render_pass_set_bind_group(&mut pass_data.pass, index, *bind_group_data, offsets) { self.handle_error( &pass_data.error_sink, @@ -2820,17 +2568,15 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_index_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { if let Err(cause) = self.0.render_pass_set_index_buffer( &mut pass_data.pass, - *buffer, + buffer_data.id, index_format, offset, size, @@ -2846,18 +2592,19 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_vertex_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - if let Err(cause) = - self.0 - .render_pass_set_vertex_buffer(&mut pass_data.pass, slot, *buffer, offset, size) - { + if let Err(cause) = self.0.render_pass_set_vertex_buffer( + &mut pass_data.pass, + slot, + buffer_data.id, + offset, + size, + ) { self.handle_error( &pass_data.error_sink, cause, @@ -2869,7 +2616,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_push_constants( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, stages: wgt::ShaderStages, offset: u32, @@ -2890,7 +2636,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, @@ -2913,7 +2658,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indexed( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, @@ -2938,16 +2682,15 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - if let Err(cause) = - self.0 - .render_pass_draw_indirect(&mut pass_data.pass, *indirect_buffer, indirect_offset) - { + if let Err(cause) = self.0.render_pass_draw_indirect( + &mut pass_data.pass, + indirect_buffer_data.id, + indirect_offset, + ) { self.handle_error( &pass_data.error_sink, cause, @@ -2959,15 +2702,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.render_pass_draw_indexed_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, 
indirect_offset, ) { self.handle_error( @@ -2981,16 +2722,14 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, count, ) { @@ -3005,16 +2744,14 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, count, ) { @@ -3029,21 +2766,18 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indirect_count( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, - count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, + count_buffer_data: &Self::BufferData, count_buffer_offset: wgt::BufferAddress, max_count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indirect_count( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, - *count_buffer, + count_buffer_data.id, count_buffer_offset, max_count, ) { @@ -3058,21 +2792,18 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indexed_indirect_count( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, - count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, + count_buffer_data: &Self::BufferData, count_buffer_offset: wgt::BufferAddress, max_count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect_count( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, - *count_buffer, + count_buffer_data.id, count_buffer_offset, max_count, ) { @@ -3087,7 +2818,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_blend_constant( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, color: wgt::Color, ) { @@ -3106,7 +2836,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_scissor_rect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -3128,7 +2857,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_viewport( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -3157,7 +2885,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_stencil_reference( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ) { @@ -3174,12 +2901,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_insert_debug_marker( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut 
Self::RenderPassData, - label: &str, - ) { + fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str) { if let Err(cause) = self .0 .render_pass_insert_debug_marker(&mut pass_data.pass, label, 0) @@ -3195,7 +2917,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_push_debug_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, group_label: &str, ) { @@ -3212,11 +2933,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_pop_debug_group( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_pop_debug_group(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3229,15 +2946,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_write_timestamp( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .render_pass_write_timestamp(&mut pass_data.pass, *query_set, query_index) + .render_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) { self.handle_error( &pass_data.error_sink, @@ -3250,7 +2965,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_begin_occlusion_query( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, query_index: u32, ) { @@ -3267,11 +2981,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end_occlusion_query( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_end_occlusion_query(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3284,15 +2994,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0.render_pass_begin_pipeline_statistics_query( &mut pass_data.pass, - *query_set, + *query_set_data, query_index, ) { self.handle_error( @@ -3304,11 +3012,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self .0 .render_pass_end_pipeline_statistics_query(&mut pass_data.pass) @@ -3324,11 +3028,10 @@ impl crate::Context for ContextWgpuCore { fn render_pass_execute_bundles( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ) { - let temp_render_bundles = render_bundles.map(|(i, _)| i).collect::>(); + let temp_render_bundles = render_bundles.copied().collect::>(); if let Err(cause) = self .0 .render_pass_execute_bundles(&mut pass_data.pass, &temp_render_bundles) @@ -3342,11 +3045,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end(&self, 
pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_end(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3358,27 +3057,6 @@ impl crate::Context for ContextWgpuCore { } } -impl From for wgc::id::Id -where - T: wgc::id::Marker, -{ - fn from(id: ObjectId) -> Self { - let id = wgc::id::RawId::from_non_zero(id.id()); - // SAFETY: The id was created via the impl below - unsafe { Self::from_raw(id) } - } -} - -impl From> for ObjectId -where - T: wgc::id::Marker, -{ - fn from(id: wgc::id::Id) -> Self { - let id = id.into_raw().into_non_zero(); - Self::from_global_id(id) - } -} - #[derive(Debug)] pub struct SurfaceOutputDetail { surface_id: wgc::id::SurfaceId, @@ -3526,3 +3204,43 @@ impl Drop for BufferMappedRange { // implements `Drop`, to match the web backend } } + +fn downcast_buffer(buffer: &crate::Buffer) -> &::BufferData { + downcast_ref(buffer.data.as_ref()) +} +fn downcast_texture(texture: &crate::Texture) -> &::TextureData { + downcast_ref(texture.data.as_ref()) +} +fn downcast_texture_view( + texture_view: &crate::TextureView, +) -> &::TextureViewData { + downcast_ref(texture_view.data.as_ref()) +} +fn downcast_sampler(sampler: &crate::Sampler) -> &::SamplerData { + downcast_ref(sampler.data.as_ref()) +} +fn downcast_query_set( + query_set: &crate::QuerySet, +) -> &::QuerySetData { + downcast_ref(query_set.data.as_ref()) +} +fn downcast_bind_group_layout( + bind_group_layout: &crate::BindGroupLayout, +) -> &::BindGroupLayoutData { + downcast_ref(bind_group_layout.data.as_ref()) +} +fn downcast_pipeline_layout( + pipeline_layout: &crate::PipelineLayout, +) -> &::PipelineLayoutData { + downcast_ref(pipeline_layout.data.as_ref()) +} +fn downcast_shader_module( + shader_module: &crate::ShaderModule, +) -> &::ShaderModuleData { + downcast_ref(shader_module.data.as_ref()) +} +fn downcast_pipeline_cache( + pipeline_cache: &crate::PipelineCache, +) -> &::PipelineCacheData { + downcast_ref(pipeline_cache.data.as_ref()) +} diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index d28e4bc69..5e6d85192 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -1,30 +1,23 @@ -use std::{any::Any, fmt::Debug, future::Future, num::NonZeroU64, ops::Range, pin::Pin, sync::Arc}; +use std::{any::Any, fmt::Debug, future::Future, ops::Range, pin::Pin, sync::Arc}; use wgt::{ - strict_assert, strict_assert_eq, AdapterInfo, BufferAddress, BufferSize, Color, - DeviceLostReason, DownlevelCapabilities, DynamicOffset, Extent3d, Features, ImageDataLayout, + strict_assert, AdapterInfo, BufferAddress, BufferSize, Color, DeviceLostReason, + DownlevelCapabilities, DynamicOffset, Extent3d, Features, ImageDataLayout, ImageSubresourceRange, IndexFormat, Limits, ShaderStages, SurfaceStatus, TextureFormat, TextureFormatFeatures, WasmNotSend, WasmNotSendSync, }; use crate::{ - AnyWasmNotSendSync, BindGroupDescriptor, BindGroupLayoutDescriptor, Buffer, BufferAsyncError, + AnyWasmNotSendSync, BindGroupDescriptor, BindGroupLayoutDescriptor, BufferAsyncError, BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, ComputePassDescriptor, ComputePipelineDescriptor, DeviceDescriptor, Error, ErrorFilter, ImageCopyBuffer, ImageCopyTexture, Maintain, MaintainResult, MapMode, PipelineCacheDescriptor, PipelineLayoutDescriptor, QuerySetDescriptor, RenderBundleDescriptor, RenderBundleEncoderDescriptor, RenderPassDescriptor, RenderPipelineDescriptor, RequestAdapterOptions, RequestDeviceError, SamplerDescriptor, ShaderModuleDescriptor, - ShaderModuleDescriptorSpirV, 
SurfaceTargetUnsafe, Texture, TextureDescriptor, - TextureViewDescriptor, UncapturedErrorHandler, + ShaderModuleDescriptorSpirV, SurfaceTargetUnsafe, TextureDescriptor, TextureViewDescriptor, + UncapturedErrorHandler, }; - -/// Meta trait for an id tracked by a context. -/// -/// There is no need to manually implement this trait since there is a blanket implementation for this trait. -pub trait ContextId: Into + From + Debug + 'static {} -impl + From + Debug + 'static> ContextId for T {} - /// Meta trait for an data associated with an id tracked by a context. /// /// There is no need to manually implement this trait since there is a blanket implementation for this trait. @@ -32,69 +25,36 @@ pub trait ContextData: Debug + WasmNotSendSync + 'static {} impl ContextData for T {} pub trait Context: Debug + WasmNotSendSync + Sized { - type AdapterId: ContextId + WasmNotSendSync; type AdapterData: ContextData; - type DeviceId: ContextId + WasmNotSendSync; type DeviceData: ContextData; - type QueueId: ContextId + WasmNotSendSync; type QueueData: ContextData; - type ShaderModuleId: ContextId + WasmNotSendSync; type ShaderModuleData: ContextData; - type BindGroupLayoutId: ContextId + WasmNotSendSync; type BindGroupLayoutData: ContextData; - type BindGroupId: ContextId + WasmNotSendSync; type BindGroupData: ContextData; - type TextureViewId: ContextId + WasmNotSendSync; type TextureViewData: ContextData; - type SamplerId: ContextId + WasmNotSendSync; type SamplerData: ContextData; - type BufferId: ContextId + WasmNotSendSync; type BufferData: ContextData; - type TextureId: ContextId + WasmNotSendSync; type TextureData: ContextData; - type QuerySetId: ContextId + WasmNotSendSync; type QuerySetData: ContextData; - type PipelineLayoutId: ContextId + WasmNotSendSync; type PipelineLayoutData: ContextData; - type RenderPipelineId: ContextId + WasmNotSendSync; type RenderPipelineData: ContextData; - type ComputePipelineId: ContextId + WasmNotSendSync; type ComputePipelineData: ContextData; - type PipelineCacheId: ContextId + WasmNotSendSync; type PipelineCacheData: ContextData; - type CommandEncoderId: ContextId + WasmNotSendSync; type CommandEncoderData: ContextData; - type ComputePassId: ContextId; type ComputePassData: ContextData; - type RenderPassId: ContextId; type RenderPassData: ContextData; - type CommandBufferId: ContextId + WasmNotSendSync; type CommandBufferData: ContextData; - type RenderBundleEncoderId: ContextId; type RenderBundleEncoderData: ContextData; - type RenderBundleId: ContextId + WasmNotSendSync; type RenderBundleData: ContextData; - type SurfaceId: ContextId + WasmNotSendSync; type SurfaceData: ContextData; type SurfaceOutputDetail: WasmNotSendSync + 'static; type SubmissionIndexData: ContextData + Copy; - type RequestAdapterFuture: Future> + type RequestAdapterFuture: Future> + WasmNotSend + 'static; + type RequestDeviceFuture: Future> + WasmNotSend + 'static; - type RequestDeviceFuture: Future< - Output = Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - RequestDeviceError, - >, - > + WasmNotSend - + 'static; type PopErrorScopeFuture: Future> + WasmNotSend + 'static; type CompilationInfoFuture: Future + WasmNotSend + 'static; @@ -103,14 +63,13 @@ pub trait Context: Debug + WasmNotSendSync + Sized { unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError>; + ) -> Result; fn instance_request_adapter( &self, options: &RequestAdapterOptions<'_, '_>, 
) -> Self::RequestAdapterFuture; fn adapter_request_device( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -118,62 +77,42 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn instance_poll_all_devices(&self, force_wait: bool) -> bool; fn adapter_is_surface_supported( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, ) -> bool; - fn adapter_features( - &self, - adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> Features; - fn adapter_limits(&self, adapter: &Self::AdapterId, adapter_data: &Self::AdapterData) - -> Limits; + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features; + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits; fn adapter_downlevel_capabilities( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> DownlevelCapabilities; - fn adapter_get_info( - &self, - adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> AdapterInfo; + fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo; fn adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, format: TextureFormat, ) -> TextureFormatFeatures; fn adapter_get_presentation_timestamp( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp; fn surface_get_capabilities( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities; fn surface_configure( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - device: &Self::DeviceId, device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ); #[allow(clippy::type_complexity)] fn surface_get_current_texture( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, ) -> ( - Option, Option, SurfaceStatus, Self::SurfaceOutputDetail, @@ -181,142 +120,102 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn surface_present(&self, detail: &Self::SurfaceOutputDetail); fn surface_texture_discard(&self, detail: &Self::SurfaceOutputDetail); - fn device_features(&self, device: &Self::DeviceId, device_data: &Self::DeviceData) -> Features; - fn device_limits(&self, device: &Self::DeviceId, device_data: &Self::DeviceData) -> Limits; - fn device_downlevel_properties( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> DownlevelCapabilities; + fn device_features(&self, device_data: &Self::DeviceData) -> Features; + fn device_limits(&self, device_data: &Self::DeviceData) -> Limits; + fn device_downlevel_properties(&self, device_data: &Self::DeviceData) -> DownlevelCapabilities; fn device_create_shader_module( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData); + ) -> Self::ShaderModuleData; unsafe fn device_create_shader_module_spirv( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData); + ) -> Self::ShaderModuleData; fn device_create_bind_group_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, 
Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; fn device_create_bind_group( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData); + ) -> Self::BindGroupData; fn device_create_pipeline_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData); + ) -> Self::PipelineLayoutData; fn device_create_render_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData); + ) -> Self::RenderPipelineData; fn device_create_compute_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData); + ) -> Self::ComputePipelineData; unsafe fn device_create_pipeline_cache( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData); + ) -> Self::PipelineCacheData; fn device_create_buffer( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData); + ) -> Self::BufferData; fn device_create_texture( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData); + ) -> Self::TextureData; fn device_create_sampler( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData); + ) -> Self::SamplerData; fn device_create_query_set( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &QuerySetDescriptor<'_>, - ) -> (Self::QuerySetId, Self::QuerySetData); + ) -> Self::QuerySetData; fn device_create_command_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData); + ) -> Self::CommandEncoderData; fn device_create_render_bundle_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData); + ) -> Self::RenderBundleEncoderData; #[doc(hidden)] - fn device_make_invalid(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_drop(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn device_make_invalid(&self, device_data: &Self::DeviceData); + fn device_drop(&self, device_data: &Self::DeviceData); fn device_set_device_lost_callback( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, device_lost_callback: DeviceLostCallback, ); - fn device_destroy(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_mark_lost( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - message: &str, - ); - fn queue_drop(&self, queue: &Self::QueueId, queue_data: &Self::QueueData); - fn device_poll( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - maintain: Maintain, - ) -> MaintainResult; + fn device_destroy(&self, device_data: &Self::DeviceData); + fn device_mark_lost(&self, device_data: &Self::DeviceData, message: &str); + fn queue_drop(&self, queue_data: &Self::QueueData); + fn device_poll(&self, device_data: 
&Self::DeviceData, maintain: Maintain) -> MaintainResult; fn device_on_uncaptured_error( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ); - fn device_push_error_scope( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: ErrorFilter, - ); - fn device_pop_error_scope( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture; + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: ErrorFilter); + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture; fn buffer_map_async( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: MapMode, range: Range, @@ -324,116 +223,63 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn buffer_get_mapped_range( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, sub_range: Range, ) -> Box; - fn buffer_unmap(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); + fn buffer_unmap(&self, buffer_data: &Self::BufferData); fn shader_get_compilation_info( &self, - shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture; fn texture_create_view( &self, - texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData); + ) -> Self::TextureViewData; - fn surface_drop(&self, surface: &Self::SurfaceId, surface_data: &Self::SurfaceData); - fn adapter_drop(&self, adapter: &Self::AdapterId, adapter_data: &Self::AdapterData); - fn buffer_destroy(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); - fn buffer_drop(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); - fn texture_destroy(&self, texture: &Self::TextureId, texture_data: &Self::TextureData); - fn texture_drop(&self, texture: &Self::TextureId, texture_data: &Self::TextureData); - fn texture_view_drop( - &self, - texture_view: &Self::TextureViewId, - texture_view_data: &Self::TextureViewData, - ); - fn sampler_drop(&self, sampler: &Self::SamplerId, sampler_data: &Self::SamplerData); - fn query_set_drop(&self, query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData); - fn bind_group_drop( - &self, - bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, - ); - fn bind_group_layout_drop( - &self, - bind_group_layout: &Self::BindGroupLayoutId, - bind_group_layout_data: &Self::BindGroupLayoutData, - ); - fn pipeline_layout_drop( - &self, - pipeline_layout: &Self::PipelineLayoutId, - pipeline_layout_data: &Self::PipelineLayoutData, - ); - fn shader_module_drop( - &self, - shader_module: &Self::ShaderModuleId, - shader_module_data: &Self::ShaderModuleData, - ); - fn command_encoder_drop( - &self, - command_encoder: &Self::CommandEncoderId, - command_encoder_data: &Self::CommandEncoderData, - ); - fn command_buffer_drop( - &self, - command_buffer: &Self::CommandBufferId, - command_buffer_data: &Self::CommandBufferData, - ); - fn render_bundle_drop( - &self, - render_bundle: &Self::RenderBundleId, - render_bundle_data: &Self::RenderBundleData, - ); - fn compute_pipeline_drop( - &self, - pipeline: &Self::ComputePipelineId, - pipeline_data: &Self::ComputePipelineData, - ); - fn render_pipeline_drop( - &self, - pipeline: &Self::RenderPipelineId, - pipeline_data: &Self::RenderPipelineData, - ); - fn pipeline_cache_drop( - &self, - cache: &Self::PipelineCacheId, - cache_data: &Self::PipelineCacheData, - ); + fn surface_drop(&self, 
surface_data: &Self::SurfaceData); + fn adapter_drop(&self, adapter_data: &Self::AdapterData); + fn buffer_destroy(&self, buffer_data: &Self::BufferData); + fn buffer_drop(&self, buffer_data: &Self::BufferData); + fn texture_destroy(&self, texture_data: &Self::TextureData); + fn texture_drop(&self, texture_data: &Self::TextureData); + fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData); + fn sampler_drop(&self, sampler_data: &Self::SamplerData); + fn query_set_drop(&self, query_set_data: &Self::QuerySetData); + fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData); + fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData); + fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData); + fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData); + fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData); + fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData); + fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData); + fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData); + fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData); + fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData); fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; fn render_pipeline_get_bind_group_layout( &self, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; #[allow(clippy::too_many_arguments)] fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - source: &Self::BufferId, source_data: &Self::BufferData, source_offset: BufferAddress, - destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: BufferAddress, copy_size: BufferAddress, ); fn command_encoder_copy_buffer_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, @@ -441,7 +287,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn command_encoder_copy_texture_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, @@ -449,7 +294,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn command_encoder_copy_texture_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, @@ -458,120 +302,95 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn command_encoder_begin_compute_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &ComputePassDescriptor<'_>, - ) -> (Self::ComputePassId, Self::ComputePassData); + ) -> Self::ComputePassData; fn command_encoder_begin_render_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &RenderPassDescriptor<'_>, - ) -> (Self::RenderPassId, Self::RenderPassData); + ) -> Self::RenderPassData; fn command_encoder_finish( &self, - encoder: Self::CommandEncoderId, encoder_data: &mut 
Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData); + ) -> Self::CommandBufferData; fn command_encoder_clear_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - texture: &Texture, // TODO: Decompose? + texture_data: &Self::TextureData, subresource_range: &ImageSubresourceRange, ); fn command_encoder_clear_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &Buffer, + buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn command_encoder_insert_debug_marker( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ); fn command_encoder_push_debug_group( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ); - fn command_encoder_pop_debug_group( - &self, - encoder: &Self::CommandEncoderId, - encoder_data: &Self::CommandEncoderData, - ); + fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData); fn command_encoder_write_timestamp( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); #[allow(clippy::too_many_arguments)] fn command_encoder_resolve_query_set( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: BufferAddress, ); fn render_bundle_encoder_finish( &self, - encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData); + ) -> Self::RenderBundleData; fn queue_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, data: &[u8], ); fn queue_validate_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()>; fn queue_create_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, size: BufferSize, ) -> Option>; fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ); fn queue_write_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, texture: ImageCopyTexture<'_>, data: &[u8], @@ -581,115 +400,78 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[cfg(any(webgl, webgpu))] fn queue_copy_external_image_to_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ); - fn queue_submit>( + fn queue_submit>( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData; - fn queue_get_timestamp_period( - &self, - queue: &Self::QueueId, - queue_data: &Self::QueueData, - ) -> f32; + fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32; fn queue_on_submitted_work_done( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, callback: SubmittedWorkDoneCallback, ); - fn 
device_start_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_stop_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn device_start_capture(&self, device_data: &Self::DeviceData); + fn device_stop_capture(&self, device_data: &Self::DeviceData); fn device_get_internal_counters( &self, - device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> wgt::InternalCounters; fn device_generate_allocator_report( &self, - device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> Option; - fn pipeline_cache_get_data( - &self, - cache: &Self::PipelineCacheId, - cache_data: &Self::PipelineCacheData, - ) -> Option>; + fn pipeline_cache_get_data(&self, cache_data: &Self::PipelineCacheData) -> Option>; fn compute_pass_set_pipeline( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, ); fn compute_pass_set_bind_group( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[DynamicOffset], ); fn compute_pass_set_push_constants( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, offset: u32, data: &[u8], ); - fn compute_pass_insert_debug_marker( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - label: &str, - ); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str); fn compute_pass_push_debug_group( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, group_label: &str, ); - fn compute_pass_pop_debug_group( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData); fn compute_pass_write_timestamp( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); - fn compute_pass_end_pipeline_statistics_query( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData); fn compute_pass_dispatch_workgroups( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, x: u32, y: u32, @@ -697,40 +479,28 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); - fn compute_pass_end( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData); fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ); fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut 
Self::RenderBundleEncoderData, index: u32, - bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: IndexFormat, offset: BufferAddress, @@ -739,17 +509,14 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, stages: ShaderStages, offset: u32, @@ -757,14 +524,12 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_bundle_encoder_draw( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, ); fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -772,34 +537,26 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_bundle_encoder_multi_draw_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, ); fn render_bundle_encoder_multi_draw_indexed_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, @@ -807,12 +564,9 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_multi_draw_indirect_count( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: u32, @@ -820,12 +574,9 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_multi_draw_indexed_indirect_count( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: 
u32, @@ -833,26 +584,20 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn render_pass_set_pipeline( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ); fn render_pass_set_bind_group( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - bind_group: &Self::BindGroupId, bind_group_data: &Self::BindGroupData, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_pass_set_index_buffer( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: IndexFormat, offset: BufferAddress, @@ -861,17 +606,14 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_set_vertex_buffer( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn render_pass_set_push_constants( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, stages: ShaderStages, offset: u32, @@ -879,14 +621,12 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_draw( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, ); fn render_pass_draw_indexed( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, @@ -894,34 +634,26 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_draw_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_pass_draw_indexed_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_pass_multi_draw_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, ); fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, @@ -929,12 +661,9 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indirect_count( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: u32, @@ -942,25 +671,16 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: u32, ); - fn 
render_pass_set_blend_constant( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - color: Color, - ); + fn render_pass_set_blend_constant(&self, pass_data: &mut Self::RenderPassData, color: Color); fn render_pass_set_scissor_rect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -970,7 +690,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_set_viewport( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -981,112 +700,39 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_set_stencil_reference( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ); - fn render_pass_insert_debug_marker( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - label: &str, - ); - fn render_pass_push_debug_group( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - group_label: &str, - ); - fn render_pass_pop_debug_group( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str); + fn render_pass_push_debug_group(&self, pass_data: &mut Self::RenderPassData, group_label: &str); + fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData); fn render_pass_write_timestamp( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); fn render_pass_begin_occlusion_query( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, query_index: u32, ); - fn render_pass_end_occlusion_query( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData); fn render_pass_begin_pipeline_statistics_query( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); - fn render_pass_end_pipeline_statistics_query( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData); fn render_pass_execute_bundles( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ); - fn render_pass_end(&self, pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData); + fn render_pass_end(&self, pass_data: &mut Self::RenderPassData); } -/// Object id. 
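
The pattern running through the `Context` changes above is uniform: every backend method that used to take an id/data pair now takes only the backend's `*Data` associated type. Below is a minimal sketch of that shape. It uses a hypothetical `MiniContext` trait and `NoopBackend` rather than the real (much larger) `Context` trait, and `device_poll` is simplified to return `bool` instead of `MaintainResult`; it only illustrates the signature style, not the actual wgpu API.

    use std::ops::Range;

    // Toy trait mirroring the post-change shape: no `*Id` parameters,
    // only the backend's own per-resource data types.
    trait MiniContext {
        type BufferData;
        type DeviceData;

        // Previously this kind of method also took `buffer: &Self::BufferId`.
        fn buffer_unmap(&self, buffer_data: &Self::BufferData);

        // Previously also took `device: &Self::DeviceId`.
        fn device_poll(&self, device_data: &Self::DeviceData) -> bool;
    }

    // A no-op backend: all state it needs travels inside its data types.
    struct NoopBackend;

    struct NoopBuffer {
        label: Option<String>,
        mapped_range: Option<Range<u64>>,
    }

    struct NoopDevice;

    impl MiniContext for NoopBackend {
        type BufferData = NoopBuffer;
        type DeviceData = NoopDevice;

        fn buffer_unmap(&self, buffer_data: &Self::BufferData) {
            // The buffer's state is reachable directly from `buffer_data`,
            // with no id lookup step.
            let _ = (&buffer_data.label, &buffer_data.mapped_range);
        }

        fn device_poll(&self, _device_data: &Self::DeviceData) -> bool {
            true // pretend all submitted work has completed
        }
    }
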
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct ObjectId { - /// ID that is unique at any given time - id: Option, - /// ID that is unique at all times - global_id: Option, -} - -impl ObjectId { - pub(crate) const UNUSED: Self = ObjectId { - id: None, - global_id: None, - }; - - #[allow(dead_code)] - pub fn new(id: NonZeroU64, global_id: NonZeroU64) -> Self { - Self { - id: Some(id), - global_id: Some(global_id), - } - } - - #[allow(dead_code)] - pub fn from_global_id(global_id: NonZeroU64) -> Self { - Self { - id: Some(global_id), - global_id: Some(global_id), - } - } - - #[allow(dead_code)] - pub fn id(&self) -> NonZeroU64 { - self.id.unwrap() - } - - pub fn global_id(&self) -> NonZeroU64 { - self.global_id.unwrap() - } -} - -#[cfg(send_sync)] -static_assertions::assert_impl_all!(ObjectId: Send, Sync); - pub(crate) fn downcast_ref(data: &crate::Data) -> &T { strict_assert!(data.is::()); // Copied from std. @@ -1099,29 +745,8 @@ fn downcast_mut(data: &mut crate::Data) -> unsafe { &mut *(data as *mut dyn Any as *mut T) } } -/// Representation of an object id that is not used. -/// -/// This may be used as the id type when only a the data associated type is used for a specific type of object. -#[derive(Debug, Clone, Copy)] -pub struct Unused; - -impl From for Unused { - fn from(id: ObjectId) -> Self { - strict_assert_eq!(id, ObjectId::UNUSED); - Self - } -} - -impl From for ObjectId { - fn from(_: Unused) -> Self { - ObjectId::UNUSED - } -} - pub(crate) struct DeviceRequest { - pub device_id: ObjectId, pub device_data: Box, - pub queue_id: ObjectId, pub queue_data: Box, } @@ -1138,11 +763,9 @@ pub(crate) type AdapterRequestDeviceFuture = Box>>; #[cfg(send_sync)] -pub type InstanceRequestAdapterFuture = - Box)>> + Send>; +pub type InstanceRequestAdapterFuture = Box>> + Send>; #[cfg(not(send_sync))] -pub type InstanceRequestAdapterFuture = - Box)>>>; +pub type InstanceRequestAdapterFuture = Box>>>; #[cfg(send_sync)] pub type DevicePopErrorFuture = Box> + Send>; @@ -1170,7 +793,7 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(ObjectId, Box), crate::CreateSurfaceError>; + ) -> Result, crate::CreateSurfaceError>; #[allow(clippy::type_complexity)] fn instance_request_adapter( &self, @@ -1178,7 +801,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ) -> Pin; fn adapter_request_device( &self, - adapter: &ObjectId, adapter_data: &crate::Data, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -1187,52 +809,38 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn instance_poll_all_devices(&self, force_wait: bool) -> bool; fn adapter_is_surface_supported( &self, - adapter: &ObjectId, adapter_data: &crate::Data, - surface: &ObjectId, surface_data: &crate::Data, ) -> bool; - fn adapter_features(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Features; - fn adapter_limits(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Limits; - fn adapter_downlevel_capabilities( - &self, - adapter: &ObjectId, - adapter_data: &crate::Data, - ) -> DownlevelCapabilities; - fn adapter_get_info(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> AdapterInfo; + fn adapter_features(&self, adapter_data: &crate::Data) -> Features; + fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits; + fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities; + fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo; 
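
On the dynamic side, `DynContext` stays object safe by passing type-erased `crate::Data` values, and the blanket impl recovers the concrete backend types with the `downcast_ref`/`downcast_mut` helpers kept above; with the `ObjectId` conversions gone, that downcast is the only translation left between the erased and typed layers. The following self-contained sketch shows the same erase-then-recover pattern using the safe `std::any::Any::downcast_ref`, with hypothetical `CoreBufferData`/`buffer_size_dyn` names; the crate's own helper instead uses a strict assert plus an unchecked cast.

    use std::any::Any;

    // Hypothetical concrete backend data for one buffer.
    struct CoreBufferData {
        size: u64,
    }

    // Stand-in for the type-erased `crate::Data` trait object.
    type Data = dyn Any;

    // Safe analogue of the `downcast_ref` helper in context.rs.
    fn downcast_ref<T: 'static>(data: &Data) -> &T {
        data.downcast_ref::<T>()
            .expect("Data was created by a different backend type")
    }

    // Roughly what a `DynContext` method body does in the blanket impl:
    // downcast the erased data, then delegate to the typed backend.
    fn buffer_size_dyn(buffer_data: &Data) -> u64 {
        let concrete: &CoreBufferData = downcast_ref(buffer_data);
        concrete.size
    }

    fn main() {
        // The caller only ever holds boxed, type-erased data.
        let erased: Box<Data> = Box::new(CoreBufferData { size: 256 });
        assert_eq!(buffer_size_dyn(erased.as_ref()), 256);
    }
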
fn adapter_get_texture_format_features( &self, - adapter: &ObjectId, adapter_data: &crate::Data, format: TextureFormat, ) -> TextureFormatFeatures; fn adapter_get_presentation_timestamp( &self, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::PresentationTimestamp; fn surface_get_capabilities( &self, - surface: &ObjectId, surface_data: &crate::Data, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::SurfaceCapabilities; fn surface_configure( &self, - surface: &ObjectId, surface_data: &crate::Data, - device: &ObjectId, device_data: &crate::Data, config: &crate::SurfaceConfiguration, ); fn surface_get_current_texture( &self, - surface: &ObjectId, surface_data: &crate::Data, ) -> ( - Option, Option>, SurfaceStatus, Box, @@ -1240,136 +848,101 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn surface_present(&self, detail: &dyn AnyWasmNotSendSync); fn surface_texture_discard(&self, detail: &dyn AnyWasmNotSendSync); - fn device_features(&self, device: &ObjectId, device_data: &crate::Data) -> Features; - fn device_limits(&self, device: &ObjectId, device_data: &crate::Data) -> Limits; - fn device_downlevel_properties( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> DownlevelCapabilities; + fn device_features(&self, device_data: &crate::Data) -> Features; + fn device_limits(&self, device_data: &crate::Data) -> Limits; + fn device_downlevel_properties(&self, device_data: &crate::Data) -> DownlevelCapabilities; fn device_create_shader_module( &self, - device: &ObjectId, device_data: &crate::Data, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (ObjectId, Box); + ) -> Box; unsafe fn device_create_shader_module_spirv( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_bind_group_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_bind_group( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_pipeline_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineLayoutDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_render_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderPipelineDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_compute_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ComputePipelineDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; unsafe fn device_create_pipeline_cache( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineCacheDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_buffer( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BufferDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_texture( &self, - device: &ObjectId, device_data: &crate::Data, desc: &TextureDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_sampler( &self, - device: &ObjectId, device_data: &crate::Data, desc: &SamplerDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_query_set( &self, - device: &ObjectId, device_data: &crate::Data, desc: &QuerySetDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_command_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &CommandEncoderDescriptor<'_>, - ) -> 
(ObjectId, Box); + ) -> Box; fn device_create_render_bundle_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; #[doc(hidden)] - fn device_make_invalid(&self, device: &ObjectId, device_data: &crate::Data); - fn device_drop(&self, device: &ObjectId, device_data: &crate::Data); + fn device_make_invalid(&self, device_data: &crate::Data); + fn device_drop(&self, device_data: &crate::Data); fn device_set_device_lost_callback( &self, - device: &ObjectId, device_data: &crate::Data, device_lost_callback: DeviceLostCallback, ); - fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data); - fn device_mark_lost(&self, device: &ObjectId, device_data: &crate::Data, message: &str); - fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data); - fn device_poll( - &self, - device: &ObjectId, - device_data: &crate::Data, - maintain: Maintain, - ) -> MaintainResult; + fn device_destroy(&self, device_data: &crate::Data); + fn device_mark_lost(&self, device_data: &crate::Data, message: &str); + fn queue_drop(&self, queue_data: &crate::Data); + fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult; fn device_on_uncaptured_error( &self, - device: &ObjectId, device_data: &crate::Data, handler: Box, ); - fn device_push_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - filter: ErrorFilter, - ); - fn device_pop_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Pin; + fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter); + fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin; fn buffer_map_async( &self, - buffer: &ObjectId, buffer_data: &crate::Data, mode: MapMode, range: Range, @@ -1377,76 +950,63 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn buffer_get_mapped_range( &self, - buffer: &ObjectId, buffer_data: &crate::Data, sub_range: Range, ) -> Box; - fn buffer_unmap(&self, buffer: &ObjectId, buffer_data: &crate::Data); + fn buffer_unmap(&self, buffer_data: &crate::Data); fn shader_get_compilation_info( &self, - shader: &ObjectId, shader_data: &crate::Data, ) -> Pin; fn texture_create_view( &self, - texture: &ObjectId, texture_data: &crate::Data, desc: &TextureViewDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; - fn surface_drop(&self, surface: &ObjectId, surface_data: &crate::Data); - fn adapter_drop(&self, adapter: &ObjectId, adapter_data: &crate::Data); - fn buffer_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn buffer_drop(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn texture_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn texture_drop(&self, texture: &ObjectId, texture_data: &crate::Data); - fn texture_view_drop(&self, texture_view: &ObjectId, texture_view_data: &crate::Data); - fn sampler_drop(&self, sampler: &ObjectId, sampler_data: &crate::Data); - fn query_set_drop(&self, query_set: &ObjectId, query_set_data: &crate::Data); - fn bind_group_drop(&self, bind_group: &ObjectId, bind_group_data: &crate::Data); - fn bind_group_layout_drop( - &self, - bind_group_layout: &ObjectId, - bind_group_layout_data: &crate::Data, - ); - fn pipeline_layout_drop(&self, pipeline_layout: &ObjectId, pipeline_layout_data: &crate::Data); - fn shader_module_drop(&self, shader_module: &ObjectId, shader_module_data: &crate::Data); - fn command_encoder_drop(&self, command_encoder: &ObjectId, command_encoder_data: &crate::Data); - 
fn command_buffer_drop(&self, command_buffer: &ObjectId, command_buffer_data: &crate::Data); - fn render_bundle_drop(&self, render_bundle: &ObjectId, render_bundle_data: &crate::Data); - fn compute_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data); - fn render_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data); - fn pipeline_cache_drop(&self, cache: &ObjectId, _cache_data: &crate::Data); + fn surface_drop(&self, surface_data: &crate::Data); + fn adapter_drop(&self, adapter_data: &crate::Data); + fn buffer_destroy(&self, buffer_data: &crate::Data); + fn buffer_drop(&self, buffer_data: &crate::Data); + fn texture_destroy(&self, buffer_data: &crate::Data); + fn texture_drop(&self, texture_data: &crate::Data); + fn texture_view_drop(&self, texture_view_data: &crate::Data); + fn sampler_drop(&self, sampler_data: &crate::Data); + fn query_set_drop(&self, query_set_data: &crate::Data); + fn bind_group_drop(&self, bind_group_data: &crate::Data); + fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data); + fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data); + fn shader_module_drop(&self, shader_module_data: &crate::Data); + fn command_encoder_drop(&self, command_encoder_data: &crate::Data); + fn command_buffer_drop(&self, command_buffer_data: &crate::Data); + fn render_bundle_drop(&self, render_bundle_data: &crate::Data); + fn compute_pipeline_drop(&self, pipeline_data: &crate::Data); + fn render_pipeline_drop(&self, pipeline_data: &crate::Data); + fn pipeline_cache_drop(&self, _cache_data: &crate::Data); fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box); + ) -> Box; fn render_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box); + ) -> Box; #[allow(clippy::too_many_arguments)] fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - source: &ObjectId, source_data: &crate::Data, source_offset: BufferAddress, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, copy_size: BufferAddress, ); fn command_encoder_copy_buffer_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, @@ -1454,7 +1014,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn command_encoder_copy_texture_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, @@ -1462,7 +1021,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn command_encoder_copy_texture_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, @@ -1471,116 +1029,84 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn command_encoder_begin_compute_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &ComputePassDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn command_encoder_begin_render_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &RenderPassDescriptor<'_>, - ) -> (ObjectId, Box); - fn command_encoder_finish( - &self, - encoder: ObjectId, - encoder_data: &mut crate::Data, - ) -> (ObjectId, Box); + ) -> Box; + fn command_encoder_finish(&self, encoder_data: &mut crate::Data) -> Box; fn command_encoder_clear_texture( &self, - encoder: &ObjectId, 
encoder_data: &crate::Data, - texture: &Texture, + texture_data: &crate::Data, subresource_range: &ImageSubresourceRange, ); fn command_encoder_clear_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - buffer: &Buffer, + buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); - fn command_encoder_insert_debug_marker( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ); - fn command_encoder_push_debug_group( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ); - fn command_encoder_pop_debug_group(&self, encoder: &ObjectId, encoder_data: &crate::Data); + fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str); + fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str); + fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data); fn command_encoder_write_timestamp( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); #[allow(clippy::too_many_arguments)] fn command_encoder_resolve_query_set( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, first_query: u32, query_count: u32, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, ); fn render_bundle_encoder_finish( &self, - encoder: ObjectId, encoder_data: Box, desc: &RenderBundleDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn queue_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, data: &[u8], ); fn queue_validate_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()>; fn queue_create_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, size: BufferSize, ) -> Option>; fn queue_write_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ); fn queue_write_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, texture: ImageCopyTexture<'_>, data: &[u8], @@ -1590,7 +1116,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, @@ -1598,136 +1123,80 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn queue_submit( &self, - queue: &ObjectId, queue_data: &crate::Data, - command_buffers: &mut dyn Iterator)>, + command_buffers: &mut dyn Iterator>, ) -> Arc; - fn queue_get_timestamp_period(&self, queue: &ObjectId, queue_data: &crate::Data) -> f32; + fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32; fn queue_on_submitted_work_done( &self, - queue: &ObjectId, queue_data: &crate::Data, callback: SubmittedWorkDoneCallback, ); - fn device_start_capture(&self, device: &ObjectId, data: &crate::Data); - fn device_stop_capture(&self, device: &ObjectId, data: &crate::Data); + fn device_start_capture(&self, data: &crate::Data); + fn device_stop_capture(&self, data: &crate::Data); - fn device_get_internal_counters( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> wgt::InternalCounters; + fn device_get_internal_counters(&self, 
device_data: &crate::Data) -> wgt::InternalCounters; - fn generate_allocator_report( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Option; + fn generate_allocator_report(&self, device_data: &crate::Data) -> Option; - fn pipeline_cache_get_data( - &self, - cache: &ObjectId, - cache_data: &crate::Data, - ) -> Option>; + fn pipeline_cache_get_data(&self, cache_data: &crate::Data) -> Option>; - fn compute_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ); + fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); fn compute_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ); fn compute_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, offset: u32, data: &[u8], ); - fn compute_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ); - fn compute_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ); - fn compute_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); + fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); + fn compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data); fn compute_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn compute_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ); - fn compute_pass_dispatch_workgroups( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - x: u32, - y: u32, - z: u32, - ); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); + fn compute_pass_dispatch_workgroups(&self, pass_data: &mut crate::Data, x: u32, y: u32, z: u32); fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); - fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn compute_pass_end(&self, pass_data: &mut crate::Data); fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - pipeline: &ObjectId, pipeline_data: &crate::Data, ); fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, @@ -1736,17 +1205,14 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, slot: u32, - buffer: 
&ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, stages: ShaderStages, offset: u32, @@ -1754,14 +1220,12 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_bundle_encoder_draw( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, vertices: Range, instances: Range, ); fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, indices: Range, base_vertex: i32, @@ -1769,34 +1233,26 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_bundle_encoder_multi_draw_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ); fn render_bundle_encoder_multi_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, @@ -1804,12 +1260,9 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_multi_draw_indirect_count( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, @@ -1817,39 +1270,26 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_multi_draw_indexed_indirect_count( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, command_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ); - fn render_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ); + fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); fn render_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_pass_set_index_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, @@ -1858,17 +1298,14 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_set_vertex_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); fn render_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, stages: ShaderStages, offset: u32, @@ -1876,14 +1313,12 @@ 
pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_pass_draw( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, vertices: Range, instances: Range, ); fn render_pass_draw_indexed( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, indices: Range, base_vertex: i32, @@ -1891,34 +1326,26 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_pass_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_pass_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_pass_multi_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ); fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, @@ -1926,12 +1353,9 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, @@ -1939,25 +1363,16 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, command_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ); - fn render_pass_set_blend_constant( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - color: Color, - ); + fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color); fn render_pass_set_scissor_rect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, @@ -1967,7 +1382,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_set_viewport( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: f32, y: f32, @@ -1976,60 +1390,31 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { min_depth: f32, max_depth: f32, ); - fn render_pass_set_stencil_reference( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - reference: u32, - ); - fn render_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ); - fn render_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ); - fn render_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32); + fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); + fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); + fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data); fn render_pass_write_timestamp( &self, - pass: 
&mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn render_pass_begin_occlusion_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - query_index: u32, - ); - fn render_pass_end_occlusion_query(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32); + fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data); fn render_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn render_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ); + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); fn render_pass_execute_bundles( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ); - fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn render_pass_end(&self, pass_data: &mut crate::Data); } // Blanket impl of DynContext for all types which implement Context. @@ -2044,9 +1429,9 @@ where unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(ObjectId, Box), crate::CreateSurfaceError> { - let (surface, data) = unsafe { Context::instance_create_surface(self, target) }?; - Ok((surface.into(), Box::new(data) as _)) + ) -> Result, crate::CreateSurfaceError> { + let data = unsafe { Context::instance_create_surface(self, target) }?; + Ok(Box::new(data) as _) } fn instance_request_adapter( @@ -2054,29 +1439,22 @@ where options: &RequestAdapterOptions<'_, '_>, ) -> Pin { let future: T::RequestAdapterFuture = Context::instance_request_adapter(self, options); - Box::pin(async move { - let result: Option<(T::AdapterId, T::AdapterData)> = future.await; - result.map(|(adapter, data)| (adapter.into(), Box::new(data) as _)) - }) + Box::pin(async move { future.await.map(|data| Box::new(data) as _) }) } fn adapter_request_device( &self, - adapter: &ObjectId, adapter_data: &crate::Data, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, ) -> Pin { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - let future = Context::adapter_request_device(self, &adapter, adapter_data, desc, trace_dir); + let future = Context::adapter_request_device(self, adapter_data, desc, trace_dir); Box::pin(async move { - let (device_id, device_data, queue_id, queue_data) = future.await?; + let (device_data, queue_data) = future.await?; Ok(DeviceRequest { - device_id: device_id.into(), device_data: Box::new(device_data) as _, - queue_id: queue_id.into(), queue_data: Box::new(queue_data) as _, }) }) @@ -2088,116 +1466,84 @@ where fn adapter_is_surface_supported( &self, - adapter: &ObjectId, adapter_data: &crate::Data, - surface: &ObjectId, surface_data: &crate::Data, ) -> bool { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - Context::adapter_is_surface_supported(self, &adapter, adapter_data, &surface, surface_data) + Context::adapter_is_surface_supported(self, adapter_data, surface_data) } - fn adapter_features(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Features { - let adapter = ::from(*adapter); + fn adapter_features(&self, adapter_data: 
&crate::Data) -> Features { let adapter_data = downcast_ref(adapter_data); - Context::adapter_features(self, &adapter, adapter_data) + Context::adapter_features(self, adapter_data) } - fn adapter_limits(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Limits { - let adapter = ::from(*adapter); + fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits { let adapter_data = downcast_ref(adapter_data); - Context::adapter_limits(self, &adapter, adapter_data) + Context::adapter_limits(self, adapter_data) } - fn adapter_downlevel_capabilities( - &self, - adapter: &ObjectId, - adapter_data: &crate::Data, - ) -> DownlevelCapabilities { - let adapter = ::from(*adapter); + fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities { let adapter_data = downcast_ref(adapter_data); - Context::adapter_downlevel_capabilities(self, &adapter, adapter_data) + Context::adapter_downlevel_capabilities(self, adapter_data) } - fn adapter_get_info(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> AdapterInfo { - let adapter = ::from(*adapter); + fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo { let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_info(self, &adapter, adapter_data) + Context::adapter_get_info(self, adapter_data) } fn adapter_get_texture_format_features( &self, - adapter: &ObjectId, adapter_data: &crate::Data, format: TextureFormat, ) -> TextureFormatFeatures { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_texture_format_features(self, &adapter, adapter_data, format) + Context::adapter_get_texture_format_features(self, adapter_data, format) } fn adapter_get_presentation_timestamp( &self, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::PresentationTimestamp { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_presentation_timestamp(self, &adapter, adapter_data) + Context::adapter_get_presentation_timestamp(self, adapter_data) } fn surface_get_capabilities( &self, - surface: &ObjectId, surface_data: &crate::Data, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::SurfaceCapabilities { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - Context::surface_get_capabilities(self, &surface, surface_data, &adapter, adapter_data) + Context::surface_get_capabilities(self, surface_data, adapter_data) } fn surface_configure( &self, - surface: &ObjectId, surface_data: &crate::Data, - device: &ObjectId, device_data: &crate::Data, config: &crate::SurfaceConfiguration, ) { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::surface_configure(self, &surface, surface_data, &device, device_data, config) + Context::surface_configure(self, surface_data, device_data, config) } fn surface_get_current_texture( &self, - surface: &ObjectId, surface_data: &crate::Data, ) -> ( - Option, Option>, SurfaceStatus, Box, ) { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let (texture, texture_data, status, detail) = - Context::surface_get_current_texture(self, &surface, surface_data); + let (texture_data, status, detail) = + Context::surface_get_current_texture(self, surface_data); let detail = Box::new(detail) as Box; - ( - 
texture.map(Into::into), - texture_data.map(|b| Box::new(b) as _), - status, - detail, - ) + (texture_data.map(|b| Box::new(b) as _), status, detail) } fn surface_present(&self, detail: &dyn AnyWasmNotSendSync) { @@ -2208,520 +1554,398 @@ where Context::surface_texture_discard(self, detail.downcast_ref().unwrap()) } - fn device_features(&self, device: &ObjectId, device_data: &crate::Data) -> Features { - let device = ::from(*device); + fn device_features(&self, device_data: &crate::Data) -> Features { let device_data = downcast_ref(device_data); - Context::device_features(self, &device, device_data) + Context::device_features(self, device_data) } - fn device_limits(&self, device: &ObjectId, device_data: &crate::Data) -> Limits { - let device = ::from(*device); + fn device_limits(&self, device_data: &crate::Data) -> Limits { let device_data = downcast_ref(device_data); - Context::device_limits(self, &device, device_data) + Context::device_limits(self, device_data) } - fn device_downlevel_properties( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> DownlevelCapabilities { - let device = ::from(*device); + fn device_downlevel_properties(&self, device_data: &crate::Data) -> DownlevelCapabilities { let device_data = downcast_ref(device_data); - Context::device_downlevel_properties(self, &device, device_data) + Context::device_downlevel_properties(self, device_data) } fn device_create_shader_module( &self, - device: &ObjectId, device_data: &crate::Data, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (shader_module, data) = Context::device_create_shader_module( - self, - &device, - device_data, - desc, - shader_bound_checks, - ); - (shader_module.into(), Box::new(data) as _) + let data = + Context::device_create_shader_module(self, device_data, desc, shader_bound_checks); + Box::new(data) as _ } unsafe fn device_create_shader_module_spirv( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (shader_module, data) = - unsafe { Context::device_create_shader_module_spirv(self, &device, device_data, desc) }; - (shader_module.into(), Box::new(data) as _) + let data = unsafe { Context::device_create_shader_module_spirv(self, device_data, desc) }; + Box::new(data) as _ } fn device_create_bind_group_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (bind_group_layout, data) = - Context::device_create_bind_group_layout(self, &device, device_data, desc); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::device_create_bind_group_layout(self, device_data, desc); + Box::new(data) as _ } fn device_create_bind_group( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (bind_group, data) = - Context::device_create_bind_group(self, &device, device_data, desc); - (bind_group.into(), Box::new(data) as _) + let data = Context::device_create_bind_group(self, device_data, desc); + Box::new(data) as _ } fn device_create_pipeline_layout( &self, - 
device: &ObjectId, device_data: &crate::Data, desc: &PipelineLayoutDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (pipeline_layout, data) = - Context::device_create_pipeline_layout(self, &device, device_data, desc); - (pipeline_layout.into(), Box::new(data) as _) + let data = Context::device_create_pipeline_layout(self, device_data, desc); + Box::new(data) as _ } fn device_create_render_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderPipelineDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (render_pipeline, data) = - Context::device_create_render_pipeline(self, &device, device_data, desc); - (render_pipeline.into(), Box::new(data) as _) + let data = Context::device_create_render_pipeline(self, device_data, desc); + Box::new(data) as _ } fn device_create_compute_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ComputePipelineDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (compute_pipeline, data) = - Context::device_create_compute_pipeline(self, &device, device_data, desc); - (compute_pipeline.into(), Box::new(data) as _) + let data = Context::device_create_compute_pipeline(self, device_data, desc); + Box::new(data) as _ } unsafe fn device_create_pipeline_cache( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineCacheDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (pipeline_cache, data) = - unsafe { Context::device_create_pipeline_cache(self, &device, device_data, desc) }; - (pipeline_cache.into(), Box::new(data) as _) + let data = unsafe { Context::device_create_pipeline_cache(self, device_data, desc) }; + Box::new(data) as _ } fn device_create_buffer( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BufferDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (buffer, data) = Context::device_create_buffer(self, &device, device_data, desc); - (buffer.into(), Box::new(data) as _) + let data = Context::device_create_buffer(self, device_data, desc); + Box::new(data) as _ } fn device_create_texture( &self, - device: &ObjectId, device_data: &crate::Data, desc: &TextureDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (texture, data) = Context::device_create_texture(self, &device, device_data, desc); - (texture.into(), Box::new(data) as _) + let data = Context::device_create_texture(self, device_data, desc); + Box::new(data) as _ } fn device_create_sampler( &self, - device: &ObjectId, device_data: &crate::Data, desc: &SamplerDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (sampler, data) = Context::device_create_sampler(self, &device, device_data, desc); - (sampler.into(), Box::new(data) as _) + let data = Context::device_create_sampler(self, device_data, desc); + Box::new(data) as _ } fn device_create_query_set( &self, - device: &ObjectId, device_data: &crate::Data, desc: &QuerySetDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); 
- let (query_set, data) = Context::device_create_query_set(self, &device, device_data, desc); - (query_set.into(), Box::new(data) as _) + let data = Context::device_create_query_set(self, device_data, desc); + Box::new(data) as _ } fn device_create_command_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &CommandEncoderDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (command_encoder, data) = - Context::device_create_command_encoder(self, &device, device_data, desc); - (command_encoder.into(), Box::new(data) as _) + let data = Context::device_create_command_encoder(self, device_data, desc); + Box::new(data) as _ } fn device_create_render_bundle_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (render_bundle_encoder, data) = - Context::device_create_render_bundle_encoder(self, &device, device_data, desc); - (render_bundle_encoder.into(), Box::new(data) as _) + let data = Context::device_create_render_bundle_encoder(self, device_data, desc); + Box::new(data) as _ } #[doc(hidden)] - fn device_make_invalid(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_make_invalid(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_make_invalid(self, &device, device_data) + Context::device_make_invalid(self, device_data) } - fn device_drop(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_drop(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_drop(self, &device, device_data) + Context::device_drop(self, device_data) } fn device_set_device_lost_callback( &self, - device: &ObjectId, device_data: &crate::Data, device_lost_callback: DeviceLostCallback, ) { - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::device_set_device_lost_callback(self, &device, device_data, device_lost_callback) + Context::device_set_device_lost_callback(self, device_data, device_lost_callback) } - fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_destroy(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_destroy(self, &device, device_data) + Context::device_destroy(self, device_data) } - fn device_mark_lost(&self, device: &ObjectId, device_data: &crate::Data, message: &str) { - let device = ::from(*device); + fn device_mark_lost(&self, device_data: &crate::Data, message: &str) { let device_data = downcast_ref(device_data); - Context::device_mark_lost(self, &device, device_data, message) + Context::device_mark_lost(self, device_data, message) } - fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data) { - let queue = ::from(*queue); + fn queue_drop(&self, queue_data: &crate::Data) { let queue_data = downcast_ref(queue_data); - Context::queue_drop(self, &queue, queue_data) + Context::queue_drop(self, queue_data) } - fn device_poll( - &self, - device: &ObjectId, - device_data: &crate::Data, - maintain: Maintain, - ) -> MaintainResult { - let device = ::from(*device); + fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult { let device_data = downcast_ref(device_data); - 
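For methods that only forward a call, such as `device_poll` above, each body shrinks from "rebuild the typed id, then call" to a single downcast-and-call. A self-contained sketch of the new shape, with stand-in `Maintain`/`MaintainResult` types and a toy device invented for illustration (they are not wgpu's real definitions):

```rust
use std::any::Any;

// Stand-ins; wgpu's real `Maintain`/`MaintainResult` live in `wgpu-types`.
enum Maintain {
    Poll,
    Wait,
}

#[derive(Debug, PartialEq)]
enum MaintainResult {
    Ok,
    SubmissionQueueEmpty,
}

type Data = dyn Any + Send + Sync;

struct CoreDevice {
    pending_submissions: usize,
}

fn downcast_ref<T: Any>(data: &Data) -> &T {
    data.downcast_ref().expect("invalid type-erased data")
}

// New forwarder shape: no `&ObjectId` parameter and no id reconstruction,
// just downcast the erased device data and call the typed implementation.
fn device_poll(device_data: &Data, _maintain: Maintain) -> MaintainResult {
    let device = downcast_ref::<CoreDevice>(device_data);
    if device.pending_submissions == 0 {
        MaintainResult::SubmissionQueueEmpty
    } else {
        MaintainResult::Ok
    }
}

fn main() {
    let device: Box<Data> = Box::new(CoreDevice { pending_submissions: 0 });
    assert_eq!(device_poll(&*device, Maintain::Wait), MaintainResult::SubmissionQueueEmpty);
}
```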
Context::device_poll(self, &device, device_data, maintain) + Context::device_poll(self, device_data, maintain) } fn device_on_uncaptured_error( &self, - device: &ObjectId, device_data: &crate::Data, handler: Box, ) { - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::device_on_uncaptured_error(self, &device, device_data, handler) + Context::device_on_uncaptured_error(self, device_data, handler) } - fn device_push_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - filter: ErrorFilter, - ) { - let device = ::from(*device); + fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter) { let device_data = downcast_ref(device_data); - Context::device_push_error_scope(self, &device, device_data, filter) + Context::device_push_error_scope(self, device_data, filter) } - fn device_pop_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Pin { - let device = ::from(*device); + fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin { let device_data = downcast_ref(device_data); - Box::pin(Context::device_pop_error_scope(self, &device, device_data)) + Box::pin(Context::device_pop_error_scope(self, device_data)) } fn buffer_map_async( &self, - buffer: &ObjectId, buffer_data: &crate::Data, mode: MapMode, range: Range, callback: BufferMapCallback, ) { - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::buffer_map_async(self, &buffer, buffer_data, mode, range, callback) + Context::buffer_map_async(self, buffer_data, mode, range, callback) } fn buffer_get_mapped_range( &self, - buffer: &ObjectId, buffer_data: &crate::Data, sub_range: Range, ) -> Box { - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::buffer_get_mapped_range(self, &buffer, buffer_data, sub_range) + Context::buffer_get_mapped_range(self, buffer_data, sub_range) } - fn buffer_unmap(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_unmap(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_unmap(self, &buffer, buffer_data) + Context::buffer_unmap(self, buffer_data) } fn shader_get_compilation_info( &self, - shader: &ObjectId, shader_data: &crate::Data, ) -> Pin { - let shader = ::from(*shader); let shader_data = downcast_ref(shader_data); - let future = Context::shader_get_compilation_info(self, &shader, shader_data); + let future = Context::shader_get_compilation_info(self, shader_data); Box::pin(future) } fn texture_create_view( &self, - texture: &ObjectId, texture_data: &crate::Data, desc: &TextureViewDescriptor<'_>, - ) -> (ObjectId, Box) { - let texture = ::from(*texture); + ) -> Box { let texture_data = downcast_ref(texture_data); - let (texture_view, data) = Context::texture_create_view(self, &texture, texture_data, desc); - (texture_view.into(), Box::new(data) as _) + let data = Context::texture_create_view(self, texture_data, desc); + Box::new(data) as _ } - fn surface_drop(&self, surface: &ObjectId, surface_data: &crate::Data) { - let surface = ::from(*surface); + fn surface_drop(&self, surface_data: &crate::Data) { let surface_data = downcast_ref(surface_data); - Context::surface_drop(self, &surface, surface_data) + Context::surface_drop(self, surface_data) } - fn adapter_drop(&self, adapter: &ObjectId, adapter_data: &crate::Data) { - let adapter = ::from(*adapter); + fn adapter_drop(&self, adapter_data: &crate::Data) { let adapter_data = 
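The async entry points follow the same pattern: `buffer_map_async` now receives only the erased buffer data plus the caller's callback, with no buffer id to convert first. A toy sketch of that call shape; the `CoreBuffer` type, the `String` error, and the immediate completion are invented for illustration.

```rust
use std::any::Any;
use std::ops::Range;

type Data = dyn Any + Send + Sync;
type BufferMapCallback = Box<dyn FnOnce(Result<(), String>) + Send>;

struct CoreBuffer {
    len: u64,
}

fn downcast_ref<T: Any>(data: &Data) -> &T {
    data.downcast_ref().expect("invalid type-erased data")
}

// Forwarder shape after the change: erased data in, callback passed through.
fn buffer_map_async(buffer_data: &Data, range: Range<u64>, callback: BufferMapCallback) {
    let buffer = downcast_ref::<CoreBuffer>(buffer_data);
    // Toy backend: pretend the map request completes immediately.
    let result = if range.end <= buffer.len {
        Ok(())
    } else {
        Err(format!("mapped range {range:?} exceeds buffer of {} bytes", buffer.len))
    };
    callback(result);
}

fn main() {
    let buffer: Box<Data> = Box::new(CoreBuffer { len: 256 });
    buffer_map_async(&*buffer, 0..64, Box::new(|result| assert!(result.is_ok())));
    buffer_map_async(&*buffer, 0..512, Box::new(|result| assert!(result.is_err())));
}
```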
downcast_ref(adapter_data); - Context::adapter_drop(self, &adapter, adapter_data) + Context::adapter_drop(self, adapter_data) } - fn buffer_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_destroy(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_destroy(self, &buffer, buffer_data) + Context::buffer_destroy(self, buffer_data) } - fn buffer_drop(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_drop(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_drop(self, &buffer, buffer_data) + Context::buffer_drop(self, buffer_data) } - fn texture_destroy(&self, texture: &ObjectId, texture_data: &crate::Data) { - let texture = ::from(*texture); + fn texture_destroy(&self, texture_data: &crate::Data) { let texture_data = downcast_ref(texture_data); - Context::texture_destroy(self, &texture, texture_data) + Context::texture_destroy(self, texture_data) } - fn texture_drop(&self, texture: &ObjectId, texture_data: &crate::Data) { - let texture = ::from(*texture); + fn texture_drop(&self, texture_data: &crate::Data) { let texture_data = downcast_ref(texture_data); - Context::texture_drop(self, &texture, texture_data) + Context::texture_drop(self, texture_data) } - fn texture_view_drop(&self, texture_view: &ObjectId, texture_view_data: &crate::Data) { - let texture_view = ::from(*texture_view); + fn texture_view_drop(&self, texture_view_data: &crate::Data) { let texture_view_data = downcast_ref(texture_view_data); - Context::texture_view_drop(self, &texture_view, texture_view_data) + Context::texture_view_drop(self, texture_view_data) } - fn sampler_drop(&self, sampler: &ObjectId, sampler_data: &crate::Data) { - let sampler = ::from(*sampler); + fn sampler_drop(&self, sampler_data: &crate::Data) { let sampler_data = downcast_ref(sampler_data); - Context::sampler_drop(self, &sampler, sampler_data) + Context::sampler_drop(self, sampler_data) } - fn query_set_drop(&self, query_set: &ObjectId, query_set_data: &crate::Data) { - let query_set = ::from(*query_set); + fn query_set_drop(&self, query_set_data: &crate::Data) { let query_set_data = downcast_ref(query_set_data); - Context::query_set_drop(self, &query_set, query_set_data) + Context::query_set_drop(self, query_set_data) } - fn bind_group_drop(&self, bind_group: &ObjectId, bind_group_data: &crate::Data) { - let bind_group = ::from(*bind_group); + fn bind_group_drop(&self, bind_group_data: &crate::Data) { let bind_group_data = downcast_ref(bind_group_data); - Context::bind_group_drop(self, &bind_group, bind_group_data) + Context::bind_group_drop(self, bind_group_data) } - fn bind_group_layout_drop( - &self, - bind_group_layout: &ObjectId, - bind_group_layout_data: &crate::Data, - ) { - let bind_group_layout = ::from(*bind_group_layout); + fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data) { let bind_group_layout_data = downcast_ref(bind_group_layout_data); - Context::bind_group_layout_drop(self, &bind_group_layout, bind_group_layout_data) + Context::bind_group_layout_drop(self, bind_group_layout_data) } - fn pipeline_layout_drop(&self, pipeline_layout: &ObjectId, pipeline_layout_data: &crate::Data) { - let pipeline_layout = ::from(*pipeline_layout); + fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data) { let pipeline_layout_data = downcast_ref(pipeline_layout_data); - Context::pipeline_layout_drop(self, &pipeline_layout, 
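Every `*_drop` forwarder in this stretch of the diff now reduces to the same two lines: downcast the erased data, call the typed drop. A compact sketch of one of them; the sampler type and the printed side effect are placeholders, not wgpu code.

```rust
use std::any::Any;

type Data = dyn Any + Send + Sync;

struct CoreSampler {
    label: &'static str,
}

fn downcast_ref<T: Any>(data: &Data) -> &T {
    data.downcast_ref().expect("invalid type-erased data")
}

// Typed side (stand-in for `Context::sampler_drop`).
fn core_sampler_drop(sampler: &CoreSampler) {
    println!("dropping sampler {:?}", sampler.label);
}

// Erased side (stand-in for the `DynContext` forwarder): two lines, no id.
fn sampler_drop(sampler_data: &Data) {
    let sampler_data = downcast_ref::<CoreSampler>(sampler_data);
    core_sampler_drop(sampler_data);
}

fn main() {
    let sampler: Box<Data> = Box::new(CoreSampler { label: "linear" });
    sampler_drop(&*sampler);
}
```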
pipeline_layout_data) + Context::pipeline_layout_drop(self, pipeline_layout_data) } - fn shader_module_drop(&self, shader_module: &ObjectId, shader_module_data: &crate::Data) { - let shader_module = ::from(*shader_module); + fn shader_module_drop(&self, shader_module_data: &crate::Data) { let shader_module_data = downcast_ref(shader_module_data); - Context::shader_module_drop(self, &shader_module, shader_module_data) + Context::shader_module_drop(self, shader_module_data) } - fn command_encoder_drop(&self, command_encoder: &ObjectId, command_encoder_data: &crate::Data) { - let command_encoder = ::from(*command_encoder); + fn command_encoder_drop(&self, command_encoder_data: &crate::Data) { let command_encoder_data = downcast_ref(command_encoder_data); - Context::command_encoder_drop(self, &command_encoder, command_encoder_data) + Context::command_encoder_drop(self, command_encoder_data) } - fn command_buffer_drop(&self, command_buffer: &ObjectId, command_buffer_data: &crate::Data) { - let command_buffer = ::from(*command_buffer); + fn command_buffer_drop(&self, command_buffer_data: &crate::Data) { let command_buffer_data = downcast_ref(command_buffer_data); - Context::command_buffer_drop(self, &command_buffer, command_buffer_data) + Context::command_buffer_drop(self, command_buffer_data) } - fn render_bundle_drop(&self, render_bundle: &ObjectId, render_bundle_data: &crate::Data) { - let render_bundle = ::from(*render_bundle); + fn render_bundle_drop(&self, render_bundle_data: &crate::Data) { let render_bundle_data = downcast_ref(render_bundle_data); - Context::render_bundle_drop(self, &render_bundle, render_bundle_data) + Context::render_bundle_drop(self, render_bundle_data) } - fn compute_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data) { - let pipeline = ::from(*pipeline); + fn compute_pipeline_drop(&self, pipeline_data: &crate::Data) { let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pipeline_drop(self, &pipeline, pipeline_data) + Context::compute_pipeline_drop(self, pipeline_data) } - fn render_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data) { - let pipeline = ::from(*pipeline); + fn render_pipeline_drop(&self, pipeline_data: &crate::Data) { let pipeline_data = downcast_ref(pipeline_data); - Context::render_pipeline_drop(self, &pipeline, pipeline_data) + Context::render_pipeline_drop(self, pipeline_data) } - fn pipeline_cache_drop(&self, cache: &ObjectId, cache_data: &crate::Data) { - let cache = ::from(*cache); + fn pipeline_cache_drop(&self, cache_data: &crate::Data) { let cache_data = downcast_ref(cache_data); - Context::pipeline_cache_drop(self, &cache, cache_data) + Context::pipeline_cache_drop(self, cache_data) } fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box) { - let pipeline = ::from(*pipeline); + ) -> Box { let pipeline_data = downcast_ref(pipeline_data); - let (bind_group_layout, data) = - Context::compute_pipeline_get_bind_group_layout(self, &pipeline, pipeline_data, index); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::compute_pipeline_get_bind_group_layout(self, pipeline_data, index); + Box::new(data) as _ } fn render_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box) { - let pipeline = ::from(*pipeline); + ) -> Box { let pipeline_data = downcast_ref(pipeline_data); - let (bind_group_layout, data) = - 
Context::render_pipeline_get_bind_group_layout(self, &pipeline, pipeline_data, index); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::render_pipeline_get_bind_group_layout(self, pipeline_data, index); + Box::new(data) as _ } fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - source: &ObjectId, source_data: &crate::Data, source_offset: BufferAddress, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, copy_size: BufferAddress, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let source = ::from(*source); let source_data = downcast_ref(source_data); - let destination = ::from(*destination); let destination_data = downcast_ref(destination_data); Context::command_encoder_copy_buffer_to_buffer( self, - &encoder, encoder_data, - &source, source_data, source_offset, - &destination, destination_data, destination_offset, copy_size, @@ -2730,17 +1954,14 @@ where fn command_encoder_copy_buffer_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_buffer_to_texture( self, - &encoder, encoder_data, source, destination, @@ -2750,17 +1971,14 @@ where fn command_encoder_copy_texture_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_texture_to_buffer( self, - &encoder, encoder_data, source, destination, @@ -2770,17 +1988,14 @@ where fn command_encoder_copy_texture_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_texture_to_texture( self, - &encoder, encoder_data, source, destination, @@ -2790,148 +2005,96 @@ where fn command_encoder_begin_compute_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &ComputePassDescriptor<'_>, - ) -> (ObjectId, Box) { - let encoder = ::from(*encoder); + ) -> Box { let encoder_data = downcast_ref(encoder_data); - let (compute_pass, data) = - Context::command_encoder_begin_compute_pass(self, &encoder, encoder_data, desc); - (compute_pass.into(), Box::new(data) as _) + let data = Context::command_encoder_begin_compute_pass(self, encoder_data, desc); + Box::new(data) as _ } fn command_encoder_begin_render_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &RenderPassDescriptor<'_>, - ) -> (ObjectId, Box) { - let encoder = ::from(*encoder); + ) -> Box { let encoder_data = downcast_ref(encoder_data); - let (render_pass, data) = - Context::command_encoder_begin_render_pass(self, &encoder, encoder_data, desc); - (render_pass.into(), Box::new(data) as _) + let data = Context::command_encoder_begin_render_pass(self, encoder_data, desc); + Box::new(data) as _ } - fn command_encoder_finish( - &self, - encoder: ObjectId, - encoder_data: &mut crate::Data, - ) -> (ObjectId, Box) { - let (command_buffer, data) = - Context::command_encoder_finish(self, encoder.into(), downcast_mut(encoder_data)); - (command_buffer.into(), Box::new(data) as _) + fn 
command_encoder_finish(&self, encoder_data: &mut crate::Data) -> Box { + let data = Context::command_encoder_finish(self, downcast_mut(encoder_data)); + Box::new(data) as _ } fn command_encoder_clear_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - texture: &Texture, + texture_data: &crate::Data, subresource_range: &ImageSubresourceRange, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_clear_texture( - self, - &encoder, - encoder_data, - texture, - subresource_range, - ) + let texture_data = downcast_ref(texture_data); + Context::command_encoder_clear_texture(self, encoder_data, texture_data, subresource_range) } fn command_encoder_clear_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - buffer: &Buffer, + buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_clear_buffer(self, &encoder, encoder_data, buffer, offset, size) + let buffer_data = downcast_ref(buffer_data); + Context::command_encoder_clear_buffer(self, encoder_data, buffer_data, offset, size) } - fn command_encoder_insert_debug_marker( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ) { - let encoder = ::from(*encoder); + fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str) { let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_insert_debug_marker(self, &encoder, encoder_data, label) + Context::command_encoder_insert_debug_marker(self, encoder_data, label) } - fn command_encoder_push_debug_group( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ) { - let encoder = ::from(*encoder); + fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str) { let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_push_debug_group(self, &encoder, encoder_data, label) + Context::command_encoder_push_debug_group(self, encoder_data, label) } - fn command_encoder_pop_debug_group(&self, encoder: &ObjectId, encoder_data: &crate::Data) { - let encoder = ::from(*encoder); + fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data) { let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_pop_debug_group(self, &encoder, encoder_data) + Context::command_encoder_pop_debug_group(self, encoder_data) } fn command_encoder_write_timestamp( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::command_encoder_write_timestamp( - self, - &encoder, - encoder_data, - &query_set, - query_set_data, - query_index, - ) + Context::command_encoder_write_timestamp(self, encoder_data, query_set_data, query_index) } fn command_encoder_resolve_query_set( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, first_query: u32, query_count: u32, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - let destination = ::from(*destination); let 
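Two signature shapes change here: `command_encoder_finish` now works on `&mut crate::Data` through a mutable downcast instead of consuming an `ObjectId`, and `command_encoder_clear_texture`/`clear_buffer` take the erased `texture_data`/`buffer_data` rather than the typed wrapper structs. A sketch of the mutable-downcast side, with an invented `CoreCommandEncoder`/`CoreCommandBuffer` pair standing in for a real backend:

```rust
use std::any::Any;

type Data = dyn Any + Send + Sync;

#[derive(Default)]
struct CoreCommandEncoder {
    commands: Vec<String>,
}

struct CoreCommandBuffer {
    commands: Vec<String>,
}

fn downcast_ref<T: Any>(data: &Data) -> &T {
    data.downcast_ref().expect("invalid type-erased data")
}

fn downcast_mut<T: Any>(data: &mut Data) -> &mut T {
    data.downcast_mut().expect("invalid type-erased data")
}

// New shape: finish takes the erased encoder state mutably and hands back an
// erased command buffer, with no (id, data) pair involved.
fn command_encoder_finish(encoder_data: &mut Data) -> Box<Data> {
    let encoder = downcast_mut::<CoreCommandEncoder>(encoder_data);
    let data = CoreCommandBuffer {
        commands: std::mem::take(&mut encoder.commands),
    };
    Box::new(data) as _
}

fn main() {
    let mut encoder: Box<Data> = Box::new(CoreCommandEncoder::default());
    downcast_mut::<CoreCommandEncoder>(&mut *encoder)
        .commands
        .push("clear_buffer".to_string());

    let command_buffer = command_encoder_finish(&mut *encoder);
    assert_eq!(downcast_ref::<CoreCommandBuffer>(&*command_buffer).commands.len(), 1);
    assert!(downcast_ref::<CoreCommandEncoder>(&*encoder).commands.is_empty());
}
```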
destination_data = downcast_ref(destination_data); Context::command_encoder_resolve_query_set( self, - &encoder, encoder_data, - &query_set, query_set_data, first_query, query_count, - &destination, destination_data, destination_offset, ) @@ -2939,406 +2102,263 @@ where fn render_bundle_encoder_finish( &self, - encoder: ObjectId, encoder_data: Box, desc: &RenderBundleDescriptor<'_>, - ) -> (ObjectId, Box) { + ) -> Box { let encoder_data = *encoder_data.downcast().unwrap(); - let (render_bundle, data) = - Context::render_bundle_encoder_finish(self, encoder.into(), encoder_data, desc); - (render_bundle.into(), Box::new(data) as _) + let data = Context::render_bundle_encoder_finish(self, encoder_data, desc); + Box::new(data) as _ } fn queue_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, data: &[u8], ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_write_buffer(self, &queue, queue_data, &buffer, buffer_data, offset, data) + Context::queue_write_buffer(self, queue_data, buffer_data, offset, data) } fn queue_validate_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()> { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_validate_write_buffer( - self, - &queue, - queue_data, - &buffer, - buffer_data, - offset, - size, - ) + Context::queue_validate_write_buffer(self, queue_data, buffer_data, offset, size) } fn queue_create_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, size: BufferSize, ) -> Option> { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_create_staging_buffer(self, &queue, queue_data, size) + Context::queue_create_staging_buffer(self, queue_data, size) } fn queue_write_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_write_staging_buffer( - self, - &queue, - queue_data, - &buffer, - buffer_data, - offset, - staging_buffer, - ) + Context::queue_write_staging_buffer(self, queue_data, buffer_data, offset, staging_buffer) } fn queue_write_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, texture: ImageCopyTexture<'_>, data: &[u8], data_layout: ImageDataLayout, size: Extent3d, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_write_texture(self, &queue, queue_data, texture, data, data_layout, size) + Context::queue_write_texture(self, queue_data, texture, data, data_layout, size) } #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_copy_external_image_to_texture(self, &queue, queue_data, source, dest, size) + Context::queue_copy_external_image_to_texture(self, 
queue_data, source, dest, size) } fn queue_submit( &self, - queue: &ObjectId, queue_data: &crate::Data, - command_buffers: &mut dyn Iterator)>, + command_buffers: &mut dyn Iterator>, ) -> Arc { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let command_buffers = command_buffers.map(|(id, data)| { - let command_buffer_data: ::CommandBufferData = *data.downcast().unwrap(); - (::from(id), command_buffer_data) - }); - let data = Context::queue_submit(self, &queue, queue_data, command_buffers); + let command_buffers = command_buffers.map(|data| *data.downcast().unwrap()); + let data = Context::queue_submit(self, queue_data, command_buffers); Arc::new(data) as _ } - fn queue_get_timestamp_period(&self, queue: &ObjectId, queue_data: &crate::Data) -> f32 { - let queue = ::from(*queue); + fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32 { let queue_data = downcast_ref(queue_data); - Context::queue_get_timestamp_period(self, &queue, queue_data) + Context::queue_get_timestamp_period(self, queue_data) } fn queue_on_submitted_work_done( &self, - queue: &ObjectId, queue_data: &crate::Data, callback: SubmittedWorkDoneCallback, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_on_submitted_work_done(self, &queue, queue_data, callback) + Context::queue_on_submitted_work_done(self, queue_data, callback) } - fn device_start_capture(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_start_capture(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_start_capture(self, &device, device_data) + Context::device_start_capture(self, device_data) } - fn device_stop_capture(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_stop_capture(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_stop_capture(self, &device, device_data) + Context::device_stop_capture(self, device_data) } - fn device_get_internal_counters( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> wgt::InternalCounters { - let device = ::from(*device); + fn device_get_internal_counters(&self, device_data: &crate::Data) -> wgt::InternalCounters { let device_data = downcast_ref(device_data); - Context::device_get_internal_counters(self, &device, device_data) + Context::device_get_internal_counters(self, device_data) } - fn generate_allocator_report( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Option { - let device = ::from(*device); + fn generate_allocator_report(&self, device_data: &crate::Data) -> Option { let device_data = downcast_ref(device_data); - Context::device_generate_allocator_report(self, &device, device_data) + Context::device_generate_allocator_report(self, device_data) } - fn pipeline_cache_get_data( - &self, - cache: &ObjectId, - cache_data: &crate::Data, - ) -> Option> { - let cache = ::from(*cache); + fn pipeline_cache_get_data(&self, cache_data: &crate::Data) -> Option> { let cache_data = downcast_ref::(cache_data); - Context::pipeline_cache_get_data(self, &cache, cache_data) + Context::pipeline_cache_get_data(self, cache_data) } - fn compute_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ) { - let mut pass = ::from(*pass); + fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { let 
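`queue_submit` shows the same simplification applied to an iterator argument: instead of `(id, boxed data)` pairs that had to be split and converted per item, the submit path now receives an iterator of boxed, type-erased command buffers and performs one downcast per item. A self-contained sketch; the `CoreCommandBuffer` type and the summed "submission index" are stand-ins for illustration.

```rust
use std::any::Any;

type Data = dyn Any + Send + Sync;

struct CoreCommandBuffer(u32);

fn queue_submit(command_buffers: &mut dyn Iterator<Item = Box<Data>>) -> u64 {
    // Equivalent of the new `command_buffers.map(|data| *data.downcast().unwrap())`.
    let command_buffers = command_buffers.map(|data| {
        *data
            .downcast::<CoreCommandBuffer>()
            .expect("submitted a command buffer from another backend")
    });
    // Stand-in for real submission: fold the buffers into a fake submission index.
    command_buffers.map(|cb| u64::from(cb.0)).sum()
}

fn main() {
    let buffers: Vec<Box<Data>> =
        vec![Box::new(CoreCommandBuffer(1)), Box::new(CoreCommandBuffer(2))];
    assert_eq!(queue_submit(&mut buffers.into_iter()), 3);
}
```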
pass_data = downcast_mut::(pass_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pass_set_pipeline(self, &mut pass, pass_data, &pipeline, pipeline_data) + Context::compute_pass_set_pipeline(self, pass_data, pipeline_data) } fn compute_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let bind_group = ::from(*bind_group); let bind_group_data = downcast_ref(bind_group_data); - Context::compute_pass_set_bind_group( - self, - &mut pass, - pass_data, - index, - &bind_group, - bind_group_data, - offsets, - ) + Context::compute_pass_set_bind_group(self, pass_data, index, bind_group_data, offsets) } fn compute_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, offset: u32, data: &[u8], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::compute_pass_set_push_constants(self, &mut pass, pass_data, offset, data) + Context::compute_pass_set_push_constants(self, pass_data, offset, data) } - fn compute_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ) { - let mut pass = ::from(*pass); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_insert_debug_marker(self, &mut pass, pass_data, label) + Context::compute_pass_insert_debug_marker(self, pass_data, label) } - fn compute_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ) { - let mut pass = ::from(*pass); + fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_push_debug_group(self, &mut pass, pass_data, group_label) + Context::compute_pass_push_debug_group(self, pass_data, group_label) } - fn compute_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_pop_debug_group(self, &mut pass, pass_data) + Context::compute_pass_pop_debug_group(self, pass_data) } fn compute_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::compute_pass_write_timestamp( - self, - &mut pass, - pass_data, - &query_set, - query_set_data, - query_index, - ) + Context::compute_pass_write_timestamp(self, pass_data, query_set_data, query_index) } fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); Context::compute_pass_begin_pipeline_statistics_query( self, - &mut pass, pass_data, - &query_set, query_set_data, query_index, ) } - fn compute_pass_end_pipeline_statistics_query( 
- &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ) { - let mut pass = ::from(*pass); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_end_pipeline_statistics_query(self, &mut pass, pass_data) + Context::compute_pass_end_pipeline_statistics_query(self, pass_data) } fn compute_pass_dispatch_workgroups( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, z: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::compute_pass_dispatch_workgroups(self, &mut pass, pass_data, x, y, z) + Context::compute_pass_dispatch_workgroups(self, pass_data, x, y, z) } fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::compute_pass_dispatch_workgroups_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) } - fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn compute_pass_end(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut(pass_data); - Context::compute_pass_end(self, &mut pass, pass_data) + Context::compute_pass_end(self, pass_data) } fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - pipeline: &ObjectId, pipeline_data: &crate::Data, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::render_bundle_encoder_set_pipeline( - self, - &mut encoder, - encoder_data, - &pipeline, - pipeline_data, - ) + Context::render_bundle_encoder_set_pipeline(self, encoder_data, pipeline_data) } fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let bind_group = ::from(*bind_group); let bind_group_data = downcast_ref(bind_group_data); Context::render_bundle_encoder_set_bind_group( self, - &mut encoder, encoder_data, index, - &bind_group, bind_group_data, offsets, ) @@ -3346,23 +2366,17 @@ where fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, size: Option, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); Context::render_bundle_encoder_set_index_buffer( self, - &mut encoder, encoder_data, - &buffer, buffer_data, index_format, offset, @@ -3372,24 +2386,18 @@ where fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let buffer = ::from(*buffer); 
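Recording methods that touch two resources, such as `compute_pass_dispatch_workgroups_indirect`, keep the mutable downcast of the pass state and the shared downcast of the buffer, but drop both id conversions. A toy sketch of that shape; the pass and buffer types and the recorded string command are invented.

```rust
use std::any::Any;

type Data = dyn Any + Send + Sync;

struct CorePass {
    commands: Vec<String>,
}

struct CoreBuffer {
    label: &'static str,
}

fn downcast_ref<T: Any>(data: &Data) -> &T {
    data.downcast_ref().expect("invalid type-erased data")
}

fn downcast_mut<T: Any>(data: &mut Data) -> &mut T {
    data.downcast_mut().expect("invalid type-erased data")
}

// Pass state is downcast mutably, the indirect buffer immutably; no ids rebuilt.
fn compute_pass_dispatch_workgroups_indirect(
    pass_data: &mut Data,
    indirect_buffer_data: &Data,
    indirect_offset: u64,
) {
    let pass = downcast_mut::<CorePass>(pass_data);
    let buffer = downcast_ref::<CoreBuffer>(indirect_buffer_data);
    pass.commands
        .push(format!("dispatch_workgroups_indirect({} @ {indirect_offset})", buffer.label));
}

fn main() {
    let mut pass: Box<Data> = Box::new(CorePass { commands: Vec::new() });
    let buffer: Box<Data> = Box::new(CoreBuffer { label: "args" });
    compute_pass_dispatch_workgroups_indirect(&mut *pass, &*buffer, 0);
    assert_eq!(downcast_ref::<CorePass>(&*pass).commands.len(), 1);
}
```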
let buffer_data = downcast_ref(buffer_data); Context::render_bundle_encoder_set_vertex_buffer( self, - &mut encoder, encoder_data, slot, - &buffer, buffer_data, offset, size, @@ -3398,49 +2406,35 @@ where fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, stages: ShaderStages, offset: u32, data: &[u8], ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_set_push_constants( - self, - &mut encoder, - encoder_data, - stages, - offset, - data, - ) + Context::render_bundle_encoder_set_push_constants(self, encoder_data, stages, offset, data) } fn render_bundle_encoder_draw( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, vertices: Range, instances: Range, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_draw(self, &mut encoder, encoder_data, vertices, instances) + Context::render_bundle_encoder_draw(self, encoder_data, vertices, instances) } fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, indices: Range, base_vertex: i32, instances: Range, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); Context::render_bundle_encoder_draw_indexed( self, - &mut encoder, encoder_data, indices, base_vertex, @@ -3450,21 +2444,15 @@ where fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_draw_indirect( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) @@ -3472,21 +2460,15 @@ where fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_draw_indexed_indirect( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) @@ -3494,22 +2476,16 @@ where fn render_bundle_encoder_multi_draw_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_multi_draw_indirect( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, count, @@ -3518,22 +2494,16 @@ where fn render_bundle_encoder_multi_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut encoder = ::from(*encoder); let encoder_data = 
downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_multi_draw_indexed_indirect( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, count, @@ -3542,30 +2512,21 @@ where fn render_bundle_encoder_multi_draw_indirect_count( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_bundle_encoder_multi_draw_indirect_count( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, @@ -3574,93 +2535,58 @@ where fn render_bundle_encoder_multi_draw_indexed_indirect_count( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_bundle_encoder_multi_draw_indexed_indirect_count( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, ) } - fn render_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { let pass_data = downcast_mut::(pass_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::render_pass_set_pipeline(self, &mut pass, pass_data, &pipeline, pipeline_data) + Context::render_pass_set_pipeline(self, pass_data, pipeline_data) } fn render_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, bind_group_data: &crate::Data, offsets: &[DynamicOffset], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let bind_group = ::from(*bind_group); let bind_group_data = downcast_ref(bind_group_data); - Context::render_pass_set_bind_group( - self, - &mut pass, - pass_data, - index, - &bind_group, - bind_group_data, - offsets, - ) + Context::render_pass_set_bind_group(self, pass_data, index, bind_group_data, offsets) } fn render_pass_set_index_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, size: Option, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let buffer = ::from(*buffer); 
let buffer_data = downcast_ref(buffer_data); Context::render_pass_set_index_buffer( self, - &mut pass, pass_data, - &buffer, buffer_data, index_format, offset, @@ -3670,114 +2596,71 @@ where fn render_pass_set_vertex_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::render_pass_set_vertex_buffer( - self, - &mut pass, - pass_data, - slot, - &buffer, - buffer_data, - offset, - size, - ) + Context::render_pass_set_vertex_buffer(self, pass_data, slot, buffer_data, offset, size) } fn render_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, stages: ShaderStages, offset: u32, data: &[u8], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_push_constants(self, &mut pass, pass_data, stages, offset, data) + Context::render_pass_set_push_constants(self, pass_data, stages, offset, data) } fn render_pass_draw( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, vertices: Range, instances: Range, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw(self, &mut pass, pass_data, vertices, instances) + Context::render_pass_draw(self, pass_data, vertices, instances) } fn render_pass_draw_indexed( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, indices: Range, base_vertex: i32, instances: Range, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw_indexed( - self, - &mut pass, - pass_data, - indices, - base_vertex, - instances, - ) + Context::render_pass_draw_indexed(self, pass_data, indices, base_vertex, instances) } fn render_pass_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_draw_indirect( - self, - &mut pass, - pass_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - ) + Context::render_pass_draw_indirect(self, pass_data, indirect_buffer_data, indirect_offset) } fn render_pass_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_draw_indexed_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) @@ -3785,22 +2668,16 @@ where fn render_pass_multi_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_multi_draw_indirect( self, - &mut pass, pass_data, - 
&indirect_buffer, indirect_buffer_data, indirect_offset, count, @@ -3809,22 +2686,16 @@ where fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_multi_draw_indexed_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, count, @@ -3833,30 +2704,21 @@ where fn render_pass_multi_draw_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_pass_multi_draw_indirect_count( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, @@ -3865,64 +2727,46 @@ where fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_pass_multi_draw_indexed_indirect_count( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, ) } - fn render_pass_set_blend_constant( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - color: Color, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_blend_constant(self, &mut pass, pass_data, color) + Context::render_pass_set_blend_constant(self, pass_data, color) } fn render_pass_set_scissor_rect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, width: u32, height: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_scissor_rect(self, &mut pass, pass_data, x, y, width, height) + Context::render_pass_set_scissor_rect(self, pass_data, x, y, width, height) } fn render_pass_set_viewport( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: f32, y: f32, @@ -3931,142 +2775,87 @@ where min_depth: f32, max_depth: f32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); Context::render_pass_set_viewport( - self, &mut pass, pass_data, x, y, width, height, min_depth, max_depth, + self, pass_data, x, y, width, height, 
min_depth, max_depth, ) } - fn render_pass_set_stencil_reference( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - reference: u32, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_stencil_reference(self, &mut pass, pass_data, reference) + Context::render_pass_set_stencil_reference(self, pass_data, reference) } - fn render_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ) { - let mut pass = ::from(*pass); + fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_insert_debug_marker(self, &mut pass, pass_data, label) + Context::render_pass_insert_debug_marker(self, pass_data, label) } - fn render_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ) { - let mut pass = ::from(*pass); + fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_push_debug_group(self, &mut pass, pass_data, group_label) + Context::render_pass_push_debug_group(self, pass_data, group_label) } - fn render_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_pop_debug_group(self, &mut pass, pass_data) + Context::render_pass_pop_debug_group(self, pass_data) } fn render_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::render_pass_write_timestamp( - self, - &mut pass, - pass_data, - &query_set, - query_set_data, - query_index, - ) + Context::render_pass_write_timestamp(self, pass_data, query_set_data, query_index) } - fn render_pass_begin_occlusion_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - query_index: u32, - ) { - let mut pass = ::from(*pass); + fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_begin_occlusion_query(self, &mut pass, pass_data, query_index) + Context::render_pass_begin_occlusion_query(self, pass_data, query_index) } - fn render_pass_end_occlusion_query(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_end_occlusion_query(self, &mut pass, pass_data) + Context::render_pass_end_occlusion_query(self, pass_data) } fn render_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); Context::render_pass_begin_pipeline_statistics_query( self, - &mut pass, pass_data, - &query_set, 
             query_set_data,
             query_index,
         )
     }

-    fn render_pass_end_pipeline_statistics_query(
-        &self,
-        pass: &mut ObjectId,
-        pass_data: &mut crate::Data,
-    ) {
-        let mut pass = <T::RenderPassId>::from(*pass);
+    fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) {
         let pass_data = downcast_mut::<T::RenderPassData>(pass_data);
-        Context::render_pass_end_pipeline_statistics_query(self, &mut pass, pass_data)
+        Context::render_pass_end_pipeline_statistics_query(self, pass_data)
     }

     fn render_pass_execute_bundles(
         &self,
-        pass: &mut ObjectId,
         pass_data: &mut crate::Data,
-        render_bundles: &mut dyn Iterator<Item = (&ObjectId, &crate::Data)>,
+        render_bundles: &mut dyn Iterator<Item = &crate::Data>,
     ) {
-        let mut pass = <T::RenderPassId>::from(*pass);
         let pass_data = downcast_mut::<T::RenderPassData>(pass_data);
-        let mut render_bundles = render_bundles.map(|(id, data)| {
-            let render_bundle_data: &<T as Context>::RenderBundleData = downcast_ref(data);
-            (<T::RenderBundleId>::from(*id), render_bundle_data)
-        });
-        Context::render_pass_execute_bundles(self, &mut pass, pass_data, &mut render_bundles)
+        let mut render_bundles = render_bundles.map(downcast_ref);
+        Context::render_pass_execute_bundles(self, pass_data, &mut render_bundles)
     }

-    fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) {
-        let mut pass = <T::RenderPassId>::from(*pass);
+    fn render_pass_end(&self, pass_data: &mut crate::Data) {
         let pass_data = downcast_mut(pass_data);
-        Context::render_pass_end(self, &mut pass, pass_data)
+        Context::render_pass_end(self, pass_data)
     }
 }
diff --git a/wgpu/src/util/mod.rs b/wgpu/src/util/mod.rs
index ff4fb7ecf..11148179b 100644
--- a/wgpu/src/util/mod.rs
+++ b/wgpu/src/util/mod.rs
@@ -125,7 +125,6 @@ impl DownloadBuffer {
         let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
             &*download.context,
-            &download.id,
             download.data.as_ref(),
             0..size,
         );
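The final hunk is a caller-side instance of the same cleanup: `DownloadBuffer` no longer passes a stored id alongside its data when fetching the mapped range. On the public API side, the replacement for id-based bookkeeping is the equality and hashing this patch adds to the resource types themselves. A hedged, self-contained sketch of what that enables for user code; the helper below is hypothetical, and with a real device the items would be `&wgpu::Buffer` handles rather than the placeholder strings used to keep the example runnable.

```rust
use std::collections::HashSet;
use std::hash::Hash;

/// Counts distinct resources by identity. After this change, `&wgpu::Buffer`,
/// `&wgpu::Texture`, and the other resource references satisfy `Eq + Hash`, so
/// callers can key collections on the resources themselves instead of an id.
fn count_distinct<T: Eq + Hash>(items: impl IntoIterator<Item = T>) -> usize {
    items.into_iter().collect::<HashSet<_>>().len()
}

fn main() {
    // Stand-in values; in real code these would be references to wgpu resources.
    let items = ["vertices", "indices", "vertices"];
    assert_eq!(count_distinct(items), 2);
}
```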