Remove IDs from wgpu traits (#6134)

Remove `wgpu`'s `.global_id()` getters.

Implement `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord` for wgpu resources.
This commit is contained in:
Teodor Tanasoaia 2024-08-27 12:00:19 +02:00 committed by GitHub
parent c7e5d07dee
commit 338678ad5f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
31 changed files with 1268 additions and 3587 deletions

View File

@ -55,6 +55,16 @@ which we're hoping to build performance improvements upon in the future.
By @wumpf in [#6069](https://github.com/gfx-rs/wgpu/pull/6069), [#6099](https://github.com/gfx-rs/wgpu/pull/6099), [#6100](https://github.com/gfx-rs/wgpu/pull/6100).
#### `wgpu`'s resources no longer have `.global_id()` getters
`wgpu-core`'s internals no longer use nor need IDs and we are moving towards removing IDs
completely. This is a step in that direction.
Current users of `.global_id()` are encouraged to make use of the `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord`
traits that have now been implemented for `wgpu` resources.
By @teoxoy in [#6134](https://github.com/gfx-rs/wgpu/pull/6134).
### New Features
#### Naga

View File

@ -31,129 +31,77 @@ static BIND_GROUP_LAYOUT_DEDUPLICATION: GpuTestConfiguration = GpuTestConfigurat
.run_async(bgl_dedupe); .run_async(bgl_dedupe);
async fn bgl_dedupe(ctx: TestingContext) { async fn bgl_dedupe(ctx: TestingContext) {
let entries_1 = &[]; let entries = &[];
let entries_2 = &[ENTRY]; let bgl_1a = ctx
.device
// Block so we can force all resource to die. .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
{
let bgl_1a = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
});
let bgl_2 = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_2,
});
let bgl_1b = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
});
let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None, label: None,
layout: &bgl_1a, entries,
entries: &[],
}); });
let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { let bgl_1b = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None, label: None,
layout: &bgl_1b, entries,
entries: &[],
}); });
let pipeline_layout = ctx let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
.device label: None,
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { layout: &bgl_1a,
label: None, entries: &[],
bind_group_layouts: &[&bgl_1b], });
push_constant_ranges: &[],
});
let module = ctx let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
.device label: None,
.create_shader_module(wgpu::ShaderModuleDescriptor { layout: &bgl_1b,
label: None, entries: &[],
source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), });
});
let desc = wgpu::ComputePipelineDescriptor { let pipeline_layout = ctx
.device
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None, label: None,
layout: Some(&pipeline_layout), bind_group_layouts: &[&bgl_1b],
module: &module, push_constant_ranges: &[],
entry_point: Some("no_resources"),
compilation_options: Default::default(),
cache: None,
};
let pipeline = ctx.device.create_compute_pipeline(&desc);
let mut encoder = ctx.device.create_command_encoder(&Default::default());
let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
label: None,
timestamp_writes: None,
}); });
pass.set_bind_group(0, &bg_1b, &[]); let module = ctx
pass.set_pipeline(&pipeline); .device
pass.dispatch_workgroups(1, 1, 1); .create_shader_module(wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()),
});
pass.set_bind_group(0, &bg_1a, &[]); let desc = wgpu::ComputePipelineDescriptor {
pass.dispatch_workgroups(1, 1, 1); label: None,
layout: Some(&pipeline_layout),
module: &module,
entry_point: Some("no_resources"),
compilation_options: Default::default(),
cache: None,
};
drop(pass); let pipeline = ctx.device.create_compute_pipeline(&desc);
ctx.queue.submit(Some(encoder.finish())); let mut encoder = ctx.device.create_command_encoder(&Default::default());
// Abuse the fact that global_id is really just the bitpacked ids when targeting wgpu-core. let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { label: None,
let bgl_1a_idx = bgl_1a.global_id().inner() & 0xFFFF_FFFF; timestamp_writes: None,
assert_eq!(bgl_1a_idx, 0); });
let bgl_2_idx = bgl_2.global_id().inner() & 0xFFFF_FFFF;
assert_eq!(bgl_2_idx, 1);
let bgl_1b_idx = bgl_1b.global_id().inner() & 0xFFFF_FFFF;
assert_eq!(bgl_1b_idx, 2);
}
}
ctx.async_poll(wgpu::Maintain::wait()) pass.set_bind_group(0, &bg_1b, &[]);
.await pass.set_pipeline(&pipeline);
.panic_on_timeout(); pass.dispatch_workgroups(1, 1, 1);
if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { pass.set_bind_group(0, &bg_1a, &[]);
// Indices are made reusable as soon as the handle is dropped so we keep them around pass.dispatch_workgroups(1, 1, 1);
// for the duration of the loop.
let mut bgls = Vec::new();
let mut indices = Vec::new();
// Now all of the BGL ids should be dead, so we should get the same ids again.
for _ in 0..=2 {
let test_bgl = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
});
let test_bgl_idx = test_bgl.global_id().inner() & 0xFFFF_FFFF; drop(pass);
bgls.push(test_bgl);
indices.push(test_bgl_idx); ctx.queue.submit(Some(encoder.finish()));
}
// We don't guarantee that the IDs will appear in the same order. Sort them
// and check that they all appear exactly once.
indices.sort();
for (i, index) in indices.iter().enumerate() {
assert_eq!(*index, i as u64);
}
}
} }
#[gpu_test] #[gpu_test]

View File

@ -1,6 +1,6 @@
use std::{future::Future, sync::Arc, thread}; use std::{future::Future, sync::Arc, thread};
use crate::context::{DeviceRequest, DynContext, ObjectId}; use crate::context::{DeviceRequest, DynContext};
use crate::*; use crate::*;
/// Handle to a physical graphics and/or compute device. /// Handle to a physical graphics and/or compute device.
@ -14,7 +14,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Adapter { pub struct Adapter {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
@ -23,7 +22,7 @@ static_assertions::assert_impl_all!(Adapter: Send, Sync);
impl Drop for Adapter { impl Drop for Adapter {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.adapter_drop(&self.id, self.data.as_ref()) self.context.adapter_drop(self.data.as_ref())
} }
} }
} }
@ -40,14 +39,6 @@ pub type RequestAdapterOptions<'a, 'b> = RequestAdapterOptionsBase<&'a Surface<'
static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync); static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync);
impl Adapter { impl Adapter {
/// Returns a globally-unique identifier for this `Adapter`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Requests a connection to a physical device, creating a logical device. /// Requests a connection to a physical device, creating a logical device.
/// ///
/// Returns the [`Device`] together with a [`Queue`] that executes command buffers. /// Returns the [`Device`] together with a [`Queue`] that executes command buffers.
@ -80,7 +71,6 @@ impl Adapter {
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
let device = DynContext::adapter_request_device( let device = DynContext::adapter_request_device(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
desc, desc,
trace_path, trace_path,
@ -88,20 +78,16 @@ impl Adapter {
async move { async move {
device.await.map( device.await.map(
|DeviceRequest { |DeviceRequest {
device_id,
device_data, device_data,
queue_id,
queue_data, queue_data,
}| { }| {
( (
Device { Device {
context: Arc::clone(&context), context: Arc::clone(&context),
id: device_id,
data: device_data, data: device_data,
}, },
Queue { Queue {
context, context,
id: queue_id,
data: queue_data, data: queue_data,
}, },
) )
@ -131,18 +117,21 @@ impl Adapter {
// Part of the safety requirements is that the device was generated from the same adapter. // Part of the safety requirements is that the device was generated from the same adapter.
// Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices. // Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices.
.unwrap() .unwrap()
.create_device_from_hal(&self.id.into(), hal_device, desc, trace_path) .create_device_from_hal(
crate::context::downcast_ref(&self.data),
hal_device,
desc,
trace_path,
)
} }
.map(|(device, queue)| { .map(|(device, queue)| {
( (
Device { Device {
context: Arc::clone(&context), context: Arc::clone(&context),
id: device.id().into(),
data: Box::new(device), data: Box::new(device),
}, },
Queue { Queue {
context, context,
id: queue.id().into(),
data: Box::new(queue), data: Box::new(queue),
}, },
) )
@ -178,7 +167,12 @@ impl Adapter {
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
{ {
unsafe { ctx.adapter_as_hal::<A, F, R>(self.id.into(), hal_adapter_callback) } unsafe {
ctx.adapter_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_adapter_callback,
)
}
} else { } else {
hal_adapter_callback(None) hal_adapter_callback(None)
} }
@ -188,31 +182,29 @@ impl Adapter {
pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool { pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool {
DynContext::adapter_is_surface_supported( DynContext::adapter_is_surface_supported(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
&surface.id,
surface.surface_data.as_ref(), surface.surface_data.as_ref(),
) )
} }
/// The features which can be used to create devices on this adapter. /// The features which can be used to create devices on this adapter.
pub fn features(&self) -> Features { pub fn features(&self) -> Features {
DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref()) DynContext::adapter_features(&*self.context, self.data.as_ref())
} }
/// The best limits which can be used to create devices on this adapter. /// The best limits which can be used to create devices on this adapter.
pub fn limits(&self) -> Limits { pub fn limits(&self) -> Limits {
DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref()) DynContext::adapter_limits(&*self.context, self.data.as_ref())
} }
/// Get info about the adapter itself. /// Get info about the adapter itself.
pub fn get_info(&self) -> AdapterInfo { pub fn get_info(&self) -> AdapterInfo {
DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref()) DynContext::adapter_get_info(&*self.context, self.data.as_ref())
} }
/// Get info about the adapter itself. /// Get info about the adapter itself.
pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities { pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref()) DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref())
} }
/// Returns the features supported for a given texture format by this adapter. /// Returns the features supported for a given texture format by this adapter.
@ -220,12 +212,7 @@ impl Adapter {
/// Note that the WebGPU spec further restricts the available usages/features. /// Note that the WebGPU spec further restricts the available usages/features.
/// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature. /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature.
pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures { pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
DynContext::adapter_get_texture_format_features( DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format)
&*self.context,
&self.id,
self.data.as_ref(),
format,
)
} }
/// Generates a timestamp using the clock used by the presentation engine. /// Generates a timestamp using the clock used by the presentation engine.
@ -250,6 +237,6 @@ impl Adapter {
// //
/// [Instant]: std::time::Instant /// [Instant]: std::time::Instant
pub fn get_presentation_timestamp(&self) -> PresentationTimestamp { pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref()) DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref())
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a binding group. /// Handle to a binding group.
@ -14,26 +13,17 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct BindGroup { pub struct BindGroup {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroup: Send, Sync); static_assertions::assert_impl_all!(BindGroup: Send, Sync);
impl BindGroup { super::impl_partialeq_eq_hash!(BindGroup);
/// Returns a globally-unique identifier for this `BindGroup`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for BindGroup { impl Drop for BindGroup {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.bind_group_drop(&self.id, self.data.as_ref()); self.context.bind_group_drop(self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a binding group layout. /// Handle to a binding group layout.
@ -17,27 +16,17 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct BindGroupLayout { pub struct BindGroupLayout {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync); static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
impl BindGroupLayout { super::impl_partialeq_eq_hash!(BindGroupLayout);
/// Returns a globally-unique identifier for this `BindGroupLayout`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for BindGroupLayout { impl Drop for BindGroupLayout {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.bind_group_layout_drop(self.data.as_ref());
.bind_group_layout_drop(&self.id, self.data.as_ref());
} }
} }
} }

View File

@ -7,7 +7,7 @@ use std::{
use parking_lot::Mutex; use parking_lot::Mutex;
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Handle to a GPU-accessible buffer. /// Handle to a GPU-accessible buffer.
@ -173,7 +173,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Buffer { pub struct Buffer {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
pub(crate) map_context: Mutex<MapContext>, pub(crate) map_context: Mutex<MapContext>,
pub(crate) size: wgt::BufferAddress, pub(crate) size: wgt::BufferAddress,
@ -183,15 +182,9 @@ pub struct Buffer {
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync); static_assertions::assert_impl_all!(Buffer: Send, Sync);
impl Buffer { super::impl_partialeq_eq_hash!(Buffer);
/// Returns a globally-unique identifier for this `Buffer`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
impl Buffer {
/// Return the binding view of the entire buffer. /// Return the binding view of the entire buffer.
pub fn as_entire_binding(&self) -> BindingResource<'_> { pub fn as_entire_binding(&self) -> BindingResource<'_> {
BindingResource::Buffer(self.as_entire_buffer_binding()) BindingResource::Buffer(self.as_entire_buffer_binding())
@ -217,14 +210,17 @@ impl Buffer {
&self, &self,
hal_buffer_callback: F, hal_buffer_callback: F,
) -> R { ) -> R {
let id = self.id;
if let Some(ctx) = self if let Some(ctx) = self
.context .context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
{ {
unsafe { ctx.buffer_as_hal::<A, F, R>(id.into(), hal_buffer_callback) } unsafe {
ctx.buffer_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_buffer_callback,
)
}
} else { } else {
hal_buffer_callback(None) hal_buffer_callback(None)
} }
@ -256,12 +252,12 @@ impl Buffer {
/// Flushes any pending write operations and unmaps the buffer from host memory. /// Flushes any pending write operations and unmaps the buffer from host memory.
pub fn unmap(&self) { pub fn unmap(&self) {
self.map_context.lock().reset(); self.map_context.lock().reset();
DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref()); DynContext::buffer_unmap(&*self.context, self.data.as_ref());
} }
/// Destroy the associated native resources as soon as possible. /// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) { pub fn destroy(&self) {
DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref()); DynContext::buffer_destroy(&*self.context, self.data.as_ref());
} }
/// Returns the length of the buffer allocation in bytes. /// Returns the length of the buffer allocation in bytes.
@ -343,12 +339,7 @@ impl<'a> BufferSlice<'a> {
callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static, callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
) { ) {
let mut mc = self.buffer.map_context.lock(); let mut mc = self.buffer.map_context.lock();
assert_eq!( assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
mc.initial_range,
0..0,
"Buffer {:?} is already mapped",
self.buffer.id
);
let end = match self.size { let end = match self.size {
Some(s) => self.offset + s.get(), Some(s) => self.offset + s.get(),
None => mc.total_size, None => mc.total_size,
@ -357,7 +348,6 @@ impl<'a> BufferSlice<'a> {
DynContext::buffer_map_async( DynContext::buffer_map_async(
&*self.buffer.context, &*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(), self.buffer.data.as_ref(),
mode, mode,
self.offset..end, self.offset..end,
@ -383,7 +373,6 @@ impl<'a> BufferSlice<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size); let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range( let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context, &*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(), self.buffer.data.as_ref(),
self.offset..end, self.offset..end,
); );
@ -429,7 +418,6 @@ impl<'a> BufferSlice<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size); let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range( let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context, &*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(), self.buffer.data.as_ref(),
self.offset..end, self.offset..end,
); );
@ -680,7 +668,7 @@ impl Drop for BufferViewMut<'_> {
impl Drop for Buffer { impl Drop for Buffer {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.buffer_drop(&self.id, self.data.as_ref()); self.context.buffer_drop(self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a command buffer on the GPU. /// Handle to a command buffer on the GPU.
@ -13,7 +12,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct CommandBuffer { pub struct CommandBuffer {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: Option<ObjectId>,
pub(crate) data: Option<Box<Data>>, pub(crate) data: Option<Box<Data>>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
@ -22,9 +20,8 @@ static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
impl Drop for CommandBuffer { impl Drop for CommandBuffer {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
if let Some(id) = self.id.take() { if let Some(data) = self.data.take() {
self.context self.context.command_buffer_drop(data.as_ref());
.command_buffer_drop(&id, self.data.take().unwrap().as_ref());
} }
} }
} }

View File

@ -1,6 +1,6 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Encodes a series of GPU operations. /// Encodes a series of GPU operations.
@ -15,7 +15,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct CommandEncoder { pub struct CommandEncoder {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: Option<ObjectId>,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
@ -24,9 +23,7 @@ static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
impl Drop for CommandEncoder { impl Drop for CommandEncoder {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
if let Some(id) = self.id.take() { self.context.command_encoder_drop(self.data.as_ref());
self.context.command_encoder_drop(&id, self.data.as_ref());
}
} }
} }
} }
@ -71,14 +68,9 @@ static_assertions::assert_impl_all!(ImageCopyTexture<'_>: Send, Sync);
impl CommandEncoder { impl CommandEncoder {
/// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution. /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution.
pub fn finish(mut self) -> CommandBuffer { pub fn finish(mut self) -> CommandBuffer {
let (id, data) = DynContext::command_encoder_finish( let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut());
&*self.context,
self.id.take().unwrap(),
self.data.as_mut(),
);
CommandBuffer { CommandBuffer {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id: Some(id),
data: Some(data), data: Some(data),
} }
} }
@ -97,16 +89,10 @@ impl CommandEncoder {
&'encoder mut self, &'encoder mut self,
desc: &RenderPassDescriptor<'_>, desc: &RenderPassDescriptor<'_>,
) -> RenderPass<'encoder> { ) -> RenderPass<'encoder> {
let id = self.id.as_ref().unwrap(); let data =
let (id, data) = DynContext::command_encoder_begin_render_pass( DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc);
&*self.context,
id,
self.data.as_ref(),
desc,
);
RenderPass { RenderPass {
inner: RenderPassInner { inner: RenderPassInner {
id,
data, data,
context: self.context.clone(), context: self.context.clone(),
}, },
@ -128,16 +114,13 @@ impl CommandEncoder {
&'encoder mut self, &'encoder mut self,
desc: &ComputePassDescriptor<'_>, desc: &ComputePassDescriptor<'_>,
) -> ComputePass<'encoder> { ) -> ComputePass<'encoder> {
let id = self.id.as_ref().unwrap(); let data = DynContext::command_encoder_begin_compute_pass(
let (id, data) = DynContext::command_encoder_begin_compute_pass(
&*self.context, &*self.context,
id,
self.data.as_ref(), self.data.as_ref(),
desc, desc,
); );
ComputePass { ComputePass {
inner: ComputePassInner { inner: ComputePassInner {
id,
data, data,
context: self.context.clone(), context: self.context.clone(),
}, },
@ -162,12 +145,9 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_copy_buffer_to_buffer( DynContext::command_encoder_copy_buffer_to_buffer(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
&source.id,
source.data.as_ref(), source.data.as_ref(),
source_offset, source_offset,
&destination.id,
destination.data.as_ref(), destination.data.as_ref(),
destination_offset, destination_offset,
copy_size, copy_size,
@ -183,7 +163,6 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_copy_buffer_to_texture( DynContext::command_encoder_copy_buffer_to_texture(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
source, source,
destination, destination,
@ -200,7 +179,6 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_copy_texture_to_buffer( DynContext::command_encoder_copy_texture_to_buffer(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
source, source,
destination, destination,
@ -223,7 +201,6 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_copy_texture_to_texture( DynContext::command_encoder_copy_texture_to_texture(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
source, source,
destination, destination,
@ -247,9 +224,8 @@ impl CommandEncoder {
pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) { pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
DynContext::command_encoder_clear_texture( DynContext::command_encoder_clear_texture(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
texture, texture.data.as_ref(),
subresource_range, subresource_range,
); );
} }
@ -268,9 +244,8 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_clear_buffer( DynContext::command_encoder_clear_buffer(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
buffer, buffer.data.as_ref(),
offset, offset,
size, size,
); );
@ -278,25 +253,17 @@ impl CommandEncoder {
/// Inserts debug marker. /// Inserts debug marker.
pub fn insert_debug_marker(&mut self, label: &str) { pub fn insert_debug_marker(&mut self, label: &str) {
let id = self.id.as_ref().unwrap(); DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label);
DynContext::command_encoder_insert_debug_marker(
&*self.context,
id,
self.data.as_ref(),
label,
);
} }
/// Start record commands and group it into debug marker group. /// Start record commands and group it into debug marker group.
pub fn push_debug_group(&mut self, label: &str) { pub fn push_debug_group(&mut self, label: &str) {
let id = self.id.as_ref().unwrap(); DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label);
DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label);
} }
/// Stops command recording and creates debug group. /// Stops command recording and creates debug group.
pub fn pop_debug_group(&mut self) { pub fn pop_debug_group(&mut self) {
let id = self.id.as_ref().unwrap(); DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref());
DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref());
} }
/// Resolves a query set, writing the results into the supplied destination buffer. /// Resolves a query set, writing the results into the supplied destination buffer.
@ -312,13 +279,10 @@ impl CommandEncoder {
) { ) {
DynContext::command_encoder_resolve_query_set( DynContext::command_encoder_resolve_query_set(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(), self.data.as_ref(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_range.start, query_range.start,
query_range.end - query_range.start, query_range.end - query_range.start,
&destination.id,
destination.data.as_ref(), destination.data.as_ref(),
destination_offset, destination_offset,
) )
@ -341,14 +305,12 @@ impl CommandEncoder {
&mut self, &mut self,
hal_command_encoder_callback: F, hal_command_encoder_callback: F,
) -> Option<R> { ) -> Option<R> {
use wgc::id::CommandEncoderId;
self.context self.context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe { .map(|ctx| unsafe {
ctx.command_encoder_as_hal_mut::<A, F, R>( ctx.command_encoder_as_hal_mut::<A, F, R>(
CommandEncoderId::from(self.id.unwrap()), crate::context::downcast_ref(&self.data),
hal_command_encoder_callback, hal_command_encoder_callback,
) )
}) })
@ -372,9 +334,7 @@ impl CommandEncoder {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::command_encoder_write_timestamp( DynContext::command_encoder_write_timestamp(
&*self.context, &*self.context,
self.id.as_ref().unwrap(),
self.data.as_mut(), self.data.as_mut(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_index, query_index,
) )

View File

@ -1,6 +1,6 @@
use std::{marker::PhantomData, sync::Arc, thread}; use std::{marker::PhantomData, sync::Arc, thread};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// In-progress recording of a compute pass. /// In-progress recording of a compute pass.
@ -53,10 +53,8 @@ impl<'encoder> ComputePass<'encoder> {
) { ) {
DynContext::compute_pass_set_bind_group( DynContext::compute_pass_set_bind_group(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
index, index,
&bind_group.id,
bind_group.data.as_ref(), bind_group.data.as_ref(),
offsets, offsets,
); );
@ -66,9 +64,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) { pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
DynContext::compute_pass_set_pipeline( DynContext::compute_pass_set_pipeline(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(), pipeline.data.as_ref(),
); );
} }
@ -77,7 +73,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn insert_debug_marker(&mut self, label: &str) { pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::compute_pass_insert_debug_marker( DynContext::compute_pass_insert_debug_marker(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
label, label,
); );
@ -87,7 +82,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn push_debug_group(&mut self, label: &str) { pub fn push_debug_group(&mut self, label: &str) {
DynContext::compute_pass_push_debug_group( DynContext::compute_pass_push_debug_group(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
label, label,
); );
@ -95,11 +89,7 @@ impl<'encoder> ComputePass<'encoder> {
/// Stops command recording and creates debug group. /// Stops command recording and creates debug group.
pub fn pop_debug_group(&mut self) { pub fn pop_debug_group(&mut self) {
DynContext::compute_pass_pop_debug_group( DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
} }
/// Dispatches compute work operations. /// Dispatches compute work operations.
@ -108,7 +98,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
DynContext::compute_pass_dispatch_workgroups( DynContext::compute_pass_dispatch_workgroups(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
x, x,
y, y,
@ -126,9 +115,7 @@ impl<'encoder> ComputePass<'encoder> {
) { ) {
DynContext::compute_pass_dispatch_workgroups_indirect( DynContext::compute_pass_dispatch_workgroups_indirect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
); );
@ -148,7 +135,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) { pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
DynContext::compute_pass_set_push_constants( DynContext::compute_pass_set_push_constants(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
offset, offset,
data, data,
@ -167,9 +153,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_write_timestamp( DynContext::compute_pass_write_timestamp(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_index, query_index,
) )
@ -183,9 +167,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_begin_pipeline_statistics_query( DynContext::compute_pass_begin_pipeline_statistics_query(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_index, query_index,
); );
@ -196,7 +178,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn end_pipeline_statistics_query(&mut self) { pub fn end_pipeline_statistics_query(&mut self) {
DynContext::compute_pass_end_pipeline_statistics_query( DynContext::compute_pass_end_pipeline_statistics_query(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
); );
} }
@ -204,7 +185,6 @@ impl<'encoder> ComputePass<'encoder> {
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct ComputePassInner { pub(crate) struct ComputePassInner {
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
} }
@ -212,8 +192,7 @@ pub(crate) struct ComputePassInner {
impl Drop for ComputePassInner { impl Drop for ComputePassInner {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.compute_pass_end(self.data.as_mut());
.compute_pass_end(&mut self.id, self.data.as_mut());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a compute pipeline. /// Handle to a compute pipeline.
@ -12,38 +11,28 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct ComputePipeline { pub struct ComputePipeline {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePipeline: Send, Sync); static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
impl ComputePipeline { super::impl_partialeq_eq_hash!(ComputePipeline);
/// Returns a globally-unique identifier for this `ComputePipeline`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
impl ComputePipeline {
/// Get an object representing the bind group layout at a given index. /// Get an object representing the bind group layout at a given index.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
let (id, data) = self.context.compute_pipeline_get_bind_group_layout( let data = self
&self.id, .context
self.data.as_ref(), .compute_pipeline_get_bind_group_layout(self.data.as_ref(), index);
index, BindGroupLayout { context, data }
);
BindGroupLayout { context, id, data }
} }
} }
impl Drop for ComputePipeline { impl Drop for ComputePipeline {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.compute_pipeline_drop(self.data.as_ref());
.compute_pipeline_drop(&self.id, self.data.as_ref());
} }
} }
} }

View File

@ -2,7 +2,7 @@ use std::{error, fmt, future::Future, sync::Arc, thread};
use parking_lot::Mutex; use parking_lot::Mutex;
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Open connection to a graphics and/or compute device. /// Open connection to a graphics and/or compute device.
@ -16,7 +16,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Device { pub struct Device {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
@ -32,14 +31,6 @@ pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync); static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);
impl Device { impl Device {
/// Returns a globally-unique identifier for this `Device`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed. /// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed.
/// ///
/// Return `true` if the queue is empty, or `false` if there are more queue /// Return `true` if the queue is empty, or `false` if there are more queue
@ -50,7 +41,7 @@ impl Device {
/// ///
/// When running on WebGPU, this is a no-op. `Device`s are automatically polled. /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
pub fn poll(&self, maintain: Maintain) -> MaintainResult { pub fn poll(&self, maintain: Maintain) -> MaintainResult {
DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain) DynContext::device_poll(&*self.context, self.data.as_ref(), maintain)
} }
/// The features which can be used on this device. /// The features which can be used on this device.
@ -58,7 +49,7 @@ impl Device {
/// No additional features can be used, even if the underlying adapter can support them. /// No additional features can be used, even if the underlying adapter can support them.
#[must_use] #[must_use]
pub fn features(&self) -> Features { pub fn features(&self) -> Features {
DynContext::device_features(&*self.context, &self.id, self.data.as_ref()) DynContext::device_features(&*self.context, self.data.as_ref())
} }
/// The limits which can be used on this device. /// The limits which can be used on this device.
@ -66,7 +57,7 @@ impl Device {
/// No better limits can be used, even if the underlying adapter can support them. /// No better limits can be used, even if the underlying adapter can support them.
#[must_use] #[must_use]
pub fn limits(&self) -> Limits { pub fn limits(&self) -> Limits {
DynContext::device_limits(&*self.context, &self.id, self.data.as_ref()) DynContext::device_limits(&*self.context, self.data.as_ref())
} }
/// Creates a shader module from either SPIR-V or WGSL source code. /// Creates a shader module from either SPIR-V or WGSL source code.
@ -85,16 +76,14 @@ impl Device {
/// </div> /// </div>
#[must_use] #[must_use]
pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule { pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module( let data = DynContext::device_create_shader_module(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
desc, desc,
wgt::ShaderBoundChecks::new(), wgt::ShaderBoundChecks::new(),
); );
ShaderModule { ShaderModule {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -114,16 +103,14 @@ impl Device {
&self, &self,
desc: ShaderModuleDescriptor<'_>, desc: ShaderModuleDescriptor<'_>,
) -> ShaderModule { ) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module( let data = DynContext::device_create_shader_module(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
desc, desc,
unsafe { wgt::ShaderBoundChecks::unchecked() }, unsafe { wgt::ShaderBoundChecks::unchecked() },
); );
ShaderModule { ShaderModule {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -141,17 +128,11 @@ impl Device {
&self, &self,
desc: &ShaderModuleDescriptorSpirV<'_>, desc: &ShaderModuleDescriptorSpirV<'_>,
) -> ShaderModule { ) -> ShaderModule {
let (id, data) = unsafe { let data = unsafe {
DynContext::device_create_shader_module_spirv( DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc)
&*self.context,
&self.id,
self.data.as_ref(),
desc,
)
}; };
ShaderModule { ShaderModule {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -159,15 +140,10 @@ impl Device {
/// Creates an empty [`CommandEncoder`]. /// Creates an empty [`CommandEncoder`].
#[must_use] #[must_use]
pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder { pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
let (id, data) = DynContext::device_create_command_encoder( let data =
&*self.context, DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc);
&self.id,
self.data.as_ref(),
desc,
);
CommandEncoder { CommandEncoder {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id: Some(id),
data, data,
} }
} }
@ -178,15 +154,13 @@ impl Device {
&self, &self,
desc: &RenderBundleEncoderDescriptor<'_>, desc: &RenderBundleEncoderDescriptor<'_>,
) -> RenderBundleEncoder<'_> { ) -> RenderBundleEncoder<'_> {
let (id, data) = DynContext::device_create_render_bundle_encoder( let data = DynContext::device_create_render_bundle_encoder(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
desc, desc,
); );
RenderBundleEncoder { RenderBundleEncoder {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
parent: self, parent: self,
_p: Default::default(), _p: Default::default(),
@ -196,15 +170,9 @@ impl Device {
/// Creates a new [`BindGroup`]. /// Creates a new [`BindGroup`].
#[must_use] #[must_use]
pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup { pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
let (id, data) = DynContext::device_create_bind_group( let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc);
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
BindGroup { BindGroup {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -215,15 +183,10 @@ impl Device {
&self, &self,
desc: &BindGroupLayoutDescriptor<'_>, desc: &BindGroupLayoutDescriptor<'_>,
) -> BindGroupLayout { ) -> BindGroupLayout {
let (id, data) = DynContext::device_create_bind_group_layout( let data =
&*self.context, DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc);
&self.id,
self.data.as_ref(),
desc,
);
BindGroupLayout { BindGroupLayout {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -231,15 +194,10 @@ impl Device {
/// Creates a [`PipelineLayout`]. /// Creates a [`PipelineLayout`].
#[must_use] #[must_use]
pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout { pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
let (id, data) = DynContext::device_create_pipeline_layout( let data =
&*self.context, DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc);
&self.id,
self.data.as_ref(),
desc,
);
PipelineLayout { PipelineLayout {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -247,15 +205,10 @@ impl Device {
/// Creates a [`RenderPipeline`]. /// Creates a [`RenderPipeline`].
#[must_use] #[must_use]
pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline { pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
let (id, data) = DynContext::device_create_render_pipeline( let data =
&*self.context, DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc);
&self.id,
self.data.as_ref(),
desc,
);
RenderPipeline { RenderPipeline {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -263,15 +216,10 @@ impl Device {
/// Creates a [`ComputePipeline`]. /// Creates a [`ComputePipeline`].
#[must_use] #[must_use]
pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline { pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
let (id, data) = DynContext::device_create_compute_pipeline( let data =
&*self.context, DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc);
&self.id,
self.data.as_ref(),
desc,
);
ComputePipeline { ComputePipeline {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -284,12 +232,10 @@ impl Device {
map_context.initial_range = 0..desc.size; map_context.initial_range = 0..desc.size;
} }
let (id, data) = let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc);
DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc);
Buffer { Buffer {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
map_context: Mutex::new(map_context), map_context: Mutex::new(map_context),
size: desc.size, size: desc.size,
@ -302,11 +248,9 @@ impl Device {
/// `desc` specifies the general format of the texture. /// `desc` specifies the general format of the texture.
#[must_use] #[must_use]
pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture { pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
let (id, data) = let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc);
DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc);
Texture { Texture {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
owned: true, owned: true,
descriptor: TextureDescriptor { descriptor: TextureDescriptor {
@ -340,13 +284,12 @@ impl Device {
.unwrap() .unwrap()
.create_texture_from_hal::<A>( .create_texture_from_hal::<A>(
hal_texture, hal_texture,
self.data.as_ref().downcast_ref().unwrap(), crate::context::downcast_ref(&self.data),
desc, desc,
) )
}; };
Texture { Texture {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id: ObjectId::from(texture.id()),
data: Box::new(texture), data: Box::new(texture),
owned: true, owned: true,
descriptor: TextureDescriptor { descriptor: TextureDescriptor {
@ -376,7 +319,7 @@ impl Device {
map_context.initial_range = 0..desc.size; map_context.initial_range = 0..desc.size;
} }
let (id, buffer) = unsafe { let buffer = unsafe {
self.context self.context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
@ -385,14 +328,13 @@ impl Device {
.unwrap() .unwrap()
.create_buffer_from_hal::<A>( .create_buffer_from_hal::<A>(
hal_buffer, hal_buffer,
self.data.as_ref().downcast_ref().unwrap(), crate::context::downcast_ref(&self.data),
desc, desc,
) )
}; };
Buffer { Buffer {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id: ObjectId::from(id),
data: Box::new(buffer), data: Box::new(buffer),
map_context: Mutex::new(map_context), map_context: Mutex::new(map_context),
size: desc.size, size: desc.size,
@ -405,11 +347,9 @@ impl Device {
/// `desc` specifies the behavior of the sampler. /// `desc` specifies the behavior of the sampler.
#[must_use] #[must_use]
pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler { pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
let (id, data) = let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc);
DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc);
Sampler { Sampler {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -417,11 +357,9 @@ impl Device {
/// Creates a new [`QuerySet`]. /// Creates a new [`QuerySet`].
#[must_use] #[must_use]
pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet { pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
let (id, data) = let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc);
DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc);
QuerySet { QuerySet {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -429,29 +367,28 @@ impl Device {
/// Set a callback for errors that are not handled in error scopes. /// Set a callback for errors that are not handled in error scopes.
pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) { pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
self.context self.context
.device_on_uncaptured_error(&self.id, self.data.as_ref(), handler); .device_on_uncaptured_error(self.data.as_ref(), handler);
} }
/// Push an error scope. /// Push an error scope.
pub fn push_error_scope(&self, filter: ErrorFilter) { pub fn push_error_scope(&self, filter: ErrorFilter) {
self.context self.context
.device_push_error_scope(&self.id, self.data.as_ref(), filter); .device_push_error_scope(self.data.as_ref(), filter);
} }
/// Pop an error scope. /// Pop an error scope.
pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend { pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
self.context self.context.device_pop_error_scope(self.data.as_ref())
.device_pop_error_scope(&self.id, self.data.as_ref())
} }
/// Starts frame capture. /// Starts frame capture.
pub fn start_capture(&self) { pub fn start_capture(&self) {
DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref()) DynContext::device_start_capture(&*self.context, self.data.as_ref())
} }
/// Stops frame capture. /// Stops frame capture.
pub fn stop_capture(&self) { pub fn stop_capture(&self) {
DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref()) DynContext::device_stop_capture(&*self.context, self.data.as_ref())
} }
/// Query internal counters from the native backend for debugging purposes. /// Query internal counters from the native backend for debugging purposes.
@ -462,7 +399,7 @@ impl Device {
/// If a counter is not set, its contains its default value (zero). /// If a counter is not set, its contains its default value (zero).
#[must_use] #[must_use]
pub fn get_internal_counters(&self) -> wgt::InternalCounters { pub fn get_internal_counters(&self) -> wgt::InternalCounters {
DynContext::device_get_internal_counters(&*self.context, &self.id, self.data.as_ref()) DynContext::device_get_internal_counters(&*self.context, self.data.as_ref())
} }
/// Generate an GPU memory allocation report if the underlying backend supports it. /// Generate an GPU memory allocation report if the underlying backend supports it.
@ -472,7 +409,7 @@ impl Device {
/// for example as a workaround for driver issues. /// for example as a workaround for driver issues.
#[must_use] #[must_use]
pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> { pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
DynContext::generate_allocator_report(&*self.context, &self.id, self.data.as_ref()) DynContext::generate_allocator_report(&*self.context, self.data.as_ref())
} }
/// Apply a callback to this `Device`'s underlying backend device. /// Apply a callback to this `Device`'s underlying backend device.
@ -504,7 +441,7 @@ impl Device {
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe { .map(|ctx| unsafe {
ctx.device_as_hal::<A, F, R>( ctx.device_as_hal::<A, F, R>(
self.data.as_ref().downcast_ref().unwrap(), crate::context::downcast_ref(&self.data),
hal_device_callback, hal_device_callback,
) )
}) })
@ -512,7 +449,7 @@ impl Device {
/// Destroy this device. /// Destroy this device.
pub fn destroy(&self) { pub fn destroy(&self) {
DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref()) DynContext::device_destroy(&*self.context, self.data.as_ref())
} }
/// Set a DeviceLostCallback on this device. /// Set a DeviceLostCallback on this device.
@ -522,7 +459,6 @@ impl Device {
) { ) {
DynContext::device_set_device_lost_callback( DynContext::device_set_device_lost_callback(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
Box::new(callback), Box::new(callback),
) )
@ -531,7 +467,7 @@ impl Device {
/// Test-only function to make this device invalid. /// Test-only function to make this device invalid.
#[doc(hidden)] #[doc(hidden)]
pub fn make_invalid(&self) { pub fn make_invalid(&self) {
DynContext::device_make_invalid(&*self.context, &self.id, self.data.as_ref()) DynContext::device_make_invalid(&*self.context, self.data.as_ref())
} }
/// Create a [`PipelineCache`] with initial data /// Create a [`PipelineCache`] with initial data
@ -576,17 +512,11 @@ impl Device {
&self, &self,
desc: &PipelineCacheDescriptor<'_>, desc: &PipelineCacheDescriptor<'_>,
) -> PipelineCache { ) -> PipelineCache {
let (id, data) = unsafe { let data = unsafe {
DynContext::device_create_pipeline_cache( DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc)
&*self.context,
&self.id,
self.data.as_ref(),
desc,
)
}; };
PipelineCache { PipelineCache {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -595,7 +525,7 @@ impl Device {
impl Drop for Device { impl Drop for Device {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.device_drop(&self.id, self.data.as_ref()); self.context.device_drop(self.data.as_ref());
} }
} }
} }

View File

@ -1,67 +0,0 @@
use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64};
use crate::context::ObjectId;
/// Opaque globally-unique identifier
#[repr(transparent)]
pub struct Id<T>(NonZeroU64, PhantomData<*mut T>);
impl<T> Id<T> {
/// Create a new `Id` from a ObjectID.
pub(crate) fn new(id: ObjectId) -> Self {
Id(id.global_id(), PhantomData)
}
/// For testing use only. We provide no guarantees about the actual value of the ids.
#[doc(hidden)]
pub fn inner(&self) -> u64 {
self.0.get()
}
}
// SAFETY: `Id` is a bare `NonZeroU64`, the type parameter is a marker purely to avoid confusing Ids
// returned for different types , so `Id` can safely implement Send and Sync.
unsafe impl<T> Send for Id<T> {}
// SAFETY: See the implementation for `Send`.
unsafe impl<T> Sync for Id<T> {}
impl<T> Clone for Id<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T> Copy for Id<T> {}
impl<T> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Id").field(&self.0).finish()
}
}
impl<T> PartialEq for Id<T> {
fn eq(&self, other: &Id<T>) -> bool {
self.0 == other.0
}
}
impl<T> Eq for Id<T> {}
impl<T> PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Id<T>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Id<T> {
fn cmp(&self, other: &Id<T>) -> Ordering {
self.0.cmp(&other.0)
}
}
impl<T> std::hash::Hash for Id<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.hash(state)
}
}

View File

@ -202,8 +202,6 @@ impl Instance {
/// - `backends` - Backends from which to enumerate adapters. /// - `backends` - Backends from which to enumerate adapters.
#[cfg(native)] #[cfg(native)]
pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> { pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> {
use crate::context::ObjectId;
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
self.context self.context
.as_any() .as_any()
@ -211,10 +209,9 @@ impl Instance {
.map(|ctx| { .map(|ctx| {
ctx.enumerate_adapters(backends) ctx.enumerate_adapters(backends)
.into_iter() .into_iter()
.map(move |id| crate::Adapter { .map(move |adapter| crate::Adapter {
context: Arc::clone(&context), context: Arc::clone(&context),
id: ObjectId::from(id), data: Box::new(adapter),
data: Box::new(()),
}) })
.collect() .collect()
}) })
@ -234,11 +231,7 @@ impl Instance {
) -> impl Future<Output = Option<Adapter>> + WasmNotSend { ) -> impl Future<Output = Option<Adapter>> + WasmNotSend {
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
let adapter = self.context.instance_request_adapter(options); let adapter = self.context.instance_request_adapter(options);
async move { async move { adapter.await.map(|data| Adapter { context, data }) }
adapter
.await
.map(|(id, data)| Adapter { context, id, data })
}
} }
/// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`]. /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
@ -252,18 +245,16 @@ impl Instance {
hal_adapter: hal::ExposedAdapter<A>, hal_adapter: hal::ExposedAdapter<A>,
) -> Adapter { ) -> Adapter {
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
let id = unsafe { let adapter = unsafe {
context context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
.unwrap() .unwrap()
.create_adapter_from_hal(hal_adapter) .create_adapter_from_hal(hal_adapter)
.into()
}; };
Adapter { Adapter {
context, context,
id, data: Box::new(adapter),
data: Box::new(()),
} }
} }
@ -355,12 +346,11 @@ impl Instance {
&self, &self,
target: SurfaceTargetUnsafe, target: SurfaceTargetUnsafe,
) -> Result<Surface<'window>, CreateSurfaceError> { ) -> Result<Surface<'window>, CreateSurfaceError> {
let (id, data) = unsafe { self.context.instance_create_surface(target) }?; let data = unsafe { self.context.instance_create_surface(target) }?;
Ok(Surface { Ok(Surface {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
_handle_source: None, _handle_source: None,
id,
surface_data: data, surface_data: data,
config: Mutex::new(None), config: Mutex::new(None),
}) })

View File

@ -32,7 +32,6 @@ mod common_pipeline;
mod compute_pass; mod compute_pass;
mod compute_pipeline; mod compute_pipeline;
mod device; mod device;
mod id;
mod instance; mod instance;
mod pipeline_cache; mod pipeline_cache;
mod pipeline_layout; mod pipeline_layout;
@ -59,7 +58,6 @@ pub use common_pipeline::*;
pub use compute_pass::*; pub use compute_pass::*;
pub use compute_pipeline::*; pub use compute_pipeline::*;
pub use device::*; pub use device::*;
pub use id::*;
pub use instance::*; pub use instance::*;
pub use pipeline_cache::*; pub use pipeline_cache::*;
pub use pipeline_layout::*; pub use pipeline_layout::*;
@ -78,3 +76,35 @@ pub use texture_view::*;
/// Object debugging label. /// Object debugging label.
pub type Label<'a> = Option<&'a str>; pub type Label<'a> = Option<&'a str>;
macro_rules! impl_partialeq_eq_hash {
($ty:ty) => {
impl PartialEq for $ty {
fn eq(&self, other: &Self) -> bool {
std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref())
}
}
impl Eq for $ty {}
impl std::hash::Hash for $ty {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let ptr = self.data.as_ref() as *const Data as *const ();
ptr.hash(state);
}
}
impl PartialOrd for $ty {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for $ty {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let a = self.data.as_ref() as *const Data as *const ();
let b = other.data.as_ref() as *const Data as *const ();
a.cmp(&b)
}
}
};
}
pub(crate) use impl_partialeq_eq_hash;

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a pipeline cache, which is used to accelerate /// Handle to a pipeline cache, which is used to accelerate
@ -68,7 +67,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct PipelineCache { pub struct PipelineCache {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
@ -83,16 +81,14 @@ impl PipelineCache {
/// ///
/// This function is unique to the Rust API of `wgpu`. /// This function is unique to the Rust API of `wgpu`.
pub fn get_data(&self) -> Option<Vec<u8>> { pub fn get_data(&self) -> Option<Vec<u8>> {
self.context self.context.pipeline_cache_get_data(self.data.as_ref())
.pipeline_cache_get_data(&self.id, self.data.as_ref())
} }
} }
impl Drop for PipelineCache { impl Drop for PipelineCache {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.pipeline_cache_drop(self.data.as_ref());
.pipeline_cache_drop(&self.id, self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a pipeline layout. /// Handle to a pipeline layout.
@ -12,27 +11,17 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct PipelineLayout { pub struct PipelineLayout {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(PipelineLayout: Send, Sync); static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
impl PipelineLayout { super::impl_partialeq_eq_hash!(PipelineLayout);
/// Returns a globally-unique identifier for this `PipelineLayout`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for PipelineLayout { impl Drop for PipelineLayout {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.pipeline_layout_drop(self.data.as_ref());
.pipeline_layout_drop(&self.id, self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a query set. /// Handle to a query set.
@ -11,27 +10,18 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct QuerySet { pub struct QuerySet {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(QuerySet: Send, Sync); static_assertions::assert_impl_all!(QuerySet: Send, Sync);
impl QuerySet { super::impl_partialeq_eq_hash!(QuerySet);
/// Returns a globally-unique identifier for this `QuerySet`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for QuerySet { impl Drop for QuerySet {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.query_set_drop(&self.id, self.data.as_ref()); self.context.query_set_drop(self.data.as_ref());
} }
} }
} }

View File

@ -4,7 +4,7 @@ use std::{
thread, thread,
}; };
use crate::context::{DynContext, ObjectId, QueueWriteBuffer}; use crate::context::{DynContext, QueueWriteBuffer};
use crate::*; use crate::*;
/// Handle to a command queue on a device. /// Handle to a command queue on a device.
@ -17,7 +17,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Queue { pub struct Queue {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
@ -26,7 +25,7 @@ static_assertions::assert_impl_all!(Queue: Send, Sync);
impl Drop for Queue { impl Drop for Queue {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.queue_drop(&self.id, self.data.as_ref()); self.context.queue_drop(self.data.as_ref());
} }
} }
} }
@ -87,9 +86,7 @@ impl<'a> Drop for QueueWriteBufferView<'a> {
fn drop(&mut self) { fn drop(&mut self) {
DynContext::queue_write_staging_buffer( DynContext::queue_write_staging_buffer(
&*self.queue.context, &*self.queue.context,
&self.queue.id,
self.queue.data.as_ref(), self.queue.data.as_ref(),
&self.buffer.id,
self.buffer.data.as_ref(), self.buffer.data.as_ref(),
self.offset, self.offset,
&*self.inner, &*self.inner,
@ -121,9 +118,7 @@ impl Queue {
pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) { pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
DynContext::queue_write_buffer( DynContext::queue_write_buffer(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
&buffer.id,
buffer.data.as_ref(), buffer.data.as_ref(),
offset, offset,
data, data,
@ -168,19 +163,13 @@ impl Queue {
profiling::scope!("Queue::write_buffer_with"); profiling::scope!("Queue::write_buffer_with");
DynContext::queue_validate_write_buffer( DynContext::queue_validate_write_buffer(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
&buffer.id,
buffer.data.as_ref(), buffer.data.as_ref(),
offset, offset,
size, size,
)?; )?;
let staging_buffer = DynContext::queue_create_staging_buffer( let staging_buffer =
&*self.context, DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?;
&self.id,
self.data.as_ref(),
size,
)?;
Some(QueueWriteBufferView { Some(QueueWriteBufferView {
queue: self, queue: self,
buffer, buffer,
@ -222,7 +211,6 @@ impl Queue {
) { ) {
DynContext::queue_write_texture( DynContext::queue_write_texture(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
texture, texture,
data, data,
@ -241,7 +229,6 @@ impl Queue {
) { ) {
DynContext::queue_copy_external_image_to_texture( DynContext::queue_copy_external_image_to_texture(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
source, source,
dest, dest,
@ -256,14 +243,10 @@ impl Queue {
) -> SubmissionIndex { ) -> SubmissionIndex {
let mut command_buffers = command_buffers let mut command_buffers = command_buffers
.into_iter() .into_iter()
.map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())); .map(|mut comb| comb.data.take().unwrap());
let data = DynContext::queue_submit( let data =
&*self.context, DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers);
&self.id,
self.data.as_ref(),
&mut command_buffers,
);
SubmissionIndex(data) SubmissionIndex(data)
} }
@ -275,7 +258,7 @@ impl Queue {
/// Timestamp values are represented in nanosecond values on WebGPU, see `<https://gpuweb.github.io/gpuweb/#timestamp>` /// Timestamp values are represented in nanosecond values on WebGPU, see `<https://gpuweb.github.io/gpuweb/#timestamp>`
/// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required. /// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required.
pub fn get_timestamp_period(&self) -> f32 { pub fn get_timestamp_period(&self) -> f32 {
DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref()) DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref())
} }
/// Registers a callback when the previous call to submit finishes running on the gpu. This callback /// Registers a callback when the previous call to submit finishes running on the gpu. This callback
@ -292,7 +275,6 @@ impl Queue {
pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) { pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
DynContext::queue_on_submitted_work_done( DynContext::queue_on_submitted_work_done(
&*self.context, &*self.context,
&self.id,
self.data.as_ref(), self.data.as_ref(),
Box::new(callback), Box::new(callback),
) )

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Pre-prepared reusable bundle of GPU operations. /// Pre-prepared reusable bundle of GPU operations.
@ -15,27 +14,17 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct RenderBundle { pub struct RenderBundle {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderBundle: Send, Sync); static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
impl RenderBundle { super::impl_partialeq_eq_hash!(RenderBundle);
/// Returns a globally-unique identifier for this `RenderBundle`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for RenderBundle { impl Drop for RenderBundle {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.render_bundle_drop(self.data.as_ref());
.render_bundle_drop(&self.id, self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,6 @@
use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc}; use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Encodes a series of GPU operations into a reusable "render bundle". /// Encodes a series of GPU operations into a reusable "render bundle".
@ -17,7 +17,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct RenderBundleEncoder<'a> { pub struct RenderBundleEncoder<'a> {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
pub(crate) parent: &'a Device, pub(crate) parent: &'a Device,
/// This type should be !Send !Sync, because it represents an allocation on this thread's /// This type should be !Send !Sync, because it represents an allocation on this thread's
@ -53,11 +52,9 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn
impl<'a> RenderBundleEncoder<'a> { impl<'a> RenderBundleEncoder<'a> {
/// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes. /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes.
pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle { pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle {
let (id, data) = let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc);
DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc);
RenderBundle { RenderBundle {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
@ -74,10 +71,8 @@ impl<'a> RenderBundleEncoder<'a> {
) { ) {
DynContext::render_bundle_encoder_set_bind_group( DynContext::render_bundle_encoder_set_bind_group(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
index, index,
&bind_group.id,
bind_group.data.as_ref(), bind_group.data.as_ref(),
offsets, offsets,
) )
@ -89,9 +84,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) { pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
DynContext::render_bundle_encoder_set_pipeline( DynContext::render_bundle_encoder_set_pipeline(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(), pipeline.data.as_ref(),
) )
} }
@ -103,9 +96,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
DynContext::render_bundle_encoder_set_index_buffer( DynContext::render_bundle_encoder_set_index_buffer(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(), buffer_slice.buffer.data.as_ref(),
index_format, index_format,
buffer_slice.offset, buffer_slice.offset,
@ -126,10 +117,8 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
DynContext::render_bundle_encoder_set_vertex_buffer( DynContext::render_bundle_encoder_set_vertex_buffer(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
slot, slot,
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(), buffer_slice.buffer.data.as_ref(),
buffer_slice.offset, buffer_slice.offset,
buffer_slice.size, buffer_slice.size,
@ -157,7 +146,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) { pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw( DynContext::render_bundle_encoder_draw(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
vertices, vertices,
instances, instances,
@ -188,7 +176,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) { pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw_indexed( DynContext::render_bundle_encoder_draw_indexed(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
indices, indices,
base_vertex, base_vertex,
@ -204,9 +191,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) { pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
DynContext::render_bundle_encoder_draw_indirect( DynContext::render_bundle_encoder_draw_indirect(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
); );
@ -226,9 +211,7 @@ impl<'a> RenderBundleEncoder<'a> {
) { ) {
DynContext::render_bundle_encoder_draw_indexed_indirect( DynContext::render_bundle_encoder_draw_indexed_indirect(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
); );
@ -268,7 +251,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_bundle_encoder_set_push_constants( DynContext::render_bundle_encoder_set_push_constants(
&*self.parent.context, &*self.parent.context,
&mut self.id,
self.data.as_mut(), self.data.as_mut(),
stages, stages,
offset, offset,

View File

@ -1,11 +1,10 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct RenderPassInner { pub(crate) struct RenderPassInner {
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
} }
@ -13,8 +12,7 @@ pub(crate) struct RenderPassInner {
impl Drop for RenderPassInner { impl Drop for RenderPassInner {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.render_pass_end(self.data.as_mut());
.render_pass_end(&mut self.id, self.data.as_mut());
} }
} }
} }
@ -84,10 +82,8 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_set_bind_group( DynContext::render_pass_set_bind_group(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
index, index,
&bind_group.id,
bind_group.data.as_ref(), bind_group.data.as_ref(),
offsets, offsets,
) )
@ -99,9 +95,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) { pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) {
DynContext::render_pass_set_pipeline( DynContext::render_pass_set_pipeline(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(), pipeline.data.as_ref(),
) )
} }
@ -114,7 +108,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_blend_constant(&mut self, color: Color) { pub fn set_blend_constant(&mut self, color: Color) {
DynContext::render_pass_set_blend_constant( DynContext::render_pass_set_blend_constant(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
color, color,
) )
@ -127,9 +120,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) {
DynContext::render_pass_set_index_buffer( DynContext::render_pass_set_index_buffer(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(), buffer_slice.buffer.data.as_ref(),
index_format, index_format,
buffer_slice.offset, buffer_slice.offset,
@ -150,10 +141,8 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) {
DynContext::render_pass_set_vertex_buffer( DynContext::render_pass_set_vertex_buffer(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
slot, slot,
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(), buffer_slice.buffer.data.as_ref(),
buffer_slice.offset, buffer_slice.offset,
buffer_slice.size, buffer_slice.size,
@ -172,7 +161,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
DynContext::render_pass_set_scissor_rect( DynContext::render_pass_set_scissor_rect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
x, x,
y, y,
@ -190,7 +178,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) { pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
DynContext::render_pass_set_viewport( DynContext::render_pass_set_viewport(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
x, x,
y, y,
@ -208,7 +195,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_stencil_reference(&mut self, reference: u32) { pub fn set_stencil_reference(&mut self, reference: u32) {
DynContext::render_pass_set_stencil_reference( DynContext::render_pass_set_stencil_reference(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
reference, reference,
); );
@ -218,7 +204,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn insert_debug_marker(&mut self, label: &str) { pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::render_pass_insert_debug_marker( DynContext::render_pass_insert_debug_marker(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
label, label,
); );
@ -228,7 +213,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn push_debug_group(&mut self, label: &str) { pub fn push_debug_group(&mut self, label: &str) {
DynContext::render_pass_push_debug_group( DynContext::render_pass_push_debug_group(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
label, label,
); );
@ -236,11 +220,7 @@ impl<'encoder> RenderPass<'encoder> {
/// Stops command recording and creates debug group. /// Stops command recording and creates debug group.
pub fn pop_debug_group(&mut self) { pub fn pop_debug_group(&mut self) {
DynContext::render_pass_pop_debug_group( DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
} }
/// Draws primitives from the active vertex buffer(s). /// Draws primitives from the active vertex buffer(s).
@ -267,7 +247,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) { pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_pass_draw( DynContext::render_pass_draw(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
vertices, vertices,
instances, instances,
@ -301,7 +280,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) { pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_pass_draw_indexed( DynContext::render_pass_draw_indexed(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
indices, indices,
base_vertex, base_vertex,
@ -325,9 +303,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) { pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) {
DynContext::render_pass_draw_indirect( DynContext::render_pass_draw_indirect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
); );
@ -354,9 +330,7 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_draw_indexed_indirect( DynContext::render_pass_draw_indexed_indirect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
); );
@ -371,13 +345,10 @@ impl<'encoder> RenderPass<'encoder> {
&mut self, &mut self,
render_bundles: I, render_bundles: I,
) { ) {
let mut render_bundles = render_bundles let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref());
.into_iter()
.map(|rb| (&rb.id, rb.data.as_ref()));
DynContext::render_pass_execute_bundles( DynContext::render_pass_execute_bundles(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&mut render_bundles, &mut render_bundles,
) )
@ -404,9 +375,7 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_multi_draw_indirect( DynContext::render_pass_multi_draw_indirect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
count, count,
@ -432,9 +401,7 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_multi_draw_indexed_indirect( DynContext::render_pass_multi_draw_indexed_indirect(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
count, count,
@ -476,12 +443,9 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_multi_draw_indirect_count( DynContext::render_pass_multi_draw_indirect_count(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
&count_buffer.id,
count_buffer.data.as_ref(), count_buffer.data.as_ref(),
count_offset, count_offset,
max_count, max_count,
@ -523,12 +487,9 @@ impl<'encoder> RenderPass<'encoder> {
) { ) {
DynContext::render_pass_multi_draw_indexed_indirect_count( DynContext::render_pass_multi_draw_indexed_indirect_count(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(), indirect_buffer.data.as_ref(),
indirect_offset, indirect_offset,
&count_buffer.id,
count_buffer.data.as_ref(), count_buffer.data.as_ref(),
count_offset, count_offset,
max_count, max_count,
@ -581,7 +542,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_pass_set_push_constants( DynContext::render_pass_set_push_constants(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
stages, stages,
offset, offset,
@ -602,9 +562,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_write_timestamp( DynContext::render_pass_write_timestamp(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_index, query_index,
) )
@ -617,7 +575,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn begin_occlusion_query(&mut self, query_index: u32) { pub fn begin_occlusion_query(&mut self, query_index: u32) {
DynContext::render_pass_begin_occlusion_query( DynContext::render_pass_begin_occlusion_query(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
query_index, query_index,
); );
@ -626,11 +583,7 @@ impl<'encoder> RenderPass<'encoder> {
/// End the occlusion query on this render pass. It can be started with /// End the occlusion query on this render pass. It can be started with
/// `begin_occlusion_query`. Occlusion queries may not be nested. /// `begin_occlusion_query`. Occlusion queries may not be nested.
pub fn end_occlusion_query(&mut self) { pub fn end_occlusion_query(&mut self) {
DynContext::render_pass_end_occlusion_query( DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut());
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
} }
} }
@ -641,9 +594,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_begin_pipeline_statistics_query( DynContext::render_pass_begin_pipeline_statistics_query(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(), query_set.data.as_ref(),
query_index, query_index,
); );
@ -654,7 +605,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn end_pipeline_statistics_query(&mut self) { pub fn end_pipeline_statistics_query(&mut self) {
DynContext::render_pass_end_pipeline_statistics_query( DynContext::render_pass_end_pipeline_statistics_query(
&*self.inner.context, &*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(), self.inner.data.as_mut(),
); );
} }

View File

@ -1,6 +1,5 @@
use std::{num::NonZeroU32, sync::Arc, thread}; use std::{num::NonZeroU32, sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a rendering (graphics) pipeline. /// Handle to a rendering (graphics) pipeline.
@ -12,37 +11,29 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct RenderPipeline { pub struct RenderPipeline {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderPipeline: Send, Sync); static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
super::impl_partialeq_eq_hash!(RenderPipeline);
impl Drop for RenderPipeline { impl Drop for RenderPipeline {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.render_pipeline_drop(self.data.as_ref());
.render_pipeline_drop(&self.id, self.data.as_ref());
} }
} }
} }
impl RenderPipeline { impl RenderPipeline {
/// Returns a globally-unique identifier for this `RenderPipeline`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Get an object representing the bind group layout at a given index. /// Get an object representing the bind group layout at a given index.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context); let context = Arc::clone(&self.context);
let (id, data) = let data = self
self.context .context
.render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index); .render_pipeline_get_bind_group_layout(self.data.as_ref(), index);
BindGroupLayout { context, id, data } BindGroupLayout { context, data }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a sampler. /// Handle to a sampler.
@ -15,26 +14,17 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Sampler { pub struct Sampler {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(Sampler: Send, Sync); static_assertions::assert_impl_all!(Sampler: Send, Sync);
impl Sampler { super::impl_partialeq_eq_hash!(Sampler);
/// Returns a globally-unique identifier for this `Sampler`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
impl Drop for Sampler { impl Drop for Sampler {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.sampler_drop(&self.id, self.data.as_ref()); self.context.sampler_drop(self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread}; use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a compiled shader module. /// Handle to a compiled shader module.
@ -14,34 +13,25 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct ShaderModule { pub struct ShaderModule {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(ShaderModule: Send, Sync); static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
super::impl_partialeq_eq_hash!(ShaderModule);
impl Drop for ShaderModule { impl Drop for ShaderModule {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.shader_module_drop(self.data.as_ref());
.shader_module_drop(&self.id, self.data.as_ref());
} }
} }
} }
impl ShaderModule { impl ShaderModule {
/// Returns a globally-unique identifier for this `ShaderModule`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Get the compilation info for the shader module. /// Get the compilation info for the shader module.
pub fn get_compilation_info(&self) -> impl Future<Output = CompilationInfo> + WasmNotSend { pub fn get_compilation_info(&self) -> impl Future<Output = CompilationInfo> + WasmNotSend {
self.context self.context.shader_get_compilation_info(self.data.as_ref())
.shader_get_compilation_info(&self.id, self.data.as_ref())
} }
} }

View File

@ -3,7 +3,7 @@ use std::{error, fmt, sync::Arc, thread};
use parking_lot::Mutex; use parking_lot::Mutex;
use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Describes a [`Surface`]. /// Describes a [`Surface`].
@ -32,9 +32,6 @@ pub struct Surface<'window> {
/// would become invalid when the window is dropped. /// would become invalid when the window is dropped.
pub(crate) _handle_source: Option<Box<dyn WindowHandle + 'window>>, pub(crate) _handle_source: Option<Box<dyn WindowHandle + 'window>>,
/// Wgpu-core surface id.
pub(crate) id: ObjectId,
/// Additional surface data returned by [`DynContext::instance_create_surface`]. /// Additional surface data returned by [`DynContext::instance_create_surface`].
pub(crate) surface_data: Box<Data>, pub(crate) surface_data: Box<Data>,
@ -48,23 +45,13 @@ pub struct Surface<'window> {
} }
impl Surface<'_> { impl Surface<'_> {
/// Returns a globally-unique identifier for this `Surface`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Surface<'_>> {
Id::new(self.id)
}
/// Returns the capabilities of the surface when used with the given adapter. /// Returns the capabilities of the surface when used with the given adapter.
/// ///
/// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter. /// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter.
pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities { pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
DynContext::surface_get_capabilities( DynContext::surface_get_capabilities(
&*self.context, &*self.context,
&self.id,
self.surface_data.as_ref(), self.surface_data.as_ref(),
&adapter.id,
adapter.data.as_ref(), adapter.data.as_ref(),
) )
} }
@ -101,9 +88,7 @@ impl Surface<'_> {
pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) { pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
DynContext::surface_configure( DynContext::surface_configure(
&*self.context, &*self.context,
&self.id,
self.surface_data.as_ref(), self.surface_data.as_ref(),
&device.id,
device.data.as_ref(), device.data.as_ref(),
config, config,
); );
@ -121,11 +106,8 @@ impl Surface<'_> {
/// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated, /// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated,
/// recreating the swapchain will panic. /// recreating the swapchain will panic.
pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> { pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
let (texture_id, texture_data, status, detail) = DynContext::surface_get_current_texture( let (texture_data, status, detail) =
&*self.context, DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref());
&self.id,
self.surface_data.as_ref(),
);
let suboptimal = match status { let suboptimal = match status {
SurfaceStatus::Good => false, SurfaceStatus::Good => false,
@ -155,12 +137,10 @@ impl Surface<'_> {
view_formats: &[], view_formats: &[],
}; };
texture_id texture_data
.zip(texture_data) .map(|data| SurfaceTexture {
.map(|(id, data)| SurfaceTexture {
texture: Texture { texture: Texture {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
owned: false, owned: false,
descriptor, descriptor,
@ -188,7 +168,7 @@ impl Surface<'_> {
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe { .map(|ctx| unsafe {
ctx.surface_as_hal::<A, F, R>( ctx.surface_as_hal::<A, F, R>(
self.surface_data.downcast_ref().unwrap(), crate::context::downcast_ref(&self.surface_data),
hal_surface_callback, hal_surface_callback,
) )
}) })
@ -209,7 +189,6 @@ impl<'window> fmt::Debug for Surface<'window> {
"None" "None"
}, },
) )
.field("id", &self.id)
.field("data", &self.surface_data) .field("data", &self.surface_data)
.field("config", &self.config) .field("config", &self.config)
.finish() .finish()
@ -222,8 +201,7 @@ static_assertions::assert_impl_all!(Surface<'_>: Send, Sync);
impl Drop for Surface<'_> { impl Drop for Surface<'_> {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context self.context.surface_drop(self.surface_data.as_ref())
.surface_drop(&self.id, self.surface_data.as_ref())
} }
} }
} }

View File

@ -1,6 +1,6 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::{DynContext, ObjectId}; use crate::context::DynContext;
use crate::*; use crate::*;
/// Handle to a texture on the GPU. /// Handle to a texture on the GPU.
@ -11,7 +11,6 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct Texture { pub struct Texture {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
pub(crate) owned: bool, pub(crate) owned: bool,
pub(crate) descriptor: TextureDescriptor<'static>, pub(crate) descriptor: TextureDescriptor<'static>,
@ -19,15 +18,9 @@ pub struct Texture {
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(Texture: Send, Sync); static_assertions::assert_impl_all!(Texture: Send, Sync);
impl Texture { super::impl_partialeq_eq_hash!(Texture);
/// Returns a globally-unique identifier for this `Texture`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
impl Texture {
/// Returns the inner hal Texture using a callback. The hal texture will be `None` if the /// Returns the inner hal Texture using a callback. The hal texture will be `None` if the
/// backend type argument does not match with this wgpu Texture /// backend type argument does not match with this wgpu Texture
/// ///
@ -39,14 +32,17 @@ impl Texture {
&self, &self,
hal_texture_callback: F, hal_texture_callback: F,
) -> R { ) -> R {
let texture = self.data.as_ref().downcast_ref().unwrap();
if let Some(ctx) = self if let Some(ctx) = self
.context .context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
{ {
unsafe { ctx.texture_as_hal::<A, F, R>(texture, hal_texture_callback) } unsafe {
ctx.texture_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_texture_callback,
)
}
} else { } else {
hal_texture_callback(None) hal_texture_callback(None)
} }
@ -54,18 +50,16 @@ impl Texture {
/// Creates a view of this texture. /// Creates a view of this texture.
pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView { pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView {
let (id, data) = let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc);
DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc);
TextureView { TextureView {
context: Arc::clone(&self.context), context: Arc::clone(&self.context),
id,
data, data,
} }
} }
/// Destroy the associated native resources as soon as possible. /// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) { pub fn destroy(&self) {
DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref()); DynContext::texture_destroy(&*self.context, self.data.as_ref());
} }
/// Make an `ImageCopyTexture` representing the whole texture. /// Make an `ImageCopyTexture` representing the whole texture.
@ -145,7 +139,7 @@ impl Texture {
impl Drop for Texture { impl Drop for Texture {
fn drop(&mut self) { fn drop(&mut self) {
if self.owned && !thread::panicking() { if self.owned && !thread::panicking() {
self.context.texture_drop(&self.id, self.data.as_ref()); self.context.texture_drop(self.data.as_ref());
} }
} }
} }

View File

@ -1,6 +1,5 @@
use std::{sync::Arc, thread}; use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*; use crate::*;
/// Handle to a texture view. /// Handle to a texture view.
@ -12,21 +11,14 @@ use crate::*;
#[derive(Debug)] #[derive(Debug)]
pub struct TextureView { pub struct TextureView {
pub(crate) context: Arc<C>, pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>, pub(crate) data: Box<Data>,
} }
#[cfg(send_sync)] #[cfg(send_sync)]
static_assertions::assert_impl_all!(TextureView: Send, Sync); static_assertions::assert_impl_all!(TextureView: Send, Sync);
impl TextureView { super::impl_partialeq_eq_hash!(TextureView);
/// Returns a globally-unique identifier for this `TextureView`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
impl TextureView {
/// Returns the inner hal TextureView using a callback. The hal texture will be `None` if the /// Returns the inner hal TextureView using a callback. The hal texture will be `None` if the
/// backend type argument does not match with this wgpu Texture /// backend type argument does not match with this wgpu Texture
/// ///
@ -38,17 +30,16 @@ impl TextureView {
&self, &self,
hal_texture_view_callback: F, hal_texture_view_callback: F,
) -> R { ) -> R {
use wgc::id::TextureViewId;
let texture_view_id = TextureViewId::from(self.id);
if let Some(ctx) = self if let Some(ctx) = self
.context .context
.as_any() .as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>() .downcast_ref::<crate::backend::ContextWgpuCore>()
{ {
unsafe { unsafe {
ctx.texture_view_as_hal::<A, F, R>(texture_view_id, hal_texture_view_callback) ctx.texture_view_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_texture_view_callback,
)
} }
} else { } else {
hal_texture_view_callback(None) hal_texture_view_callback(None)
@ -59,7 +50,7 @@ impl TextureView {
impl Drop for TextureView { impl Drop for TextureView {
fn drop(&mut self) { fn drop(&mut self) {
if !thread::panicking() { if !thread::panicking() {
self.context.texture_view_drop(&self.id, self.data.as_ref()); self.context.texture_view_drop(self.data.as_ref());
} }
} }
} }

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -125,7 +125,6 @@ impl DownloadBuffer {
let mapped_range = crate::context::DynContext::buffer_get_mapped_range( let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
&*download.context, &*download.context,
&download.id,
download.data.as_ref(), download.data.as_ref(),
0..size, 0..size,
); );