Remove IDs from wgpu traits (#6134)

Remove `wgpu`'s `.global_id()` getters.

Implement `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord` for wgpu resources.
Authored by Teodor Tanasoaia on 2024-08-27 12:00:19 +02:00; committed by GitHub
parent c7e5d07dee
commit 338678ad5f
31 changed files with 1268 additions and 3587 deletions


@ -55,6 +55,16 @@ which we're hoping to build performance improvements upon in the future.
By @wumpf in [#6069](https://github.com/gfx-rs/wgpu/pull/6069), [#6099](https://github.com/gfx-rs/wgpu/pull/6099), [#6100](https://github.com/gfx-rs/wgpu/pull/6100).
#### `wgpu`'s resources no longer have `.global_id()` getters
`wgpu-core`'s internals no longer use or need IDs, and we are moving towards removing IDs
completely. This is a step in that direction.
Current users of `.global_id()` are encouraged to make use of the `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord`
traits that have now been implemented for `wgpu` resources.
By @teoxoy in [#6134](https://github.com/gfx-rs/wgpu/pull/6134).
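
For example, code that previously compared handles or keyed a collection by `global_id()` can
use the resources directly. A minimal sketch of the migration (the helper names below are
illustrative, not part of the `wgpu` API):

```rust
use std::collections::HashSet;

// Previously: a.global_id() == b.global_id()
fn is_same_bind_group(a: &wgpu::BindGroup, b: &wgpu::BindGroup) -> bool {
    a == b
}

// Previously: a HashSet<wgpu::Id<wgpu::BindGroup>> of global IDs. References
// to the resources can now serve as keys, since `Eq` and `Hash` are
// implemented on the resources themselves.
fn set_once<'a>(seen: &mut HashSet<&'a wgpu::BindGroup>, bg: &'a wgpu::BindGroup) -> bool {
    seen.insert(bg) // `false` if this exact bind group was already tracked
}
```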
### New Features
#### Naga


@ -31,31 +31,20 @@ static BIND_GROUP_LAYOUT_DEDUPLICATION: GpuTestConfiguration = GpuTestConfigurat
.run_async(bgl_dedupe);
async fn bgl_dedupe(ctx: TestingContext) {
let entries_1 = &[];
let entries = &[];
let entries_2 = &[ENTRY];
// Block so we can force all resources to die.
{
let bgl_1a = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
});
let bgl_2 = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_2,
entries,
});
let bgl_1b = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
entries,
});
let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -113,47 +102,6 @@ async fn bgl_dedupe(ctx: TestingContext) {
drop(pass);
ctx.queue.submit(Some(encoder.finish()));
// Abuse the fact that global_id is really just the bitpacked ids when targeting wgpu-core.
if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu {
let bgl_1a_idx = bgl_1a.global_id().inner() & 0xFFFF_FFFF;
assert_eq!(bgl_1a_idx, 0);
let bgl_2_idx = bgl_2.global_id().inner() & 0xFFFF_FFFF;
assert_eq!(bgl_2_idx, 1);
let bgl_1b_idx = bgl_1b.global_id().inner() & 0xFFFF_FFFF;
assert_eq!(bgl_1b_idx, 2);
}
}
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu {
// Indices are made reusable as soon as the handle is dropped so we keep them around
// for the duration of the loop.
let mut bgls = Vec::new();
let mut indices = Vec::new();
// Now all of the BGL ids should be dead, so we should get the same ids again.
for _ in 0..=2 {
let test_bgl = ctx
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: entries_1,
});
let test_bgl_idx = test_bgl.global_id().inner() & 0xFFFF_FFFF;
bgls.push(test_bgl);
indices.push(test_bgl_idx);
}
// We don't guarantee that the IDs will appear in the same order. Sort them
// and check that they all appear exactly once.
indices.sort();
for (i, index) in indices.iter().enumerate() {
assert_eq!(*index, i as u64);
}
}
}
#[gpu_test]


@ -1,6 +1,6 @@
use std::{future::Future, sync::Arc, thread};
use crate::context::{DeviceRequest, DynContext, ObjectId};
use crate::context::{DeviceRequest, DynContext};
use crate::*;
/// Handle to a physical graphics and/or compute device.
@ -14,7 +14,6 @@ use crate::*;
#[derive(Debug)]
pub struct Adapter {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
@ -23,7 +22,7 @@ static_assertions::assert_impl_all!(Adapter: Send, Sync);
impl Drop for Adapter {
fn drop(&mut self) {
if !thread::panicking() {
self.context.adapter_drop(&self.id, self.data.as_ref())
self.context.adapter_drop(self.data.as_ref())
}
}
}
@ -40,14 +39,6 @@ pub type RequestAdapterOptions<'a, 'b> = RequestAdapterOptionsBase<&'a Surface<'
static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync);
impl Adapter {
/// Returns a globally-unique identifier for this `Adapter`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Requests a connection to a physical device, creating a logical device.
///
/// Returns the [`Device`] together with a [`Queue`] that executes command buffers.
@ -80,7 +71,6 @@ impl Adapter {
let context = Arc::clone(&self.context);
let device = DynContext::adapter_request_device(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
trace_path,
@ -88,20 +78,16 @@ impl Adapter {
async move {
device.await.map(
|DeviceRequest {
device_id,
device_data,
queue_id,
queue_data,
}| {
(
Device {
context: Arc::clone(&context),
id: device_id,
data: device_data,
},
Queue {
context,
id: queue_id,
data: queue_data,
},
)
@ -131,18 +117,21 @@ impl Adapter {
// Part of the safety requirements is that the device was generated from the same adapter.
// Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices.
.unwrap()
.create_device_from_hal(&self.id.into(), hal_device, desc, trace_path)
.create_device_from_hal(
crate::context::downcast_ref(&self.data),
hal_device,
desc,
trace_path,
)
}
.map(|(device, queue)| {
(
Device {
context: Arc::clone(&context),
id: device.id().into(),
data: Box::new(device),
},
Queue {
context,
id: queue.id().into(),
data: Box::new(queue),
},
)
@ -178,7 +167,12 @@ impl Adapter {
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
unsafe { ctx.adapter_as_hal::<A, F, R>(self.id.into(), hal_adapter_callback) }
unsafe {
ctx.adapter_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_adapter_callback,
)
}
} else {
hal_adapter_callback(None)
}
@ -188,31 +182,29 @@ impl Adapter {
pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool {
DynContext::adapter_is_surface_supported(
&*self.context,
&self.id,
self.data.as_ref(),
&surface.id,
surface.surface_data.as_ref(),
)
}
/// The features which can be used to create devices on this adapter.
pub fn features(&self) -> Features {
DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref())
DynContext::adapter_features(&*self.context, self.data.as_ref())
}
/// The best limits which can be used to create devices on this adapter.
pub fn limits(&self) -> Limits {
DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref())
DynContext::adapter_limits(&*self.context, self.data.as_ref())
}
/// Get info about the adapter itself.
pub fn get_info(&self) -> AdapterInfo {
DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref())
DynContext::adapter_get_info(&*self.context, self.data.as_ref())
}
/// Get the downlevel capabilities of this adapter.
pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref())
DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref())
}
/// Returns the features supported for a given texture format by this adapter.
@ -220,12 +212,7 @@ impl Adapter {
/// Note that the WebGPU spec further restricts the available usages/features.
/// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature.
pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
DynContext::adapter_get_texture_format_features(
&*self.context,
&self.id,
self.data.as_ref(),
format,
)
DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format)
}
/// Generates a timestamp using the clock used by the presentation engine.
@ -250,6 +237,6 @@ impl Adapter {
///
/// [Instant]: std::time::Instant
pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref())
DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref())
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a binding group.
@ -14,26 +13,17 @@ use crate::*;
#[derive(Debug)]
pub struct BindGroup {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroup: Send, Sync);
impl BindGroup {
/// Returns a globally-unique identifier for this `BindGroup`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(BindGroup);
impl Drop for BindGroup {
fn drop(&mut self) {
if !thread::panicking() {
self.context.bind_group_drop(&self.id, self.data.as_ref());
self.context.bind_group_drop(self.data.as_ref());
}
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a binding group layout.
@ -17,27 +16,17 @@ use crate::*;
#[derive(Debug)]
pub struct BindGroupLayout {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
impl BindGroupLayout {
/// Returns a globally-unique identifier for this `BindGroupLayout`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(BindGroupLayout);
impl Drop for BindGroupLayout {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.bind_group_layout_drop(&self.id, self.data.as_ref());
self.context.bind_group_layout_drop(self.data.as_ref());
}
}
}


@ -7,7 +7,7 @@ use std::{
use parking_lot::Mutex;
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Handle to a GPU-accessible buffer.
@ -173,7 +173,6 @@ use crate::*;
#[derive(Debug)]
pub struct Buffer {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
pub(crate) map_context: Mutex<MapContext>,
pub(crate) size: wgt::BufferAddress,
@ -183,15 +182,9 @@ pub struct Buffer {
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);
impl Buffer {
/// Returns a globally-unique identifier for this `Buffer`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
super::impl_partialeq_eq_hash!(Buffer);
impl Buffer {
/// Return the binding view of the entire buffer.
pub fn as_entire_binding(&self) -> BindingResource<'_> {
BindingResource::Buffer(self.as_entire_buffer_binding())
@ -217,14 +210,17 @@ impl Buffer {
&self,
hal_buffer_callback: F,
) -> R {
let id = self.id;
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
unsafe { ctx.buffer_as_hal::<A, F, R>(id.into(), hal_buffer_callback) }
unsafe {
ctx.buffer_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_buffer_callback,
)
}
} else {
hal_buffer_callback(None)
}
@ -256,12 +252,12 @@ impl Buffer {
/// Flushes any pending write operations and unmaps the buffer from host memory.
pub fn unmap(&self) {
self.map_context.lock().reset();
DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref());
DynContext::buffer_unmap(&*self.context, self.data.as_ref());
}
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref());
DynContext::buffer_destroy(&*self.context, self.data.as_ref());
}
/// Returns the length of the buffer allocation in bytes.
@ -343,12 +339,7 @@ impl<'a> BufferSlice<'a> {
callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
) {
let mut mc = self.buffer.map_context.lock();
assert_eq!(
mc.initial_range,
0..0,
"Buffer {:?} is already mapped",
self.buffer.id
);
assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
let end = match self.size {
Some(s) => self.offset + s.get(),
None => mc.total_size,
@ -357,7 +348,6 @@ impl<'a> BufferSlice<'a> {
DynContext::buffer_map_async(
&*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(),
mode,
self.offset..end,
@ -383,7 +373,6 @@ impl<'a> BufferSlice<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(),
self.offset..end,
);
@ -429,7 +418,6 @@ impl<'a> BufferSlice<'a> {
let end = self.buffer.map_context.lock().add(self.offset, self.size);
let data = DynContext::buffer_get_mapped_range(
&*self.buffer.context,
&self.buffer.id,
self.buffer.data.as_ref(),
self.offset..end,
);
@ -680,7 +668,7 @@ impl Drop for BufferViewMut<'_> {
impl Drop for Buffer {
fn drop(&mut self) {
if !thread::panicking() {
self.context.buffer_drop(&self.id, self.data.as_ref());
self.context.buffer_drop(self.data.as_ref());
}
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a command buffer on the GPU.
@ -13,7 +12,6 @@ use crate::*;
#[derive(Debug)]
pub struct CommandBuffer {
pub(crate) context: Arc<C>,
pub(crate) id: Option<ObjectId>,
pub(crate) data: Option<Box<Data>>,
}
#[cfg(send_sync)]
@ -22,9 +20,8 @@ static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
impl Drop for CommandBuffer {
fn drop(&mut self) {
if !thread::panicking() {
if let Some(id) = self.id.take() {
self.context
.command_buffer_drop(&id, self.data.take().unwrap().as_ref());
if let Some(data) = self.data.take() {
self.context.command_buffer_drop(data.as_ref());
}
}
}


@ -1,6 +1,6 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Encodes a series of GPU operations.
@ -15,7 +15,6 @@ use crate::*;
#[derive(Debug)]
pub struct CommandEncoder {
pub(crate) context: Arc<C>,
pub(crate) id: Option<ObjectId>,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
@ -24,9 +23,7 @@ static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
impl Drop for CommandEncoder {
fn drop(&mut self) {
if !thread::panicking() {
if let Some(id) = self.id.take() {
self.context.command_encoder_drop(&id, self.data.as_ref());
}
self.context.command_encoder_drop(self.data.as_ref());
}
}
}
@ -71,14 +68,9 @@ static_assertions::assert_impl_all!(ImageCopyTexture<'_>: Send, Sync);
impl CommandEncoder {
/// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution.
pub fn finish(mut self) -> CommandBuffer {
let (id, data) = DynContext::command_encoder_finish(
&*self.context,
self.id.take().unwrap(),
self.data.as_mut(),
);
let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut());
CommandBuffer {
context: Arc::clone(&self.context),
id: Some(id),
data: Some(data),
}
}
@ -97,16 +89,10 @@ impl CommandEncoder {
&'encoder mut self,
desc: &RenderPassDescriptor<'_>,
) -> RenderPass<'encoder> {
let id = self.id.as_ref().unwrap();
let (id, data) = DynContext::command_encoder_begin_render_pass(
&*self.context,
id,
self.data.as_ref(),
desc,
);
let data =
DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc);
RenderPass {
inner: RenderPassInner {
id,
data,
context: self.context.clone(),
},
@ -128,16 +114,13 @@ impl CommandEncoder {
&'encoder mut self,
desc: &ComputePassDescriptor<'_>,
) -> ComputePass<'encoder> {
let id = self.id.as_ref().unwrap();
let (id, data) = DynContext::command_encoder_begin_compute_pass(
let data = DynContext::command_encoder_begin_compute_pass(
&*self.context,
id,
self.data.as_ref(),
desc,
);
ComputePass {
inner: ComputePassInner {
id,
data,
context: self.context.clone(),
},
@ -162,12 +145,9 @@ impl CommandEncoder {
) {
DynContext::command_encoder_copy_buffer_to_buffer(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
&source.id,
source.data.as_ref(),
source_offset,
&destination.id,
destination.data.as_ref(),
destination_offset,
copy_size,
@ -183,7 +163,6 @@ impl CommandEncoder {
) {
DynContext::command_encoder_copy_buffer_to_texture(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
source,
destination,
@ -200,7 +179,6 @@ impl CommandEncoder {
) {
DynContext::command_encoder_copy_texture_to_buffer(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
source,
destination,
@ -223,7 +201,6 @@ impl CommandEncoder {
) {
DynContext::command_encoder_copy_texture_to_texture(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
source,
destination,
@ -247,9 +224,8 @@ impl CommandEncoder {
pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
DynContext::command_encoder_clear_texture(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
texture,
texture.data.as_ref(),
subresource_range,
);
}
@ -268,9 +244,8 @@ impl CommandEncoder {
) {
DynContext::command_encoder_clear_buffer(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
buffer,
buffer.data.as_ref(),
offset,
size,
);
@ -278,25 +253,17 @@ impl CommandEncoder {
/// Inserts debug marker.
pub fn insert_debug_marker(&mut self, label: &str) {
let id = self.id.as_ref().unwrap();
DynContext::command_encoder_insert_debug_marker(
&*self.context,
id,
self.data.as_ref(),
label,
);
DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label);
}
/// Starts recording commands and groups them into a debug marker group.
pub fn push_debug_group(&mut self, label: &str) {
let id = self.id.as_ref().unwrap();
DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label);
DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label);
}
/// Stops command recording and closes the debug marker group.
pub fn pop_debug_group(&mut self) {
let id = self.id.as_ref().unwrap();
DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref());
DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref());
}
/// Resolves a query set, writing the results into the supplied destination buffer.
@ -312,13 +279,10 @@ impl CommandEncoder {
) {
DynContext::command_encoder_resolve_query_set(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_ref(),
&query_set.id,
query_set.data.as_ref(),
query_range.start,
query_range.end - query_range.start,
&destination.id,
destination.data.as_ref(),
destination_offset,
)
@ -341,14 +305,12 @@ impl CommandEncoder {
&mut self,
hal_command_encoder_callback: F,
) -> Option<R> {
use wgc::id::CommandEncoderId;
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.command_encoder_as_hal_mut::<A, F, R>(
CommandEncoderId::from(self.id.unwrap()),
crate::context::downcast_ref(&self.data),
hal_command_encoder_callback,
)
})
@ -372,9 +334,7 @@ impl CommandEncoder {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::command_encoder_write_timestamp(
&*self.context,
self.id.as_ref().unwrap(),
self.data.as_mut(),
&query_set.id,
query_set.data.as_ref(),
query_index,
)


@ -1,6 +1,6 @@
use std::{marker::PhantomData, sync::Arc, thread};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// In-progress recording of a compute pass.
@ -53,10 +53,8 @@ impl<'encoder> ComputePass<'encoder> {
) {
DynContext::compute_pass_set_bind_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
index,
&bind_group.id,
bind_group.data.as_ref(),
offsets,
);
@ -66,9 +64,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
DynContext::compute_pass_set_pipeline(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(),
);
}
@ -77,7 +73,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::compute_pass_insert_debug_marker(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
label,
);
@ -87,7 +82,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn push_debug_group(&mut self, label: &str) {
DynContext::compute_pass_push_debug_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
label,
);
@ -95,11 +89,7 @@ impl<'encoder> ComputePass<'encoder> {
/// Stops command recording and closes the debug marker group.
pub fn pop_debug_group(&mut self) {
DynContext::compute_pass_pop_debug_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
}
/// Dispatches compute work operations.
@ -108,7 +98,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
DynContext::compute_pass_dispatch_workgroups(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
x,
y,
@ -126,9 +115,7 @@ impl<'encoder> ComputePass<'encoder> {
) {
DynContext::compute_pass_dispatch_workgroups_indirect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
);
@ -148,7 +135,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
DynContext::compute_pass_set_push_constants(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
offset,
data,
@ -167,9 +153,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_write_timestamp(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(),
query_index,
)
@ -183,9 +167,7 @@ impl<'encoder> ComputePass<'encoder> {
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::compute_pass_begin_pipeline_statistics_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(),
query_index,
);
@ -196,7 +178,6 @@ impl<'encoder> ComputePass<'encoder> {
pub fn end_pipeline_statistics_query(&mut self) {
DynContext::compute_pass_end_pipeline_statistics_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
}
@ -204,7 +185,6 @@ impl<'encoder> ComputePass<'encoder> {
#[derive(Debug)]
pub(crate) struct ComputePassInner {
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>,
}
@ -212,8 +192,7 @@ pub(crate) struct ComputePassInner {
impl Drop for ComputePassInner {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.compute_pass_end(&mut self.id, self.data.as_mut());
self.context.compute_pass_end(self.data.as_mut());
}
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a compute pipeline.
@ -12,38 +11,28 @@ use crate::*;
#[derive(Debug)]
pub struct ComputePipeline {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
impl ComputePipeline {
/// Returns a globally-unique identifier for this `ComputePipeline`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
super::impl_partialeq_eq_hash!(ComputePipeline);
impl ComputePipeline {
/// Get an object representing the bind group layout at a given index.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context);
let (id, data) = self.context.compute_pipeline_get_bind_group_layout(
&self.id,
self.data.as_ref(),
index,
);
BindGroupLayout { context, id, data }
let data = self
.context
.compute_pipeline_get_bind_group_layout(self.data.as_ref(), index);
BindGroupLayout { context, data }
}
}
impl Drop for ComputePipeline {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.compute_pipeline_drop(&self.id, self.data.as_ref());
self.context.compute_pipeline_drop(self.data.as_ref());
}
}
}


@ -2,7 +2,7 @@ use std::{error, fmt, future::Future, sync::Arc, thread};
use parking_lot::Mutex;
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Open connection to a graphics and/or compute device.
@ -16,7 +16,6 @@ use crate::*;
#[derive(Debug)]
pub struct Device {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
@ -32,14 +31,6 @@ pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);
impl Device {
/// Returns a globally-unique identifier for this `Device`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed.
///
/// Return `true` if the queue is empty, or `false` if there are more queue
@ -50,7 +41,7 @@ impl Device {
///
/// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
pub fn poll(&self, maintain: Maintain) -> MaintainResult {
DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain)
DynContext::device_poll(&*self.context, self.data.as_ref(), maintain)
}
/// The features which can be used on this device.
@ -58,7 +49,7 @@ impl Device {
/// No additional features can be used, even if the underlying adapter can support them.
#[must_use]
pub fn features(&self) -> Features {
DynContext::device_features(&*self.context, &self.id, self.data.as_ref())
DynContext::device_features(&*self.context, self.data.as_ref())
}
/// The limits which can be used on this device.
@ -66,7 +57,7 @@ impl Device {
/// No better limits can be used, even if the underlying adapter can support them.
#[must_use]
pub fn limits(&self) -> Limits {
DynContext::device_limits(&*self.context, &self.id, self.data.as_ref())
DynContext::device_limits(&*self.context, self.data.as_ref())
}
/// Creates a shader module from either SPIR-V or WGSL source code.
@ -85,16 +76,14 @@ impl Device {
/// </div>
#[must_use]
pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module(
let data = DynContext::device_create_shader_module(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
wgt::ShaderBoundChecks::new(),
);
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -114,16 +103,14 @@ impl Device {
&self,
desc: ShaderModuleDescriptor<'_>,
) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module(
let data = DynContext::device_create_shader_module(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
unsafe { wgt::ShaderBoundChecks::unchecked() },
);
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -141,17 +128,11 @@ impl Device {
&self,
desc: &ShaderModuleDescriptorSpirV<'_>,
) -> ShaderModule {
let (id, data) = unsafe {
DynContext::device_create_shader_module_spirv(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
)
let data = unsafe {
DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc)
};
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -159,15 +140,10 @@ impl Device {
/// Creates an empty [`CommandEncoder`].
#[must_use]
pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
let (id, data) = DynContext::device_create_command_encoder(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data =
DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc);
CommandEncoder {
context: Arc::clone(&self.context),
id: Some(id),
data,
}
}
@ -178,15 +154,13 @@ impl Device {
&self,
desc: &RenderBundleEncoderDescriptor<'_>,
) -> RenderBundleEncoder<'_> {
let (id, data) = DynContext::device_create_render_bundle_encoder(
let data = DynContext::device_create_render_bundle_encoder(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
RenderBundleEncoder {
context: Arc::clone(&self.context),
id,
data,
parent: self,
_p: Default::default(),
@ -196,15 +170,9 @@ impl Device {
/// Creates a new [`BindGroup`].
#[must_use]
pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
let (id, data) = DynContext::device_create_bind_group(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc);
BindGroup {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -215,15 +183,10 @@ impl Device {
&self,
desc: &BindGroupLayoutDescriptor<'_>,
) -> BindGroupLayout {
let (id, data) = DynContext::device_create_bind_group_layout(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data =
DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc);
BindGroupLayout {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -231,15 +194,10 @@ impl Device {
/// Creates a [`PipelineLayout`].
#[must_use]
pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
let (id, data) = DynContext::device_create_pipeline_layout(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data =
DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc);
PipelineLayout {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -247,15 +205,10 @@ impl Device {
/// Creates a [`RenderPipeline`].
#[must_use]
pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
let (id, data) = DynContext::device_create_render_pipeline(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data =
DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc);
RenderPipeline {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -263,15 +216,10 @@ impl Device {
/// Creates a [`ComputePipeline`].
#[must_use]
pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
let (id, data) = DynContext::device_create_compute_pipeline(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
let data =
DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc);
ComputePipeline {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -284,12 +232,10 @@ impl Device {
map_context.initial_range = 0..desc.size;
}
let (id, data) =
DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc);
let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc);
Buffer {
context: Arc::clone(&self.context),
id,
data,
map_context: Mutex::new(map_context),
size: desc.size,
@ -302,11 +248,9 @@ impl Device {
/// `desc` specifies the general format of the texture.
#[must_use]
pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
let (id, data) =
DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc);
let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc);
Texture {
context: Arc::clone(&self.context),
id,
data,
owned: true,
descriptor: TextureDescriptor {
@ -340,13 +284,12 @@ impl Device {
.unwrap()
.create_texture_from_hal::<A>(
hal_texture,
self.data.as_ref().downcast_ref().unwrap(),
crate::context::downcast_ref(&self.data),
desc,
)
};
Texture {
context: Arc::clone(&self.context),
id: ObjectId::from(texture.id()),
data: Box::new(texture),
owned: true,
descriptor: TextureDescriptor {
@ -376,7 +319,7 @@ impl Device {
map_context.initial_range = 0..desc.size;
}
let (id, buffer) = unsafe {
let buffer = unsafe {
self.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
@ -385,14 +328,13 @@ impl Device {
.unwrap()
.create_buffer_from_hal::<A>(
hal_buffer,
self.data.as_ref().downcast_ref().unwrap(),
crate::context::downcast_ref(&self.data),
desc,
)
};
Buffer {
context: Arc::clone(&self.context),
id: ObjectId::from(id),
data: Box::new(buffer),
map_context: Mutex::new(map_context),
size: desc.size,
@ -405,11 +347,9 @@ impl Device {
/// `desc` specifies the behavior of the sampler.
#[must_use]
pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
let (id, data) =
DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc);
let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc);
Sampler {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -417,11 +357,9 @@ impl Device {
/// Creates a new [`QuerySet`].
#[must_use]
pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
let (id, data) =
DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc);
let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc);
QuerySet {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -429,29 +367,28 @@ impl Device {
/// Set a callback for errors that are not handled in error scopes.
pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
self.context
.device_on_uncaptured_error(&self.id, self.data.as_ref(), handler);
.device_on_uncaptured_error(self.data.as_ref(), handler);
}
/// Push an error scope.
pub fn push_error_scope(&self, filter: ErrorFilter) {
self.context
.device_push_error_scope(&self.id, self.data.as_ref(), filter);
.device_push_error_scope(self.data.as_ref(), filter);
}
/// Pop an error scope.
pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
self.context
.device_pop_error_scope(&self.id, self.data.as_ref())
self.context.device_pop_error_scope(self.data.as_ref())
}
/// Starts frame capture.
pub fn start_capture(&self) {
DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref())
DynContext::device_start_capture(&*self.context, self.data.as_ref())
}
/// Stops frame capture.
pub fn stop_capture(&self) {
DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref())
DynContext::device_stop_capture(&*self.context, self.data.as_ref())
}
/// Query internal counters from the native backend for debugging purposes.
@ -462,7 +399,7 @@ impl Device {
/// If a counter is not set, it contains its default value (zero).
#[must_use]
pub fn get_internal_counters(&self) -> wgt::InternalCounters {
DynContext::device_get_internal_counters(&*self.context, &self.id, self.data.as_ref())
DynContext::device_get_internal_counters(&*self.context, self.data.as_ref())
}
/// Generate a GPU memory allocation report if the underlying backend supports it.
@ -472,7 +409,7 @@ impl Device {
/// for example as a workaround for driver issues.
#[must_use]
pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
DynContext::generate_allocator_report(&*self.context, &self.id, self.data.as_ref())
DynContext::generate_allocator_report(&*self.context, self.data.as_ref())
}
/// Apply a callback to this `Device`'s underlying backend device.
@ -504,7 +441,7 @@ impl Device {
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.device_as_hal::<A, F, R>(
self.data.as_ref().downcast_ref().unwrap(),
crate::context::downcast_ref(&self.data),
hal_device_callback,
)
})
@ -512,7 +449,7 @@ impl Device {
/// Destroy this device.
pub fn destroy(&self) {
DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref())
DynContext::device_destroy(&*self.context, self.data.as_ref())
}
/// Set a DeviceLostCallback on this device.
@ -522,7 +459,6 @@ impl Device {
) {
DynContext::device_set_device_lost_callback(
&*self.context,
&self.id,
self.data.as_ref(),
Box::new(callback),
)
@ -531,7 +467,7 @@ impl Device {
/// Test-only function to make this device invalid.
#[doc(hidden)]
pub fn make_invalid(&self) {
DynContext::device_make_invalid(&*self.context, &self.id, self.data.as_ref())
DynContext::device_make_invalid(&*self.context, self.data.as_ref())
}
/// Create a [`PipelineCache`] with initial data
@ -576,17 +512,11 @@ impl Device {
&self,
desc: &PipelineCacheDescriptor<'_>,
) -> PipelineCache {
let (id, data) = unsafe {
DynContext::device_create_pipeline_cache(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
)
let data = unsafe {
DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc)
};
PipelineCache {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -595,7 +525,7 @@ impl Device {
impl Drop for Device {
fn drop(&mut self) {
if !thread::panicking() {
self.context.device_drop(&self.id, self.data.as_ref());
self.context.device_drop(self.data.as_ref());
}
}
}


@ -1,67 +0,0 @@
use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64};
use crate::context::ObjectId;
/// Opaque globally-unique identifier
#[repr(transparent)]
pub struct Id<T>(NonZeroU64, PhantomData<*mut T>);
impl<T> Id<T> {
/// Create a new `Id` from a ObjectID.
pub(crate) fn new(id: ObjectId) -> Self {
Id(id.global_id(), PhantomData)
}
/// For testing use only. We provide no guarantees about the actual value of the ids.
#[doc(hidden)]
pub fn inner(&self) -> u64 {
self.0.get()
}
}
// SAFETY: `Id` is a bare `NonZeroU64`, the type parameter is a marker purely to avoid confusing Ids
// returned for different types, so `Id` can safely implement Send and Sync.
unsafe impl<T> Send for Id<T> {}
// SAFETY: See the implementation for `Send`.
unsafe impl<T> Sync for Id<T> {}
impl<T> Clone for Id<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T> Copy for Id<T> {}
impl<T> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Id").field(&self.0).finish()
}
}
impl<T> PartialEq for Id<T> {
fn eq(&self, other: &Id<T>) -> bool {
self.0 == other.0
}
}
impl<T> Eq for Id<T> {}
impl<T> PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Id<T>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Id<T> {
fn cmp(&self, other: &Id<T>) -> Ordering {
self.0.cmp(&other.0)
}
}
impl<T> std::hash::Hash for Id<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.hash(state)
}
}


@ -202,8 +202,6 @@ impl Instance {
/// - `backends` - Backends from which to enumerate adapters.
#[cfg(native)]
pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> {
use crate::context::ObjectId;
let context = Arc::clone(&self.context);
self.context
.as_any()
@ -211,10 +209,9 @@ impl Instance {
.map(|ctx| {
ctx.enumerate_adapters(backends)
.into_iter()
.map(move |id| crate::Adapter {
.map(move |adapter| crate::Adapter {
context: Arc::clone(&context),
id: ObjectId::from(id),
data: Box::new(()),
data: Box::new(adapter),
})
.collect()
})
@ -234,11 +231,7 @@ impl Instance {
) -> impl Future<Output = Option<Adapter>> + WasmNotSend {
let context = Arc::clone(&self.context);
let adapter = self.context.instance_request_adapter(options);
async move {
adapter
.await
.map(|(id, data)| Adapter { context, id, data })
}
async move { adapter.await.map(|data| Adapter { context, data }) }
}
/// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
@ -252,18 +245,16 @@ impl Instance {
hal_adapter: hal::ExposedAdapter<A>,
) -> Adapter {
let context = Arc::clone(&self.context);
let id = unsafe {
let adapter = unsafe {
context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
.unwrap()
.create_adapter_from_hal(hal_adapter)
.into()
};
Adapter {
context,
id,
data: Box::new(()),
data: Box::new(adapter),
}
}
@ -355,12 +346,11 @@ impl Instance {
&self,
target: SurfaceTargetUnsafe,
) -> Result<Surface<'window>, CreateSurfaceError> {
let (id, data) = unsafe { self.context.instance_create_surface(target) }?;
let data = unsafe { self.context.instance_create_surface(target) }?;
Ok(Surface {
context: Arc::clone(&self.context),
_handle_source: None,
id,
surface_data: data,
config: Mutex::new(None),
})


@ -32,7 +32,6 @@ mod common_pipeline;
mod compute_pass;
mod compute_pipeline;
mod device;
mod id;
mod instance;
mod pipeline_cache;
mod pipeline_layout;
@ -59,7 +58,6 @@ pub use common_pipeline::*;
pub use compute_pass::*;
pub use compute_pipeline::*;
pub use device::*;
pub use id::*;
pub use instance::*;
pub use pipeline_cache::*;
pub use pipeline_layout::*;
@ -78,3 +76,35 @@ pub use texture_view::*;
/// Object debugging label.
pub type Label<'a> = Option<&'a str>;
macro_rules! impl_partialeq_eq_hash {
($ty:ty) => {
impl PartialEq for $ty {
fn eq(&self, other: &Self) -> bool {
std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref())
}
}
impl Eq for $ty {}
impl std::hash::Hash for $ty {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let ptr = self.data.as_ref() as *const Data as *const ();
ptr.hash(state);
}
}
impl PartialOrd for $ty {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for $ty {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let a = self.data.as_ref() as *const Data as *const ();
let b = other.data.as_ref() as *const Data as *const ();
a.cmp(&b)
}
}
};
}
pub(crate) use impl_partialeq_eq_hash;
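
A note on the semantics of the macro above (our reading of the implementation, not a documented
guarantee): all five traits key off the address of the boxed `Data`, so two handles compare equal
only when they refer to the same underlying resource object, and the `Ord` order is arbitrary but
self-consistent within a process run. A small sketch of what this enables (`dedup_pipelines` is a
hypothetical helper, not part of the API):

```rust
// Sort-then-dedup over references to resource handles. The order is by
// address, so it is meaningless across runs, but the deduplication is exact:
// references to the same pipeline collapse to one entry after the sort.
fn dedup_pipelines<'a>(
    mut pipelines: Vec<&'a wgpu::ComputePipeline>,
) -> Vec<&'a wgpu::ComputePipeline> {
    pipelines.sort_unstable(); // `Ord` on `&T` defers to the new impl on `T`
    pipelines.dedup();         // likewise for `PartialEq`
    pipelines
}
```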


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a pipeline cache, which is used to accelerate
@ -68,7 +67,6 @@ use crate::*;
#[derive(Debug)]
pub struct PipelineCache {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
@ -83,16 +81,14 @@ impl PipelineCache {
///
/// This function is unique to the Rust API of `wgpu`.
pub fn get_data(&self) -> Option<Vec<u8>> {
self.context
.pipeline_cache_get_data(&self.id, self.data.as_ref())
self.context.pipeline_cache_get_data(self.data.as_ref())
}
}
impl Drop for PipelineCache {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.pipeline_cache_drop(&self.id, self.data.as_ref());
self.context.pipeline_cache_drop(self.data.as_ref());
}
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a pipeline layout.
@ -12,27 +11,17 @@ use crate::*;
#[derive(Debug)]
pub struct PipelineLayout {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
impl PipelineLayout {
/// Returns a globally-unique identifier for this `PipelineLayout`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(PipelineLayout);
impl Drop for PipelineLayout {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.pipeline_layout_drop(&self.id, self.data.as_ref());
self.context.pipeline_layout_drop(self.data.as_ref());
}
}
}


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a query set.
@ -11,27 +10,18 @@ use crate::*;
#[derive(Debug)]
pub struct QuerySet {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QuerySet: Send, Sync);
impl QuerySet {
/// Returns a globally-unique identifier for this `QuerySet`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(QuerySet);
impl Drop for QuerySet {
fn drop(&mut self) {
if !thread::panicking() {
self.context.query_set_drop(&self.id, self.data.as_ref());
self.context.query_set_drop(self.data.as_ref());
}
}
}


@ -4,7 +4,7 @@ use std::{
thread,
};
use crate::context::{DynContext, ObjectId, QueueWriteBuffer};
use crate::context::{DynContext, QueueWriteBuffer};
use crate::*;
/// Handle to a command queue on a device.
@ -17,7 +17,6 @@ use crate::*;
#[derive(Debug)]
pub struct Queue {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
@ -26,7 +25,7 @@ static_assertions::assert_impl_all!(Queue: Send, Sync);
impl Drop for Queue {
fn drop(&mut self) {
if !thread::panicking() {
self.context.queue_drop(&self.id, self.data.as_ref());
self.context.queue_drop(self.data.as_ref());
}
}
}
@ -87,9 +86,7 @@ impl<'a> Drop for QueueWriteBufferView<'a> {
fn drop(&mut self) {
DynContext::queue_write_staging_buffer(
&*self.queue.context,
&self.queue.id,
self.queue.data.as_ref(),
&self.buffer.id,
self.buffer.data.as_ref(),
self.offset,
&*self.inner,
@ -121,9 +118,7 @@ impl Queue {
pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
DynContext::queue_write_buffer(
&*self.context,
&self.id,
self.data.as_ref(),
&buffer.id,
buffer.data.as_ref(),
offset,
data,
@ -168,19 +163,13 @@ impl Queue {
profiling::scope!("Queue::write_buffer_with");
DynContext::queue_validate_write_buffer(
&*self.context,
&self.id,
self.data.as_ref(),
&buffer.id,
buffer.data.as_ref(),
offset,
size,
)?;
let staging_buffer = DynContext::queue_create_staging_buffer(
&*self.context,
&self.id,
self.data.as_ref(),
size,
)?;
let staging_buffer =
DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?;
Some(QueueWriteBufferView {
queue: self,
buffer,
@ -222,7 +211,6 @@ impl Queue {
) {
DynContext::queue_write_texture(
&*self.context,
&self.id,
self.data.as_ref(),
texture,
data,
@ -241,7 +229,6 @@ impl Queue {
) {
DynContext::queue_copy_external_image_to_texture(
&*self.context,
&self.id,
self.data.as_ref(),
source,
dest,
@ -256,14 +243,10 @@ impl Queue {
) -> SubmissionIndex {
let mut command_buffers = command_buffers
.into_iter()
.map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap()));
.map(|mut comb| comb.data.take().unwrap());
let data = DynContext::queue_submit(
&*self.context,
&self.id,
self.data.as_ref(),
&mut command_buffers,
);
let data =
DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers);
SubmissionIndex(data)
}
@ -275,7 +258,7 @@ impl Queue {
/// Timestamp values are represented in nanosecond values on WebGPU, see `<https://gpuweb.github.io/gpuweb/#timestamp>`
/// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required.
pub fn get_timestamp_period(&self) -> f32 {
DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref())
DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref())
}
/// Registers a callback when the previous call to submit finishes running on the gpu. This callback
@ -292,7 +275,6 @@ impl Queue {
pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
DynContext::queue_on_submitted_work_done(
&*self.context,
&self.id,
self.data.as_ref(),
Box::new(callback),
)


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Pre-prepared reusable bundle of GPU operations.
@ -15,27 +14,17 @@ use crate::*;
#[derive(Debug)]
pub struct RenderBundle {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
impl RenderBundle {
/// Returns a globally-unique identifier for this `RenderBundle`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(RenderBundle);
impl Drop for RenderBundle {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.render_bundle_drop(&self.id, self.data.as_ref());
self.context.render_bundle_drop(self.data.as_ref());
}
}
}


@ -1,6 +1,6 @@
use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Encodes a series of GPU operations into a reusable "render bundle".
@ -17,7 +17,6 @@ use crate::*;
#[derive(Debug)]
pub struct RenderBundleEncoder<'a> {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
pub(crate) parent: &'a Device,
/// This type should be !Send !Sync, because it represents an allocation on this thread's
@ -53,11 +52,9 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn
impl<'a> RenderBundleEncoder<'a> {
/// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes.
pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle {
let (id, data) =
DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc);
let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc);
RenderBundle {
context: Arc::clone(&self.context),
id,
data,
}
}
@ -74,10 +71,8 @@ impl<'a> RenderBundleEncoder<'a> {
) {
DynContext::render_bundle_encoder_set_bind_group(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
index,
&bind_group.id,
bind_group.data.as_ref(),
offsets,
)
@ -89,9 +84,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
DynContext::render_bundle_encoder_set_pipeline(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(),
)
}
@ -103,9 +96,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
DynContext::render_bundle_encoder_set_index_buffer(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(),
index_format,
buffer_slice.offset,
@ -126,10 +117,8 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
DynContext::render_bundle_encoder_set_vertex_buffer(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
slot,
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(),
buffer_slice.offset,
buffer_slice.size,
@ -157,7 +146,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
vertices,
instances,
@ -188,7 +176,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_bundle_encoder_draw_indexed(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
indices,
base_vertex,
@ -204,9 +191,7 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
DynContext::render_bundle_encoder_draw_indirect(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
);
@ -226,9 +211,7 @@ impl<'a> RenderBundleEncoder<'a> {
) {
DynContext::render_bundle_encoder_draw_indexed_indirect(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
);
@ -268,7 +251,6 @@ impl<'a> RenderBundleEncoder<'a> {
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_bundle_encoder_set_push_constants(
&*self.parent.context,
&mut self.id,
self.data.as_mut(),
stages,
offset,


@ -1,11 +1,10 @@
use std::{marker::PhantomData, ops::Range, sync::Arc, thread};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
#[derive(Debug)]
pub(crate) struct RenderPassInner {
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
pub(crate) context: Arc<C>,
}
@ -13,8 +12,7 @@ pub(crate) struct RenderPassInner {
impl Drop for RenderPassInner {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.render_pass_end(&mut self.id, self.data.as_mut());
self.context.render_pass_end(self.data.as_mut());
}
}
}
@ -84,10 +82,8 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_set_bind_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
index,
&bind_group.id,
bind_group.data.as_ref(),
offsets,
)
@ -99,9 +95,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) {
DynContext::render_pass_set_pipeline(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&pipeline.id,
pipeline.data.as_ref(),
)
}
@ -114,7 +108,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_blend_constant(&mut self, color: Color) {
DynContext::render_pass_set_blend_constant(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
color,
)
@ -127,9 +120,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) {
DynContext::render_pass_set_index_buffer(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(),
index_format,
buffer_slice.offset,
@ -150,10 +141,8 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) {
DynContext::render_pass_set_vertex_buffer(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
slot,
&buffer_slice.buffer.id,
buffer_slice.buffer.data.as_ref(),
buffer_slice.offset,
buffer_slice.size,
@ -172,7 +161,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
DynContext::render_pass_set_scissor_rect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
x,
y,
@ -190,7 +178,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
DynContext::render_pass_set_viewport(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
x,
y,
@ -208,7 +195,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_stencil_reference(&mut self, reference: u32) {
DynContext::render_pass_set_stencil_reference(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
reference,
);
@ -218,7 +204,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn insert_debug_marker(&mut self, label: &str) {
DynContext::render_pass_insert_debug_marker(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
label,
);
@ -228,7 +213,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn push_debug_group(&mut self, label: &str) {
DynContext::render_pass_push_debug_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
label,
);
@ -236,11 +220,7 @@ impl<'encoder> RenderPass<'encoder> {
/// Stops command recording and closes the debug marker group.
pub fn pop_debug_group(&mut self) {
DynContext::render_pass_pop_debug_group(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut());
}
/// Draws primitives from the active vertex buffer(s).
@ -267,7 +247,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
DynContext::render_pass_draw(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
vertices,
instances,
@ -301,7 +280,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
DynContext::render_pass_draw_indexed(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
indices,
base_vertex,
@ -325,9 +303,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) {
DynContext::render_pass_draw_indirect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
);
@ -354,9 +330,7 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_draw_indexed_indirect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
);
@ -371,13 +345,10 @@ impl<'encoder> RenderPass<'encoder> {
&mut self,
render_bundles: I,
) {
let mut render_bundles = render_bundles
.into_iter()
.map(|rb| (&rb.id, rb.data.as_ref()));
let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref());
DynContext::render_pass_execute_bundles(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&mut render_bundles,
)
@ -404,9 +375,7 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_multi_draw_indirect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
count,
@ -432,9 +401,7 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_multi_draw_indexed_indirect(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
count,
@ -476,12 +443,9 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_multi_draw_indirect_count(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
&count_buffer.id,
count_buffer.data.as_ref(),
count_offset,
max_count,
@ -523,12 +487,9 @@ impl<'encoder> RenderPass<'encoder> {
) {
DynContext::render_pass_multi_draw_indexed_indirect_count(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&indirect_buffer.id,
indirect_buffer.data.as_ref(),
indirect_offset,
&count_buffer.id,
count_buffer.data.as_ref(),
count_offset,
max_count,
@ -581,7 +542,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
DynContext::render_pass_set_push_constants(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
stages,
offset,
@ -602,9 +562,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_write_timestamp(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(),
query_index,
)
@ -617,7 +575,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn begin_occlusion_query(&mut self, query_index: u32) {
DynContext::render_pass_begin_occlusion_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
query_index,
);
@ -626,11 +583,7 @@ impl<'encoder> RenderPass<'encoder> {
/// End the occlusion query on this render pass. It can be started with
/// `begin_occlusion_query`. Occlusion queries may not be nested.
pub fn end_occlusion_query(&mut self) {
DynContext::render_pass_end_occlusion_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut());
}
}
@ -641,9 +594,7 @@ impl<'encoder> RenderPass<'encoder> {
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
DynContext::render_pass_begin_pipeline_statistics_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
&query_set.id,
query_set.data.as_ref(),
query_index,
);
@ -654,7 +605,6 @@ impl<'encoder> RenderPass<'encoder> {
pub fn end_pipeline_statistics_query(&mut self) {
DynContext::render_pass_end_pipeline_statistics_query(
&*self.inner.context,
&mut self.inner.id,
self.inner.data.as_mut(),
);
}
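
For illustration, a minimal sketch of how the render-pass methods above fit together in user code, including the occlusion-query pairing rules. This is not part of the diff: `device`, `encoder`, `pipeline`, and `color_attachment` are assumed to already exist, and the labels and counts are hypothetical.

let query_set = device.create_query_set(&wgpu::QuerySetDescriptor {
    label: Some("occlusion queries"),
    ty: wgpu::QueryType::Occlusion,
    count: 1, // hypothetical: a single query slot
});
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
    label: Some("main pass"),
    color_attachments: &[Some(color_attachment)],
    depth_stencil_attachment: None,
    timestamp_writes: None,
    occlusion_query_set: Some(&query_set),
});
pass.set_pipeline(&pipeline); // a pipeline must be set before drawing
pass.begin_occlusion_query(0); // occlusion queries may not be nested
pass.draw(0..3, 0..1); // three vertices, one instance
pass.end_occlusion_query();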


@ -1,6 +1,5 @@
use std::{num::NonZeroU32, sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a rendering (graphics) pipeline.
@ -12,37 +11,29 @@ use crate::*;
#[derive(Debug)]
pub struct RenderPipeline {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
super::impl_partialeq_eq_hash!(RenderPipeline);
impl Drop for RenderPipeline {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.render_pipeline_drop(&self.id, self.data.as_ref());
self.context.render_pipeline_drop(self.data.as_ref());
}
}
}
impl RenderPipeline {
/// Returns a globally-unique identifier for this `RenderPipeline`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Get an object representing the bind group layout at a given index.
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
let context = Arc::clone(&self.context);
let (id, data) =
self.context
.render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index);
BindGroupLayout { context, id, data }
let data = self
.context
.render_pipeline_get_bind_group_layout(self.data.as_ref(), index);
BindGroupLayout { context, data }
}
}
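
A hedged sketch of what replaces `.global_id()` comparisons for pipelines: the `PartialEq`/`Eq` impls added by `impl_partialeq_eq_hash!` answer "is this the same resource?" directly. `pipeline_a` and `pipeline_b` are assumed to be existing `RenderPipeline`s.

// Equality now means "same underlying resource", replacing global_id() checks.
if pipeline_a == pipeline_b {
    // e.g. skip a redundant set_pipeline call
}
// Bind group layouts are still recovered from the pipeline by index:
let layout: wgpu::BindGroupLayout = pipeline_a.get_bind_group_layout(0);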


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a sampler.
@ -15,26 +14,17 @@ use crate::*;
#[derive(Debug)]
pub struct Sampler {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Sampler: Send, Sync);
impl Sampler {
/// Returns a globally-unique identifier for this `Sampler`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
}
super::impl_partialeq_eq_hash!(Sampler);
impl Drop for Sampler {
fn drop(&mut self) {
if !thread::panicking() {
self.context.sampler_drop(&self.id, self.data.as_ref());
self.context.sampler_drop(self.data.as_ref());
}
}
}
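
Since `Hash` and `Eq` are now implemented, a sampler can key a map directly where a `global_id()` value would previously have been used. A minimal sketch, with the cache layout purely hypothetical:

use std::collections::HashMap;

// Hypothetical cache: bind groups keyed by the sampler they were built with.
fn lookup<'a>(
    cache: &'a HashMap<wgpu::Sampler, wgpu::BindGroup>,
    sampler: &wgpu::Sampler,
) -> Option<&'a wgpu::BindGroup> {
    cache.get(sampler)
}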


@ -1,6 +1,5 @@
use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a compiled shader module.
@ -14,34 +13,25 @@ use crate::*;
#[derive(Debug)]
pub struct ShaderModule {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
super::impl_partialeq_eq_hash!(ShaderModule);
impl Drop for ShaderModule {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.shader_module_drop(&self.id, self.data.as_ref());
self.context.shader_module_drop(self.data.as_ref());
}
}
}
impl ShaderModule {
/// Returns a globally-unique identifier for this `ShaderModule`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
/// Get the compilation info for the shader module.
pub fn get_compilation_info(&self) -> impl Future<Output = CompilationInfo> + WasmNotSend {
self.context
.shader_get_compilation_info(&self.id, self.data.as_ref())
self.context.shader_get_compilation_info(self.data.as_ref())
}
}
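
A small usage sketch for `get_compilation_info` as it now stands (no id argument); the function name and logging are illustrative only:

async fn log_shader_messages(module: &wgpu::ShaderModule) {
    // Resolves once the shader compiler's messages are available.
    let info = module.get_compilation_info().await;
    for message in &info.messages {
        eprintln!("{:?}: {}", message.message_type, message.message);
    }
}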


@ -3,7 +3,7 @@ use std::{error, fmt, sync::Arc, thread};
use parking_lot::Mutex;
use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Describes a [`Surface`].
@ -32,9 +32,6 @@ pub struct Surface<'window> {
/// would become invalid when the window is dropped.
pub(crate) _handle_source: Option<Box<dyn WindowHandle + 'window>>,
/// Wgpu-core surface id.
pub(crate) id: ObjectId,
/// Additional surface data returned by [`DynContext::instance_create_surface`].
pub(crate) surface_data: Box<Data>,
@ -48,23 +45,13 @@ pub struct Surface<'window> {
}
impl Surface<'_> {
/// Returns a globally-unique identifier for this `Surface`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Surface<'_>> {
Id::new(self.id)
}
/// Returns the capabilities of the surface when used with the given adapter.
///
/// Returns the specified values (see [`SurfaceCapabilities`]) if the surface is incompatible with the adapter.
pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
DynContext::surface_get_capabilities(
&*self.context,
&self.id,
self.surface_data.as_ref(),
&adapter.id,
adapter.data.as_ref(),
)
}
@ -101,9 +88,7 @@ impl Surface<'_> {
pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
DynContext::surface_configure(
&*self.context,
&self.id,
self.surface_data.as_ref(),
&device.id,
device.data.as_ref(),
config,
);
@ -121,11 +106,8 @@ impl Surface<'_> {
/// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated,
/// recreating the swapchain will panic.
pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
let (texture_id, texture_data, status, detail) = DynContext::surface_get_current_texture(
&*self.context,
&self.id,
self.surface_data.as_ref(),
);
let (texture_data, status, detail) =
DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref());
let suboptimal = match status {
SurfaceStatus::Good => false,
@ -155,12 +137,10 @@ impl Surface<'_> {
view_formats: &[],
};
texture_id
.zip(texture_data)
.map(|(id, data)| SurfaceTexture {
texture_data
.map(|data| SurfaceTexture {
texture: Texture {
context: Arc::clone(&self.context),
id,
data,
owned: false,
descriptor,
@ -188,7 +168,7 @@ impl Surface<'_> {
.downcast_ref::<crate::backend::ContextWgpuCore>()
.map(|ctx| unsafe {
ctx.surface_as_hal::<A, F, R>(
self.surface_data.downcast_ref().unwrap(),
crate::context::downcast_ref(&self.surface_data),
hal_surface_callback,
)
})
@ -209,7 +189,6 @@ impl<'window> fmt::Debug for Surface<'window> {
"None"
},
)
.field("id", &self.id)
.field("data", &self.surface_data)
.field("config", &self.config)
.finish()
@ -222,8 +201,7 @@ static_assertions::assert_impl_all!(Surface<'_>: Send, Sync);
impl Drop for Surface<'_> {
fn drop(&mut self) {
if !thread::panicking() {
self.context
.surface_drop(&self.id, self.surface_data.as_ref())
self.context.surface_drop(self.surface_data.as_ref())
}
}
}
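
For context, a hedged sketch of the acquire/present cycle around `get_current_texture`, assuming `surface`, `device`, and `config` already exist and the surface has been configured:

match surface.get_current_texture() {
    Ok(frame) => {
        let view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        // ... encode and submit rendering to `view` ...
        frame.present();
    }
    // A lost or outdated swapchain is recovered by reconfiguring the surface.
    Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
        surface.configure(&device, &config);
    }
    Err(e) => eprintln!("frame acquisition failed: {e:?}"),
}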


@ -1,6 +1,6 @@
use std::{sync::Arc, thread};
use crate::context::{DynContext, ObjectId};
use crate::context::DynContext;
use crate::*;
/// Handle to a texture on the GPU.
@ -11,7 +11,6 @@ use crate::*;
#[derive(Debug)]
pub struct Texture {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
pub(crate) owned: bool,
pub(crate) descriptor: TextureDescriptor<'static>,
@ -19,15 +18,9 @@ pub struct Texture {
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Texture: Send, Sync);
impl Texture {
/// Returns a globally-unique identifier for this `Texture`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
super::impl_partialeq_eq_hash!(Texture);
impl Texture {
/// Returns the inner hal Texture using a callback. The hal texture will be `None` if the
/// backend type argument does not match this wgpu Texture.
///
@ -39,14 +32,17 @@ impl Texture {
&self,
hal_texture_callback: F,
) -> R {
let texture = self.data.as_ref().downcast_ref().unwrap();
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
unsafe { ctx.texture_as_hal::<A, F, R>(texture, hal_texture_callback) }
unsafe {
ctx.texture_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_texture_callback,
)
}
} else {
hal_texture_callback(None)
}
@ -54,18 +50,16 @@ impl Texture {
/// Creates a view of this texture.
pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView {
let (id, data) =
DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc);
let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc);
TextureView {
context: Arc::clone(&self.context),
id,
data,
}
}
/// Destroy the associated native resources as soon as possible.
pub fn destroy(&self) {
DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref());
DynContext::texture_destroy(&*self.context, self.data.as_ref());
}
/// Make an `ImageCopyTexture` representing the whole texture.
@ -145,7 +139,7 @@ impl Texture {
impl Drop for Texture {
fn drop(&mut self) {
if self.owned && !thread::panicking() {
self.context.texture_drop(&self.id, self.data.as_ref());
self.context.texture_drop(self.data.as_ref());
}
}
}
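
A short sketch of the updated `create_view`/`destroy` calls from the caller's side; `texture` is assumed to be an existing `Texture` and the label is hypothetical:

let view = texture.create_view(&wgpu::TextureViewDescriptor {
    label: Some("color view"), // hypothetical label
    ..Default::default()
});
// `destroy` frees the native resource as soon as possible; the Rust handle
// stays valid to hold, but later GPU use is a validation error.
texture.destroy();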


@ -1,6 +1,5 @@
use std::{sync::Arc, thread};
use crate::context::ObjectId;
use crate::*;
/// Handle to a texture view.
@ -12,21 +11,14 @@ use crate::*;
#[derive(Debug)]
pub struct TextureView {
pub(crate) context: Arc<C>,
pub(crate) id: ObjectId,
pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(TextureView: Send, Sync);
impl TextureView {
/// Returns a globally-unique identifier for this `TextureView`.
///
/// Calling this method multiple times on the same object will always return the same value.
/// The returned value is guaranteed to be different for all resources created from the same `Instance`.
pub fn global_id(&self) -> Id<Self> {
Id::new(self.id)
}
super::impl_partialeq_eq_hash!(TextureView);
impl TextureView {
/// Returns the inner hal TextureView using a callback. The hal texture view will be `None` if the
/// backend type argument does not match this wgpu TextureView.
///
@ -38,17 +30,16 @@ impl TextureView {
&self,
hal_texture_view_callback: F,
) -> R {
use wgc::id::TextureViewId;
let texture_view_id = TextureViewId::from(self.id);
if let Some(ctx) = self
.context
.as_any()
.downcast_ref::<crate::backend::ContextWgpuCore>()
{
unsafe {
ctx.texture_view_as_hal::<A, F, R>(texture_view_id, hal_texture_view_callback)
ctx.texture_view_as_hal::<A, F, R>(
crate::context::downcast_ref(&self.data),
hal_texture_view_callback,
)
}
} else {
hal_texture_view_callback(None)
@ -59,7 +50,7 @@ impl TextureView {
impl Drop for TextureView {
fn drop(&mut self) {
if !thread::panicking() {
self.context.texture_view_drop(&self.id, self.data.as_ref());
self.context.texture_view_drop(self.data.as_ref());
}
}
}
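
A hedged sketch of calling `as_hal` on a view, assuming a native build with the Vulkan backend available; it is `unsafe` because the raw handle handed to the callback must not outlive it:

// The callback receives Option<&hal::vulkan::TextureView>, valid only inside it.
let backend_is_vulkan = unsafe {
    view.as_hal::<wgpu::hal::api::Vulkan, _, _>(|hal_view| hal_view.is_some())
};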

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -125,7 +125,6 @@ impl DownloadBuffer {
let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
&*download.context,
&download.id,
download.data.as_ref(),
0..size,
);
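
The hunk above touches the internals of `wgpu::util::DownloadBuffer`; for orientation, a hedged sketch of its public entry point. `device`, `queue`, and `buffer` are assumed to exist, and `buffer` needs `COPY_SRC` usage:

wgpu::util::DownloadBuffer::read_buffer(
    &device,
    &queue,
    &buffer.slice(..),
    |result| {
        let download = result.expect("buffer readback failed");
        // DownloadBuffer derefs to [u8].
        println!("first bytes: {:?}", &download[..4.min(download.len())]);
    },
);
// Drive the mapping callback to completion.
device.poll(wgpu::Maintain::wait()).panic_on_timeout();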