mirror of https://github.com/gfx-rs/wgpu.git
Merge #489

489: All the missing Gecko fixes r=grovesNL a=kvark

Last auto-integration in #474 got borked, partially because the original change was reverted in mozilla-central. This is the manual sync-up from m-c. Hopefully it doesn't break anything new we got here.

Fixes #457
Fixes #463

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
commit 557ed0674c

ffi/wgpu.h
@@ -255,14 +255,14 @@ typedef enum {
   WGPUVertexFormat_Int4 = 48,
 } WGPUVertexFormat;
 
-typedef uint64_t WGPUId_Device_Dummy;
-
-typedef WGPUId_Device_Dummy WGPUDeviceId;
-
 typedef uint64_t WGPUId_Adapter_Dummy;
 
 typedef WGPUId_Adapter_Dummy WGPUAdapterId;
 
+typedef uint64_t WGPUId_Device_Dummy;
+
+typedef WGPUId_Device_Dummy WGPUDeviceId;
+
 typedef struct {
   bool anisotropic_filtering;
 } WGPUExtensions;

@@ -413,6 +413,8 @@ typedef WGPURawPass *WGPUComputePassId;
 
 typedef const char *WGPURawString;
 
+typedef uint32_t WGPUDynamicOffset;
+
 typedef uint64_t WGPUId_ComputePipeline_Dummy;
 
 typedef WGPUId_ComputePipeline_Dummy WGPUComputePipelineId;

@@ -706,6 +708,8 @@ typedef struct {
   uint32_t array_layer_count;
 } WGPUTextureViewDescriptor;
 
+void wgpu_adapter_destroy(WGPUAdapterId adapter_id);
+
 WGPUDeviceId wgpu_adapter_request_device(WGPUAdapterId adapter_id,
                                          const WGPUDeviceDescriptor *desc);
 
@@ -729,6 +733,13 @@ void wgpu_buffer_unmap(WGPUBufferId buffer_id);
 
 void wgpu_command_buffer_destroy(WGPUCommandBufferId command_buffer_id);
 
+/**
+ * # Safety
+ *
+ * This function is unsafe because improper use may lead to memory
+ * problems. For example, a double-free may occur if the function is called
+ * twice on the same raw pointer.
+ */
 WGPURawPass *wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encoder_id,
                                                      const WGPUComputePassDescriptor *_desc);
 
@@ -769,6 +780,8 @@ void wgpu_command_encoder_destroy(WGPUCommandEncoderId command_encoder_id);
 WGPUCommandBufferId wgpu_command_encoder_finish(WGPUCommandEncoderId encoder_id,
                                                 const WGPUCommandBufferDescriptor *desc);
 
+void wgpu_compute_pass_destroy(WGPURawPass *pass);
+
 void wgpu_compute_pass_dispatch(WGPURawPass *pass,
                                 uint32_t groups_x,
                                 uint32_t groups_y,

@@ -778,15 +791,10 @@ void wgpu_compute_pass_dispatch_indirect(WGPURawPass *pass,
                                          WGPUBufferId buffer_id,
                                          WGPUBufferAddress offset);
 
-/**
- * # Safety
- *
- * This function is unsafe because improper use may lead to memory
- * problems. For example, a double-free may occur if the function is called
- * twice on the same raw pointer.
- */
-void wgpu_compute_pass_end_pass(WGPUComputePassId pass_id);
+const uint8_t *wgpu_compute_pass_finish(WGPURawPass *pass, uintptr_t *length);
 
 void wgpu_compute_pass_insert_debug_marker(WGPURawPass *_pass, WGPURawString _label);
 
 void wgpu_compute_pass_pop_debug_group(WGPURawPass *_pass);

@@ -802,7 +810,7 @@ void wgpu_compute_pass_push_debug_group(WGPURawPass *_pass, WGPURawString _label
 void wgpu_compute_pass_set_bind_group(WGPURawPass *pass,
                                       uint32_t index,
                                       WGPUBindGroupId bind_group_id,
-                                      const WGPUBufferAddress *offsets,
+                                      const WGPUDynamicOffset *offsets,
                                       uintptr_t offset_length);
 
 void wgpu_compute_pass_set_pipeline(WGPURawPass *pass, WGPUComputePipelineId pipeline_id);

@@ -874,6 +882,8 @@ void wgpu_queue_submit(WGPUQueueId queue_id,
                        const WGPUCommandBufferId *command_buffers,
                        uintptr_t command_buffers_length);
 
+void wgpu_render_pass_destroy(WGPURawRenderPass *pass);
+
 void wgpu_render_pass_draw(WGPURawRenderPass *pass,
                            uint32_t vertex_count,
                            uint32_t instance_count,

@@ -908,6 +918,8 @@ void wgpu_render_pass_execute_bundles(WGPURawRenderPass *_pass,
                                       const WGPURenderBundleId *_bundles,
                                       uintptr_t _bundles_length);
 
+const uint8_t *wgpu_render_pass_finish(WGPURawRenderPass *pass, uintptr_t *length);
+
 void wgpu_render_pass_insert_debug_marker(WGPURawRenderPass *_pass, WGPURawString _label);
 
 void wgpu_render_pass_pop_debug_group(WGPURawRenderPass *_pass);

@@ -923,7 +935,7 @@ void wgpu_render_pass_push_debug_group(WGPURawRenderPass *_pass, WGPURawString _label
 void wgpu_render_pass_set_bind_group(WGPURawRenderPass *pass,
                                      uint32_t index,
                                      WGPUBindGroupId bind_group_id,
-                                     const WGPUBufferAddress *offsets,
+                                     const WGPUDynamicOffset *offsets,
                                      uintptr_t offset_length);
 
 void wgpu_render_pass_set_blend_color(WGPURawRenderPass *pass, const WGPUColor *color);
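The visible theme of the header changes is the new WGPUDynamicOffset type: dynamic offsets for set_bind_group now cross the FFI boundary as 32-bit values (uint32_t), while buffer addresses stay 64-bit. A minimal sketch of how the Rust side of such a binding reconstitutes the pointer/length pair (hypothetical helper, not part of this patch):

type DynamicOffset = u32; // mirrors `pub type DynamicOffset = u32;` in wgpu-core

// Hypothetical helper: the FFI passes `*const DynamicOffset` plus a length,
// and the implementation turns it into a slice. With u32 offsets the
// pointer now advances in 4-byte steps rather than the 8-byte steps of
// the old `*const WGPUBufferAddress` signature.
unsafe fn offsets_slice<'a>(
    offsets: *const DynamicOffset,
    offset_length: usize,
) -> &'a [DynamicOffset] {
    if offsets.is_null() {
        &[]
    } else {
        std::slice::from_raw_parts(offsets, offset_length)
    }
}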
@@ -49,4 +49,4 @@ gfx-backend-dx11 = { version = "0.4" }
 gfx-backend-vulkan = { version = "0.4" }
 
 [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "dragonfly", target_os = "freebsd"))'.dependencies]
-battery = "0.7"
+battery = { version = "0.7", optional = true }
@@ -6,7 +6,7 @@ use crate::{
     binding_model::BindGroup,
     hub::GfxBackend,
     id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId},
-    BufferAddress,
+    DynamicOffset,
     Stored,
 };
 
@@ -26,7 +26,7 @@ pub struct BindGroupPair {
 #[derive(Debug)]
 pub enum LayoutChange<'a> {
     Unchanged,
-    Match(BindGroupId, &'a [BufferAddress]),
+    Match(BindGroupId, &'a [DynamicOffset]),
     Mismatch,
 }
 
@@ -41,7 +41,7 @@ pub struct FollowUpIter<'a> {
     iter: slice::Iter<'a, BindGroupEntry>,
 }
 impl<'a> Iterator for FollowUpIter<'a> {
-    type Item = (BindGroupId, &'a [BufferAddress]);
+    type Item = (BindGroupId, &'a [DynamicOffset]);
     fn next(&mut self) -> Option<Self::Item> {
         self.iter
             .next()

@@ -58,7 +58,7 @@ impl<'a> Iterator for FollowUpIter<'a> {
 pub struct BindGroupEntry {
     expected_layout_id: Option<BindGroupLayoutId>,
     provided: Option<BindGroupPair>,
-    dynamic_offsets: Vec<BufferAddress>,
+    dynamic_offsets: Vec<DynamicOffset>,
 }
 
 impl BindGroupEntry {

@@ -66,7 +66,7 @@ impl BindGroupEntry {
         &mut self,
         bind_group_id: BindGroupId,
         bind_group: &BindGroup<B>,
-        offsets: &[BufferAddress],
+        offsets: &[DynamicOffset],
     ) -> Provision {
         debug_assert_eq!(B::VARIANT, bind_group_id.backend());
 
@@ -167,7 +167,7 @@ impl Binder {
         index: usize,
         bind_group_id: BindGroupId,
         bind_group: &BindGroup<B>,
-        offsets: &[BufferAddress],
+        offsets: &[DynamicOffset],
     ) -> Option<(PipelineLayoutId, FollowUpIter<'a>)> {
         log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
         debug_assert_eq!(B::VARIANT, bind_group_id.backend());
@@ -13,6 +13,7 @@ use crate::{
     id,
     resource::BufferUsage,
     BufferAddress,
+    DynamicOffset,
 };
 
 use hal::command::CommandBuffer as _;

@@ -27,7 +28,7 @@ enum ComputeCommand {
         index: u8,
         num_dynamic_offsets: u8,
         bind_group_id: id::BindGroupId,
-        phantom_offsets: PhantomSlice<BufferAddress>,
+        phantom_offsets: PhantomSlice<DynamicOffset>,
     },
     SetPipeline(id::ComputePipelineId),
     Dispatch([u32; 3]),

@@ -43,8 +44,9 @@ impl super::RawPass {
         Self::from_vec(Vec::<ComputeCommand>::with_capacity(1), parent)
     }
 
-    pub unsafe fn finish_compute(self) -> (Vec<u8>, id::CommandEncoderId) {
-        self.finish_with(ComputeCommand::End)
+    pub unsafe fn finish_compute(mut self) -> (Vec<u8>, id::CommandEncoderId) {
+        self.finish(ComputeCommand::End);
+        self.into_vec()
     }
 }
 
@@ -87,14 +89,14 @@ impl<F> Global<F> {
             match command {
                 ComputeCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => {
                     let (new_peeker, offsets) = unsafe {
-                        phantom_offsets.decode(peeker, num_dynamic_offsets as usize, raw_data_end)
+                        phantom_offsets.decode_unaligned(peeker, num_dynamic_offsets as usize, raw_data_end)
                     };
                     peeker = new_peeker;
 
                     if cfg!(debug_assertions) {
                         for off in offsets {
                             assert_eq!(
-                                *off % BIND_BUFFER_ALIGNMENT,
+                                *off as BufferAddress % BIND_BUFFER_ALIGNMENT,
                                 0,
                                 "Misaligned dynamic buffer offset: {} does not align with {}",
                                 off,

@@ -221,6 +223,7 @@ pub mod compute_ffi {
     use crate::{
         id,
         BufferAddress,
+        DynamicOffset,
         RawString,
     };
     use std::{convert::TryInto, slice};

@@ -236,7 +239,7 @@ pub mod compute_ffi {
         pass: &mut RawPass,
         index: u32,
         bind_group_id: id::BindGroupId,
-        offsets: *const BufferAddress,
+        offsets: *const DynamicOffset,
         offset_length: usize,
     ) {
         pass.encode(&ComputeCommand::SetBindGroup {

@@ -302,4 +305,14 @@ pub mod compute_ffi {
     ) {
         //TODO
     }
+
+    #[no_mangle]
+    pub unsafe extern "C" fn wgpu_compute_pass_finish(
+        pass: &mut RawPass,
+        length: &mut usize,
+    ) -> *const u8 {
+        pass.finish(ComputeCommand::End);
+        *length = pass.size();
+        pass.base
+    }
 }
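wgpu_compute_pass_finish replaces the old end-pass entry point at this layer: the pass encodes commands into a raw byte stream, and finish appends ComputeCommand::End, then writes the stream size through the length out-parameter and returns the base pointer. A simplified, self-contained model of that contract (a plain Vec<u8> standing in for the real RawPass/peek-poke machinery):

// Simplified model: append a terminator, then expose the encoded bytes
// as a (pointer, length) pair for the caller.
struct FakePass {
    data: Vec<u8>,
}

impl FakePass {
    const END: u8 = 0; // stand-in for the encoded ComputeCommand::End

    fn finish(&mut self, length: &mut usize) -> *const u8 {
        self.data.push(Self::END);
        *length = self.data.len();
        // The pointer borrows from the pass; the caller must copy the
        // bytes out (or keep the pass alive) before using them.
        self.data.as_ptr()
    }
}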
@@ -31,6 +31,7 @@ use crate::{
 use std::{
     marker::PhantomData,
     mem,
+    ptr,
     slice,
     thread::ThreadId,
 };

@@ -44,7 +45,7 @@ impl<T> PhantomSlice<T> {
         PhantomSlice(PhantomData)
     }
 
-    unsafe fn decode<'a>(
+    unsafe fn decode_unaligned<'a>(
         self, pointer: *const u8, count: usize, bound: *const u8
     ) -> (*const u8, &'a [T]) {
         let align_offset = pointer.align_offset(mem::align_of::<T>());

@@ -84,18 +85,29 @@ impl RawPass {
     ///
     /// The last command is provided, yet the encoder
     /// is guaranteed to have exactly `C::max_size()` space for it.
-    unsafe fn finish_with<C: peek_poke::Poke>(
-        mut self, command: C
-    ) -> (Vec<u8>, id::CommandEncoderId) {
+    unsafe fn finish<C: peek_poke::Poke>(
+        &mut self, command: C
+    ) {
         self.ensure_extra_size(C::max_size());
-        command.poke_into(self.data);
-        let size = self.data as usize + C::max_size() - self.base as usize;
+        let extended_end = self.data.add(C::max_size());
+        let end = command.poke_into(self.data);
+        ptr::write_bytes(end, 0, extended_end as usize - end as usize);
+        self.data = extended_end;
+    }
+
+    fn size(&self) -> usize {
+        self.data as usize - self.base as usize
+    }
+
+    pub unsafe fn into_vec(self) -> (Vec<u8>, id::CommandEncoderId) {
+        let size = self.size();
         assert!(size <= self.capacity);
-        (Vec::from_raw_parts(self.base, size, self.capacity), self.parent)
+        let vec = Vec::from_raw_parts(self.base, size, self.capacity);
+        (vec, self.parent)
     }
 
     unsafe fn ensure_extra_size(&mut self, extra_size: usize) {
-        let size = self.data as usize - self.base as usize;
+        let size = self.size();
         if size + extra_size > self.capacity {
             let mut vec = Vec::from_raw_parts(self.base, size, self.capacity);
             vec.reserve(extra_size);

@@ -196,15 +208,6 @@ pub struct CommandBufferDescriptor {
     pub todo: u32,
 }
 
-#[no_mangle]
-pub extern "C" fn wgpu_command_encoder_begin_compute_pass(
-    encoder_id: id::CommandEncoderId,
-    _desc: Option<&ComputePassDescriptor>,
-) -> *mut RawPass {
-    let pass = RawPass::new_compute(encoder_id);
-    Box::into_raw(Box::new(pass))
-}
-
 type RawRenderPassColorAttachmentDescriptor =
     RenderPassColorAttachmentDescriptorBase<id::TextureViewId, id::TextureViewId>;
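The finish_with/into_vec refactor splits "terminate the stream" from "hand back the backing Vec": finish now borrows the pass, reserves C::max_size() bytes, pokes the command, and zero-fills the slack; into_vec alone reconstructs the owning Vec from the raw parts. A rough, safe-code model of the zero-padding step:

// Rough model of finish(): the terminating command is written into a
// slot of exactly MAX_SIZE bytes with the unused tail zero-filled
// (ptr::write_bytes in the real code), matching the doc comment's
// guarantee that the encoder has exactly `C::max_size()` space for it.
const MAX_SIZE: usize = 8; // stand-in for C::max_size()

fn poke_padded(stream: &mut Vec<u8>, encoded: &[u8]) {
    assert!(encoded.len() <= MAX_SIZE);
    stream.extend_from_slice(encoded);
    stream.extend(std::iter::repeat(0u8).take(MAX_SIZE - encoded.len()));
}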
@@ -24,6 +24,7 @@ use crate::{
     track::TrackerSet,
     BufferAddress,
     Color,
+    DynamicOffset,
     Stored,
 };
 
@@ -106,7 +107,7 @@ enum RenderCommand {
         index: u8,
         num_dynamic_offsets: u8,
         bind_group_id: id::BindGroupId,
-        phantom_offsets: PhantomSlice<BufferAddress>,
+        phantom_offsets: PhantomSlice<DynamicOffset>,
     },
     SetPipeline(id::RenderPipelineId),
     SetIndexBuffer {

@@ -159,8 +160,9 @@ impl super::RawPass {
 }
 
 impl super::RawRenderPass {
-    pub unsafe fn finish_render(self) -> (Vec<u8>, id::CommandEncoderId, RawRenderTargets) {
-        let (vec, parent_id) = self.raw.finish_with(RenderCommand::End);
+    pub unsafe fn finish_render(mut self) -> (Vec<u8>, id::CommandEncoderId, RawRenderTargets) {
+        self.raw.finish(RenderCommand::End);
+        let (vec, parent_id) = self.raw.into_vec();
         (vec, parent_id, self.targets)
     }
 }

@@ -784,14 +786,14 @@ impl<F> Global<F> {
             match command {
                 RenderCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => {
                     let (new_peeker, offsets) = unsafe {
-                        phantom_offsets.decode(peeker, num_dynamic_offsets as usize, raw_data_end)
+                        phantom_offsets.decode_unaligned(peeker, num_dynamic_offsets as usize, raw_data_end)
                     };
                     peeker = new_peeker;
 
                     if cfg!(debug_assertions) {
                         for off in offsets {
                             assert_eq!(
-                                *off % BIND_BUFFER_ALIGNMENT,
+                                *off as BufferAddress % BIND_BUFFER_ALIGNMENT,
                                 0,
                                 "Misaligned dynamic buffer offset: {} does not align with {}",
                                 off,

@@ -943,10 +945,10 @@ impl<F> Global<F> {
                 }
                 RenderCommand::SetVertexBuffers { start_index, count, phantom_buffer_ids, phantom_offsets } => {
                     let (new_peeker, buffer_ids) = unsafe {
-                        phantom_buffer_ids.decode(peeker, count as usize, raw_data_end)
+                        phantom_buffer_ids.decode_unaligned(peeker, count as usize, raw_data_end)
                     };
                     let (new_peeker, offsets) = unsafe {
-                        phantom_offsets.decode(new_peeker, count as usize, raw_data_end)
+                        phantom_offsets.decode_unaligned(new_peeker, count as usize, raw_data_end)
                     };
                     peeker = new_peeker;
 
@@ -1117,6 +1119,7 @@ pub mod render_ffi {
         id,
         BufferAddress,
         Color,
+        DynamicOffset,
         RawString,
     };
     use std::{convert::TryInto, slice};

@@ -1132,7 +1135,7 @@ pub mod render_ffi {
         pass: &mut RawRenderPass,
         index: u32,
         bind_group_id: id::BindGroupId,
-        offsets: *const BufferAddress,
+        offsets: *const DynamicOffset,
         offset_length: usize,
     ) {
         pass.raw.encode(&RenderCommand::SetBindGroup {

@@ -1327,4 +1330,20 @@ pub mod render_ffi {
     ) {
         //TODO
     }
+
+    #[no_mangle]
+    pub unsafe extern "C" fn wgpu_render_pass_finish(
+        pass: &mut RawRenderPass,
+        length: &mut usize,
+    ) -> *const u8 {
+        //TODO: put target information into the byte stream
+        pass.raw.finish(RenderCommand::End);
+        *length = pass.raw.size();
+        pass.raw.base
+    }
+
+    #[no_mangle]
+    pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: *mut RawRenderPass) {
+        let _ = Box::from_raw(pass).raw.into_vec();
+    }
 }
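Both replay loops now decode offsets with decode_unaligned and widen each u32 before the alignment assert, because BIND_BUFFER_ALIGNMENT is a BufferAddress (u64) constant. The check in isolation:

type BufferAddress = u64;
type DynamicOffset = u32;
// 256 is assumed here for the sketch; the real constant lives in wgpu-core.
const BIND_BUFFER_ALIGNMENT: BufferAddress = 256;

fn validate_offsets(offsets: &[DynamicOffset]) {
    for off in offsets {
        assert_eq!(
            *off as BufferAddress % BIND_BUFFER_ALIGNMENT,
            0,
            "Misaligned dynamic buffer offset: {} does not align with {}",
            off,
            BIND_BUFFER_ALIGNMENT,
        );
    }
}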
@@ -159,7 +159,7 @@ pub struct LifetimeTracker<B: hal::Backend> {
     ready_to_map: Vec<id::BufferId>,
 }
 
-impl<B: GfxBackend> LifetimeTracker<B> {
+impl<B: hal::Backend> LifetimeTracker<B> {
     pub fn new() -> Self {
         LifetimeTracker {
             mapped: Vec::new(),

@@ -202,13 +202,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
             .fold(std::usize::MAX, |v, active| active.index.min(v))
     }
 
-    /// Returns the last submission index that is done.
-    fn check_last_done(
-        &mut self,
-        device: &B::Device,
-        force_wait: bool,
-    ) -> SubmissionIndex {
-        if force_wait && !self.active.is_empty() {
+    fn wait_idle(&self, device: &B::Device) {
+        if !self.active.is_empty() {
             let status = unsafe {
                 device.wait_for_fences(
                     self.active.iter().map(|a| &a.fence),

@@ -218,7 +213,13 @@ impl<B: GfxBackend> LifetimeTracker<B> {
             };
             assert_eq!(status, Ok(true), "GPU got stuck :(");
         }
+    }
 
+    /// Returns the last submission index that is done.
+    fn check_last_done(
+        &mut self,
+        device: &B::Device,
+    ) -> SubmissionIndex {
         //TODO: enable when `is_sorted_by_key` is stable
         //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
         let done_count = self

@@ -251,7 +252,10 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         heaps_mutex: &Mutex<Heaps<B>>,
         descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
     ) -> SubmissionIndex {
-        let last_done = self.check_last_done(device, force_wait);
+        if force_wait {
+            self.wait_idle(device);
+        }
+        let last_done = self.check_last_done(device);
         unsafe {
             self.free_resources.clean(
                 device,

@@ -261,7 +265,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         }
         last_done
     }
+}
 
+impl<B: GfxBackend> LifetimeTracker<B> {
     pub(crate) fn triage_suspected<F: AllIdentityFilter>(
         &mut self,
         global: &Global<F>,
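check_last_done no longer takes force_wait; blocking is factored out into wait_idle, and cleanup composes the two. A minimal sketch of the split, with a plain done flag standing in for real fence queries:

struct Active {
    index: usize,
    done: bool,
}

struct Tracker {
    active: Vec<Active>,
}

impl Tracker {
    // Stand-in for device.wait_for_fences: checks (rather than blocks)
    // that every active submission has signaled.
    fn wait_idle(&self) {
        assert!(self.active.iter().all(|a| a.done), "GPU got stuck :(");
    }

    // Returns the last submission index that is done, dropping the
    // finished prefix of `active`.
    fn check_last_done(&mut self) -> usize {
        let done_count = self.active.iter().take_while(|a| a.done).count();
        let last_done = if done_count == 0 {
            0
        } else {
            self.active[done_count - 1].index
        };
        self.active.drain(..done_count);
        last_done
    }

    fn cleanup(&mut self, force_wait: bool) -> usize {
        if force_wait {
            self.wait_idle();
        }
        // ...the real code then frees resources owned by finished submissions
        self.check_last_done()
    }
}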
@@ -110,6 +110,11 @@ pub(crate) type RenderPassContext = AttachmentData<resource::TextureFormat>;
 type BufferMapResult = Result<*mut u8, hal::device::MapError>;
 type BufferMapPendingCallback = (resource::BufferMapOperation, BufferMapResult);
 
+pub type BufferMapReadCallback =
+    unsafe extern "C" fn(status: resource::BufferMapAsyncStatus, data: *const u8, userdata: *mut u8);
+pub type BufferMapWriteCallback =
+    unsafe extern "C" fn(status: resource::BufferMapAsyncStatus, data: *mut u8, userdata: *mut u8);
+
 fn map_buffer<B: hal::Backend>(
     raw: &B::Device,
     buffer: &mut resource::Buffer<B>,

@@ -164,6 +169,29 @@ fn unmap_buffer<B: hal::Backend>(
     buffer.memory.unmap(raw);
 }
 
+//Note: this logic is specifically moved out of `handle_mapping()` in order to
+// have nothing locked by the time we execute users callback code.
+fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
+    for (operation, result) in callbacks {
+        let (status, ptr) = match result {
+            Ok(ptr) => (resource::BufferMapAsyncStatus::Success, ptr),
+            Err(e) => {
+                log::error!("failed to map buffer: {:?}", e);
+                (resource::BufferMapAsyncStatus::Error, ptr::null_mut())
+            }
+        };
+        match operation {
+            resource::BufferMapOperation::Read(on_read) => {
+                on_read(status, ptr)
+            }
+            resource::BufferMapOperation::Write(on_write) => {
+                on_write(status, ptr)
+            }
+        }
+    }
+}
+
+
 #[derive(Debug)]
 pub struct Device<B: hal::Backend> {
     pub(crate) raw: B::Device,

@@ -269,28 +297,6 @@ impl<B: GfxBackend> Device<B> {
         callbacks
     }
 
-    //Note: this logic is specifically moved out of `handle_mapping()` in order to
-    // have nothing locked by the time we execute users callback code.
-    fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
-        for (operation, result) in callbacks {
-            let (status, ptr) = match result {
-                Ok(ptr) => (resource::BufferMapAsyncStatus::Success, ptr),
-                Err(e) => {
-                    log::error!("failed to map buffer: {:?}", e);
-                    (resource::BufferMapAsyncStatus::Error, ptr::null_mut())
-                }
-            };
-            match operation {
-                resource::BufferMapOperation::Read(on_read) => {
-                    on_read(status, ptr)
-                }
-                resource::BufferMapOperation::Write(on_write) => {
-                    on_write(status, ptr)
-                }
-            }
-        }
-    }
-
     fn create_buffer(
         &self,
         self_id: id::DeviceId,

@@ -461,6 +467,12 @@ impl<B: hal::Backend> Device<B> {
     }
 
     pub(crate) fn dispose(self) {
+        self.life_tracker.lock().cleanup(
+            &self.raw,
+            true,
+            &self.mem_allocator,
+            &self.desc_allocator,
+        );
         self.com_allocator.destroy(&self.raw);
         let desc_alloc = self.desc_allocator.into_inner();
         let mem_alloc = self.mem_allocator.into_inner();

@@ -487,6 +499,8 @@ impl<F: IdentityFilter<id::BufferId>> Global<F> {
         let hub = B::hub(self);
         let mut token = Token::root();
 
+        log::info!("Create buffer {:?} with ID {:?}", desc, id_in);
+
         let (device_guard, mut token) = hub.devices.read(&mut token);
         let device = &device_guard[device_id];
         let buffer = device.create_buffer(device_id, desc);

@@ -873,14 +887,16 @@ impl<F: IdentityFilter<id::BindGroupLayoutId>> Global<F> {
             .map(|b| (b.binding, b))
             .collect();
 
-        {
-            let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token);
-            let bind_group_layout =
-                bind_group_layout_guard
-                    .iter(device_id.backend())
-                    .find(|(_, bgl)| bgl.bindings == bindings_map);
+        // TODO: deduplicate the bind group layouts at some level.
+        // We can't do it right here, because in the remote scenario
+        // the client need to know if the same ID can be used, or not.
+        if false {
+            let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+            let bind_group_layout_id = bgl_guard
+                .iter(device_id.backend())
+                .find(|(_, bgl)| bgl.bindings == bindings_map);
 
-            if let Some((id, _)) = bind_group_layout {
+            if let Some((id, _)) = bind_group_layout_id {
                 return id;
             }
         }

@@ -914,6 +930,13 @@ impl<F: IdentityFilter<id::BindGroupLayoutId>> Global<F> {
         hub.bind_group_layouts
             .register_identity(id_in, layout, &mut token)
     }
+
+    pub fn bind_group_layout_destroy<B: GfxBackend>(&self, bind_group_layout_id: id::BindGroupLayoutId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        //TODO: track usage by GPU
+        hub.bind_group_layouts.unregister(bind_group_layout_id, &mut token);
+    }
 }
 
 impl<F: IdentityFilter<id::PipelineLayoutId>> Global<F> {

@@ -956,6 +979,13 @@ impl<F: IdentityFilter<id::PipelineLayoutId>> Global<F> {
         hub.pipeline_layouts
             .register_identity(id_in, layout, &mut token)
     }
+
+    pub fn pipeline_layout_destroy<B: GfxBackend>(&self, pipeline_layout_id: id::PipelineLayoutId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        //TODO: track usage by GPU
+        hub.pipeline_layouts.unregister(pipeline_layout_id, &mut token);
+    }
 }
 
 impl<F: IdentityFilter<id::BindGroupId>> Global<F> {

@@ -1189,6 +1219,13 @@ impl<F: IdentityFilter<id::ShaderModuleId>> Global<F> {
         hub.shader_modules
             .register_identity(id_in, shader, &mut token)
     }
+
+    pub fn shader_module_destroy<B: GfxBackend>(&self, shader_module_id: id::ShaderModuleId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        //TODO: track usage by GPU
+        hub.shader_modules.unregister(shader_module_id, &mut token);
+    }
 }
 
 impl<F: IdentityFilter<id::CommandEncoderId>> Global<F> {

@@ -1435,7 +1472,7 @@ impl<F: AllIdentityFilter + IdentityFilter<id::CommandBufferId>> Global<F> {
             callbacks
         };
 
-        Device::<B>::fire_map_callbacks(callbacks);
+        fire_map_callbacks(callbacks);
     }
 }
 
@@ -1701,6 +1738,13 @@ impl<F: IdentityFilter<id::RenderPipelineId>> Global<F> {
         hub.render_pipelines
             .register_identity(id_in, pipeline, &mut token)
     }
+
+    pub fn render_pipeline_destroy<B: GfxBackend>(&self, render_pipeline_id: id::RenderPipelineId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        //TODO: track usage by GPU
+        hub.render_pipelines.unregister(render_pipeline_id, &mut token);
+    }
 }
 
 impl<F: IdentityFilter<id::ComputePipelineId>> Global<F> {

@@ -1756,6 +1800,13 @@ impl<F: IdentityFilter<id::ComputePipelineId>> Global<F> {
         hub.compute_pipelines
             .register_identity(id_in, pipeline, &mut token)
     }
+
+    pub fn compute_pipeline_destroy<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        //TODO: track usage by GPU
+        hub.compute_pipelines.unregister(compute_pipeline_id, &mut token);
+    }
 }
 
 fn validate_swap_chain_descriptor(

@@ -1863,7 +1914,40 @@ impl<F: AllIdentityFilter> Global<F> {
             let (device_guard, mut token) = hub.devices.read(&mut token);
             device_guard[device_id].maintain(self, force_wait, &mut token)
         };
-        Device::<B>::fire_map_callbacks(callbacks);
+        fire_map_callbacks(callbacks);
     }
+
+    fn poll_devices<B: GfxBackend>(
+        &self,
+        force_wait: bool,
+        callbacks: &mut Vec<BufferMapPendingCallback>,
+    ) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        let (device_guard, mut token) = hub.devices.read(&mut token);
+        for (_, device) in device_guard.iter(B::VARIANT) {
+            let cbs = device.maintain(self, force_wait, &mut token);
+            callbacks.extend(cbs);
+        }
+    }
+
+    pub fn poll_all_devices(&self, force_wait: bool) {
+        use crate::backend;
+        let mut callbacks = Vec::new();
+
+        #[cfg(any(
+            not(any(target_os = "ios", target_os = "macos")),
+            feature = "gfx-backend-vulkan"
+        ))]
+        self.poll_devices::<backend::Vulkan>(force_wait, &mut callbacks);
+        #[cfg(windows)]
+        self.poll_devices::<backend::Dx11>(force_wait, &mut callbacks);
+        #[cfg(windows)]
+        self.poll_devices::<backend::Dx12>(force_wait, &mut callbacks);
+        #[cfg(any(target_os = "ios", target_os = "macos"))]
+        self.poll_devices::<backend::Metal>(force_wait, &mut callbacks);
+
+        fire_map_callbacks(callbacks);
+    }
 }
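fire_map_callbacks is now a free function precisely so both device_poll and the new poll_all_devices can share it: callbacks are collected while the per-backend device registries are locked, and fired only after the guards are gone. The collect-then-fire shape, reduced to its essentials:

type Callback = Box<dyn FnOnce()>;

// Gather pending callbacks from every device (locks held in the real
// code), then invoke them with no locks held, so user callbacks can
// safely re-enter the API.
fn poll_all_devices(devices: &mut [Vec<Callback>]) {
    let mut callbacks = Vec::new();
    for device in devices.iter_mut() {
        callbacks.extend(device.drain(..));
    }
    fire_map_callbacks(callbacks); // outside any lock
}

fn fire_map_callbacks<I: IntoIterator<Item = Callback>>(callbacks: I) {
    for callback in callbacks {
        callback()
    }
}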
@@ -39,7 +39,7 @@ use vec_map::VecMap;
 
 #[cfg(debug_assertions)]
 use std::cell::Cell;
-use std::{marker::PhantomData, ops};
+use std::{fmt::Debug, marker::PhantomData, ops};
 
 
 /// A simple structure to manage identities of objects.

@@ -252,13 +252,13 @@ impl<'a, T> Drop for Token<'a, T> {
 }
 
 
-pub trait IdentityFilter<I> {
-    type Input: Clone;
+pub trait IdentityFilter<I>: Debug {
+    type Input: Clone + Debug;
     fn process(&self, id: Self::Input, backend: Backend) -> I;
     fn free(&self, id: I);
 }
 
-impl<I: TypedId + Clone> IdentityFilter<I> for () {
+impl<I: TypedId + Clone + Debug> IdentityFilter<I> for () {
     type Input = I;
     fn process(&self, id: I, _backend: Backend) -> I {
         //debug_assert_eq!(id.unzip().2, backend);

@@ -267,7 +267,7 @@ impl<I: TypedId + Clone> IdentityFilter<I> for () {
     fn free(&self, _id: I) {}
 }
 
-impl<I: TypedId> IdentityFilter<I> for Mutex<IdentityManager> {
+impl<I: TypedId + Debug> IdentityFilter<I> for Mutex<IdentityManager> {
     type Input = PhantomData<I>;
     fn process(&self, _id: Self::Input, backend: Backend) -> I {
         self.lock().alloc(backend)
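The new Debug bounds exist so that generic registration code can format its inputs, e.g. the log::info!("Create buffer {:?} with ID {:?}", desc, id_in) added in device creation; without Input: Debug that line would not compile for an arbitrary filter. Reduced to a minimal example:

use std::fmt::Debug;

trait IdentityFilter<I>: Debug {
    type Input: Clone + Debug;
    fn process(&self, id: Self::Input) -> I;
}

// `{:?}` on `id_in` requires `F::Input: Debug`, which the trait now
// guarantees to all generic callers.
fn register<I, F: IdentityFilter<I>>(filter: &F, id_in: F::Input) -> I {
    log::info!("Create resource with ID {:?}", id_in);
    filter.process(id_in)
}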
@@ -56,27 +56,25 @@ impl Instance {
     }
 
     pub(crate) fn destroy_surface(&mut self, surface: Surface) {
-        //TODO: fill out the proper destruction once we are on gfx-0.4
         #[cfg(any(
             not(any(target_os = "ios", target_os = "macos")),
             feature = "gfx-backend-vulkan"
         ))]
-        {
-            if let Some(_suf) = surface.vulkan {
-                //self.vulkan.as_mut().unwrap().destroy_surface(suf);
+        unsafe {
+            if let Some(suf) = surface.vulkan {
+                self.vulkan.as_mut().unwrap().destroy_surface(suf);
             }
         }
         #[cfg(any(target_os = "ios", target_os = "macos"))]
-        {
-            let _ = surface;
-            //self.metal.destroy_surface(surface.metal);
+        unsafe {
+            self.metal.destroy_surface(surface.metal);
         }
         #[cfg(windows)]
-        {
-            if let Some(_suf) = surface.dx12 {
-                //self.dx12.as_mut().unwrap().destroy_surface(suf);
+        unsafe {
+            if let Some(suf) = surface.dx12 {
+                self.dx12.as_mut().unwrap().destroy_surface(suf);
             }
-            //self.dx11.destroy_surface(surface.dx11);
+            self.dx11.destroy_surface(surface.dx11);
         }
     }
 }

@@ -432,6 +430,12 @@ impl<F: IdentityFilter<AdapterId>> Global<F> {
         let adapter = &adapter_guard[adapter_id];
         AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend())
     }
+
+    pub fn adapter_destroy<B: GfxBackend>(&self, adapter_id: AdapterId) {
+        let hub = B::hub(self);
+        let mut token = Token::root();
+        let (_adapter, _) = hub.adapters.unregister(adapter_id, &mut token);
+    }
 }
 
 impl<F: IdentityFilter<DeviceId>> Global<F> {
@@ -59,6 +59,7 @@ pub enum Backend {
 }
 
 pub type BufferAddress = u64;
+pub type DynamicOffset = u32;
 pub type RawString = *const c_char;
 
 //TODO: make it private. Currently used for swapchain creation impl.
@@ -1,3 +1,7 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
 use std::fmt;
 
 #[derive(Debug)]

@@ -15,13 +19,13 @@ impl fmt::Display for Error {
     }
 }
 
-#[cfg(any(
+#[cfg(all(feature = "battery", any(
     target_os = "linux",
     target_os = "macos",
     target_os = "windows",
     target_os = "dragonfly",
     target_os = "freebsd"
-))]
+)))]
 mod platform {
     use super::Error;
     use battery::{self, Manager, State};

@@ -45,13 +49,13 @@ mod platform {
     }
 }
 
-#[cfg(not(any(
+#[cfg(any(not(feature = "battery"), not(any(
     target_os = "linux",
     target_os = "macos",
     target_os = "windows",
     target_os = "dragonfly",
     target_os = "freebsd"
-)))]
+))))]
 mod platform {
     use super::Error;
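Making battery optional turns the module gate into a feature-and-platform conjunction, with the stub module compiled for the complement. The pattern, condensed to a single target_os for brevity (module contents and the is_discharging name here are placeholders):

#[derive(Debug)]
pub struct Error;

// Compiled only when the `battery` feature is on AND the OS is supported.
#[cfg(all(feature = "battery", target_os = "linux"))]
mod platform {
    // the real module consults the `battery` crate here
    pub fn is_discharging() -> Result<bool, super::Error> {
        Ok(false)
    }
}

// Complement of the predicate above: feature off OR unsupported OS, so
// exactly one `platform` module exists in any build.
#[cfg(any(not(feature = "battery"), not(target_os = "linux")))]
mod platform {
    pub fn is_discharging() -> Result<bool, super::Error> {
        Ok(false) // stub: no battery information in this configuration
    }
}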
@@ -5,7 +5,6 @@
 use crate::GLOBAL;
 
 pub use core::command::{
-    wgpu_command_encoder_begin_compute_pass,
     wgpu_command_encoder_begin_render_pass,
     compute_ffi::*,
     render_ffi::*,

@@ -124,8 +123,22 @@ pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: id::RenderPassId) {
 /// This function is unsafe because improper use may lead to memory
 /// problems. For example, a double-free may occur if the function is called
 /// twice on the same raw pointer.
 #[no_mangle]
+pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
+    encoder_id: id::CommandEncoderId,
+    _desc: Option<&core::command::ComputePassDescriptor>,
+) -> *mut core::command::RawPass {
+    let pass = core::command::RawPass::new_compute(encoder_id);
+    Box::into_raw(Box::new(pass))
+}
+
+#[no_mangle]
 pub unsafe extern "C" fn wgpu_compute_pass_end_pass(pass_id: id::ComputePassId) {
     let (pass_data, encoder_id) = Box::from_raw(pass_id).finish_compute();
     gfx_select!(encoder_id => GLOBAL.command_encoder_run_compute_pass(encoder_id, &pass_data))
 }
+
+#[no_mangle]
+pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: *mut core::command::RawPass) {
+    let _ = Box::from_raw(pass).into_vec();
+}
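Here the pass created by wgpu_command_encoder_begin_compute_pass is boxed and leaked as a raw pointer, and exactly one of wgpu_compute_pass_end_pass or wgpu_compute_pass_destroy may reclaim it via Box::from_raw; that is the double-free hazard the safety comments describe. The ownership hand-off in isolation:

struct Pass {
    commands: Vec<u8>,
}

// Leak the box across the C boundary; the caller owns the pointer.
extern "C" fn pass_create() -> *mut Pass {
    Box::into_raw(Box::new(Pass { commands: Vec::new() }))
}

// Retake ownership; the Box drops (and frees) at end of scope. Calling
// this twice on the same pointer is a double-free.
unsafe extern "C" fn pass_destroy(pass: *mut Pass) {
    drop(Box::from_raw(pass));
}

fn main() {
    let p = pass_create();
    unsafe { pass_destroy(p) };
}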
@@ -13,10 +13,6 @@ use objc::{msg_send, runtime::Object, sel, sel_impl};
 
 pub type RequestAdapterCallback =
     unsafe extern "C" fn(id: id::AdapterId, userdata: *mut std::ffi::c_void);
-pub type BufferMapReadCallback =
-    unsafe extern "C" fn(status: core::resource::BufferMapAsyncStatus, data: *const u8, userdata: *mut u8);
-pub type BufferMapWriteCallback =
-    unsafe extern "C" fn(status: core::resource::BufferMapAsyncStatus, data: *mut u8, userdata: *mut u8);
 
 pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> id::SurfaceId {
     use raw_window_handle::RawWindowHandle as Rwh;

@@ -176,10 +172,15 @@ pub extern "C" fn wgpu_adapter_request_device(
     gfx_select!(adapter_id => GLOBAL.adapter_request_device(adapter_id, desc, PhantomData))
 }
 
-pub fn wgpu_adapter_get_info(adapter_id: id::AdapterId) -> core::instance::AdapterInfo {
+pub fn adapter_get_info(adapter_id: id::AdapterId) -> core::instance::AdapterInfo {
     gfx_select!(adapter_id => GLOBAL.adapter_get_info(adapter_id))
 }
 
+#[no_mangle]
+pub extern "C" fn wgpu_adapter_destroy(adapter_id: id::AdapterId) {
+    gfx_select!(adapter_id => GLOBAL.adapter_destroy(adapter_id))
+}
+
 #[no_mangle]
 pub extern "C" fn wgpu_device_get_limits(
     _device_id: id::DeviceId,

@@ -371,7 +372,7 @@ pub extern "C" fn wgpu_buffer_map_read_async(
     buffer_id: id::BufferId,
     start: core::BufferAddress,
     size: core::BufferAddress,
-    callback: BufferMapReadCallback,
+    callback: core::device::BufferMapReadCallback,
     userdata: *mut u8,
 ) {
     let operation = core::resource::BufferMapOperation::Read(

@@ -387,7 +388,7 @@ pub extern "C" fn wgpu_buffer_map_write_async(
     buffer_id: id::BufferId,
     start: core::BufferAddress,
     size: core::BufferAddress,
-    callback: BufferMapWriteCallback,
+    callback: core::device::BufferMapWriteCallback,
     userdata: *mut u8,
 ) {
     let operation = core::resource::BufferMapOperation::Write(
@@ -16,6 +16,7 @@ braces = "SameLine"
 line_length = 100
 tab_width = 2
 language = "C"
+style = "tag"
 
 [export]
 prefix = "WGPU"

@@ -25,6 +26,8 @@ exclude = ["BufferMapResult"]
 parse_deps = true
 include = ["wgpu-core"]
 
+extra_bindings = ["wgpu-core"]
+
 [fn]
 prefix = "WGPU_INLINE"
 postfix = "WGPU_FUNC"
@@ -8,6 +8,12 @@ use core::{
     Backend,
 };
 
+pub use core::command::{
+    wgpu_command_encoder_begin_render_pass,
+    compute_ffi::*,
+    render_ffi::*,
+};
+
 use parking_lot::Mutex;
 
 use std::{ptr, slice};

@@ -21,6 +27,11 @@ struct IdentityHub {
     devices: IdentityManager,
     buffers: IdentityManager,
     command_buffers: IdentityManager,
+    bind_group_layouts: IdentityManager,
+    pipeline_layouts: IdentityManager,
+    bind_groups: IdentityManager,
+    shader_modules: IdentityManager,
+    compute_pipelines: IdentityManager,
 }
 
 #[derive(Debug, Default)]

@@ -194,3 +205,151 @@ pub extern "C" fn wgpu_client_kill_encoder_id(
         .command_buffers
         .free(id)
 }
+
+#[no_mangle]
+pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
+    encoder_id: id::CommandEncoderId,
+    _desc: Option<&core::command::ComputePassDescriptor>,
+) -> core::command::RawPass {
+    core::command::RawPass::new_compute(encoder_id)
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: core::command::RawPass) {
+    let _ = pass.into_vec();
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_bind_group_layout_id(
+    client: &Client,
+    device_id: id::DeviceId,
+) -> id::BindGroupLayoutId {
+    let backend = device_id.backend();
+    client
+        .identities
+        .lock()
+        .select(backend)
+        .bind_group_layouts
+        .alloc(backend)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_bind_group_layout_id(
+    client: &Client,
+    id: id::BindGroupLayoutId,
+) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .bind_group_layouts
+        .free(id)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_pipeline_layout_id(
+    client: &Client,
+    device_id: id::DeviceId,
+) -> id::PipelineLayoutId {
+    let backend = device_id.backend();
+    client
+        .identities
+        .lock()
+        .select(backend)
+        .pipeline_layouts
+        .alloc(backend)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_pipeline_layout_id(
+    client: &Client,
+    id: id::PipelineLayoutId,
+) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .pipeline_layouts
+        .free(id)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_bind_group_id(
+    client: &Client,
+    device_id: id::DeviceId,
+) -> id::BindGroupId {
+    let backend = device_id.backend();
+    client
+        .identities
+        .lock()
+        .select(backend)
+        .bind_groups
+        .alloc(backend)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_bind_group_id(
+    client: &Client,
+    id: id::BindGroupId,
+) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .bind_groups
+        .free(id)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_shader_module_id(
+    client: &Client,
+    device_id: id::DeviceId,
+) -> id::ShaderModuleId {
+    let backend = device_id.backend();
+    client
+        .identities
+        .lock()
+        .select(backend)
+        .shader_modules
+        .alloc(backend)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_shader_module_id(
+    client: &Client,
+    id: id::ShaderModuleId,
+) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .shader_modules
+        .free(id)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_make_compute_pipeline_id(
+    client: &Client,
+    device_id: id::DeviceId,
+) -> id::ComputePipelineId {
+    let backend = device_id.backend();
+    client
+        .identities
+        .lock()
+        .select(backend)
+        .compute_pipelines
+        .alloc(backend)
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_client_kill_compute_pipeline_id(
+    client: &Client,
+    id: id::ComputePipelineId,
+) {
+    client
+        .identities
+        .lock()
+        .select(id.backend())
+        .compute_pipelines
+        .free(id)
+}
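Every new resource type on the client follows the same make/kill template: IDs are minted locally from a per-backend IdentityManager so the content process never waits on the GPU process. A toy index-recycling allocator in the same spirit (the real IdentityManager additionally tracks an epoch per index to catch stale IDs):

#[derive(Default)]
struct IdentityManager {
    free: Vec<usize>,
    next: usize,
}

impl IdentityManager {
    // Reuse a freed index if possible, otherwise mint a fresh one.
    fn alloc(&mut self) -> usize {
        self.free.pop().unwrap_or_else(|| {
            let id = self.next;
            self.next += 1;
            id
        })
    }

    fn free(&mut self, id: usize) {
        self.free.push(id);
    }
}

fn main() {
    let mut ids = IdentityManager::default();
    let a = ids.alloc();
    ids.free(a);
    assert_eq!(ids.alloc(), a); // the killed ID is recycled
}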
@@ -26,6 +26,11 @@ pub unsafe extern "C" fn wgpu_server_delete(global: *mut Global) {
     log::info!("\t...done");
 }
 
+#[no_mangle]
+pub extern "C" fn wgpu_server_poll_all_devices(global: &Global, force_wait: bool) {
+    global.poll_all_devices(force_wait);
+}
+
 /// Request an adapter according to the specified options.
 /// Provide the list of IDs to pick from.
 ///

@@ -62,6 +67,14 @@ pub extern "C" fn wgpu_server_adapter_request_device(
     gfx_select!(self_id => global.adapter_request_device(self_id, desc, new_id));
 }
 
+#[no_mangle]
+pub extern "C" fn wgpu_server_adapter_destroy(
+    global: &Global,
+    adapter_id: id::AdapterId,
+) {
+    gfx_select!(adapter_id => global.adapter_destroy(adapter_id))
+}
+
 #[no_mangle]
 pub extern "C" fn wgpu_server_device_destroy(global: &Global, self_id: id::DeviceId) {
     gfx_select!(self_id => global.device_destroy(self_id))

@@ -99,16 +112,25 @@ pub unsafe extern "C" fn wgpu_server_device_set_buffer_sub_data(
 /// This function is unsafe as there is no guarantee that the given pointer is
 /// valid for `size` elements.
 #[no_mangle]
-pub unsafe extern "C" fn wgpu_server_device_get_buffer_sub_data(
+pub extern "C" fn wgpu_server_buffer_map_read(
     global: &Global,
-    self_id: id::DeviceId,
     buffer_id: id::BufferId,
-    offset: core::BufferAddress,
-    data: *mut u8,
+    start: core::BufferAddress,
     size: core::BufferAddress,
+    callback: core::device::BufferMapReadCallback,
+    userdata: *mut u8,
 ) {
-    let slice = slice::from_raw_parts_mut(data, size as usize);
-    gfx_select!(self_id => global.device_get_buffer_sub_data(self_id, buffer_id, offset, slice));
+    let operation = core::resource::BufferMapOperation::Read(
+        Box::new(move |status, data| unsafe {
+            callback(status, data, userdata)
+        }),
+    );
+    gfx_select!(buffer_id => global.buffer_map_async(
+        buffer_id,
+        core::resource::BufferUsage::MAP_READ,
+        start .. start + size,
+        operation
+    ));
 }
 
 #[no_mangle]

@@ -120,12 +142,19 @@ pub extern "C" fn wgpu_server_buffer_destroy(global: &Global, self_id: id::BufferId
 pub extern "C" fn wgpu_server_device_create_encoder(
     global: &Global,
     self_id: id::DeviceId,
-    encoder_id: id::CommandEncoderId,
+    desc: &core::command::CommandEncoderDescriptor,
+    new_id: id::CommandEncoderId,
 ) {
-    let desc = core::command::CommandEncoderDescriptor {
-        todo: 0,
-    };
-    gfx_select!(self_id => global.device_create_command_encoder(self_id, &desc, encoder_id));
+    gfx_select!(self_id => global.device_create_command_encoder(self_id, &desc, new_id));
 }
 
 #[no_mangle]
+pub extern "C" fn wgpu_server_encoder_finish(
+    global: &Global,
+    self_id: id::CommandEncoderId,
+    desc: &core::command::CommandBufferDescriptor,
+) {
+    gfx_select!(self_id => global.command_encoder_finish(self_id, desc));
+}
+
+#[no_mangle]

@@ -140,6 +169,27 @@ pub extern "C" fn wgpu_server_encoder_destroy(
 ///
 /// This function is unsafe as there is no guarantee that the given pointer is
 /// valid for `byte_length` elements.
 #[no_mangle]
+pub extern "C" fn wgpu_server_command_buffer_destroy(
+    global: &Global,
+    self_id: id::CommandBufferId,
+) {
+    gfx_select!(self_id => global.command_buffer_destroy(self_id));
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn wgpu_server_encoder_copy_buffer_to_buffer(
+    global: &Global,
+    self_id: id::CommandEncoderId,
+    source_id: id::BufferId,
+    source_offset: core::BufferAddress,
+    destination_id: id::BufferId,
+    destination_offset: core::BufferAddress,
+    size: core::BufferAddress,
+) {
+    gfx_select!(self_id => global.command_encoder_copy_buffer_to_buffer(self_id, source_id, source_offset, destination_id, destination_offset, size));
+}
+
+#[no_mangle]
 pub unsafe extern "C" fn wgpu_server_encode_compute_pass(
     global: &Global,

@@ -185,3 +235,93 @@ pub unsafe extern "C" fn wgpu_server_queue_submit(
     let command_buffers = slice::from_raw_parts(command_buffer_ids, command_buffer_id_length);
     gfx_select!(self_id => global.queue_submit(self_id, command_buffers));
 }
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_create_bind_group_layout(
+    global: &Global,
+    self_id: id::DeviceId,
+    desc: &core::binding_model::BindGroupLayoutDescriptor,
+    new_id: id::BindGroupLayoutId,
+) {
+    gfx_select!(self_id => global.device_create_bind_group_layout(self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_bind_group_layout_destroy(
+    global: &Global,
+    self_id: id::BindGroupLayoutId,
+) {
+    gfx_select!(self_id => global.bind_group_layout_destroy(self_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_create_pipeline_layout(
+    global: &Global,
+    self_id: id::DeviceId,
+    desc: &core::binding_model::PipelineLayoutDescriptor,
+    new_id: id::PipelineLayoutId,
+) {
+    gfx_select!(self_id => global.device_create_pipeline_layout(self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_pipeline_layout_destroy(
+    global: &Global,
+    self_id: id::PipelineLayoutId,
+) {
+    gfx_select!(self_id => global.pipeline_layout_destroy(self_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_create_bind_group(
+    global: &Global,
+    self_id: id::DeviceId,
+    desc: &core::binding_model::BindGroupDescriptor,
+    new_id: id::BindGroupId,
+) {
+    gfx_select!(self_id => global.device_create_bind_group(self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_bind_group_destroy(
+    global: &Global,
+    self_id: id::BindGroupId,
+) {
+    gfx_select!(self_id => global.bind_group_destroy(self_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_create_shader_module(
+    global: &Global,
+    self_id: id::DeviceId,
+    desc: &core::pipeline::ShaderModuleDescriptor,
+    new_id: id::ShaderModuleId,
+) {
+    gfx_select!(self_id => global.device_create_shader_module(self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_shader_module_destroy(
+    global: &Global,
+    self_id: id::ShaderModuleId,
+) {
+    gfx_select!(self_id => global.shader_module_destroy(self_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_device_create_compute_pipeline(
+    global: &Global,
+    self_id: id::DeviceId,
+    desc: &core::pipeline::ComputePipelineDescriptor,
+    new_id: id::ComputePipelineId,
+) {
+    gfx_select!(self_id => global.device_create_compute_pipeline(self_id, desc, new_id));
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_server_compute_pipeline_destroy(
+    global: &Global,
+    self_id: id::ComputePipelineId,
+) {
+    gfx_select!(self_id => global.compute_pipeline_destroy(self_id));
+}
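wgpu_server_buffer_map_read replaces the synchronous get_buffer_sub_data path by wrapping the C function pointer plus userdata into a boxed closure and feeding it to the same buffer_map_async machinery used in-process. The trampoline on its own (Status is a stand-in for BufferMapAsyncStatus):

#[repr(C)]
#[derive(Debug)]
pub enum Status {
    Success,
    Error,
}

pub type ReadCallback =
    unsafe extern "C" fn(status: Status, data: *const u8, userdata: *mut u8);

// Capture the extern fn pointer and its userdata in a closure, so the
// async core only ever deals with a boxed FnOnce.
pub fn make_read_operation(
    callback: ReadCallback,
    userdata: *mut u8,
) -> Box<dyn FnOnce(Status, *const u8)> {
    Box::new(move |status, data| unsafe { callback(status, data, userdata) })
}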