Rustfmt stable pass

Dzmitry Malyshau 2020-04-05 23:10:56 -04:00
parent 384606f2e2
commit a3aefe2535
29 changed files with 875 additions and 1066 deletions


@ -1,4 +0,0 @@
newline_style = "Native"
blank_lines_upper_bound = 2
spaces_around_ranges = true
imports_layout = "HorizontalVertical"
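The four options above come from the deleted rustfmt.toml; several of them (e.g. blank_lines_upper_bound, spaces_around_ranges, imports_layout) are unstable, nightly-only rustfmt options, so dropping the file is what lets the crate be formatted with plain stable `cargo fmt`. The rest of this commit is the resulting churn: ranges lose their surrounding spaces, import lists are merged, and long attributes and calls are re-wrapped. A minimal sketch of the range-spacing change, with a made-up function purely for illustration:

// Illustrative only, not wgpu code: the effect of dropping
// `spaces_around_ranges = true` and formatting with stable rustfmt.
fn sum_prefix(values: &[u32], count: usize) -> u32 {
    let mut total = 0;
    // The old nightly config rendered this range as `0 .. count`;
    // stable rustfmt writes `0..count`, hence the many `..` changes below.
    for i in 0..count {
        total += values[i];
    }
    total
}

fn main() {
    assert_eq!(sum_prefix(&[1, 2, 3, 4], 3), 6);
}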


@ -4,16 +4,13 @@
use crate::{
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId},
track::{DUMMY_SELECTOR, TrackerSet},
FastHashMap,
LifeGuard,
RefCount,
Stored,
track::{TrackerSet, DUMMY_SELECTOR},
FastHashMap, LifeGuard, RefCount, Stored,
};
use wgt::{BufferAddress, TextureComponentType};
use arrayvec::ArrayVec;
use gfx_descriptor::{DescriptorCounts, DescriptorSet};
use wgt::{BufferAddress, TextureComponentType};
#[cfg(feature = "serde")]
use serde_crate::{Deserialize, Serialize};
@ -21,7 +18,11 @@ use std::borrow::Borrow;
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub enum BindingType {
UniformBuffer = 0,
StorageBuffer = 1,
@ -35,7 +36,11 @@ pub enum BindingType {
#[repr(C)]
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct BindGroupLayoutEntry {
pub binding: u32,
pub visibility: wgt::ShaderStage,
@ -80,7 +85,11 @@ pub struct PipelineLayout<B: hal::Backend> {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct BufferBinding {
pub buffer: BufferId,
pub offset: BufferAddress,
@ -89,7 +98,11 @@ pub struct BufferBinding {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub enum BindingResource {
Buffer(BufferBinding),
Sampler(SamplerId),
@ -98,7 +111,11 @@ pub enum BindingResource {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct BindGroupEntry {
pub binding: u32,
pub resource: BindingResource,
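A recurring change in this file is that rustfmt now wraps the long serde `#[cfg_attr(...)]` attributes over several lines. For reference, a self-contained sketch of the same pattern on a hypothetical type (the struct and its fields are invented for illustration; the pattern assumes a `serde` Cargo feature and a dependency renamed or re-exported as `serde_crate`, matching the import at the top of this file):

#[cfg(feature = "serde")]
use serde_crate::{Deserialize, Serialize};

// Hypothetical example type, not part of wgpu: the serde derives are only
// enabled when the `serde` feature is on, and `serde(crate = "serde_crate")`
// points the generated code at the renamed serde dependency.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
    feature = "serde",
    derive(Serialize, Deserialize),
    serde(crate = "serde_crate")
)]
pub struct ExampleOffsets {
    pub x: u32,
    pub y: u32,
}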


@ -4,13 +4,7 @@
use super::CommandBuffer;
use crate::{
hub::GfxBackend,
id::DeviceId,
track::TrackerSet,
Features,
LifeGuard,
Stored,
SubmissionIndex,
hub::GfxBackend, id::DeviceId, track::TrackerSet, Features, LifeGuard, Stored, SubmissionIndex,
};
use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
@ -27,7 +21,7 @@ struct CommandPool<B: hal::Backend> {
impl<B: hal::Backend> CommandPool<B> {
fn maintain(&mut self, lowest_active_index: SubmissionIndex) {
for i in (0 .. self.pending.len()).rev() {
for i in (0..self.pending.len()).rev() {
let index = self.pending[i]
.life_guard
.submission_index
@ -55,7 +49,10 @@ impl<B: hal::Backend> CommandPool<B> {
fn allocate(&mut self) -> B::CommandBuffer {
if self.available.is_empty() {
unsafe { self.raw.allocate(20, hal::command::Level::Primary, &mut self.available) };
unsafe {
self.raw
.allocate(20, hal::command::Level::Primary, &mut self.available)
};
}
self.available.pop().unwrap()
}
@ -131,7 +128,10 @@ impl<B: hal::Backend> CommandAllocator<B> {
let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();
if pool.available.is_empty() {
unsafe { pool.raw.allocate(20, hal::command::Level::Primary, &mut pool.available) };
unsafe {
pool.raw
.allocate(20, hal::command::Level::Primary, &mut pool.available)
};
}
pool.available.pop().unwrap()


@ -10,8 +10,8 @@ use crate::{
};
use smallvec::{smallvec, SmallVec};
use wgt::DynamicOffset;
use std::slice;
use wgt::DynamicOffset;
pub const DEFAULT_BIND_GROUPS: usize = 4;
type BindGroupMask = u8;
@ -44,12 +44,7 @@ impl<'a> Iterator for FollowUpIter<'a> {
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
.and_then(|entry| {
Some((
entry.actual_value()?,
entry.dynamic_offsets.as_slice(),
))
})
.and_then(|entry| Some((entry.actual_value()?, entry.dynamic_offsets.as_slice())))
}
}
@ -151,7 +146,7 @@ impl Binder {
}
pub(crate) fn reset_expectations(&mut self, length: usize) {
for entry in self.entries[length ..].iter_mut() {
for entry in self.entries[length..].iter_mut() {
entry.expected_layout_id = None;
}
}
@ -185,8 +180,8 @@ impl Binder {
Some((
self.pipeline_layout_id?,
FollowUpIter {
iter: self.entries[index + 1 .. end].iter(),
}
iter: self.entries[index + 1..end].iter(),
},
))
} else {
log::trace!("\t\tskipping above compatible {}", compatible_count);


@ -5,17 +5,16 @@
use crate::{
command::{
bind::{Binder, LayoutChange},
CommandBuffer,
PhantomSlice,
CommandBuffer, PhantomSlice,
},
device::all_buffer_stages,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
};
use wgt::{BufferAddress, BufferUsage, DynamicOffset, BIND_BUFFER_ALIGNMENT};
use hal::command::CommandBuffer as _;
use peek_poke::{Peek, PeekPoke, Poke};
use wgt::{BufferAddress, BufferUsage, DynamicOffset, BIND_BUFFER_ALIGNMENT};
use std::iter;
@ -90,17 +89,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut pipeline_state = PipelineState::Required;
let mut peeker = raw_data.as_ptr();
let raw_data_end = unsafe {
raw_data.as_ptr().add(raw_data.len())
};
let raw_data_end = unsafe { raw_data.as_ptr().add(raw_data.len()) };
let mut command = ComputeCommand::Dispatch([0; 3]); // dummy
loop {
assert!(unsafe { peeker.add(ComputeCommand::max_size()) } <= raw_data_end);
peeker = unsafe { ComputeCommand::peek_from(peeker, &mut command) };
match command {
ComputeCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => {
ComputeCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group_id,
phantom_offsets,
} => {
let (new_peeker, offsets) = unsafe {
phantom_offsets.decode_unaligned(peeker, num_dynamic_offsets as usize, raw_data_end)
phantom_offsets.decode_unaligned(
peeker,
num_dynamic_offsets as usize,
raw_data_end,
)
};
peeker = new_peeker;
@ -136,11 +142,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_ups)) = binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
if let Some((pipeline_layout_id, follow_ups)) =
binder.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_ups.clone().map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()));
let bind_groups = iter::once(bind_group.raw.raw()).chain(
follow_ups
.clone()
.map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
);
unsafe {
raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
@ -156,7 +165,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
ComputeCommand::SetPipeline(pipeline_id) => {
pipeline_state = PipelineState::Set;
let pipeline = cmb.trackers
let pipeline = cmb
.trackers
.compute_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.unwrap();
@ -169,8 +179,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if binder.pipeline_layout_id != Some(pipeline.layout_id) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
binder.pipeline_layout_id = Some(pipeline.layout_id);
binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
binder.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in binder
@ -200,13 +209,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
ComputeCommand::Dispatch(groups) => {
assert_eq!(pipeline_state, PipelineState::Set, "Dispatch error: Pipeline is missing");
assert_eq!(
pipeline_state,
PipelineState::Set,
"Dispatch error: Pipeline is missing"
);
unsafe {
raw.dispatch(groups);
}
}
ComputeCommand::DispatchIndirect { buffer_id, offset } => {
assert_eq!(pipeline_state, PipelineState::Set, "Dispatch error: Pipeline is missing");
assert_eq!(
pipeline_state,
PipelineState::Set,
"Dispatch error: Pipeline is missing"
);
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
buffer_id,
@ -219,7 +236,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
unsafe {
raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
all_buffer_stages()..all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
@ -234,15 +251,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub mod compute_ffi {
use super::{
ComputeCommand,
super::{PhantomSlice, RawPass},
ComputeCommand,
};
use crate::{
id,
RawString,
};
use wgt::{BufferAddress, DynamicOffset};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use wgt::{BufferAddress, DynamicOffset};
/// # Safety
///
@ -264,9 +278,7 @@ use wgt::{BufferAddress, DynamicOffset};
bind_group_id,
phantom_offsets: PhantomSlice::default(),
});
pass.encode_slice(
slice::from_raw_parts(offsets, offset_length),
);
pass.encode_slice(slice::from_raw_parts(offsets, offset_length));
}
#[no_mangle]
@ -293,24 +305,16 @@ use wgt::{BufferAddress, DynamicOffset};
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.encode(&ComputeCommand::DispatchIndirect {
buffer_id,
offset,
});
pass.encode(&ComputeCommand::DispatchIndirect { buffer_id, offset });
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_push_debug_group(
_pass: &mut RawPass,
_label: RawString,
) {
pub extern "C" fn wgpu_compute_pass_push_debug_group(_pass: &mut RawPass, _label: RawString) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(
_pass: &mut RawPass,
) {
pub extern "C" fn wgpu_compute_pass_pop_debug_group(_pass: &mut RawPass) {
//TODO
}


@ -14,30 +14,17 @@ pub use self::render::*;
pub use self::transfer::*;
use crate::{
device::{
MAX_COLOR_TARGETS,
all_buffer_stages,
all_image_stages,
},
device::{all_buffer_stages, all_image_stages, MAX_COLOR_TARGETS},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id,
resource::{Buffer, Texture},
track::TrackerSet,
Features,
LifeGuard,
Stored,
Features, LifeGuard, Stored,
};
use peek_poke::PeekPoke;
use std::{
marker::PhantomData,
mem,
ptr,
slice,
thread::ThreadId,
};
use std::{marker::PhantomData, mem, ptr, slice, thread::ThreadId};
#[derive(Clone, Copy, Debug, PeekPoke)]
struct PhantomSlice<T>(PhantomData<T>);
@ -50,7 +37,10 @@ impl<T> Default for PhantomSlice<T> {
impl<T> PhantomSlice<T> {
unsafe fn decode_unaligned<'a>(
self, pointer: *const u8, count: usize, bound: *const u8
self,
pointer: *const u8,
count: usize,
bound: *const u8,
) -> (*const u8, &'a [T]) {
let align_offset = pointer.align_offset(mem::align_of::<T>());
let aligned = pointer.add(align_offset);
@ -70,10 +60,7 @@ pub struct RawPass {
}
impl RawPass {
fn from_vec<T>(
mut vec: Vec<T>,
encoder_id: id::CommandEncoderId,
) -> Self {
fn from_vec<T>(mut vec: Vec<T>, encoder_id: id::CommandEncoderId) -> Self {
let ptr = vec.as_mut_ptr() as *mut u8;
let capacity = vec.capacity() * mem::size_of::<T>();
mem::forget(vec);
@ -89,9 +76,7 @@ impl RawPass {
///
/// The last command is provided, yet the encoder
/// is guaranteed to have exactly `C::max_size()` space for it.
unsafe fn finish<C: peek_poke::Poke>(
&mut self, command: C
) {
unsafe fn finish<C: peek_poke::Poke>(&mut self, command: C) {
self.ensure_extra_size(C::max_size());
let extended_end = self.data.add(C::max_size());
let end = command.poke_into(self.data);
@ -169,30 +154,26 @@ impl<B: GfxBackend> CommandBuffer<B> {
debug_assert_eq!(B::VARIANT, base.backend());
debug_assert_eq!(B::VARIANT, head.backend());
let buffer_barriers = base
.buffers
.merge_replace(&head.buffers)
.map(|pending| {
let buf = &buffer_guard[pending.id];
pending.into_hal(buf)
});
let texture_barriers = base
.textures
.merge_replace(&head.textures)
.map(|pending| {
let tex = &texture_guard[pending.id];
pending.into_hal(tex)
});
let buffer_barriers = base.buffers.merge_replace(&head.buffers).map(|pending| {
let buf = &buffer_guard[pending.id];
pending.into_hal(buf)
});
let texture_barriers = base.textures.merge_replace(&head.textures).map(|pending| {
let tex = &texture_guard[pending.id];
pending.into_hal(tex)
});
base.views.merge_extend(&head.views).unwrap();
base.bind_groups.merge_extend(&head.bind_groups).unwrap();
base.samplers.merge_extend(&head.samplers).unwrap();
base.compute_pipes.merge_extend(&head.compute_pipes).unwrap();
base.compute_pipes
.merge_extend(&head.compute_pipes)
.unwrap();
base.render_pipes.merge_extend(&head.render_pipes).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
raw.pipeline_barrier(
stages .. stages,
stages..stages,
hal::memory::Dependencies::empty(),
buffer_barriers.chain(texture_barriers),
);
@ -258,7 +239,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
comb.is_recording = false;
// stop tracking the swapchain image, if used
if let Some((ref sc_id, _)) = comb.used_swap_chain {
let view_id = swap_chain_guard[sc_id.value].acquired_view_id
let view_id = swap_chain_guard[sc_id.value]
.acquired_view_id
.as_ref()
.expect("Used swap chain frame has already presented");
comb.trackers.views.remove(view_id.value);


@ -5,19 +5,12 @@
use crate::{
command::{
bind::{Binder, LayoutChange},
PassComponent,
PhantomSlice,
RawRenderPassColorAttachmentDescriptor,
RawRenderPassDepthStencilAttachmentDescriptor,
RawRenderTargets,
PassComponent, PhantomSlice, RawRenderPassColorAttachmentDescriptor,
RawRenderPassDepthStencilAttachmentDescriptor, RawRenderTargets,
},
conv,
device::{
FramebufferKey,
RenderPassContext,
RenderPassKey,
MAX_VERTEX_BUFFERS,
MAX_COLOR_TARGETS,
FramebufferKey, RenderPassContext, RenderPassKey, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS,
},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
@ -27,31 +20,17 @@ use crate::{
Stored,
};
use wgt::{
BufferAddress,
BufferUsage,
Color,
DynamicOffset,
IndexFormat,
InputStepMode,
LoadOp,
RenderPassColorAttachmentDescriptorBase,
RenderPassDepthStencilAttachmentDescriptorBase,
TextureUsage,
BIND_BUFFER_ALIGNMENT
};
use arrayvec::ArrayVec;
use hal::command::CommandBuffer as _;
use peek_poke::{Peek, PeekPoke, Poke};
use wgt::{
BufferAddress, BufferUsage, Color, DynamicOffset, IndexFormat, InputStepMode, LoadOp,
RenderPassColorAttachmentDescriptorBase, RenderPassDepthStencilAttachmentDescriptorBase,
TextureUsage, BIND_BUFFER_ALIGNMENT,
};
use std::{
borrow::Borrow,
collections::hash_map::Entry,
iter,
marker::PhantomData,
mem,
ops::Range,
slice,
borrow::Borrow, collections::hash_map::Entry, iter, marker::PhantomData, mem, ops::Range, slice,
};
pub type RenderPassColorAttachmentDescriptor =
@ -156,10 +135,10 @@ impl super::RawPass {
};
}
for (color, at) in targets.colors
.iter_mut()
.zip(slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length))
{
for (color, at) in targets.colors.iter_mut().zip(slice::from_raw_parts(
desc.color_attachments,
desc.color_attachments_length,
)) {
*color = RawRenderPassColorAttachmentDescriptor {
attachment: at.attachment.into_raw(),
resolve_target: at.resolve_target.map_or(0, |id| id.into_raw()),
@ -335,25 +314,22 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (view_guard, _) = hub.texture_views.read(&mut token);
let mut peeker = raw_data.as_ptr();
let raw_data_end = unsafe {
raw_data.as_ptr().add(raw_data.len())
};
let raw_data_end = unsafe { raw_data.as_ptr().add(raw_data.len()) };
let mut targets: RawRenderTargets = unsafe { mem::zeroed() };
assert!(unsafe { peeker.add(RawRenderTargets::max_size()) <= raw_data_end });
peeker = unsafe { RawRenderTargets::peek_from(peeker, &mut targets) };
let color_attachments = targets.colors
let color_attachments = targets
.colors
.iter()
.take_while(|at| at.attachment != 0)
.map(|at| {
RenderPassColorAttachmentDescriptor {
attachment: id::TextureViewId::from_raw(at.attachment).unwrap(),
resolve_target: id::TextureViewId::from_raw(at.resolve_target),
load_op: at.component.load_op,
store_op: at.component.store_op,
clear_color: at.component.clear_value,
}
.map(|at| RenderPassColorAttachmentDescriptor {
attachment: id::TextureViewId::from_raw(at.attachment).unwrap(),
resolve_target: id::TextureViewId::from_raw(at.resolve_target),
load_op: at.component.load_op,
store_op: at.component.store_op,
clear_color: at.component.clear_value,
})
.collect::<ArrayVec<[_; MAX_COLOR_TARGETS]>>();
let depth_stencil_attachment_body;
@ -401,7 +377,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&'a hal::image::SubresourceRange,
Option<TextureUsage>,
);
let mut output_attachments = ArrayVec::<[OutputAttachment; MAX_TOTAL_ATTACHMENTS]>::new();
let mut output_attachments =
ArrayVec::<[OutputAttachment; MAX_TOTAL_ATTACHMENTS]>::new();
log::trace!(
"Encoding render pass begin in command buffer {:?}",
@ -427,17 +404,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
// Using render pass for transition.
let consistent_usage = base_trackers.textures.query(
source_id.value,
view.range.clone(),
);
let consistent_usage = base_trackers
.textures
.query(source_id.value, view.range.clone());
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
).1,
Some(usage) => {
conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
)
.1
}
None => hal::image::Layout::DepthStencilAttachmentOptimal,
};
@ -449,7 +428,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
at.stencil_load_op,
at.stencil_store_op,
),
layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal,
layouts: old_layout..hal::image::Layout::DepthStencilAttachmentOptimal,
})
}
None => None,
@ -469,25 +448,25 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
view.samples, sample_count,
"All attachments must have the same sample_count"
);
let first_use = trackers.views.init(
at.attachment,
view.life_guard.add_ref(),
PhantomData,
).is_ok();
let first_use = trackers
.views
.init(at.attachment, view.life_guard.add_ref(), PhantomData)
.is_ok();
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let consistent_usage = base_trackers.textures.query(
source_id.value,
view.range.clone(),
);
let consistent_usage = base_trackers
.textures
.query(source_id.value, view.range.clone());
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => hal::image::Layout::ColorAttachmentOptimal,
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
old_layout..hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { ref source_id, .. } => {
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
@ -503,7 +482,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} else {
end
};
start .. end
start..end
}
};
@ -516,35 +495,32 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
for resolve_target in color_attachments
.iter()
.flat_map(|at| at.resolve_target)
{
for resolve_target in color_attachments.iter().flat_map(|at| at.resolve_target) {
let view = &view_guard[resolve_target];
assert_eq!(extent, Some(view.extent));
assert_eq!(
view.samples, 1,
"All resolve_targets must have a sample_count of 1"
);
let first_use = trackers.views.init(
resolve_target,
view.life_guard.add_ref(),
PhantomData,
).is_ok();
let first_use = trackers
.views
.init(resolve_target, view.life_guard.add_ref(), PhantomData)
.is_ok();
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let consistent_usage = base_trackers.textures.query(
source_id.value,
view.range.clone(),
);
let consistent_usage = base_trackers
.textures
.query(source_id.value, view.range.clone());
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => hal::image::Layout::ColorAttachmentOptimal,
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
old_layout..hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { ref source_id, .. } => {
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
@ -560,7 +536,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} else {
end
};
start .. end
start..end
}
};
@ -650,7 +626,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
let subpass = hal::pass::SubpassDesc {
colors: &color_ids[.. color_attachments.len()],
colors: &color_ids[..color_attachments.len()],
resolves: &resolve_ids,
depth_stencil: depth_stencil_attachment.map(|_| &depth_id),
inputs: &[],
@ -791,7 +767,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
0,
iter::once(hal::pso::Viewport {
rect,
depth: 0.0 .. 1.0,
depth: 0.0..1.0,
}),
);
}
@ -838,9 +814,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
assert!(unsafe { peeker.add(RenderCommand::max_size()) } <= raw_data_end);
peeker = unsafe { RenderCommand::peek_from(peeker, &mut command) };
match command {
RenderCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => {
RenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group_id,
phantom_offsets,
} => {
let (new_peeker, offsets) = unsafe {
phantom_offsets.decode_unaligned(peeker, num_dynamic_offsets as usize, raw_data_end)
phantom_offsets.decode_unaligned(
peeker,
num_dynamic_offsets as usize,
raw_data_end,
)
};
peeker = new_peeker;
@ -864,11 +849,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trackers.merge_extend(&bind_group.used);
if let Some((pipeline_layout_id, follow_ups)) = state.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_ups.clone().map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()));
if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
index as usize,
bind_group_id,
bind_group,
offsets,
) {
let bind_groups = iter::once(bind_group.raw.raw()).chain(
follow_ups
.clone()
.map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
);
unsafe {
raw.bind_graphics_descriptor_sets(
&&pipeline_layout_guard[pipeline_layout_id].raw,
@ -877,7 +868,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
offsets
.iter()
.chain(follow_ups.flat_map(|(_, offsets)| offsets))
.cloned()
.cloned(),
);
}
};
@ -898,9 +889,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
"The render pipeline and renderpass have mismatching sample_count"
);
state.blend_color
state
.blend_color
.require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR));
state.stencil_reference
state
.stencil_reference
.require(pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE));
unsafe {
@ -911,7 +904,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if state.binder.pipeline_layout_id != Some(pipeline.layout_id) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
state.binder.pipeline_layout_id = Some(pipeline.layout_id);
state.binder
state
.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
@ -968,30 +962,35 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
// Update vertex buffer limits
for (vbs, &(stride, rate)) in state
.vertex
.inputs
.iter_mut()
.zip(&pipeline.vertex_strides)
for (vbs, &(stride, rate)) in
state.vertex.inputs.iter_mut().zip(&pipeline.vertex_strides)
{
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in state.vertex.inputs[pipeline.vertex_strides.len() ..].iter_mut() {
for vbs in state.vertex.inputs[pipeline.vertex_strides.len()..].iter_mut() {
vbs.stride = 0;
vbs.rate = InputStepMode::Vertex;
}
state.vertex.update_limits();
}
RenderCommand::SetIndexBuffer { buffer_id, offset, size } => {
RenderCommand::SetIndexBuffer {
buffer_id,
offset,
size,
} => {
let buffer = trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDEX));
let end = if size != 0 { offset + size } else { buffer.size };
state.index.bound_buffer_view = Some((buffer_id, offset .. end));
let end = if size != 0 {
offset + size
} else {
buffer.size
};
state.index.bound_buffer_view = Some((buffer_id, offset..end));
state.index.update_limit();
let view = hal::buffer::IndexBufferView {
@ -1007,7 +1006,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.bind_index_buffer(view);
}
}
RenderCommand::SetVertexBuffer { slot, buffer_id, offset, size } => {
RenderCommand::SetVertexBuffer {
slot,
buffer_id,
offset,
size,
} => {
let buffer = trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::VERTEX)
@ -1040,7 +1044,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.set_stencil_reference(hal::pso::Face::all(), value);
}
}
RenderCommand::SetViewport { ref rect, depth_min, depth_max } => {
RenderCommand::SetViewport {
ref rect,
depth_min,
depth_max,
} => {
use std::{convert::TryFrom, i16};
let r = hal::pso::Rect {
x: i16::try_from(rect.x.round() as i64).unwrap_or(0),
@ -1053,7 +1061,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
0,
iter::once(hal::pso::Viewport {
rect: r,
depth: depth_min .. depth_max,
depth: depth_min..depth_max,
}),
);
}
@ -1067,13 +1075,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
h: i16::try_from(rect.h).unwrap_or(i16::MAX),
};
unsafe {
raw.set_scissors(
0,
iter::once(r),
);
raw.set_scissors(0, iter::once(r));
}
}
RenderCommand::Draw { vertex_count, instance_count, first_vertex, first_instance } => {
RenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => {
state.is_ready().unwrap();
assert!(
first_vertex + vertex_count <= state.vertex.vertex_limit,
@ -1086,12 +1096,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
unsafe {
raw.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
first_vertex..first_vertex + vertex_count,
first_instance..first_instance + instance_count,
);
}
}
RenderCommand::DrawIndexed { index_count, instance_count, first_index, base_vertex, first_instance } => {
RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
} => {
state.is_ready().unwrap();
//TODO: validate that base_vertex + max_index() is within the provided range
@ -1106,9 +1122,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
unsafe {
raw.draw_indexed(
first_index .. first_index + index_count,
first_index..first_index + index_count,
base_vertex,
first_instance .. first_instance + instance_count,
first_instance..first_instance + instance_count,
);
}
}
@ -1117,12 +1133,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = trackers
.buffers
.use_extend(
&*buffer_guard,
buffer_id,
(),
BufferUsage::INDIRECT,
)
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDIRECT)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
@ -1135,12 +1146,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = trackers
.buffers
.use_extend(
&*buffer_guard,
buffer_id,
(),
BufferUsage::INDIRECT,
)
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDIRECT)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
@ -1169,15 +1175,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub mod render_ffi {
use super::{
RenderCommand,
super::{PhantomSlice, RawPass, Rect},
RenderCommand,
};
use crate::{
id,
RawString,
};
use wgt::{BufferAddress, Color, DynamicOffset};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use wgt::{BufferAddress, Color, DynamicOffset};
/// # Safety
///
@ -1199,9 +1202,7 @@ pub mod render_ffi {
bind_group_id,
phantom_offsets: PhantomSlice::default(),
});
pass.encode_slice(
slice::from_raw_parts(offsets, offset_length),
);
pass.encode_slice(slice::from_raw_parts(offsets, offset_length));
}
#[no_mangle]
@ -1243,10 +1244,7 @@ pub mod render_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_blend_color(
pass: &mut RawPass,
color: &Color,
) {
pub unsafe extern "C" fn wgpu_render_pass_set_blend_color(pass: &mut RawPass, color: &Color) {
pass.encode(&RenderCommand::SetBlendColor(*color));
}
@ -1326,10 +1324,7 @@ pub mod render_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.encode(&RenderCommand::DrawIndirect {
buffer_id,
offset,
});
pass.encode(&RenderCommand::DrawIndirect { buffer_id, offset });
}
#[no_mangle]
@ -1338,10 +1333,7 @@ pub mod render_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.encode(&RenderCommand::DrawIndexedIndirect {
buffer_id,
offset,
});
pass.encode(&RenderCommand::DrawIndexedIndirect { buffer_id, offset });
}
#[no_mangle]
@ -1354,25 +1346,17 @@ pub mod render_ffi {
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_push_debug_group(
_pass: &mut RawPass,
_label: RawString,
) {
pub extern "C" fn wgpu_render_pass_push_debug_group(_pass: &mut RawPass, _label: RawString) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_pop_debug_group(
_pass: &mut RawPass,
) {
pub extern "C" fn wgpu_render_pass_pop_debug_group(_pass: &mut RawPass) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_insert_debug_marker(
_pass: &mut RawPass,
_label: RawString,
) {
pub extern "C" fn wgpu_render_pass_insert_debug_marker(_pass: &mut RawPass, _label: RawString) {
//TODO
}


@ -9,8 +9,8 @@ use crate::{
id::{BufferId, CommandEncoderId, TextureId},
};
use wgt::{BufferAddress, BufferUsage, Extent3d, Origin3d, TextureUsage};
use hal::command::CommandBuffer as _;
use wgt::{BufferAddress, BufferUsage, Extent3d, Origin3d, TextureUsage};
use std::iter;
@ -47,8 +47,8 @@ impl TextureCopyView {
{
hal::image::SubresourceRange {
aspects,
levels: level .. level + 1,
layers: layer .. layer + 1,
levels: level..level + 1,
layers: layer..layer + 1,
}
}
}
@ -63,7 +63,7 @@ impl TextureCopyView {
hal::image::SubresourceLayers {
aspects,
level: self.mip_level as hal::image::Level,
layers: layer .. layer + 1,
layers: layer..layer + 1,
}
}
}
@ -113,7 +113,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
all_buffer_stages()..all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
@ -172,7 +172,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
stages..stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barriers),
);
@ -236,7 +236,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
stages..stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barrier),
);
@ -297,7 +297,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_image_stages() .. all_image_stages(),
all_image_stages()..all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);


@ -4,32 +4,16 @@
use crate::{binding_model, Features};
use wgt::{
BlendDescriptor,
BlendFactor,
Color,
ColorStateDescriptor,
ColorWrite,
CompareFunction,
CullMode,
DepthStencilStateDescriptor,
Extent3d,
FrontFace,
IndexFormat,
Origin3d,
PrimitiveTopology,
RasterizationStateDescriptor,
StencilOperation,
StencilStateFaceDescriptor,
TextureFormat,
VertexFormat,
BlendDescriptor, BlendFactor, Color, ColorStateDescriptor, ColorWrite, CompareFunction,
CullMode, DepthStencilStateDescriptor, Extent3d, FrontFace, IndexFormat, Origin3d,
PrimitiveTopology, RasterizationStateDescriptor, StencilOperation, StencilStateFaceDescriptor,
TextureFormat, VertexFormat,
};
pub fn map_buffer_usage(
usage: wgt::BufferUsage,
) -> (hal::buffer::Usage, hal::memory::Properties) {
use wgt::BufferUsage as W;
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
use hal::buffer::Usage as U;
use hal::memory::Properties as P;
use wgt::BufferUsage as W;
let mut hal_memory = P::empty();
if usage.contains(W::MAP_READ) {
@ -69,8 +53,8 @@ pub fn map_texture_usage(
usage: wgt::TextureUsage,
aspects: hal::format::Aspects,
) -> hal::image::Usage {
use wgt::TextureUsage as W;
use hal::image::Usage as U;
use wgt::TextureUsage as W;
let mut value = U::empty();
if usage.contains(W::COPY_SRC) {
@ -97,73 +81,46 @@ pub fn map_texture_usage(
value
}
pub fn map_binding_type(
binding: &binding_model::BindGroupLayoutEntry,
) -> hal::pso::DescriptorType {
pub fn map_binding_type(binding: &binding_model::BindGroupLayoutEntry) -> hal::pso::DescriptorType {
use crate::binding_model::BindingType as Bt;
use hal::pso;
match binding.ty {
Bt::UniformBuffer => {
pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
}
}
Bt::StorageBuffer => {
pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage {
read_only: false,
},
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
}
}
Bt::ReadonlyStorageBuffer => {
pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage {
read_only: false,
},
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
}
}
Bt::Sampler |
Bt::ComparisonSampler => {
pso::DescriptorType::Sampler
}
Bt::SampledTexture => {
pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
}
}
Bt::ReadonlyStorageTexture => {
pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Storage {
read_only: false,
},
}
}
Bt::WriteonlyStorageTexture => {
pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Storage {
read_only: true,
},
}
}
Bt::UniformBuffer => pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
},
Bt::StorageBuffer => pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: false },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
},
Bt::ReadonlyStorageBuffer => pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: false },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: binding.has_dynamic_offset,
},
},
Bt::Sampler | Bt::ComparisonSampler => pso::DescriptorType::Sampler,
Bt::SampledTexture => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
Bt::ReadonlyStorageTexture => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Storage { read_only: false },
},
Bt::WriteonlyStorageTexture => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Storage { read_only: true },
},
}
}
pub fn map_shader_stage_flags(
shader_stage_flags: wgt::ShaderStage,
) -> hal::pso::ShaderStageFlags {
use wgt::ShaderStage as Ss;
pub fn map_shader_stage_flags(shader_stage_flags: wgt::ShaderStage) -> hal::pso::ShaderStageFlags {
use hal::pso::ShaderStageFlags as H;
use wgt::ShaderStage as Ss;
let mut value = H::empty();
if shader_stage_flags.contains(Ss::VERTEX) {
@ -194,11 +151,9 @@ pub fn map_extent(extent: Extent3d) -> hal::image::Extent {
}
}
pub fn map_primitive_topology(
primitive_topology: PrimitiveTopology,
) -> hal::pso::Primitive {
use wgt::PrimitiveTopology as Pt;
pub fn map_primitive_topology(primitive_topology: PrimitiveTopology) -> hal::pso::Primitive {
use hal::pso::Primitive as H;
use wgt::PrimitiveTopology as Pt;
match primitive_topology {
Pt::PointList => H::PointList,
Pt::LineList => H::LineList,
@ -208,9 +163,7 @@ pub fn map_primitive_topology(
}
}
pub fn map_color_state_descriptor(
desc: &ColorStateDescriptor,
) -> hal::pso::ColorBlendDesc {
pub fn map_color_state_descriptor(desc: &ColorStateDescriptor) -> hal::pso::ColorBlendDesc {
let color_mask = desc.write_mask;
let blend_state = if desc.color_blend != BlendDescriptor::REPLACE
|| desc.alpha_blend != BlendDescriptor::REPLACE
@ -229,8 +182,8 @@ pub fn map_color_state_descriptor(
}
fn map_color_write_flags(flags: ColorWrite) -> hal::pso::ColorMask {
use wgt::ColorWrite as Cw;
use hal::pso::ColorMask as H;
use wgt::ColorWrite as Cw;
let mut value = H::empty();
if flags.contains(Cw::RED) {
@ -249,8 +202,8 @@ fn map_color_write_flags(flags: ColorWrite) -> hal::pso::ColorMask {
}
fn map_blend_descriptor(blend_desc: &BlendDescriptor) -> hal::pso::BlendOp {
use wgt::BlendOperation as Bo;
use hal::pso::BlendOp as H;
use wgt::BlendOperation as Bo;
match blend_desc.operation {
Bo::Add => H::Add {
src: map_blend_factor(blend_desc.src_factor),
@ -270,8 +223,8 @@ fn map_blend_descriptor(blend_desc: &BlendDescriptor) -> hal::pso::BlendOp {
}
fn map_blend_factor(blend_factor: BlendFactor) -> hal::pso::Factor {
use wgt::BlendFactor as Bf;
use hal::pso::Factor as H;
use wgt::BlendFactor as Bf;
match blend_factor {
Bf::Zero => H::Zero,
Bf::One => H::One,
@ -293,11 +246,10 @@ pub fn map_depth_stencil_state_descriptor(
desc: &DepthStencilStateDescriptor,
) -> hal::pso::DepthStencilDesc {
hal::pso::DepthStencilDesc {
depth: if desc.depth_write_enabled
|| desc.depth_compare != CompareFunction::Always
{
depth: if desc.depth_write_enabled || desc.depth_compare != CompareFunction::Always {
Some(hal::pso::DepthTest {
fun: map_compare_function(desc.depth_compare).expect("DepthStencilStateDescriptor has undefined compare function"),
fun: map_compare_function(desc.depth_compare)
.expect("DepthStencilStateDescriptor has undefined compare function"),
write: desc.depth_write_enabled,
})
} else {
@ -328,11 +280,10 @@ pub fn map_depth_stencil_state_descriptor(
}
}
fn map_stencil_face(
stencil_state_face_desc: &StencilStateFaceDescriptor,
) -> hal::pso::StencilFace {
fn map_stencil_face(stencil_state_face_desc: &StencilStateFaceDescriptor) -> hal::pso::StencilFace {
hal::pso::StencilFace {
fun: map_compare_function(stencil_state_face_desc.compare).expect("StencilStateFaceDescriptor has undefined compare function"),
fun: map_compare_function(stencil_state_face_desc.compare)
.expect("StencilStateFaceDescriptor has undefined compare function"),
op_fail: map_stencil_operation(stencil_state_face_desc.fail_op),
op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op),
op_pass: map_stencil_operation(stencil_state_face_desc.pass_op),
@ -340,8 +291,8 @@ fn map_stencil_face(
}
pub fn map_compare_function(compare_function: CompareFunction) -> Option<hal::pso::Comparison> {
use wgt::CompareFunction as Cf;
use hal::pso::Comparison as H;
use wgt::CompareFunction as Cf;
match compare_function {
Cf::Undefined => None,
Cf::Never => Some(H::Never),
@ -356,8 +307,8 @@ pub fn map_compare_function(compare_function: CompareFunction) -> Option<hal::ps
}
fn map_stencil_operation(stencil_operation: StencilOperation) -> hal::pso::StencilOp {
use wgt::StencilOperation as So;
use hal::pso::StencilOp as H;
use wgt::StencilOperation as So;
match stencil_operation {
So::Keep => H::Keep,
So::Zero => H::Zero,
@ -374,8 +325,8 @@ pub(crate) fn map_texture_format(
texture_format: TextureFormat,
features: Features,
) -> hal::format::Format {
use wgt::TextureFormat as Tf;
use hal::format::Format as H;
use wgt::TextureFormat as Tf;
match texture_format {
// Normal 8 bit formats
Tf::R8Unorm => H::R8Unorm,
@ -444,8 +395,8 @@ pub(crate) fn map_texture_format(
}
pub fn map_vertex_format(vertex_format: VertexFormat) -> hal::format::Format {
use wgt::VertexFormat as Vf;
use hal::format::Format as H;
use wgt::VertexFormat as Vf;
match vertex_format {
Vf::Uchar2 => H::Rg8Uint,
Vf::Uchar4 => H::Rgba8Uint,
@ -495,8 +446,8 @@ pub fn map_texture_dimension_size(
array_size: u32,
sample_size: u32,
) -> hal::image::Kind {
use wgt::TextureDimension::*;
use hal::image::Kind as H;
use wgt::TextureDimension::*;
match dimension {
D1 => {
assert_eq!(height, 1);
@ -531,11 +482,9 @@ pub fn map_texture_dimension_size(
}
}
pub fn map_texture_view_dimension(
dimension: wgt::TextureViewDimension,
) -> hal::image::ViewKind {
use wgt::TextureViewDimension::*;
pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
use hal::image::ViewKind as H;
use wgt::TextureViewDimension::*;
match dimension {
D1 => H::D1,
D2 => H::D2,
@ -547,8 +496,8 @@ pub fn map_texture_view_dimension(
}
pub fn map_buffer_state(usage: wgt::BufferUsage) -> hal::buffer::State {
use wgt::BufferUsage as W;
use hal::buffer::Access as A;
use wgt::BufferUsage as W;
let mut access = A::empty();
if usage.contains(W::COPY_SRC) {
@ -580,8 +529,8 @@ pub fn map_texture_state(
usage: wgt::TextureUsage,
aspects: hal::format::Aspects,
) -> hal::image::State {
use wgt::TextureUsage as W;
use hal::image::{Access as A, Layout as L};
use wgt::TextureUsage as W;
let is_color = aspects.contains(hal::format::Aspects::COLOR);
let layout = match usage {
@ -619,10 +568,7 @@ pub fn map_texture_state(
(access, layout)
}
pub fn map_load_store_ops(
load: wgt::LoadOp,
store: wgt::StoreOp,
) -> hal::pass::AttachmentOps {
pub fn map_load_store_ops(load: wgt::LoadOp, store: wgt::StoreOp) -> hal::pass::AttachmentOps {
hal::pass::AttachmentOps {
load: match load {
wgt::LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear,
@ -668,8 +614,8 @@ pub fn map_filter(filter: wgt::FilterMode) -> hal::image::Filter {
}
pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode {
use wgt::AddressMode as Am;
use hal::image::WrapMode as W;
use wgt::AddressMode as Am;
match address {
Am::ClampToEdge => W::Clamp,
Am::Repeat => W::Tile,


@ -4,25 +4,18 @@
use crate::{
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
resource,
id, resource,
track::TrackerSet,
FastHashMap,
Stored,
RefCount,
SubmissionIndex,
FastHashMap, RefCount, Stored, SubmissionIndex,
};
use copyless::VecHelper as _;
use hal::device::Device as _;
use parking_lot::Mutex;
use gfx_descriptor::{DescriptorAllocator, DescriptorSet};
use gfx_memory::{Heaps, MemoryBlock};
use hal::device::Device as _;
use parking_lot::Mutex;
use std::{
sync::atomic::Ordering,
};
use std::sync::atomic::Ordering;
const CLEANUP_WAIT_MS: u64 = 5000;
@ -55,8 +48,10 @@ impl SuspectedResources {
self.texture_views.extend_from_slice(&other.texture_views);
self.samplers.extend_from_slice(&other.samplers);
self.bind_groups.extend_from_slice(&other.bind_groups);
self.compute_pipelines.extend_from_slice(&other.compute_pipelines);
self.render_pipelines.extend_from_slice(&other.render_pipelines);
self.compute_pipelines
.extend_from_slice(&other.compute_pipelines);
self.render_pipelines
.extend_from_slice(&other.render_pipelines);
}
}
@ -197,14 +192,12 @@ impl<B: hal::Backend> LifetimeTracker<B> {
new_suspects: &SuspectedResources,
) {
self.suspected_resources.extend(new_suspects);
self.active
.alloc()
.init(ActiveSubmission {
index,
fence,
last_resources: NonReferencedResources::new(),
mapped: Vec::new(),
});
self.active.alloc().init(ActiveSubmission {
index,
fence,
last_resources: NonReferencedResources::new(),
mapped: Vec::new(),
});
}
pub fn map(&mut self, buffer: id::BufferId, ref_count: RefCount) {
@ -238,11 +231,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
}
/// Returns the last submission index that is done.
pub fn triage_submissions(
&mut self,
device: &B::Device,
force_wait: bool,
) -> SubmissionIndex {
pub fn triage_submissions(&mut self, device: &B::Device, force_wait: bool) -> SubmissionIndex {
if force_wait {
self.wait_idle(device);
}
@ -259,7 +248,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
return 0;
};
for a in self.active.drain(.. done_count) {
for a in self.active.drain(..done_count) {
log::trace!("Active submission {} is done", a.index);
self.free_resources.extend(a.last_resources);
self.ready_to_map.extend(a.mapped);
@ -278,14 +267,9 @@ impl<B: hal::Backend> LifetimeTracker<B> {
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
) {
unsafe {
self.free_resources.clean(
device,
heaps_mutex,
descriptor_allocator_mutex,
);
descriptor_allocator_mutex
.lock()
.cleanup(device);
self.free_resources
.clean(device, heaps_mutex, descriptor_allocator_mutex);
descriptor_allocator_mutex.lock().cleanup(device);
}
}
}
@ -309,17 +293,26 @@ impl<B: GfxBackend> LifetimeTracker<B> {
let res = guard.remove(id).unwrap();
assert!(res.used.bind_groups.is_empty());
self.suspected_resources.buffers.extend(res.used.buffers.used());
self.suspected_resources.textures.extend(res.used.textures.used());
self.suspected_resources.texture_views.extend(res.used.views.used());
self.suspected_resources.samplers.extend(res.used.samplers.used());
self.suspected_resources
.buffers
.extend(res.used.buffers.used());
self.suspected_resources
.textures
.extend(res.used.textures.used());
self.suspected_resources
.texture_views
.extend(res.used.views.used());
self.suspected_resources
.samplers
.extend(res.used.samplers.used());
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.desc_sets.push(res.raw);
.desc_sets
.push(res.raw);
}
}
}
@ -346,7 +339,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.image_views.push((id, raw));
.image_views
.push((id, raw));
}
}
}
@ -365,7 +359,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images.push((res.raw, res.memory));
.images
.push((res.raw, res.memory));
}
}
}
@ -384,7 +379,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.samplers.push(res.raw);
.samplers
.push(res.raw);
}
}
}
@ -404,7 +400,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.buffers.push((res.raw, res.memory));
.buffers
.push((res.raw, res.memory));
}
}
}
@ -423,7 +420,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.compute_pipes.push(res.raw);
.compute_pipes
.push(res.raw);
}
}
}
@ -442,7 +440,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.graphics_pipes.push(res.raw);
.graphics_pipes
.push(res.raw);
}
}
}
@ -517,7 +516,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
// Between all attachments, we need the smallest index, because that's the last
// time this framebuffer is still valid
if let Some(attachment_last_submit) = attachment_last_submit {
let min = last_submit.unwrap_or(std::usize::MAX).min(attachment_last_submit);
let min = last_submit
.unwrap_or(std::usize::MAX)
.min(attachment_last_submit);
last_submit = Some(min);
}
}
@ -537,7 +538,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.framebuffers.push(framebuffer);
.framebuffers
.push(framebuffer);
}
}
@ -553,18 +555,25 @@ impl<B: GfxBackend> LifetimeTracker<B> {
}
let hub = B::hub(global);
let (mut buffer_guard, _) = B::hub(global).buffers.write(token);
let mut pending_callbacks: Vec<super::BufferMapPendingCallback> = Vec::with_capacity(self.ready_to_map.len());
let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
Vec::with_capacity(self.ready_to_map.len());
let mut trackers = trackers.lock();
for buffer_id in self.ready_to_map.drain(..) {
let buffer = &mut buffer_guard[buffer_id];
if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id) {
if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
{
buffer.map_state = resource::BufferMapState::Idle;
log::debug!("Mapping request is dropped because the buffer is destroyed.");
hub.buffers.free_id(buffer_id);
let buffer = buffer_guard.remove(buffer_id).unwrap();
self.free_resources.buffers.push((buffer.raw, buffer.memory));
self.free_resources
.buffers
.push((buffer.raw, buffer.memory));
} else {
let mapping = match std::mem::replace(&mut buffer.map_state, resource::BufferMapState::Active) {
let mapping = match std::mem::replace(
&mut buffer.map_state,
resource::BufferMapState::Active,
) {
resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
_ => panic!("No pending mapping."),
};


@ -3,22 +3,13 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
binding_model,
command,
conv,
binding_model, command, conv,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id,
pipeline,
resource,
swap_chain,
id, pipeline, resource, swap_chain,
track::{BufferState, TextureState, TrackerSet},
FastHashMap,
Features,
LifeGuard,
Stored,
FastHashMap, Features, LifeGuard, Stored,
};
use wgt::{BufferAddress, InputStepMode, TextureDimension, TextureFormat, BIND_BUFFER_ALIGNMENT};
use arrayvec::ArrayVec;
use copyless::VecHelper as _;
use gfx_descriptor::DescriptorAllocator;
@ -32,14 +23,10 @@ use hal::{
};
use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
use wgt::{BufferAddress, InputStepMode, TextureDimension, TextureFormat, BIND_BUFFER_ALIGNMENT};
use std::{
collections::hash_map::Entry,
ffi,
iter,
marker::PhantomData,
ptr,
slice,
collections::hash_map::Entry, ffi, iter, marker::PhantomData, ptr, slice,
sync::atomic::Ordering,
};
@ -108,8 +95,11 @@ type RawBufferMut = *mut u8;
type BufferMapResult = Result<RawBufferMut, hal::device::MapError>;
type BufferMapPendingCallback = (resource::BufferMapOperation, BufferMapResult);
pub type BufferMapReadCallback =
unsafe extern "C" fn(status: resource::BufferMapAsyncStatus, data: *const u8, userdata: *mut u8);
pub type BufferMapReadCallback = unsafe extern "C" fn(
status: resource::BufferMapAsyncStatus,
data: *const u8,
userdata: *mut u8,
);
pub type BufferMapWriteCallback =
unsafe extern "C" fn(status: resource::BufferMapAsyncStatus, data: *mut u8, userdata: *mut u8);
@ -140,11 +130,8 @@ fn map_buffer<B: hal::Backend>(
};
match kind {
HostMap::Read => unsafe {
raw.invalidate_mapped_memory_ranges(iter::once((
buffer.memory.memory(),
segment,
)))
.unwrap();
raw.invalidate_mapped_memory_ranges(iter::once((buffer.memory.memory(), segment)))
.unwrap();
},
HostMap::Write => {
buffer.mapped_write_segments.push(segment);
@ -154,28 +141,24 @@ fn map_buffer<B: hal::Backend>(
Ok(ptr.as_ptr())
}
fn unmap_buffer<B: hal::Backend>(
raw: &B::Device,
buffer: &mut resource::Buffer<B>,
) {
fn unmap_buffer<B: hal::Backend>(raw: &B::Device, buffer: &mut resource::Buffer<B>) {
match buffer.map_state {
resource::BufferMapState::Idle => {
log::error!("Buffer already unmapped");
return;
},
}
_ => buffer.map_state = resource::BufferMapState::Idle,
}
if !buffer.mapped_write_segments.is_empty() {
unsafe {
raw
.flush_mapped_memory_ranges(
buffer
.mapped_write_segments
.iter()
.map(|r| (buffer.memory.memory(), r.clone())),
)
.unwrap()
raw.flush_mapped_memory_ranges(
buffer
.mapped_write_segments
.iter()
.map(|r| (buffer.memory.memory(), r.clone())),
)
.unwrap()
};
buffer.mapped_write_segments.clear();
}
@ -195,15 +178,14 @@ fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callback
match operation {
resource::BufferMapOperation::Read { callback, userdata } => unsafe {
callback(status, ptr, userdata)
}
},
resource::BufferMapOperation::Write { callback, userdata } => unsafe {
callback(status, ptr, userdata)
}
},
}
}
}
#[derive(Debug)]
pub struct Device<B: hal::Backend> {
pub(crate) raw: B::Device,
@ -272,7 +254,8 @@ impl<B: GfxBackend> Device<B> {
}
fn lock_life<'a>(
&'a self, _token: &mut Token<'a, Self>
&'a self,
_token: &mut Token<'a, Self>,
) -> MutexGuard<'a, life::LifetimeTracker<B>> {
self.life_tracker.lock()
}
@ -290,11 +273,7 @@ impl<B: GfxBackend> Device<B> {
life_tracker.triage_framebuffers(global, &mut *self.framebuffers.lock(), token);
let _last_done = life_tracker.triage_submissions(&self.raw, force_wait);
let callbacks = life_tracker.handle_mapping(global, &self.raw, &self.trackers, token);
life_tracker.cleanup(
&self.raw,
&self.mem_allocator,
&self.desc_allocator,
);
life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
callbacks
}
@ -317,7 +296,12 @@ impl<B: GfxBackend> Device<B> {
} else if (Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage) {
(Kind::Linear, MemoryUsage::Staging { read_back: true })
} else {
(Kind::General, MemoryUsage::Dynamic { sparse_updates: false })
(
Kind::General,
MemoryUsage::Dynamic {
sparse_updates: false,
},
)
}
};
@ -344,11 +328,7 @@ impl<B: GfxBackend> Device<B> {
unsafe {
self.raw
.bind_buffer_memory(
memory.memory(),
memory.segment().offset,
&mut buffer,
)
.bind_buffer_memory(memory.memory(), memory.segment().offset, &mut buffer)
.unwrap()
};
@ -378,9 +358,9 @@ impl<B: GfxBackend> Device<B> {
// Ensure `D24Plus` textures cannot be copied
match desc.format {
TextureFormat::Depth24Plus | TextureFormat::Depth24PlusStencil8 => {
assert!(!desc.usage.intersects(
wgt::TextureUsage::COPY_SRC | wgt::TextureUsage::COPY_DST
));
assert!(!desc
.usage
.intersects(wgt::TextureUsage::COPY_SRC | wgt::TextureUsage::COPY_DST));
}
_ => {}
}
@ -407,14 +387,17 @@ impl<B: GfxBackend> Device<B> {
// TODO: 2D arrays, cubemap arrays
let mut image = unsafe {
let mut image = self.raw.create_image(
kind,
desc.mip_level_count as hal::image::Level,
format,
hal::image::Tiling::Optimal,
usage,
view_capabilities,
).unwrap();
let mut image = self
.raw
.create_image(
kind,
desc.mip_level_count as hal::image::Level,
format,
hal::image::Tiling::Optimal,
usage,
view_capabilities,
)
.unwrap();
if !desc.label.is_null() {
let label = ffi::CStr::from_ptr(desc.label).to_string_lossy();
self.raw.set_image_name(&mut image, &label);
@ -438,11 +421,7 @@ impl<B: GfxBackend> Device<B> {
unsafe {
self.raw
.bind_image_memory(
memory.memory(),
memory.segment().offset,
&mut image,
)
.bind_image_memory(memory.memory(), memory.segment().offset, &mut image)
.unwrap()
};
@ -457,8 +436,8 @@ impl<B: GfxBackend> Device<B> {
format: desc.format,
full_range: hal::image::SubresourceRange {
aspects,
levels: 0 .. desc.mip_level_count as hal::image::Level,
layers: 0 .. desc.array_layer_count as hal::image::Layer,
levels: 0..desc.mip_level_count as hal::image::Level,
layers: 0..desc.array_layer_count as hal::image::Layer,
},
memory,
life_guard: LifeGuard::new(),
@ -489,11 +468,9 @@ impl<B: hal::Backend> Device<B> {
pub(crate) fn dispose(self) {
self.life_tracker.lock().triage_submissions(&self.raw, true);
self.life_tracker.lock().cleanup(
&self.raw,
&self.mem_allocator,
&self.desc_allocator,
);
self.life_tracker
.lock()
.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
self.com_allocator.destroy(&self.raw);
let mut desc_alloc = self.desc_allocator.into_inner();
let mut mem_alloc = self.mem_allocator.into_inner();
@ -510,7 +487,6 @@ impl<B: hal::Backend> Device<B> {
}
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn device_create_buffer<B: GfxBackend>(
&self,
@ -568,7 +544,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Ok(ptr) => {
buffer.map_state = resource::BufferMapState::Active;
ptr
},
}
Err(e) => {
log::error!("failed to create buffer in a mapped state: {:?}", e);
ptr::null_mut()
@ -577,9 +553,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let id = hub.buffers.register_identity(id_in, buffer, &mut token);
log::info!("Created mapped buffer {:?} with {:?}", id, desc);
device.trackers
device
.trackers
.lock()
.buffers.init(
.buffers
.init(
id,
ref_count,
BufferState::with_usage(wgt::BufferUsage::MAP_WRITE),
@ -609,7 +587,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
match map_buffer(
&device.raw,
&mut buffer,
hal::buffer::SubRange { offset, size: Some(data.len() as BufferAddress) },
hal::buffer::SubRange {
offset,
size: Some(data.len() as BufferAddress),
},
HostMap::Write,
) {
Ok(ptr) => unsafe {
@ -644,7 +625,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
match map_buffer(
&device.raw,
&mut buffer,
hal::buffer::SubRange { offset, size: Some(data.len() as BufferAddress) },
hal::buffer::SubRange {
offset,
size: Some(data.len() as BufferAddress),
},
HostMap::Read,
) {
Ok(ptr) => unsafe {
@ -674,7 +658,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.buffers.push(buffer_id);
.suspected_resources
.buffers
.push(buffer_id);
}
pub fn device_create_texture<B: GfxBackend>(
@ -693,13 +679,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = texture.life_guard.add_ref();
let id = hub.textures.register_identity(id_in, texture, &mut token);
device.trackers
device
.trackers
.lock()
.textures.init(
id,
ref_count,
TextureState::with_range(&range),
)
.textures
.init(id, ref_count, TextureState::with_range(&range))
.unwrap();
id
}
@ -718,7 +702,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.textures.push(texture_id);
.suspected_resources
.textures
.push(texture_id);
}
pub fn texture_create_view<B: GfxBackend>(
@ -750,8 +736,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
let range = hal::image::SubresourceRange {
aspects: texture.full_range.aspects,
levels: desc.base_mip_level as u8 .. end_level,
layers: desc.base_array_layer as u16 .. end_layer,
levels: desc.base_mip_level as u8..end_level,
layers: desc.base_array_layer as u16..end_layer,
};
(desc.format, kind, range)
}
@ -797,9 +783,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = view.life_guard.add_ref();
let id = hub.texture_views.register_identity(id_in, view, &mut token);
device.trackers
device
.trackers
.lock()
.views.init(id, ref_count, PhantomData)
.views
.init(id, ref_count, PhantomData)
.unwrap();
id
}
@ -827,7 +815,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.texture_views.push(texture_view_id);
.suspected_resources
.texture_views
.push(texture_view_id);
}
pub fn device_create_sampler<B: GfxBackend>(
@ -851,7 +841,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
conv::map_wrap(desc.address_mode_w),
),
lod_bias: hal::image::Lod(0.0),
lod_range: hal::image::Lod(desc.lod_min_clamp) .. hal::image::Lod(desc.lod_max_clamp),
lod_range: hal::image::Lod(desc.lod_min_clamp)..hal::image::Lod(desc.lod_max_clamp),
comparison: conv::map_compare_function(desc.compare),
border: hal::image::PackedColor(0),
normalized: true,
@ -869,9 +859,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = sampler.life_guard.add_ref();
let id = hub.samplers.register_identity(id_in, sampler, &mut token);
device.trackers
device
.trackers
.lock()
.samplers.init(id, ref_count, PhantomData)
.samplers
.init(id, ref_count, PhantomData)
.unwrap();
id
}
@ -890,7 +882,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.samplers.push(sampler_id);
.suspected_resources
.samplers
.push(sampler_id);
}
pub fn device_create_bind_group_layout<B: GfxBackend>(
@ -902,11 +896,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let hub = B::hub(self);
let entries = unsafe { slice::from_raw_parts(desc.entries, desc.entries_length) };
let entry_map: FastHashMap<_, _> = entries
.iter()
.cloned()
.map(|b| (b.binding, b))
.collect();
let entry_map: FastHashMap<_, _> =
entries.iter().cloned().map(|b| (b.binding, b)).collect();
// TODO: deduplicate the bind group layouts at some level.
// We can't do it right here, because in the remote scenario
@ -942,7 +933,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.unwrap();
if !desc.label.is_null() {
let label = ffi::CStr::from_ptr(desc.label).to_string_lossy();
device.raw.set_descriptor_set_layout_name(&mut raw_layout, &label);
device
.raw
.set_descriptor_set_layout_name(&mut raw_layout, &label);
}
raw_layout
};
@ -962,13 +955,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.register_identity(id_in, layout, &mut token)
}
pub fn bind_group_layout_destroy<B: GfxBackend>(&self, bind_group_layout_id: id::BindGroupLayoutId) {
pub fn bind_group_layout_destroy<B: GfxBackend>(
&self,
bind_group_layout_id: id::BindGroupLayoutId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (bgl, _) = hub.bind_group_layouts.unregister(bind_group_layout_id, &mut token);
let (bgl, _) = hub
.bind_group_layouts
.unregister(bind_group_layout_id, &mut token);
unsafe {
device_guard[bgl.device_id.value].raw.destroy_descriptor_set_layout(bgl.raw);
device_guard[bgl.device_id.value]
.raw
.destroy_descriptor_set_layout(bgl.raw);
}
}
@ -1020,9 +1020,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (pipeline_layout, _) = hub.pipeline_layouts.unregister(pipeline_layout_id, &mut token);
let (pipeline_layout, _) = hub
.pipeline_layouts
.unregister(pipeline_layout_id, &mut token);
unsafe {
device_guard[pipeline_layout.device_id.value].raw.destroy_pipeline_layout(pipeline_layout.raw);
device_guard[pipeline_layout.device_id.value]
.raw
.destroy_pipeline_layout(pipeline_layout.raw);
}
}
@ -1039,8 +1043,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let device = &device_guard[device_id];
let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let bind_group_layout = &bind_group_layout_guard[desc.layout];
let entries =
unsafe { slice::from_raw_parts(desc.entries, desc.entries_length) };
let entries = unsafe { slice::from_raw_parts(desc.entries, desc.entries_length) };
assert_eq!(entries.len(), bind_group_layout.entries.len());
let desc_set = unsafe {
@ -1078,7 +1081,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//TODO: group writes into contiguous sections
let mut writes = Vec::new();
for b in entries.iter() {
let decl = bind_group_layout.entries.get(&b.binding)
let decl = bind_group_layout
.entries
.get(&b.binding)
.expect("Failed to find binding declaration for binding");
let descriptor = match b.resource {
binding_model::BindingResource::Buffer(ref bb) => {
@ -1092,11 +1097,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
binding_model::BindingType::ReadonlyStorageBuffer => {
(BIND_BUFFER_ALIGNMENT, wgt::BufferUsage::STORAGE_READ)
}
binding_model::BindingType::Sampler |
binding_model::BindingType::ComparisonSampler |
binding_model::BindingType::SampledTexture |
binding_model::BindingType::ReadonlyStorageTexture |
binding_model::BindingType::WriteonlyStorageTexture => {
binding_model::BindingType::Sampler
| binding_model::BindingType::ComparisonSampler
| binding_model::BindingType::SampledTexture
| binding_model::BindingType::ReadonlyStorageTexture
| binding_model::BindingType::WriteonlyStorageTexture => {
panic!("Mismatched buffer binding for {:?}", decl)
}
};
@ -1118,12 +1123,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let sub_range = hal::buffer::SubRange {
offset: bb.offset,
size: if bb.size == 0 { None } else {
size: if bb.size == 0 {
None
} else {
let end = bb.offset + bb.size;
assert!(
end <= buffer.size,
"Bound buffer range {:?} does not fit in buffer size {}",
bb.offset .. end,
bb.offset..end,
buffer.size
);
Some(bb.size)
@ -1133,8 +1140,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
binding_model::BindingResource::Sampler(id) => {
match decl.ty {
binding_model::BindingType::Sampler |
binding_model::BindingType::ComparisonSampler => {}
binding_model::BindingType::Sampler
| binding_model::BindingType::ComparisonSampler => {}
_ => panic!("Wrong binding type for a sampler: {:?}", decl.ty),
}
let sampler = used
@ -1149,8 +1156,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
wgt::TextureUsage::SAMPLED,
hal::image::Layout::ShaderReadOnlyOptimal,
),
binding_model::BindingType::ReadonlyStorageTexture |
binding_model::BindingType::WriteonlyStorageTexture => {
binding_model::BindingType::ReadonlyStorageTexture
| binding_model::BindingType::WriteonlyStorageTexture => {
(wgt::TextureUsage::STORAGE, hal::image::Layout::General)
}
_ => panic!("Mismatched texture binding for {:?}", decl),
@ -1211,12 +1218,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
let ref_count = bind_group.life_guard.add_ref();
let id = hub
.bind_groups
.register_identity(id_in, bind_group, &mut token);
log::debug!("Bind group {:?} {:#?}",
id, hub.bind_groups.read(&mut token).0[id].used);
log::debug!(
"Bind group {:?} {:#?}",
id,
hub.bind_groups.read(&mut token).0[id].used
);
device
.trackers
@ -1241,7 +1250,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.bind_groups.push(bind_group_id);
.suspected_resources
.bind_groups
.push(bind_group_id);
}
pub fn device_create_shader_module<B: GfxBackend>(
@ -1256,12 +1267,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let device = &device_guard[device_id];
let spv = unsafe { slice::from_raw_parts(desc.code.bytes, desc.code.length) };
let raw = unsafe {
device
.raw
.create_shader_module(spv)
.unwrap()
};
let raw = unsafe { device.raw.create_shader_module(spv).unwrap() };
let shader = pipeline::ShaderModule {
raw,
device_id: Stored {
@ -1280,7 +1286,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
unsafe {
device_guard[module.device_id.value].raw.destroy_shader_module(module.raw);
device_guard[module.device_id.value]
.raw
.destroy_shader_module(module.raw);
}
}
@ -1301,31 +1309,30 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
ref_count: device.life_guard.add_ref(),
};
let lowest_active_index = device
.lock_life(&mut token)
.lowest_active_submission();
let lowest_active_index = device.lock_life(&mut token).lowest_active_submission();
let mut command_buffer = device
.com_allocator
.allocate(dev_stored, &device.raw, device.features, lowest_active_index);
let mut command_buffer = device.com_allocator.allocate(
dev_stored,
&device.raw,
device.features,
lowest_active_index,
);
unsafe {
let raw_command_buffer = command_buffer.raw.last_mut().unwrap();
if !desc.label.is_null() {
let label = ffi::CStr::from_ptr(desc.label).to_string_lossy();
device.raw.set_command_buffer_name(raw_command_buffer, &label);
device
.raw
.set_command_buffer_name(raw_command_buffer, &label);
}
raw_command_buffer.begin_primary(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
);
raw_command_buffer.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
hub.command_buffers
.register_identity(id_in, command_buffer, &mut token)
}
pub fn command_encoder_destroy<B: GfxBackend>(
&self, command_encoder_id: id::CommandEncoderId
) {
pub fn command_encoder_destroy<B: GfxBackend>(&self, command_encoder_id: id::CommandEncoderId) {
let hub = B::hub(self);
let mut token = Token::root();
@ -1387,13 +1394,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device
.lock_life(&mut token)
.suspected_resources.extend(&device.temp_suspected);
.suspected_resources
.extend(&device.temp_suspected);
device.com_allocator.discard(comb);
}
pub fn command_buffer_destroy<B: GfxBackend>(
&self, command_buffer_id: id::CommandBufferId
) {
pub fn command_buffer_destroy<B: GfxBackend>(&self, command_buffer_id: id::CommandBufferId) {
self.command_encoder_destroy::<B>(command_buffer_id)
}
@ -1497,9 +1503,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// execute resource transitions
let mut transit = device.com_allocator.extend(comb);
unsafe {
transit.begin_primary(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
);
transit.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
command::CommandBuffer::insert_barriers(
@ -1530,7 +1534,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
signal_semaphores: signal_swapchain_semaphores
.into_iter()
.map(|sc_id| &swap_chain_guard[sc_id].semaphore),
};
unsafe {
@ -1547,9 +1550,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let device = &device_guard[queue_id];
let callbacks = device.maintain(self, false, &mut token);
device
.lock_life(&mut token)
.track_submission(submit_index, fence, &device.temp_suspected);
device.lock_life(&mut token).track_submission(
submit_index,
fence,
&device.temp_suspected,
);
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
@ -1681,7 +1686,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
samples: sc,
ops: hal::pass::AttachmentOps::PRESERVE,
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts: hal::image::Layout::General .. hal::image::Layout::General,
layouts: hal::image::Layout::General..hal::image::Layout::General,
})
.collect(),
// We can ignore the resolves as the Vulkan spec says:
@ -1694,7 +1699,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
samples: sc,
ops: hal::pass::AttachmentOps::PRESERVE,
stencil_ops: hal::pass::AttachmentOps::PRESERVE,
layouts: hal::image::Layout::General .. hal::image::Layout::General,
layouts: hal::image::Layout::General..hal::image::Layout::General,
}),
};
@ -1715,7 +1720,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
let subpass = hal::pass::SubpassDesc {
colors: &color_ids[.. desc.color_states_length],
colors: &color_ids[..desc.color_states_length],
depth_stencil: depth_stencil_state.map(|_| &depth_id),
inputs: &[],
resolves: &[],
@ -1844,7 +1849,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.render_pipelines.push(render_pipeline_id);
.suspected_resources
.render_pipelines
.push(render_pipeline_id);
}
pub fn device_create_compute_pipeline<B: GfxBackend>(
@ -1906,7 +1913,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.register_identity(id_in, pipeline, &mut token)
}
pub fn compute_pipeline_destroy<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId) {
pub fn compute_pipeline_destroy<B: GfxBackend>(
&self,
compute_pipeline_id: id::ComputePipelineId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
@ -1920,8 +1930,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources.compute_pipelines.push(compute_pipeline_id);
.suspected_resources
.compute_pipelines
.push(compute_pipeline_id);
}
pub fn device_create_swap_chain<B: GfxBackend>(
@ -1980,7 +1991,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let num_frames = swap_chain::DESIRED_NUM_FRAMES
.max(*caps.image_count.start())
.min(*caps.image_count.end());
let mut config = swap_chain::swap_chain_descriptor_to_hal(&desc, num_frames, device.features);
let mut config =
swap_chain::swap_chain_descriptor_to_hal(&desc, num_frames, device.features);
if let Some(formats) = formats {
assert!(
formats.contains(&config.format),
@ -2100,14 +2112,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
operation.call_error();
return;
}
resource::BufferMapState::Idle => resource::BufferMapState::Waiting(resource::BufferPendingMapping {
sub_range: hal::buffer::SubRange {
offset: range.start,
size: Some(range.end - range.start),
},
op: operation,
parent_ref_count: buffer.life_guard.add_ref(),
}),
resource::BufferMapState::Idle => {
resource::BufferMapState::Waiting(resource::BufferPendingMapping {
sub_range: hal::buffer::SubRange {
offset: range.start,
size: Some(range.end - range.start),
},
op: operation,
parent_ref_count: buffer.life_guard.add_ref(),
})
}
};
log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
@ -2122,9 +2136,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.change_replace(buffer_id, &ref_count, (), usage);
device
.lock_life(&mut token)
.map(buffer_id, ref_count);
device.lock_life(&mut token).map(buffer_id, ref_count);
}
pub fn buffer_unmap<B: GfxBackend>(&self, buffer_id: id::BufferId) {
@ -2136,9 +2148,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let buffer = &mut buffer_guard[buffer_id];
log::debug!("Buffer {:?} map state -> Idle", buffer_id);
unmap_buffer(
&device_guard[buffer.device_id.value].raw,
buffer,
);
unmap_buffer(&device_guard[buffer.device_id.value].raw, buffer);
}
}

View File

@ -8,40 +8,25 @@ use crate::{
command::CommandBuffer,
device::Device,
id::{
AdapterId,
BindGroupId,
BindGroupLayoutId,
BufferId,
CommandBufferId,
ComputePipelineId,
DeviceId,
PipelineLayoutId,
RenderPipelineId,
SamplerId,
ShaderModuleId,
SurfaceId,
SwapChainId,
TextureId,
TextureViewId,
TypedId,
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, RenderPipelineId, SamplerId, ShaderModuleId, SurfaceId,
SwapChainId, TextureId, TextureViewId, TypedId,
},
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
resource::{Buffer, Sampler, Texture, TextureView},
swap_chain::SwapChain,
Epoch,
Index,
Epoch, Index,
};
use wgt::Backend;
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use vec_map::VecMap;
use wgt::Backend;
#[cfg(debug_assertions)]
use std::cell::Cell;
use std::{fmt::Debug, iter, marker::PhantomData, ops};
/// A simple structure to manage identities of objects.
#[derive(Debug)]
pub struct IdentityManager {
@ -136,11 +121,9 @@ impl<T, I: TypedId> Storage<T, I> {
}
pub fn iter(&self, backend: Backend) -> impl Iterator<Item = (I, &T)> {
self.map
.iter()
.map(move |(index, (value, storage_epoch))| {
(I::zip(index as Index, *storage_epoch, backend), value)
})
self.map.iter().map(move |(index, (value, storage_epoch))| {
(I::zip(index as Index, *storage_epoch, backend), value)
})
}
}
@ -252,7 +235,6 @@ impl<'a, T> Drop for Token<'a, T> {
}
}
pub trait IdentityHandler<I>: Debug {
type Input: Clone + Debug;
fn process(&self, id: Self::Input, backend: Backend) -> I;
@ -281,35 +263,35 @@ impl<I: TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
type Filter = Mutex<IdentityManager>;
fn spawn(&self, min_index: Index) -> Self::Filter {
let mut man = IdentityManager::default();
man.free.extend(0 .. min_index);
man.free.extend(0..min_index);
man.epochs.extend(iter::repeat(1).take(min_index as usize));
Mutex::new(man)
}
}
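A rough standalone sketch of the free-list-plus-epoch recycling that `IdentityManager` implements and that `spawn` seeds above; the names (`SimpleIdentityManager`, `alloc`, `free_id`) are hypothetical and an id is modeled as a plain (index, epoch) pair rather than the crate's `Id` type:

// Standalone sketch, not wgpu-core code.
#[derive(Default, Debug)]
struct SimpleIdentityManager {
    free: Vec<u32>,   // indices that can be reused
    epochs: Vec<u32>, // current epoch per index
}

impl SimpleIdentityManager {
    fn alloc(&mut self) -> (u32, u32) {
        match self.free.pop() {
            Some(index) => (index, self.epochs[index as usize]),
            None => {
                let index = self.epochs.len() as u32;
                self.epochs.push(1); // epochs start at 1, as in `spawn` above
                (index, 1)
            }
        }
    }

    fn free_id(&mut self, index: u32, epoch: u32) {
        // A handle is only valid while its epoch matches the stored one.
        assert_eq!(self.epochs[index as usize], epoch, "stale id");
        self.epochs[index as usize] += 1; // invalidate outstanding copies
        self.free.push(index);
    }
}

fn main() {
    let mut man = SimpleIdentityManager::default();
    let (i0, e0) = man.alloc();
    man.free_id(i0, e0);
    let (i1, e1) = man.alloc(); // index is recycled, epoch is bumped
    assert_eq!((i0, e0 + 1), (i1, e1));
}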
pub trait GlobalIdentityHandlerFactory:
IdentityHandlerFactory<AdapterId> +
IdentityHandlerFactory<DeviceId> +
IdentityHandlerFactory<SwapChainId> +
IdentityHandlerFactory<PipelineLayoutId> +
IdentityHandlerFactory<ShaderModuleId> +
IdentityHandlerFactory<BindGroupLayoutId> +
IdentityHandlerFactory<BindGroupId> +
IdentityHandlerFactory<CommandBufferId> +
IdentityHandlerFactory<RenderPipelineId> +
IdentityHandlerFactory<ComputePipelineId> +
IdentityHandlerFactory<BufferId> +
IdentityHandlerFactory<TextureId> +
IdentityHandlerFactory<TextureViewId> +
IdentityHandlerFactory<SamplerId> +
IdentityHandlerFactory<SurfaceId>
{}
IdentityHandlerFactory<AdapterId>
+ IdentityHandlerFactory<DeviceId>
+ IdentityHandlerFactory<SwapChainId>
+ IdentityHandlerFactory<PipelineLayoutId>
+ IdentityHandlerFactory<ShaderModuleId>
+ IdentityHandlerFactory<BindGroupLayoutId>
+ IdentityHandlerFactory<BindGroupId>
+ IdentityHandlerFactory<CommandBufferId>
+ IdentityHandlerFactory<RenderPipelineId>
+ IdentityHandlerFactory<ComputePipelineId>
+ IdentityHandlerFactory<BufferId>
+ IdentityHandlerFactory<TextureId>
+ IdentityHandlerFactory<TextureViewId>
+ IdentityHandlerFactory<SamplerId>
+ IdentityHandlerFactory<SurfaceId>
{
}
impl GlobalIdentityHandlerFactory for IdentityManagerFactory {}
pub type Input<G, I> = <<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input;
#[derive(Debug)]
pub struct Registry<T, I: TypedId, F: IdentityHandlerFactory<I>> {
identity: F::Filter,

View File

@ -5,15 +5,19 @@
use crate::{Epoch, Index};
#[cfg(feature = "serde")]
use serde_crate::{Deserialize, Serialize};
use wgt::Backend;
use std::{fmt, marker::PhantomData, mem, num::NonZeroU64};
use wgt::Backend;
const BACKEND_BITS: usize = 3;
const EPOCH_MASK: u32 = (1 << (32 - BACKEND_BITS)) - 1;
type Dummy = crate::backend::Empty;
#[repr(transparent)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct Id<T>(NonZeroU64, PhantomData<T>);
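Given `BACKEND_BITS` and `EPOCH_MASK` above, one plausible packing of (index, epoch, backend) into the single `NonZeroU64` is: index in the low 32 bits, epoch in the next 29, backend in the top 3. The sketch below assumes that layout for illustration only; it is not taken from this crate's `zip`/`unzip`:

// Hypothetical packing sketch: [backend: 3 bits][epoch: 29 bits][index: 32 bits].
const BACKEND_BITS: usize = 3;
const EPOCH_MASK: u32 = (1 << (32 - BACKEND_BITS)) - 1;

fn zip(index: u32, epoch: u32, backend: u32) -> u64 {
    assert!(epoch <= EPOCH_MASK && backend < (1 << BACKEND_BITS) as u32);
    (index as u64)
        | ((epoch as u64) << 32)
        | ((backend as u64) << (64 - BACKEND_BITS))
}

fn unzip(id: u64) -> (u32, u32, u32) {
    (
        id as u32,                          // index: low 32 bits
        ((id >> 32) as u32) & EPOCH_MASK,   // epoch: next 29 bits
        (id >> (64 - BACKEND_BITS)) as u32, // backend: top 3 bits
    )
}

fn main() {
    let id = zip(42, 7, 2);
    assert_eq!(unzip(id), (42, 7, 2));
}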
// required for PeekPoke
@ -75,7 +79,7 @@ impl<T> Eq for Id<T> {}
unsafe impl<T> peek_poke::Poke for Id<T> {
fn max_size() -> usize {
mem::size_of::<u64>()
mem::size_of::<u64>()
}
unsafe fn poke_into(&self, data: *mut u8) -> *mut u8 {
self.0.get().poke_into(data)
@ -112,7 +116,6 @@ impl<T> TypedId for Id<T> {
}
}
pub type AdapterId = Id<crate::instance::Adapter<Dummy>>;
pub type SurfaceId = Id<crate::instance::Surface>;
// Device

View File

@ -17,20 +17,19 @@ use serde_crate::{Deserialize, Serialize};
use hal::{
self,
adapter::{
AdapterInfo as HalAdapterInfo,
DeviceType as HalDeviceType,
PhysicalDevice as _,
},
adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _},
queue::QueueFamily as _,
window::Surface as _,
Instance as _,
};
#[repr(C)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct RequestAdapterOptions {
pub power_preference: PowerPreference,
pub compatible_surface: Option<SurfaceId>,
@ -125,7 +124,11 @@ pub struct Adapter<B: hal::Backend> {
/// Metadata about a backend adapter.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub struct AdapterInfo {
/// Adapter name
pub name: String,
@ -160,7 +163,11 @@ impl AdapterInfo {
/// Supported physical device types
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))]
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate")
)]
pub enum DeviceType {
/// Other
Other,
@ -300,13 +307,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut adapters_vk = match instance.vulkan {
Some(ref inst) if id_vulkan.is_some() => {
let mut adapters = inst.enumerate_adapters();
if let Some(&Surface { vulkan: Some(ref surface), .. }) = compatible_surface {
adapters.retain(|a|
if let Some(&Surface {
vulkan: Some(ref surface),
..
}) = compatible_surface
{
adapters.retain(|a| {
a.queue_families
.iter()
.find(|qf| qf.queue_type().supports_graphics())
.map_or(false, |qf| surface.supports_queue_family(qf))
);
});
}
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
@ -317,12 +328,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut adapters_mtl = if id_metal.is_some() {
let mut adapters = instance.metal.enumerate_adapters();
if let Some(surface) = compatible_surface {
adapters.retain(|a|
adapters.retain(|a| {
a.queue_families
.iter()
.find(|qf| qf.queue_type().supports_graphics())
.map_or(false, |qf| surface.metal.supports_queue_family(qf))
);
});
}
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
@ -333,13 +344,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut adapters_dx12 = match instance.dx12 {
Some(ref inst) if id_dx12.is_some() => {
let mut adapters = inst.enumerate_adapters();
if let Some(&Surface { dx12: Some(ref surface), .. }) = compatible_surface {
adapters.retain(|a|
if let Some(&Surface {
dx12: Some(ref surface),
..
}) = compatible_surface
{
adapters.retain(|a| {
a.queue_families
.iter()
.find(|qf| qf.queue_type().supports_graphics())
.map_or(false, |qf| surface.supports_queue_family(qf))
);
});
}
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
@ -350,12 +365,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut adapters_dx11 = if id_dx11.is_some() {
let mut adapters = instance.dx11.enumerate_adapters();
if let Some(surface) = compatible_surface {
adapters.retain(|a|
adapters.retain(|a| {
a.queue_families
.iter()
.find(|qf| qf.queue_type().supports_graphics())
.map_or(false, |qf| surface.dx11.supports_queue_family(qf))
);
});
}
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
@ -388,14 +403,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let preferred_gpu = match desc.power_preference {
PowerPreference::Default => {
match power::is_battery_discharging() {
Ok(false) => discrete.or(integrated).or(other).or(virt),
Ok(true) => integrated.or(discrete).or(other).or(virt),
Err(err) => {
log::debug!("Power info unavailable, preferring integrated gpu ({})", err);
integrated.or(discrete).or(other).or(virt)
}
PowerPreference::Default => match power::is_battery_discharging() {
Ok(false) => discrete.or(integrated).or(other).or(virt),
Ok(true) => integrated.or(discrete).or(other).or(virt),
Err(err) => {
log::debug!(
"Power info unavailable, preferring integrated gpu ({})",
err
);
integrated.or(discrete).or(other).or(virt)
}
},
PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt),
@ -500,13 +516,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let device = {
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id].raw;
let wishful_features =
hal::Features::VERTEX_STORES_AND_ATOMICS |
hal::Features::FRAGMENT_STORES_AND_ATOMICS |
hal::Features::NDC_Y_UP;
let wishful_features = hal::Features::VERTEX_STORES_AND_ATOMICS
| hal::Features::FRAGMENT_STORES_AND_ATOMICS
| hal::Features::NDC_Y_UP;
let enabled_features = adapter.physical_device.features() & wishful_features;
if enabled_features != wishful_features {
log::warn!("Missing features: {:?}", wishful_features - enabled_features);
log::warn!(
"Missing features: {:?}",
wishful_features - enabled_features
);
}
let family = adapter

View File

@ -103,8 +103,7 @@ impl LifeGuard {
/// Returns `true` if the resource is still needed by the user.
fn use_at(&self, submit_index: SubmissionIndex) -> bool {
self.submission_index
.store(submit_index, Ordering::Release);
self.submission_index.store(submit_index, Ordering::Release);
self.ref_count.is_some()
}
}
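A minimal sketch of the `use_at` pattern above — store the latest submission index, then report whether the user still holds a reference — using a hypothetical `Guard` type with an `Arc`/`Weak` pair standing in for the real `RefCount`:

use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc, Weak,
};

// Hypothetical stand-in for LifeGuard.
struct Guard {
    submission_index: AtomicUsize,
    user_handle: Weak<()>, // upgradeable only while the user still holds the Arc
}

impl Guard {
    fn use_at(&self, submit_index: usize) -> bool {
        self.submission_index.store(submit_index, Ordering::Release);
        self.user_handle.upgrade().is_some()
    }
}

fn main() {
    let handle = Arc::new(());
    let guard = Guard {
        submission_index: AtomicUsize::new(0),
        user_handle: Arc::downgrade(&handle),
    };
    assert!(guard.use_at(1));  // user still holds the handle
    drop(handle);
    assert!(!guard.use_at(2)); // no user references left
}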

View File

@ -5,14 +5,13 @@
use crate::{
device::RenderPassContext,
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
LifeGuard,
RawString,
RefCount,
Stored,
U32Array
LifeGuard, RawString, RefCount, Stored, U32Array,
};
use wgt::{BufferAddress, ColorStateDescriptor, DepthStencilStateDescriptor, IndexFormat, InputStepMode, PrimitiveTopology, RasterizationStateDescriptor, VertexAttributeDescriptor};
use std::borrow::Borrow;
use wgt::{
BufferAddress, ColorStateDescriptor, DepthStencilStateDescriptor, IndexFormat, InputStepMode,
PrimitiveTopology, RasterizationStateDescriptor, VertexAttributeDescriptor,
};
#[repr(C)]
#[derive(Debug)]
@ -40,7 +39,7 @@ pub struct ShaderModuleDescriptor {
#[derive(Debug)]
pub struct ShaderModule<B: hal::Backend> {
pub(crate) raw: B::ShaderModule,
pub(crate) device_id: Stored<DeviceId>
pub(crate) device_id: Stored<DeviceId>,
}
#[repr(C)]

View File

@ -19,13 +19,16 @@ impl fmt::Display for Error {
}
}
#[cfg(all(feature = "battery", any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
)))]
#[cfg(all(
feature = "battery",
any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
)
))]
mod platform {
use super::Error;
use battery::{self, Manager, State};
@ -49,13 +52,16 @@ mod platform {
}
}
#[cfg(any(not(feature = "battery"), not(any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
))))]
#[cfg(any(
not(feature = "battery"),
not(any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
))
))]
mod platform {
use super::Error;

View File

@ -5,18 +5,11 @@
use crate::{
id::{DeviceId, SwapChainId, TextureId},
track::DUMMY_SELECTOR,
LifeGuard,
RefCount,
Stored,
LifeGuard, RefCount, Stored,
};
use wgt::{
BufferAddress,
BufferUsage,
TextureFormat,
TextureUsage,
};
use gfx_memory::MemoryBlock;
use wgt::{BufferAddress, BufferUsage, TextureFormat, TextureUsage};
use std::{borrow::Borrow, fmt};
@ -47,7 +40,7 @@ pub enum BufferMapOperation {
Write {
callback: crate::device::BufferMapWriteCallback,
userdata: *mut u8,
}
},
}
//TODO: clarify if/why this is needed here
@ -69,11 +62,15 @@ impl BufferMapOperation {
match self {
BufferMapOperation::Read { callback, userdata } => {
log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending");
unsafe { callback(BufferMapAsyncStatus::Error, std::ptr::null(), userdata); }
unsafe {
callback(BufferMapAsyncStatus::Error, std::ptr::null(), userdata);
}
}
BufferMapOperation::Write { callback, userdata } => {
log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending");
unsafe { callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata); }
unsafe {
callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
}
}
}
}

View File

@ -36,15 +36,11 @@ use crate::{
conv,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId},
resource,
Features,
LifeGuard,
Stored,
resource, Features, LifeGuard, Stored,
};
use wgt::SwapChainDescriptor;
use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
use wgt::SwapChainDescriptor;
const FRAME_TIMEOUT_MS: u64 = 1000;
pub const DESIRED_NUM_FRAMES: u32 = 3;
@ -118,7 +114,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
Err(e) => {
log::warn!("acquire_image() failed ({:?}), reconfiguring swapchain", e);
let desc = swap_chain_descriptor_to_hal(&sc.desc, sc.num_frames, device.features);
let desc =
swap_chain_descriptor_to_hal(&sc.desc, sc.num_frames, device.features);
unsafe {
suf.configure_swapchain(&device.raw, desc).unwrap();
suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000).unwrap()
@ -144,8 +141,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
samples: 1,
range: hal::image::SubresourceRange {
aspects: hal::format::Aspects::COLOR,
layers: 0 .. 1,
levels: 0 .. 1,
layers: 0..1,
levels: 0..1,
},
life_guard: LifeGuard::new(),
};

View File

@ -59,7 +59,7 @@ impl ResourceState for BufferState {
let pending = PendingTransition {
id,
selector: (),
usage: old .. usage,
usage: old..usage,
};
self.last = match output {
None => pending.collapse()?,
@ -92,7 +92,7 @@ impl ResourceState for BufferState {
let pending = PendingTransition {
id,
selector: (),
usage: old .. new,
usage: old..new,
};
match output {
None => pending.collapse()?,
@ -114,7 +114,7 @@ impl ResourceState for BufferState {
#[cfg(test)]
mod test {
use super::*;
use crate::{id::TypedId};
use crate::id::TypedId;
#[test]
fn change() {

View File

@ -10,26 +10,16 @@ use crate::{
conv,
hub::Storage,
id::{self, TypedId},
resource,
Epoch,
FastHashMap,
Index,
RefCount,
resource, Epoch, FastHashMap, Index, RefCount,
};
use std::{
borrow::Borrow,
collections::hash_map::Entry,
fmt,
marker::PhantomData,
ops,
vec::Drain,
borrow::Borrow, collections::hash_map::Entry, fmt, marker::PhantomData, ops, vec::Drain,
};
pub use buffer::BufferState;
pub use texture::TextureState;
/// A single unit of state tracking. It keeps an initial
/// usage as well as the last/current one, similar to `Range`.
#[derive(Clone, Copy, Debug, PartialEq)]
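A sketch of what such a unit might look like, assuming the `first: Option<U>` / `last: U` shape suggested elsewhere in this diff (`Unit::new`, `unit.last`, `start.first`); the `change` helper here is hypothetical and only shows how switching usage yields the `old..new` transitions used by `PendingTransition`:

use std::ops::Range;

// Sketch of a Unit-like tracker cell: the first usage seen in this pass
// (if any) plus the last/current one.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Unit<U> {
    first: Option<U>,
    last: U,
}

impl<U: Copy + PartialEq> Unit<U> {
    fn new(usage: U) -> Self {
        Unit { first: None, last: usage }
    }

    // Switch to a new usage, returning the transition a barrier would
    // have to cover (old..new), if the usage actually changed.
    fn change(&mut self, usage: U) -> Option<Range<U>> {
        if self.last == usage {
            None
        } else {
            let old = self.last;
            self.last = usage;
            Some(old..usage)
        }
    }
}

fn main() {
    let mut unit = Unit::new("UNINITIALIZED");
    assert_eq!(unit.change("COPY_DST"), Some("UNINITIALIZED".."COPY_DST"));
    assert_eq!(unit.change("COPY_DST"), None);
}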
@ -136,7 +126,8 @@ impl PendingTransition<BufferState> {
) -> hal::memory::Barrier<'a, B> {
log::trace!("\tbuffer -> {:?}", self);
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(self.usage.start) .. conv::map_buffer_state(self.usage.end),
states: conv::map_buffer_state(self.usage.start)
..conv::map_buffer_state(self.usage.end),
target: &buf.raw,
range: hal::buffer::SubRange::WHOLE,
families: None,
@ -154,11 +145,11 @@ impl PendingTransition<TextureState> {
let aspects = tex.full_range.aspects;
hal::memory::Barrier::Image {
states: conv::map_texture_state(self.usage.start, aspects)
.. conv::map_texture_state(self.usage.end, aspects),
..conv::map_texture_state(self.usage.end, aspects),
target: &tex.raw,
range: hal::image::SubresourceRange {
aspects,
.. self.selector
..self.selector
},
families: None,
}
@ -179,9 +170,7 @@ impl<S: ResourceState + fmt::Debug> fmt::Debug for ResourceTracker<S> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
self.map
.iter()
.map(|(&index, res)| {
((index, res.epoch), &res.state)
})
.map(|(&index, res)| ((index, res.epoch), &res.state))
.collect::<FastHashMap<_, _>>()
.fmt(formatter)
}
@ -256,12 +245,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Initialize a resource to be used.
///
/// Returns an error holding the existing state if the resource is already registered.
pub fn init(
&mut self,
id: S::Id,
ref_count: RefCount,
state: S,
) -> Result<(), &S> {
pub fn init(&mut self, id: S::Id, ref_count: RefCount, state: S) -> Result<(), &S> {
let (index, epoch, backend) = id.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
@ -273,9 +257,7 @@ impl<S: ResourceState> ResourceTracker<S> {
});
Ok(())
}
Entry::Occupied(e) => {
Err(&e.into_mut().state)
}
Entry::Occupied(e) => Err(&e.into_mut().state),
}
}
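A standalone sketch of the vacant/occupied pattern `init` relies on, with a hypothetical `Tracker` that stores a bare state per index instead of the real `Resource` entry with its ref count and epoch:

use std::collections::{hash_map::Entry, HashMap};

// Minimal tracker: one state value per resource index.
struct Tracker<S> {
    map: HashMap<u32, S>,
}

impl<S> Tracker<S> {
    fn init(&mut self, index: u32, state: S) -> Result<(), &S> {
        match self.map.entry(index) {
            Entry::Vacant(e) => {
                e.insert(state);
                Ok(())
            }
            // Already registered: surface the existing state to the caller.
            Entry::Occupied(e) => Err(e.into_mut()),
        }
    }
}

fn main() {
    let mut tracker = Tracker { map: HashMap::new() };
    assert!(tracker.init(0, "mapped-at-creation").is_ok());
    assert_eq!(tracker.init(0, "sampled"), Err(&"mapped-at-creation"));
}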
@ -356,9 +338,7 @@ impl<S: ResourceState> ResourceTracker<S> {
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch);
let id = S::Id::zip(index, new.epoch, self.backend);
e.into_mut()
.state
.merge(id, &new.state, None)?;
e.into_mut().state.merge(id, &new.state, None)?;
}
}
}
@ -367,10 +347,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Merge another tracker, adding its transitions to `self`.
/// Transitions the current usage to the new one.
pub fn merge_replace<'a>(
&'a mut self,
other: &'a Self,
) -> Drain<PendingTransition<S>> {
pub fn merge_replace<'a>(&'a mut self, other: &'a Self) -> Drain<PendingTransition<S>> {
for (&index, new) in other.map.iter() {
match self.map.entry(index) {
Entry::Vacant(e) => {
@ -424,7 +401,6 @@ impl<S: ResourceState> ResourceTracker<S> {
}
}
impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
type Id = I;
type Selector = ();
@ -458,7 +434,6 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
pub const DUMMY_SELECTOR: () = ();
/// A set of trackers for all relevant resources.
#[derive(Debug)]
pub struct TrackerSet {
@ -515,7 +490,9 @@ impl TrackerSet {
self.views.merge_extend(&other.views).unwrap();
self.bind_groups.merge_extend(&other.bind_groups).unwrap();
self.samplers.merge_extend(&other.samplers).unwrap();
self.compute_pipes.merge_extend(&other.compute_pipes).unwrap();
self.compute_pipes
.merge_extend(&other.compute_pipes)
.unwrap();
self.render_pipes.merge_extend(&other.render_pipes).unwrap();
}

View File

@ -4,13 +4,7 @@
use smallvec::SmallVec;
use std::{
cmp::Ordering,
fmt::Debug,
iter,
ops::Range,
slice::Iter,
};
use std::{cmp::Ordering, fmt::Debug, iter, ops::Range, slice::Iter};
/// Structure that keeps track of an I -> T mapping,
/// optimized for a case where keys of the same values
@ -66,7 +60,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
for a in self.ranges.iter() {
assert!(a.0.start < a.0.end);
}
for (a, b) in self.ranges.iter().zip(self.ranges[1 ..].iter()) {
for (a, b) in self.ranges.iter().zip(self.ranges[1..].iter()) {
assert!(a.0.end <= b.0.start);
}
}
@ -128,7 +122,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
None => {
let pos = self.ranges.len();
self.ranges.push((index.clone(), default));
return &mut self.ranges[pos ..];
return &mut self.ranges[pos..];
}
};
@ -137,7 +131,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
if range.start < index.start {
self.ranges[start_pos].0.start = index.start;
self.ranges
.insert(start_pos, (range.start .. index.start, value));
.insert(start_pos, (range.start..index.start, value));
start_pos += 1;
}
}
@ -146,19 +140,19 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
loop {
let (range, value) = self.ranges[pos].clone();
if range.start >= index.end {
self.ranges.insert(pos, (range_pos .. index.end, default));
self.ranges.insert(pos, (range_pos..index.end, default));
pos += 1;
break;
}
if range.start > range_pos {
self.ranges.insert(pos, (range_pos .. range.start, default));
self.ranges.insert(pos, (range_pos..range.start, default));
pos += 1;
range_pos = range.start;
}
if range.end >= index.end {
if range.end != index.end {
self.ranges[pos].0.start = index.end;
self.ranges.insert(pos, (range_pos .. index.end, value));
self.ranges.insert(pos, (range_pos..index.end, value));
}
pos += 1;
break;
@ -166,16 +160,15 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
pos += 1;
range_pos = range.end;
if pos == self.ranges.len() {
self.ranges.push((range_pos .. index.end, default));
self.ranges.push((range_pos..index.end, default));
pos += 1;
break;
}
}
&mut self.ranges[start_pos .. pos]
&mut self.ranges[start_pos..pos]
}
/// Helper method for isolation that checks the sanity of the results.
#[cfg(test)]
pub fn sanely_isolated(&self, index: Range<I>, default: T) -> Vec<(Range<I>, T)> {
@ -198,7 +191,6 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
}
}
/// A custom iterator that goes through two `RangedStates` and processes a merge.
#[derive(Debug)]
pub struct Merge<'a, I, T> {
@ -218,32 +210,32 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
if self.base == rb.start {
// right stream is starting
debug_assert!(self.base < ra.end);
(self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb))
(self.base..ra.end.min(rb.end), Some(*va)..Some(*vb))
} else {
// right hasn't started yet
debug_assert!(self.base < rb.start);
(self.base .. rb.start, Some(*va) .. None)
(self.base..rb.start, Some(*va)..None)
}
} else if rb.start < self.base {
// in the middle of the right stream
if self.base == ra.start {
// left stream is starting
debug_assert!(self.base < rb.end);
(self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb))
(self.base..ra.end.min(rb.end), Some(*va)..Some(*vb))
} else {
// left hasn't started yet
debug_assert!(self.base < ra.start);
(self.base .. ra.start, None .. Some(*vb))
(self.base..ra.start, None..Some(*vb))
}
} else {
// no active streams
match ra.start.cmp(&rb.start) {
// both are starting
Ordering::Equal => (ra.start .. ra.end.min(rb.end), Some(*va) .. Some(*vb)),
Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(*va)..Some(*vb)),
// only left is starting
Ordering::Less => (ra.start .. rb.start.min(ra.end), Some(*va) .. None),
Ordering::Less => (ra.start..rb.start.min(ra.end), Some(*va)..None),
// only right is starting
Ordering::Greater => (rb.start .. ra.start.min(rb.end), None .. Some(*vb)),
Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(*vb)),
}
};
self.base = range.end;
@ -257,17 +249,17 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
}
// only right stream
(None, Some(&(ref rb, vb))) => {
let range = self.base.max(rb.start) .. rb.end;
let range = self.base.max(rb.start)..rb.end;
self.base = rb.end;
let _ = self.sb.next();
Some((range, None .. Some(*vb)))
Some((range, None..Some(*vb)))
}
// only left stream
(Some(&(ref ra, va)), None) => {
let range = self.base.max(ra.start) .. ra.end;
let range = self.base.max(ra.start)..ra.end;
self.base = ra.end;
let _ = self.sa.next();
Some((range, Some(*va) .. None))
Some((range, Some(*va)..None))
}
// done
(None, None) => None,
@ -292,104 +284,83 @@ mod test {
#[test]
fn sane_good() {
let rs = RangedStates::from_slice(
&[(1 .. 4, 9u8), (4 .. 5, 9)],
);
let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9)]);
rs.check_sanity();
}
#[test]
#[should_panic]
fn sane_empty() {
let rs = RangedStates::from_slice(
&[(1 .. 4, 9u8), (5 .. 5, 9)],
);
let rs = RangedStates::from_slice(&[(1..4, 9u8), (5..5, 9)]);
rs.check_sanity();
}
#[test]
#[should_panic]
fn sane_intersect() {
let rs = RangedStates::from_slice(
&[(1 .. 4, 9u8), (3 .. 5, 9)],
);
let rs = RangedStates::from_slice(&[(1..4, 9u8), (3..5, 9)]);
rs.check_sanity();
}
#[test]
fn coalesce() {
let mut rs = RangedStates::from_slice(
&[(1 .. 4, 9u8), (4 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1)],
);
let mut rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
rs.coalesce();
rs.check_sanity();
assert_eq!(
rs.ranges.as_slice(),
&[(1 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1),]
);
assert_eq!(rs.ranges.as_slice(), &[(1..5, 9), (5..7, 1), (8..9, 1),]);
}
#[test]
fn query() {
let rs = RangedStates::from_slice(
&[(1 .. 4, 1u8), (5 .. 7, 2)],
);
assert_eq!(rs.query(&(0 .. 1), |v| *v), None);
assert_eq!(rs.query(&(1 .. 3), |v| *v), Some(Ok(1)));
assert_eq!(rs.query(&(1 .. 6), |v| *v), Some(Err(())));
let rs = RangedStates::from_slice(&[(1..4, 1u8), (5..7, 2)]);
assert_eq!(rs.query(&(0..1), |v| *v), None);
assert_eq!(rs.query(&(1..3), |v| *v), Some(Ok(1)));
assert_eq!(rs.query(&(1..6), |v| *v), Some(Err(())));
}
#[test]
fn isolate() {
let rs = RangedStates::from_slice(
&[(1 .. 4, 9u8), (4 .. 5, 9), (5 .. 7, 1), (8 .. 9, 1)],
);
assert_eq!(&rs.sanely_isolated(4 .. 5, 0), &[(4 .. 5, 9u8),]);
let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
assert_eq!(&rs.sanely_isolated(4..5, 0), &[(4..5, 9u8),]);
assert_eq!(
&rs.sanely_isolated(0 .. 6, 0),
&[(0 .. 1, 0), (1 .. 4, 9u8), (4 .. 5, 9), (5 .. 6, 1),]
&rs.sanely_isolated(0..6, 0),
&[(0..1, 0), (1..4, 9u8), (4..5, 9), (5..6, 1),]
);
assert_eq!(&rs.sanely_isolated(8..10, 1), &[(8..9, 1), (9..10, 1),]);
assert_eq!(
&rs.sanely_isolated(8 .. 10, 1),
&[(8 .. 9, 1), (9 .. 10, 1),]
);
assert_eq!(
&rs.sanely_isolated(6 .. 9, 0),
&[(6 .. 7, 1), (7 .. 8, 0), (8 .. 9, 1),]
&rs.sanely_isolated(6..9, 0),
&[(6..7, 1), (7..8, 0), (8..9, 1),]
);
}
#[test]
fn merge_same() {
assert_eq!(
&easy_merge(&[(1 .. 4, 0u8),], &[(1 .. 4, 2u8),],),
&[(1 .. 4, Some(0) .. Some(2)),]
&easy_merge(&[(1..4, 0u8),], &[(1..4, 2u8),],),
&[(1..4, Some(0)..Some(2)),]
);
}
#[test]
fn merge_empty() {
assert_eq!(
&easy_merge(&[(1 .. 2, 0u8),], &[],),
&[(1 .. 2, Some(0) .. None),]
&easy_merge(&[(1..2, 0u8),], &[],),
&[(1..2, Some(0)..None),]
);
assert_eq!(
&easy_merge(&[], &[(3 .. 4, 1u8),],),
&[(3 .. 4, None .. Some(1)),]
&easy_merge(&[], &[(3..4, 1u8),],),
&[(3..4, None..Some(1)),]
);
}
#[test]
fn merge_separate() {
assert_eq!(
&easy_merge(
&[(1 .. 2, 0u8), (5 .. 6, 1u8),],
&[(2 .. 4, 2u8),],
),
&easy_merge(&[(1..2, 0u8), (5..6, 1u8),], &[(2..4, 2u8),],),
&[
(1 .. 2, Some(0) .. None),
(2 .. 4, None .. Some(2)),
(5 .. 6, Some(1) .. None),
(1..2, Some(0)..None),
(2..4, None..Some(2)),
(5..6, Some(1)..None),
]
);
}
@ -397,37 +368,31 @@ mod test {
#[test]
fn merge_subset() {
assert_eq!(
&easy_merge(
&[(1 .. 6, 0u8),],
&[(2 .. 4, 2u8),],
),
&easy_merge(&[(1..6, 0u8),], &[(2..4, 2u8),],),
&[
(1 .. 2, Some(0) .. None),
(2 .. 4, Some(0) .. Some(2)),
(4 .. 6, Some(0) .. None),
(1..2, Some(0)..None),
(2..4, Some(0)..Some(2)),
(4..6, Some(0)..None),
]
);
assert_eq!(
&easy_merge(&[(2 .. 4, 0u8),], &[(1 .. 4, 2u8),],),
&[(1 .. 2, None .. Some(2)), (2 .. 4, Some(0) .. Some(2)),]
&easy_merge(&[(2..4, 0u8),], &[(1..4, 2u8),],),
&[(1..2, None..Some(2)), (2..4, Some(0)..Some(2)),]
);
}
#[test]
fn merge_all() {
assert_eq!(
&easy_merge(
&[(1 .. 4, 0u8), (5 .. 8, 1u8),],
&[(2 .. 6, 2u8), (7 .. 9, 3u8),],
),
&easy_merge(&[(1..4, 0u8), (5..8, 1u8),], &[(2..6, 2u8), (7..9, 3u8),],),
&[
(1 .. 2, Some(0) .. None),
(2 .. 4, Some(0) .. Some(2)),
(4 .. 5, None .. Some(2)),
(5 .. 6, Some(1) .. Some(2)),
(6 .. 7, Some(1) .. None),
(7 .. 8, Some(1) .. Some(3)),
(8 .. 9, None .. Some(3)),
(1..2, Some(0)..None),
(2..4, Some(0)..Some(2)),
(4..5, None..Some(2)),
(5..6, Some(1)..Some(2)),
(6..7, Some(1)..None),
(7..8, Some(1)..Some(3)),
(8..9, None..Some(3)),
]
);
}

View File

@ -5,12 +5,11 @@
use super::{range::RangedStates, PendingTransition, ResourceState, Unit};
use crate::{device::MAX_MIP_LEVELS, id::TextureId};
use wgt::TextureUsage;
use arrayvec::ArrayVec;
use wgt::TextureUsage;
use std::{iter, ops::Range};
//TODO: store `hal::image::State` here to avoid extra conversions
type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUsage>>;
@ -40,13 +39,10 @@ impl TextureState {
debug_assert_eq!(range.levels.start, 0);
TextureState {
mips: iter::repeat_with(|| {
PlaneStates::from_range(
0 .. range.layers.end,
Unit::new(TextureUsage::UNINITIALIZED),
)
})
.take(range.levels.end as usize)
.collect(),
PlaneStates::from_range(0..range.layers.end, Unit::new(TextureUsage::UNINITIALIZED))
})
.take(range.levels.end as usize)
.collect(),
full: true,
}
}
@ -69,7 +65,7 @@ impl ResourceState for TextureState {
}
let mip_start = num_levels.min(selector.levels.start as usize);
let mip_end = num_levels.min(selector.levels.end as usize);
for mip in self.mips[mip_start .. mip_end].iter() {
for mip in self.mips[mip_start..mip_end].iter() {
match mip.query(&selector.layers, |unit| unit.last) {
None => {}
Some(Ok(usage)) if result == Some(usage) => {}
@ -96,8 +92,7 @@ impl ResourceState for TextureState {
self.mips.push(PlaneStates::empty());
}
}
for (mip_id, mip) in self.mips
[selector.levels.start as usize .. selector.levels.end as usize]
for (mip_id, mip) in self.mips[selector.levels.start as usize..selector.levels.end as usize]
.iter_mut()
.enumerate()
{
@ -115,10 +110,10 @@ impl ResourceState for TextureState {
id,
selector: hal::image::SubresourceRange {
aspects: hal::format::Aspects::empty(),
levels: level .. level + 1,
levels: level..level + 1,
layers: range.clone(),
},
usage: unit.last .. usage,
usage: unit.last..usage,
};
unit.last = match output {
@ -175,9 +170,7 @@ impl ResourceState for TextureState {
end: Some(end),
} => {
let to_usage = end.port();
if start.last == to_usage
&& TextureUsage::ORDERED.contains(to_usage)
{
if start.last == to_usage && TextureUsage::ORDERED.contains(to_usage) {
Unit {
first: start.first,
last: end.last,
@ -191,19 +184,17 @@ impl ResourceState for TextureState {
id,
selector: hal::image::SubresourceRange {
aspects: hal::format::Aspects::empty(),
levels: level .. level + 1,
levels: level..level + 1,
layers: layers.clone(),
},
usage: start.last .. to_usage,
usage: start.last..to_usage,
};
match output {
None => {
Unit {
first: start.first,
last: pending.collapse()?,
}
}
None => Unit {
first: start.first,
last: pending.collapse()?,
},
Some(ref mut out) => {
out.push(pending);
Unit {
@ -229,7 +220,6 @@ impl ResourceState for TextureState {
}
}
#[cfg(test)]
mod test {
//TODO: change() and merge() tests
@ -242,16 +232,16 @@ mod test {
let mut ts = TextureState::default();
ts.mips.push(PlaneStates::empty());
ts.mips.push(PlaneStates::from_slice(&[
(1 .. 3, Unit::new(TextureUsage::SAMPLED)),
(3 .. 5, Unit::new(TextureUsage::SAMPLED)),
(5 .. 6, Unit::new(TextureUsage::STORAGE)),
(1..3, Unit::new(TextureUsage::SAMPLED)),
(3..5, Unit::new(TextureUsage::SAMPLED)),
(5..6, Unit::new(TextureUsage::STORAGE)),
]));
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
levels: 1 .. 2,
layers: 2 .. 5,
levels: 1..2,
layers: 2..5,
}),
// level 1 matches
Some(TextureUsage::SAMPLED),
@ -259,8 +249,8 @@ mod test {
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
levels: 0 .. 2,
layers: 2 .. 5,
levels: 0..2,
layers: 2..5,
}),
// level 0 is empty, level 1 matches
Some(TextureUsage::SAMPLED),
@ -268,8 +258,8 @@ mod test {
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
levels: 1 .. 2,
layers: 1 .. 5,
levels: 1..2,
layers: 1..5,
}),
// level 1 matches with gaps
Some(TextureUsage::SAMPLED),
@ -277,8 +267,8 @@ mod test {
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
levels: 1 .. 2,
layers: 4 .. 6,
levels: 1..2,
layers: 4..6,
}),
// level 1 doesn't match
None,

View File

@ -4,14 +4,10 @@
use crate::GLOBAL;
pub use core::command::{
compute_ffi::*,
render_ffi::*,
};
pub use core::command::{compute_ffi::*, render_ffi::*};
use core::{gfx_select, id};
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_finish(
encoder_id: id::CommandEncoderId,
@ -80,7 +76,6 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
copy_size))
}
/// # Safety
///
/// This function is unsafe because improper use may lead to memory

View File

@ -4,8 +4,8 @@
use crate::GLOBAL;
use wgt::{BackendBit, DeviceDescriptor, Limits};
use core::{gfx_select, hub::Token, id};
use wgt::{BackendBit, DeviceDescriptor, Limits};
use std::{marker::PhantomData, slice};
@ -30,13 +30,12 @@ pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> id
},
#[cfg(target_os = "macos")]
Rwh::MacOS(h) => {
let ns_view =
if h.ns_view.is_null() {
let ns_window = h.ns_window as *mut Object;
unsafe { msg_send![ns_window, contentView] }
} else {
h.ns_view
};
let ns_view = if h.ns_view.is_null() {
let ns_window = h.ns_window as *mut Object;
unsafe { msg_send![ns_window, contentView] }
} else {
h.ns_view
};
core::instance::Surface {
#[cfg(feature = "vulkan-portability")]
vulkan: instance
@ -47,7 +46,7 @@ pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> id
.metal
.create_surface_from_nsview(ns_view, cfg!(debug_assertions)),
}
},
}
#[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))]
Rwh::Xlib(h) => core::instance::Surface {
vulkan: instance
@ -105,7 +104,8 @@ pub extern "C" fn wgpu_create_surface_from_wayland(
) -> id::SurfaceId {
use raw_window_handle::unix::WaylandHandle;
wgpu_create_surface(raw_window_handle::RawWindowHandle::Wayland(WaylandHandle {
surface, display,
surface,
display,
..WaylandHandle::empty()
}))
}
@ -184,10 +184,7 @@ pub extern "C" fn wgpu_adapter_destroy(adapter_id: id::AdapterId) {
}
#[no_mangle]
pub extern "C" fn wgpu_device_get_limits(
_device_id: id::DeviceId,
limits: &mut Limits,
) {
pub extern "C" fn wgpu_device_get_limits(_device_id: id::DeviceId, limits: &mut Limits) {
*limits = Limits::default(); // TODO
}
@ -209,7 +206,8 @@ pub unsafe extern "C" fn wgpu_device_create_buffer_mapped(
desc: &wgt::BufferDescriptor,
mapped_ptr_out: *mut *mut u8,
) -> id::BufferId {
let (id, ptr) = gfx_select!(device_id => GLOBAL.device_create_buffer_mapped(device_id, desc, PhantomData));
let (id, ptr) =
gfx_select!(device_id => GLOBAL.device_create_buffer_mapped(device_id, desc, PhantomData));
*mapped_ptr_out = ptr;
id
}
@ -329,8 +327,7 @@ pub unsafe extern "C" fn wgpu_queue_submit(
command_buffers: *const id::CommandBufferId,
command_buffers_length: usize,
) {
let command_buffer_ids =
slice::from_raw_parts(command_buffers, command_buffers_length);
let command_buffer_ids = slice::from_raw_parts(command_buffers, command_buffers_length);
gfx_select!(queue_id => GLOBAL.queue_submit(queue_id, command_buffer_ids))
}
@ -377,10 +374,7 @@ pub extern "C" fn wgpu_buffer_map_read_async(
callback: core::device::BufferMapReadCallback,
userdata: *mut u8,
) {
let operation = core::resource::BufferMapOperation::Read {
callback,
userdata,
};
let operation = core::resource::BufferMapOperation::Read { callback, userdata };
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, wgt::BufferUsage::MAP_READ, start .. start + size, operation))
}
@ -393,10 +387,7 @@ pub extern "C" fn wgpu_buffer_map_write_async(
callback: core::device::BufferMapWriteCallback,
userdata: *mut u8,
) {
let operation = core::resource::BufferMapOperation::Write {
callback,
userdata,
};
let operation = core::resource::BufferMapOperation::Write { callback, userdata };
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, wgt::BufferUsage::MAP_WRITE, start .. start + size, operation))
}
@ -411,9 +402,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(
swap_chain_id: id::SwapChainId,
) -> core::swap_chain::SwapChainOutput {
gfx_select!(swap_chain_id => GLOBAL.swap_chain_get_next_texture(swap_chain_id, PhantomData))
.unwrap_or(core::swap_chain::SwapChainOutput {
view_id: None,
})
.unwrap_or(core::swap_chain::SwapChainOutput { view_id: None })
}
#[no_mangle]

View File

@ -4,7 +4,6 @@
use core::id;
pub type FactoryParam = *mut std::ffi::c_void;
#[derive(Debug)]
@ -14,7 +13,9 @@ pub struct IdentityRecycler<I> {
kind: &'static str,
}
impl<I: id::TypedId + Clone + std::fmt::Debug> core::hub::IdentityHandler<I> for IdentityRecycler<I> {
impl<I: id::TypedId + Clone + std::fmt::Debug> core::hub::IdentityHandler<I>
for IdentityRecycler<I>
{
type Input = I;
fn process(&self, id: I, _backend: wgt::Backend) -> I {
log::debug!("process {} {:?}", self.kind, id);

View File

@ -2,17 +2,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use core::{
hub::IdentityManager,
id,
};
use core::{hub::IdentityManager, id};
use wgt::Backend;
pub use core::command::{
compute_ffi::*,
render_ffi::*,
};
pub use core::command::{compute_ffi::*, render_ffi::*};
use parking_lot::Mutex;
@ -21,7 +15,6 @@ use std::{ptr, slice};
pub mod identity;
pub mod server;
#[derive(Debug, Default)]
struct IdentityHub {
adapters: IdentityManager,
@ -136,7 +129,10 @@ pub extern "C" fn wgpu_client_kill_adapter_id(client: &Client, id: id::AdapterId
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_device_id(client: &Client, adapter_id: id::AdapterId) -> id::DeviceId {
pub extern "C" fn wgpu_client_make_device_id(
client: &Client,
adapter_id: id::AdapterId,
) -> id::DeviceId {
let backend = adapter_id.backend();
client
.identities
@ -157,7 +153,10 @@ pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: id::DeviceId)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_buffer_id(client: &Client, device_id: id::DeviceId) -> id::BufferId {
pub extern "C" fn wgpu_client_make_buffer_id(
client: &Client,
device_id: id::DeviceId,
) -> id::BufferId {
let backend = device_id.backend();
client
.identities
@ -178,7 +177,10 @@ pub extern "C" fn wgpu_client_kill_buffer_id(client: &Client, id: id::BufferId)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_texture_id(client: &Client, device_id: id::DeviceId) -> id::TextureId {
pub extern "C" fn wgpu_client_make_texture_id(
client: &Client,
device_id: id::DeviceId,
) -> id::TextureId {
let backend = device_id.backend();
client
.identities
@ -189,10 +191,7 @@ pub extern "C" fn wgpu_client_make_texture_id(client: &Client, device_id: id::De
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_id(
client: &Client,
id: id::TextureId,
) {
pub extern "C" fn wgpu_client_kill_texture_id(client: &Client, id: id::TextureId) {
client
.identities
.lock()
@ -201,9 +200,11 @@ pub extern "C" fn wgpu_client_kill_texture_id(
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_texture_view_id(client: &Client, device_id: id::DeviceId) -> id::TextureViewId {
pub extern "C" fn wgpu_client_make_texture_view_id(
client: &Client,
device_id: id::DeviceId,
) -> id::TextureViewId {
let backend = device_id.backend();
client
.identities
@ -214,10 +215,7 @@ pub extern "C" fn wgpu_client_make_texture_view_id(client: &Client, device_id: i
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_view_id(
client: &Client,
id: id::TextureViewId,
) {
pub extern "C" fn wgpu_client_kill_texture_view_id(client: &Client, id: id::TextureViewId) {
client
.identities
.lock()
@ -227,7 +225,10 @@ pub extern "C" fn wgpu_client_kill_texture_view_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_sampler_id(client: &Client, device_id: id::DeviceId) -> id::SamplerId {
pub extern "C" fn wgpu_client_make_sampler_id(
client: &Client,
device_id: id::DeviceId,
) -> id::SamplerId {
let backend = device_id.backend();
client
.identities
@ -238,10 +239,7 @@ pub extern "C" fn wgpu_client_make_sampler_id(client: &Client, device_id: id::De
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_sampler_id(
client: &Client,
id: id::SamplerId,
) {
pub extern "C" fn wgpu_client_kill_sampler_id(client: &Client, id: id::SamplerId) {
client
.identities
.lock()
@ -265,10 +263,7 @@ pub extern "C" fn wgpu_client_make_encoder_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_encoder_id(
client: &Client,
id: id::CommandEncoderId,
) {
pub extern "C" fn wgpu_client_kill_encoder_id(client: &Client, id: id::CommandEncoderId) {
client
.identities
.lock()
@ -345,10 +340,7 @@ pub extern "C" fn wgpu_client_make_pipeline_layout_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_pipeline_layout_id(
client: &Client,
id: id::PipelineLayoutId,
) {
pub extern "C" fn wgpu_client_kill_pipeline_layout_id(client: &Client, id: id::PipelineLayoutId) {
client
.identities
.lock()
@ -372,10 +364,7 @@ pub extern "C" fn wgpu_client_make_bind_group_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_bind_group_id(
client: &Client,
id: id::BindGroupId,
) {
pub extern "C" fn wgpu_client_kill_bind_group_id(client: &Client, id: id::BindGroupId) {
client
.identities
.lock()
@ -399,10 +388,7 @@ pub extern "C" fn wgpu_client_make_shader_module_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_shader_module_id(
client: &Client,
id: id::ShaderModuleId,
) {
pub extern "C" fn wgpu_client_kill_shader_module_id(client: &Client, id: id::ShaderModuleId) {
client
.identities
.lock()
@ -426,10 +412,7 @@ pub extern "C" fn wgpu_client_make_compute_pipeline_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_compute_pipeline_id(
client: &Client,
id: id::ComputePipelineId,
) {
pub extern "C" fn wgpu_client_kill_compute_pipeline_id(client: &Client, id: id::ComputePipelineId) {
client
.identities
.lock()
@ -453,10 +436,7 @@ pub extern "C" fn wgpu_client_make_render_pipeline_id(
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_render_pipeline_id(
client: &Client,
id: id::RenderPipelineId,
) {
pub extern "C" fn wgpu_client_kill_render_pipeline_id(client: &Client, id: id::RenderPipelineId) {
client
.identities
.lock()
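
The client-side functions in this file all follow the same make/kill shape: lock the shared identity table, pick the backend encoded in the parent id, then allocate or free an index. Below is a minimal, self-contained sketch of that pattern; the Client, IdentityTable, and Id types here are simplified stand-ins for illustration, not the actual wgpu-remote types, and backend selection is omitted.

use std::sync::Mutex;

/// Simplified stand-in for the ids handed across the FFI boundary.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Id(u32);

/// Hypothetical recycling allocator: freed indices are reused before new ones are minted.
#[derive(Default)]
struct IdentityTable {
    next: u32,
    recycled: Vec<u32>,
}

impl IdentityTable {
    fn alloc(&mut self) -> Id {
        match self.recycled.pop() {
            Some(index) => Id(index),
            None => {
                let index = self.next;
                self.next += 1;
                Id(index)
            }
        }
    }

    fn free(&mut self, id: Id) {
        self.recycled.push(id.0);
    }
}

/// Stand-in for the client object shared with C callers.
struct Client {
    identities: Mutex<IdentityTable>,
}

fn make_id(client: &Client) -> Id {
    // Mirrors the `client.identities.lock()...` chain in the make_* functions above.
    client.identities.lock().unwrap().alloc()
}

fn kill_id(client: &Client, id: Id) {
    // Mirrors the `client.identities.lock()...free(id)` chain in the kill_* functions above.
    client.identities.lock().unwrap().free(id)
}

fn main() {
    let client = Client {
        identities: Mutex::new(IdentityTable::default()),
    };
    let id = make_id(&client);
    kill_id(&client, id);
    // The freed index is recycled on the next allocation.
    assert_eq!(make_id(&client), id);
}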

View File

@ -8,7 +8,6 @@ use core::{gfx_select, id};
use std::slice;
pub type Global = core::hub::Global<IdentityRecyclerFactory>;
#[no_mangle]
@ -71,10 +70,7 @@ pub extern "C" fn wgpu_server_adapter_request_device(
}
#[no_mangle]
pub extern "C" fn wgpu_server_adapter_destroy(
global: &Global,
adapter_id: id::AdapterId,
) {
pub extern "C" fn wgpu_server_adapter_destroy(global: &Global, adapter_id: id::AdapterId) {
gfx_select!(adapter_id => global.adapter_destroy(adapter_id))
}
@ -123,11 +119,8 @@ pub extern "C" fn wgpu_server_buffer_map_read(
callback: core::device::BufferMapReadCallback,
userdata: *mut u8,
) {
let operation = core::resource::BufferMapOperation::Read {
callback,
userdata,
};
let operation = core::resource::BufferMapOperation::Read { callback, userdata };
gfx_select!(buffer_id => global.buffer_map_async(
buffer_id,
wgt::BufferUsage::MAP_READ,
@ -161,10 +154,7 @@ pub extern "C" fn wgpu_server_encoder_finish(
}
#[no_mangle]
pub extern "C" fn wgpu_server_encoder_destroy(
global: &Global,
self_id: id::CommandEncoderId,
) {
pub extern "C" fn wgpu_server_encoder_destroy(global: &Global, self_id: id::CommandEncoderId) {
gfx_select!(self_id => global.command_encoder_destroy(self_id));
}
@ -320,10 +310,7 @@ pub extern "C" fn wgpu_server_device_create_bind_group(
}
#[no_mangle]
pub extern "C" fn wgpu_server_bind_group_destroy(
global: &Global,
self_id: id::BindGroupId,
) {
pub extern "C" fn wgpu_server_bind_group_destroy(global: &Global, self_id: id::BindGroupId) {
gfx_select!(self_id => global.bind_group_destroy(self_id));
}
@ -338,10 +325,7 @@ pub extern "C" fn wgpu_server_device_create_shader_module(
}
#[no_mangle]
pub extern "C" fn wgpu_server_shader_module_destroy(
global: &Global,
self_id: id::ShaderModuleId,
) {
pub extern "C" fn wgpu_server_shader_module_destroy(global: &Global, self_id: id::ShaderModuleId) {
gfx_select!(self_id => global.shader_module_destroy(self_id));
}
@ -402,18 +386,12 @@ pub extern "C" fn wgpu_server_texture_create_view(
}
#[no_mangle]
pub extern "C" fn wgpu_server_texture_destroy(
global: &Global,
self_id: id::TextureId,
) {
pub extern "C" fn wgpu_server_texture_destroy(global: &Global, self_id: id::TextureId) {
gfx_select!(self_id => global.texture_destroy(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_texture_view_destroy(
global: &Global,
self_id: id::TextureViewId,
) {
pub extern "C" fn wgpu_server_texture_view_destroy(global: &Global, self_id: id::TextureViewId) {
gfx_select!(self_id => global.texture_view_destroy(self_id));
}
@ -428,9 +406,6 @@ pub extern "C" fn wgpu_server_device_create_sampler(
}
#[no_mangle]
pub extern "C" fn wgpu_server_sampler_destroy(
global: &Global,
self_id: id::SamplerId,
) {
pub extern "C" fn wgpu_server_sampler_destroy(global: &Global, self_id: id::SamplerId) {
gfx_select!(self_id => global.sampler_destroy(self_id));
}

View File

@ -2,11 +2,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{io, slice, ptr};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
#[cfg(feature = "peek-poke")]
use peek_poke::{PeekPoke};
use peek_poke::PeekPoke;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{io, ptr, slice};
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
@ -557,9 +557,7 @@ pub struct CommandEncoderDescriptor {
impl Default for CommandEncoderDescriptor {
fn default() -> CommandEncoderDescriptor {
CommandEncoderDescriptor {
label: ptr::null(),
}
CommandEncoderDescriptor { label: ptr::null() }
}
}
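
The Default impl above now fits its single-field literal on one line. Here is a hedged sketch of how a caller might rely on it; showing label as the only field and typing it as a raw C-string pointer is an assumption for illustration, not the full descriptor definition.

use std::os::raw::c_char;
use std::ptr;

/// Illustrative single-field descriptor; the real struct may carry more fields.
struct CommandEncoderDescriptor {
    label: *const c_char,
}

impl Default for CommandEncoderDescriptor {
    fn default() -> CommandEncoderDescriptor {
        // rustfmt keeps short literals like this on one line.
        CommandEncoderDescriptor { label: ptr::null() }
    }
}

fn main() {
    // Callers that do not care about debug labels can simply take the default.
    let desc = CommandEncoderDescriptor::default();
    assert!(desc.label.is_null());
}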
@ -731,11 +729,7 @@ pub struct Origin3d {
}
impl Origin3d {
pub const ZERO: Self = Origin3d {
x: 0,
y: 0,
z: 0,
};
pub const ZERO: Self = Origin3d { x: 0, y: 0, z: 0 };
}
impl Default for Origin3d {
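
Similarly, the Origin3d hunk above collapses the ZERO constant to a single line. The sketch below shows the associated-const pattern and a typical use; the u32 field type is assumed for illustration.

/// Illustrative copy of the Origin3d shape from the hunk above; field types are assumed.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Origin3d {
    x: u32,
    y: u32,
    z: u32,
}

impl Origin3d {
    /// Associated constant: small literals like this now stay on one line.
    pub const ZERO: Self = Origin3d { x: 0, y: 0, z: 0 };
}

fn main() {
    // Typical use: start from the zero origin instead of spelling out the fields.
    let origin = Origin3d::ZERO;
    assert_eq!(origin, Origin3d { x: 0, y: 0, z: 0 });
}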