Surface API

This commit is contained in:
Dzmitry Malyshau 2021-06-06 00:34:02 -04:00
parent bae3c694bd
commit 738ae2b227
10 changed files with 288 additions and 912 deletions

1
Cargo.lock generated
View File

@ -1892,6 +1892,7 @@ dependencies = [
"bitflags",
"naga",
"smallvec",
"thiserror",
"wgpu-types",
]

View File

@ -1,287 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{CommandBuffer, CommandEncoderStatus};
use crate::{
device::DeviceError, hub::HalApi, id::DeviceId, track::TrackerSet, FastHashMap,
PrivateFeatures, Stored, SubmissionIndex,
};
#[cfg(debug_assertions)]
use crate::LabelHelpers;
use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
use parking_lot::Mutex;
use thiserror::Error;
use std::thread;
const GROW_AMOUNT: usize = 20;
#[derive(Debug)]
struct CommandPool<A: hal::Api> {
raw: B::CommandPool,
total: usize,
available: Vec<B::CommandBuffer>,
pending: Vec<(B::CommandBuffer, SubmissionIndex)>,
}
impl<A: hal::Api> CommandPool<A> {
fn maintain(&mut self, last_done_index: SubmissionIndex) {
for i in (0..self.pending.len()).rev() {
if self.pending[i].1 <= last_done_index {
let (cmd_buf, index) = self.pending.swap_remove(i);
log::trace!(
"recycling cmdbuf submitted in {} when {} is last done",
index,
last_done_index,
);
self.recycle(cmd_buf);
}
}
}
fn recycle(&mut self, mut raw: B::CommandBuffer) {
unsafe {
raw.reset(false);
}
self.available.push(raw);
}
fn allocate(&mut self) -> B::CommandBuffer {
if self.available.is_empty() {
self.total += GROW_AMOUNT;
unsafe {
self.raw.allocate(
GROW_AMOUNT,
hal::command::Level::Primary,
&mut self.available,
)
};
}
self.available.pop().unwrap()
}
fn destroy(mut self, device: &B::Device) {
unsafe {
self.raw.free(self.available.into_iter());
device.destroy_command_pool(self.raw);
}
}
}
#[derive(Debug)]
struct Inner<A: hal::Api> {
pools: FastHashMap<thread::ThreadId, CommandPool<A>>,
}
#[derive(Debug)]
pub struct CommandAllocator<A: hal::Api> {
queue_family: hal::queue::QueueFamilyId,
internal_thread_id: thread::ThreadId,
inner: Mutex<Inner<A>>,
}
impl<A: HalApi> CommandAllocator<A> {
#[allow(clippy::too_many_arguments)]
pub(crate) fn allocate(
&self,
device_id: Stored<DeviceId>,
device: &B::Device,
limits: wgt::Limits,
downlevel: wgt::DownlevelCapabilities,
features: wgt::Features,
private_features: PrivateFeatures,
label: &crate::Label,
#[cfg(feature = "trace")] enable_tracing: bool,
) -> Result<CommandBuffer<A>, CommandAllocatorError> {
//debug_assert_eq!(device_id.backend(), A::VARIANT);
let thread_id = thread::current().id();
let mut inner = self.inner.lock();
use std::collections::hash_map::Entry;
let pool = match inner.pools.entry(thread_id) {
Entry::Vacant(e) => {
log::info!("Starting on thread {:?}", thread_id);
let raw = unsafe {
device
.create_command_pool(
self.queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(DeviceError::OutOfMemory))?
};
e.insert(CommandPool {
raw,
total: 0,
available: Vec::new(),
pending: Vec::new(),
})
}
Entry::Occupied(e) => e.into_mut(),
};
//Note: we have to allocate the first buffer right here, or otherwise
// the pool may be cleaned up by maintenance called from another thread.
Ok(CommandBuffer {
raw: vec![pool.allocate()],
status: CommandEncoderStatus::Recording,
recorded_thread_id: thread_id,
device_id,
trackers: TrackerSet::new(A::VARIANT),
used_swap_chains: Default::default(),
buffer_memory_init_actions: Default::default(),
limits,
downlevel,
private_features,
support_fill_buffer_texture: features.contains(wgt::Features::CLEAR_COMMANDS),
has_labels: label.is_some(),
#[cfg(feature = "trace")]
commands: if enable_tracing {
Some(Vec::new())
} else {
None
},
#[cfg(debug_assertions)]
label: label.to_string_or_default(),
})
}
}
impl<A: hal::Api> CommandAllocator<A> {
pub fn new(
queue_family: hal::queue::QueueFamilyId,
device: &B::Device,
) -> Result<Self, CommandAllocatorError> {
let internal_thread_id = thread::current().id();
log::info!("Starting on (internal) thread {:?}", internal_thread_id);
let mut pools = FastHashMap::default();
pools.insert(
internal_thread_id,
CommandPool {
raw: unsafe {
device
.create_command_pool(
queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(DeviceError::OutOfMemory))?
},
total: 0,
available: Vec::new(),
pending: Vec::new(),
},
);
Ok(Self {
queue_family,
internal_thread_id,
inner: Mutex::new(Inner { pools }),
})
}
fn allocate_for_thread_id(&self, thread_id: thread::ThreadId) -> B::CommandBuffer {
let mut inner = self.inner.lock();
inner.pools.get_mut(&thread_id).unwrap().allocate()
}
pub fn allocate_internal(&self) -> B::CommandBuffer {
self.allocate_for_thread_id(self.internal_thread_id)
}
pub fn extend(&self, cmd_buf: &CommandBuffer<A>) -> B::CommandBuffer {
self.allocate_for_thread_id(cmd_buf.recorded_thread_id)
}
pub fn discard_internal(&self, raw: B::CommandBuffer) {
let mut inner = self.inner.lock();
inner
.pools
.get_mut(&self.internal_thread_id)
.unwrap()
.recycle(raw);
}
pub fn discard(&self, mut cmd_buf: CommandBuffer<A>) {
cmd_buf.trackers.clear();
let mut inner = self.inner.lock();
let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();
for raw in cmd_buf.raw {
pool.recycle(raw);
}
}
pub fn after_submit_internal(&self, raw: B::CommandBuffer, submit_index: SubmissionIndex) {
let mut inner = self.inner.lock();
inner
.pools
.get_mut(&self.internal_thread_id)
.unwrap()
.pending
.push((raw, submit_index));
}
pub fn after_submit(
&self,
cmd_buf: CommandBuffer<A>,
device: &B::Device,
submit_index: SubmissionIndex,
) {
// Record this command buffer as pending
let mut inner = self.inner.lock();
let clear_label = cmd_buf.has_labels;
inner
.pools
.get_mut(&cmd_buf.recorded_thread_id)
.unwrap()
.pending
.extend(cmd_buf.raw.into_iter().map(|mut raw| {
if clear_label {
unsafe { device.set_command_buffer_name(&mut raw, "") };
}
(raw, submit_index)
}));
}
pub fn maintain(&self, device: &B::Device, last_done_index: SubmissionIndex) {
profiling::scope!("maintain", "CommandAllocator");
let mut inner = self.inner.lock();
let mut remove_threads = Vec::new();
for (&thread_id, pool) in inner.pools.iter_mut() {
pool.maintain(last_done_index);
if pool.total == pool.available.len() && thread_id != self.internal_thread_id {
assert!(pool.pending.is_empty());
remove_threads.push(thread_id);
}
}
for thread_id in remove_threads {
log::info!("Removing from thread {:?}", thread_id);
let pool = inner.pools.remove(&thread_id).unwrap();
pool.destroy(device);
}
}
pub fn destroy(self, device: &B::Device) {
let mut inner = self.inner.lock();
for (_, mut pool) in inner.pools.drain() {
while let Some((raw, _)) = pool.pending.pop() {
pool.recycle(raw);
}
if pool.total != pool.available.len() {
log::error!(
"Some command buffers are still recorded, only tracking {} / {}",
pool.available.len(),
pool.total
);
}
pool.destroy(device);
}
}
}
#[derive(Clone, Debug, Error)]
pub enum CommandAllocatorError {
#[error(transparent)]
Device(#[from] DeviceError),
}

View File

@ -2,7 +2,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
mod allocator;
mod bind;
mod bundle;
mod clear;
@ -12,8 +11,6 @@ mod query;
mod render;
mod transfer;
pub(crate) use self::allocator::CommandAllocator;
pub use self::allocator::CommandAllocatorError;
pub use self::bundle::*;
pub use self::compute::*;
pub use self::draw::*;
@ -22,20 +19,19 @@ pub use self::render::*;
pub use self::transfer::*;
use crate::{
device::{all_buffer_stages, all_image_stages},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id,
memory_init_tracker::MemoryInitTrackerAction,
resource::{Buffer, Texture},
track::{BufferState, ResourceTracker, TextureState, TrackerSet},
Label, PrivateFeatures, Stored,
Label, Stored,
};
use hal::command::CommandBuffer as _;
use hal::CommandBuffer as _;
use smallvec::SmallVec;
use thiserror::Error;
use std::thread::ThreadId;
use std::thread;
const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64];
@ -48,18 +44,16 @@ enum CommandEncoderStatus {
#[derive(Debug)]
pub struct CommandBuffer<A: hal::Api> {
pub(crate) raw: Vec<B::CommandBuffer>,
pub(crate) raw: Vec<A::CommandBuffer>,
status: CommandEncoderStatus,
recorded_thread_id: ThreadId,
recorded_thread_id: thread::ThreadId,
pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) trackers: TrackerSet,
pub(crate) used_swap_chains: SmallVec<[Stored<id::SwapChainId>; 1]>,
pub(crate) buffer_memory_init_actions: Vec<MemoryInitTrackerAction<id::BufferId>>,
limits: wgt::Limits,
downlevel: wgt::DownlevelCapabilities,
private_features: PrivateFeatures,
support_fill_buffer_texture: bool,
has_labels: bool,
#[cfg(feature = "trace")]
pub(crate) commands: Option<Vec<crate::device::trace::Command>>,
#[cfg(debug_assertions)]
@ -67,6 +61,37 @@ pub struct CommandBuffer<A: hal::Api> {
}
impl<A: HalApi> CommandBuffer<A> {
/// Creates a `CommandBuffer` wrapping a freshly allocated raw HAL
/// command buffer.
///
/// The buffer starts in the `Recording` state and remembers the id of
/// the thread it was created on (`recorded_thread_id`), so later
/// recording/submission paths can validate thread affinity.
///
/// * `raw` — the backend command buffer; stored as the first (and only)
///   element of `self.raw`.
/// * `device_id` — owning device, kept alive via its `Stored` ref count.
/// * `features` — used only to derive `support_fill_buffer_texture`
///   from `Features::CLEAR_COMMANDS`.
pub(crate) fn new(
    raw: A::CommandBuffer,
    device_id: Stored<id::DeviceId>,
    limits: wgt::Limits,
    downlevel: wgt::DownlevelCapabilities,
    features: wgt::Features,
    #[cfg(feature = "trace")] enable_tracing: bool,
    #[cfg(debug_assertions)] label: &Label,
) -> Self {
    CommandBuffer {
        raw: vec![raw],
        status: CommandEncoderStatus::Recording,
        // Fix: `thread::current` is a function and must be called —
        // `thread::current.id()` does not compile.
        recorded_thread_id: thread::current().id(),
        device_id,
        trackers: TrackerSet::new(A::VARIANT),
        used_swap_chains: Default::default(),
        buffer_memory_init_actions: Default::default(),
        limits,
        downlevel,
        support_fill_buffer_texture: features.contains(wgt::Features::CLEAR_COMMANDS),
        // NOTE(review): the struct as shown still declares a `has_labels`
        // field that is not initialized here — presumably it was removed
        // in this same change; confirm against the struct definition.
        #[cfg(feature = "trace")]
        commands: if enable_tracing {
            Some(Vec::new())
        } else {
            None
        },
        #[cfg(debug_assertions)]
        label: label.to_string_or_default(),
    }
}
fn get_encoder_mut(
storage: &mut Storage<Self, id::CommandEncoderId>,
id: id::CommandEncoderId,

View File

@ -1,293 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::DeviceError;
use hal::device::Device as _;
use std::{borrow::Cow, iter, ptr::NonNull};
#[derive(Debug)]
pub struct MemoryAllocator<A: hal::Api>(gpu_alloc::GpuAllocator<B::Memory>);
#[derive(Debug)]
pub struct MemoryBlock<A: hal::Api>(gpu_alloc::MemoryBlock<B::Memory>);
struct MemoryDevice<'a, A: hal::Api>(&'a B::Device);
impl<A: hal::Api> MemoryAllocator<A> {
pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
let mem_config = gpu_alloc::Config {
dedicated_threshold: 32 << 20,
preferred_dedicated_threshold: 8 << 20,
transient_dedicated_threshold: 128 << 20,
linear_chunk: 128 << 20,
minimal_buddy_size: 1 << 10,
initial_buddy_dedicated_size: 8 << 20,
};
let properties = gpu_alloc::DeviceProperties {
memory_types: Cow::Owned(
mem_props
.memory_types
.iter()
.map(|mt| gpu_alloc::MemoryType {
heap: mt.heap_index as u32,
props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
mt.properties.bits() as u8,
),
})
.collect::<Vec<_>>(),
),
memory_heaps: Cow::Owned(
mem_props
.memory_heaps
.iter()
.map(|mh| gpu_alloc::MemoryHeap { size: mh.size })
.collect::<Vec<_>>(),
),
max_memory_allocation_count: if limits.max_memory_allocation_count == 0 {
log::warn!("max_memory_allocation_count is not set by gfx-rs backend");
!0
} else {
limits.max_memory_allocation_count.min(!0u32 as usize) as u32
},
max_memory_allocation_size: !0,
non_coherent_atom_size: limits.non_coherent_atom_size as u64,
buffer_device_address: false,
};
MemoryAllocator(gpu_alloc::GpuAllocator::new(mem_config, properties))
}
pub fn allocate(
&mut self,
device: &B::Device,
requirements: hal::memory::Requirements,
usage: gpu_alloc::UsageFlags,
) -> Result<MemoryBlock<A>, DeviceError> {
assert!(requirements.alignment.is_power_of_two());
let request = gpu_alloc::Request {
size: requirements.size,
align_mask: requirements.alignment - 1,
memory_types: requirements.type_mask,
usage,
};
unsafe { self.0.alloc(&MemoryDevice::<A>(device), request) }
.map(MemoryBlock)
.map_err(|err| match err {
gpu_alloc::AllocationError::OutOfHostMemory
| gpu_alloc::AllocationError::OutOfDeviceMemory => DeviceError::OutOfMemory,
_ => panic!("Unable to allocate memory: {:?}", err),
})
}
pub fn free(&mut self, device: &B::Device, block: MemoryBlock<A>) {
unsafe { self.0.dealloc(&MemoryDevice::<A>(device), block.0) }
}
pub fn clear(&mut self, device: &B::Device) {
unsafe { self.0.cleanup(&MemoryDevice::<A>(device)) }
}
}
impl<A: hal::Api> MemoryBlock<A> {
pub fn bind_buffer(
&self,
device: &B::Device,
buffer: &mut B::Buffer,
) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe {
device
.bind_buffer_memory(mem, self.0.offset(), buffer)
.map_err(DeviceError::from_bind)
}
}
pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe {
device
.bind_image_memory(mem, self.0.offset(), image)
.map_err(DeviceError::from_bind)
}
}
pub fn is_coherent(&self) -> bool {
self.0
.props()
.contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT)
}
pub fn map(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: wgt::BufferAddress,
) -> Result<NonNull<u8>, DeviceError> {
let offset = inner_offset;
unsafe {
self.0
.map(&MemoryDevice::<A>(device), offset, size as usize)
.map_err(DeviceError::from)
}
}
pub fn unmap(&mut self, device: &B::Device) {
unsafe { self.0.unmap(&MemoryDevice::<A>(device)) };
}
pub fn write_bytes(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), DeviceError> {
profiling::scope!("write_bytes");
let offset = inner_offset;
unsafe {
self.0
.write_bytes(&MemoryDevice::<A>(device), offset, data)
.map_err(DeviceError::from)
}
}
pub fn read_bytes(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
data: &mut [u8],
) -> Result<(), DeviceError> {
profiling::scope!("read_bytes");
let offset = inner_offset;
unsafe {
self.0
.read_bytes(&MemoryDevice::<A>(device), offset, data)
.map_err(DeviceError::from)
}
}
fn segment(
&self,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> hal::memory::Segment {
hal::memory::Segment {
offset: self.0.offset() + inner_offset,
size: size.or_else(|| Some(self.0.size())),
}
}
pub fn flush_range(
&self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe {
device
.flush_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory))
}
}
pub fn invalidate_range(
&self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe {
device
.invalidate_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory))
}
}
}
impl<A: hal::Api> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B> {
unsafe fn allocate_memory(
&self,
size: u64,
memory_type: u32,
flags: gpu_alloc::AllocationFlags,
) -> Result<B::Memory, gpu_alloc::OutOfMemory> {
profiling::scope!("allocate_memory");
assert!(flags.is_empty());
self.0
.allocate_memory(hal::MemoryTypeId(memory_type as _), size)
.map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
}
unsafe fn deallocate_memory(&self, memory: B::Memory) {
profiling::scope!("deallocate_memory");
self.0.free_memory(memory);
}
unsafe fn map_memory(
&self,
memory: &mut B::Memory,
offset: u64,
size: u64,
) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
profiling::scope!("map_memory");
match self.0.map_memory(
memory,
hal::memory::Segment {
offset,
size: Some(size),
},
) {
Ok(ptr) => Ok(NonNull::new(ptr).expect("Pointer to memory mapping must not be null")),
Err(hal::device::MapError::OutOfMemory(_)) => {
Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
}
Err(hal::device::MapError::MappingFailed) => Err(gpu_alloc::DeviceMapError::MapFailed),
Err(other) => panic!("Unexpected map error: {:?}", other),
}
}
unsafe fn unmap_memory(&self, memory: &mut B::Memory) {
profiling::scope!("unmap_memory");
self.0.unmap_memory(memory);
}
unsafe fn invalidate_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("invalidate_memory_ranges");
self.0
.invalidate_mapped_memory_ranges(ranges.iter().map(|r| {
(
r.memory,
hal::memory::Segment {
offset: r.offset,
size: Some(r.size),
},
)
}))
.map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
}
unsafe fn flush_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("flush_memory_ranges");
self.0
.flush_mapped_memory_ranges(ranges.iter().map(|r| {
(
r.memory,
hal::memory::Segment {
offset: r.offset,
size: Some(r.size),
},
)
}))
.map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
}
}

View File

@ -1,175 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::DeviceError;
use arrayvec::ArrayVec;
pub use gpu_descriptor::DescriptorTotalCount;
pub type DescriptorSet<A> = gpu_descriptor::DescriptorSet<<B as hal::Backend>::DescriptorSet>;
#[derive(Debug)]
pub struct DescriptorAllocator<A: hal::Api>(
gpu_descriptor::DescriptorAllocator<B::DescriptorPool, B::DescriptorSet>,
);
struct DescriptorDevice<'a, A: hal::Api>(&'a B::Device);
impl<A: hal::Api> DescriptorAllocator<A> {
pub fn new() -> Self {
DescriptorAllocator(gpu_descriptor::DescriptorAllocator::new(0))
}
pub fn allocate(
&mut self,
device: &B::Device,
layout: &B::DescriptorSetLayout,
layout_descriptor_count: &DescriptorTotalCount,
count: u32,
) -> Result<Vec<DescriptorSet<A>>, DeviceError> {
unsafe {
self.0.allocate(
&DescriptorDevice::<A>(device),
layout,
gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
layout_descriptor_count,
count,
)
}
.map_err(|err| {
log::warn!("Descriptor set allocation failed: {}", err);
DeviceError::OutOfMemory
})
}
pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<A>>) {
unsafe { self.0.free(&DescriptorDevice::<A>(device), sets) }
}
pub fn cleanup(&mut self, device: &B::Device) {
unsafe { self.0.cleanup(&DescriptorDevice::<A>(device)) }
}
}
impl<A: hal::Api>
gpu_descriptor::DescriptorDevice<B::DescriptorSetLayout, B::DescriptorPool, B::DescriptorSet>
for DescriptorDevice<'_, B>
{
unsafe fn create_descriptor_pool(
&self,
descriptor_count: &DescriptorTotalCount,
max_sets: u32,
flags: gpu_descriptor::DescriptorPoolCreateFlags,
) -> Result<B::DescriptorPool, gpu_descriptor::CreatePoolError> {
profiling::scope!("create_descriptor_pool");
let mut ranges = ArrayVec::<[_; 7]>::new();
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Sampler,
count: descriptor_count.sampler as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Image {
ty: hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: descriptor_count.sampled_image as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Image {
ty: hal::pso::ImageDescriptorType::Storage { read_only: false },
},
count: descriptor_count.storage_image as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Uniform,
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: descriptor_count.uniform_buffer as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: descriptor_count.storage_buffer as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Uniform,
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: true,
},
},
count: descriptor_count.uniform_buffer_dynamic as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: true,
},
},
count: descriptor_count.storage_buffer_dynamic as _,
});
ranges.retain(|rd| rd.count != 0);
match hal::device::Device::create_descriptor_pool(
self.0,
max_sets as usize,
ranges.into_iter(),
hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits()),
) {
Ok(pool) => Ok(pool),
Err(hal::device::OutOfMemory::Host) => {
Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
}
Err(hal::device::OutOfMemory::Device) => {
Err(gpu_descriptor::CreatePoolError::OutOfDeviceMemory)
}
}
}
unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool) {
profiling::scope!("destroy_descriptor_pool");
hal::device::Device::destroy_descriptor_pool(self.0, pool);
}
unsafe fn alloc_descriptor_sets<'a>(
&self,
pool: &mut B::DescriptorPool,
layouts: impl ExactSizeIterator<Item = &'a B::DescriptorSetLayout>,
sets: &mut impl Extend<B::DescriptorSet>,
) -> Result<(), gpu_descriptor::DeviceAllocationError> {
use gpu_descriptor::DeviceAllocationError as Dae;
profiling::scope!("alloc_descriptor_sets");
match hal::pso::DescriptorPool::allocate(pool, layouts, sets) {
Ok(()) => Ok(()),
Err(hal::pso::AllocationError::OutOfMemory(oom)) => Err(match oom {
hal::device::OutOfMemory::Host => Dae::OutOfHostMemory,
hal::device::OutOfMemory::Device => Dae::OutOfDeviceMemory,
}),
Err(hal::pso::AllocationError::OutOfPoolMemory) => Err(Dae::OutOfPoolMemory),
Err(hal::pso::AllocationError::FragmentedPool) => Err(Dae::FragmentedPool),
Err(hal::pso::AllocationError::IncompatibleLayout) => {
panic!("Incompatible descriptor set layout")
}
}
}
unsafe fn dealloc_descriptor_sets<'a>(
&self,
pool: &mut B::DescriptorPool,
sets: impl Iterator<Item = B::DescriptorSet>,
) {
profiling::scope!("dealloc_descriptor_sets");
hal::pso::DescriptorPool::free(pool, sets)
}
}

View File

@ -19,21 +19,10 @@ use copyless::VecHelper as _;
use hal::{CommandBuffer as _, Device as _};
use parking_lot::{Mutex, MutexGuard};
use thiserror::Error;
use wgt::{BufferAddress, InputStepMode, TextureDimension, TextureFormat, TextureViewDimension};
use wgt::{BufferAddress, TextureFormat, TextureViewDimension};
use std::{
borrow::Cow,
collections::{hash_map::Entry, BTreeMap},
iter,
marker::PhantomData,
mem,
ops::Range,
ptr,
sync::atomic::Ordering,
};
use std::{borrow::Cow, iter, marker::PhantomData, mem, ops::Range, ptr, sync::atomic::Ordering};
pub mod alloc;
pub mod descriptor;
mod life;
pub mod queue;
#[cfg(any(feature = "trace", feature = "replay"))]
@ -699,7 +688,11 @@ impl<A: HalApi> Device<A> {
ref_count: texture.life_guard.add_ref(),
},
},
desc: hal_desc.map_label(|_| ()),
desc: resource::HalTextureViewDescriptor {
format: hal_desc.format,
dimension: hal_desc.dimension,
range: hal_desc.range,
},
format_features: texture.format_features,
extent,
samples: texture.kind.num_samples(),
@ -3456,7 +3449,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor<Label>,
id_in: Input<G, id::CommandEncoderId>,
) -> (id::CommandEncoderId, Option<command::CommandAllocatorError>) {
) -> (id::CommandEncoderId, Option<DeviceError>) {
profiling::scope!("create_command_encoder", "Device");
let hub = A::hub(self);
@ -3475,28 +3468,23 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
ref_count: device.life_guard.add_ref(),
};
let mut command_buffer = match device.cmd_allocator.allocate(
dev_stored,
&device.raw,
let mut raw = unsafe {
self.raw
.create_command_buffer(&hal::CommandBufferDescriptor { label: desc.label })?
};
unsafe {
raw.begin();
}
let command_buffer = command::CommandBuffer::new(
raw,
device_id,
device.limits.clone(),
device.downlevel,
device.features,
device.private_features,
&desc.label,
#[cfg(feature = "trace")]
device.trace.is_some(),
) {
Ok(cmd_buf) => cmd_buf,
Err(e) => break e,
};
let mut raw = command_buffer.raw.first_mut().unwrap();
unsafe {
if let Some(ref label) = desc.label {
device.raw.set_command_buffer_name(&mut raw, label);
}
raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
);
let id = fid.assign(command_buffer, &mut token);
return (id.0, None);
@ -3981,8 +3969,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
profiling::scope!("create_swap_chain", "Device");
fn validate_swap_chain_descriptor(
config: &mut hal::window::SwapchainConfig,
caps: &hal::window::SurfaceCapabilities,
config: &mut hal::SurfaceConfiguration,
caps: &hal::SurfaceCapabilities,
) -> Result<(), swap_chain::CreateSwapChainError> {
let width = config.extent.width;
let height = config.extent.height;
@ -3998,13 +3986,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
caps.extents
);
}
if !caps.present_modes.contains(config.present_mode) {
if !caps.vsync_modes.contains(&config.vsync_mode) {
log::warn!(
"Surface does not support present mode: {:?}, falling back to {:?}",
config.present_mode,
hal::window::PresentMode::FIFO
hal::VsyncMode::FIFO
);
config.present_mode = hal::window::PresentMode::FIFO;
config.vsync_mode = hal::VsyncMode::Fifo;
}
if width == 0 || height == 0 {
return Err(swap_chain::CreateSwapChainError::ZeroArea);
@ -4166,7 +4154,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
pub fn poll_all_devices(&self, force_wait: bool) -> Result<(), WaitIdleError> {
use crate::backend;
//use crate::backend;
let mut callbacks = Vec::new();
/*

View File

@ -247,20 +247,27 @@ pub struct TextureViewDescriptor<'a> {
#[derive(Debug)]
pub(crate) enum TextureViewInner<A: hal::Api> {
Native {
raw: A::ImageView,
raw: A::TextureView,
source_id: Stored<TextureId>,
},
SwapChain {
raw: A::SwapChainTexture,
raw: A::SurfaceTexture,
source_id: Stored<SwapChainId>,
},
}
#[derive(Debug)]
pub(crate) struct HalTextureViewDescriptor {
pub format: wgt::TextureFormat,
pub dimension: wgt::TextureViewDimension,
pub range: wgt::ImageSubresourceRange,
}
#[derive(Debug)]
pub struct TextureView<A: hal::Api> {
pub(crate) inner: TextureViewInner<A>,
//TODO: store device_id for quick access?
pub(crate) desc: hal::TextureViewDescriptor<()>,
pub(crate) desc: HalTextureViewDescriptor,
pub(crate) format_features: wgt::TextureFormatFeatures,
pub(crate) extent: wgt::Extent3d,
pub(crate) samples: u32,

View File

@ -16,6 +16,7 @@ license = "MPL-2.0"
[dependencies]
bitflags = "1.0"
smallvec = "1"
thiserror = "1"
wgt = { package = "wgpu-types", path = "../wgpu-types" }
[dependencies.naga]

View File

@ -5,6 +5,8 @@ pub struct Encoder;
#[derive(Debug)]
pub struct Resource;
type DeviceResult<T> = Result<T, crate::DeviceError>;
impl crate::Api for Api {
type Instance = Context;
type Surface = Context;
@ -19,7 +21,7 @@ impl crate::Api for Api {
type Buffer = Resource;
type QuerySet = Resource;
type Texture = Resource;
type SwapChainTexture = Resource;
type SurfaceTexture = Resource;
type TextureView = Resource;
type Sampler = Resource;
@ -37,14 +39,28 @@ impl crate::Instance<Api> for Context {
}
}
impl crate::Surface<Api> for Context {}
// No-op `Surface` implementation for the empty (dummy) backend: every
// operation succeeds without doing any real work, which lets the rest of
// the stack compile and run against a backend that renders nothing.
impl crate::Surface<Api> for Context {
// Accepts any configuration and reports success.
unsafe fn configure(
&mut self,
_device: &Context,
_config: &crate::SurfaceConfiguration,
) -> Result<(), crate::SurfaceError> {
Ok(())
}
// Nothing was configured, so there is nothing to tear down.
unsafe fn unconfigure(&mut self, _device: &Context) {}
// Always "acquires" immediately: returns the unit `Resource` texture
// and no `Suboptimal` marker; the timeout is ignored.
unsafe fn acquire_texture(
&mut self,
_timeout_ms: u32,
) -> Result<(Resource, Option<crate::Suboptimal>), crate::SurfaceError> {
Ok((Resource, None))
}
}
impl crate::Adapter<Api> for Context {
unsafe fn open(
&self,
_features: wgt::Features,
) -> Result<crate::OpenDevice<Api>, crate::Error> {
Err(crate::Error::DeviceLost)
unsafe fn open(&self, _features: wgt::Features) -> DeviceResult<crate::OpenDevice<Api>> {
Err(crate::DeviceError::Lost)
}
unsafe fn close(&self, _device: Context) {}
unsafe fn texture_format_capabilities(
@ -53,8 +69,11 @@ impl crate::Adapter<Api> for Context {
) -> crate::TextureFormatCapability {
crate::TextureFormatCapability::empty()
}
unsafe fn surface_formats(&self, _surface: &Context) -> Vec<wgt::TextureFormat> {
Vec::new()
unsafe fn surface_capabilities(
&self,
_surface: &Context,
) -> Option<crate::SurfaceCapabilities> {
None
}
}
@ -66,7 +85,7 @@ impl crate::Device<Api> for Context {
unsafe fn create_buffer(
&self,
_desc: &wgt::BufferDescriptor<crate::Label>,
) -> Result<Resource, crate::Error> {
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_buffer(&self, _buffer: Resource) {}
@ -74,8 +93,8 @@ impl crate::Device<Api> for Context {
&self,
_buffer: &Resource,
_range: crate::MemoryRange,
) -> Result<std::ptr::NonNull<u8>, crate::Error> {
Err(crate::Error::DeviceLost)
) -> DeviceResult<std::ptr::NonNull<u8>> {
Err(crate::DeviceError::Lost)
}
unsafe fn unmap_buffer(&self, _buffer: &Resource) {}
unsafe fn flush_mapped_ranges<I: Iterator<Item = crate::MemoryRange>>(
@ -94,27 +113,27 @@ impl crate::Device<Api> for Context {
unsafe fn create_texture(
&self,
_desc: &wgt::TextureDescriptor<crate::Label>,
) -> Result<Resource, crate::Error> {
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_texture(&self, _texture: Resource) {}
unsafe fn create_texture_view(
&self,
_texture: &Resource,
_desc: &crate::TextureViewDescriptor<crate::Label>,
) -> Result<Resource, crate::Error> {
_desc: &crate::TextureViewDescriptor,
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_texture_view(&self, _view: Resource) {}
unsafe fn create_sampler(
&self,
_desc: &crate::SamplerDescriptor,
) -> Result<Resource, crate::Error> {
unsafe fn create_sampler(&self, _desc: &crate::SamplerDescriptor) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_sampler(&self, _sampler: Resource) {}
unsafe fn create_command_buffer(&self) -> Result<Encoder, crate::Error> {
unsafe fn create_command_buffer(
&self,
_desc: &crate::CommandBufferDescriptor,
) -> DeviceResult<Encoder> {
Ok(Encoder)
}
unsafe fn destroy_command_buffer(&self, _cmd_buf: Encoder) {}
@ -122,21 +141,21 @@ impl crate::Device<Api> for Context {
unsafe fn create_bind_group_layout(
&self,
_desc: &crate::BindGroupLayoutDescriptor,
) -> Result<Resource, crate::Error> {
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: Resource) {}
unsafe fn create_pipeline_layout(
&self,
_desc: &crate::PipelineLayoutDescriptor<Api>,
) -> Result<Resource, crate::Error> {
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: Resource) {}
unsafe fn create_bind_group(
&self,
_desc: &crate::BindGroupDescriptor<Api>,
) -> Result<Resource, crate::Error> {
) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_bind_group(&self, _group: Resource) {}

View File

@ -36,10 +36,17 @@
pub mod empty;
use std::{borrow::Cow, fmt, num::NonZeroU8, ops::Range, ptr::NonNull};
use std::{
borrow::{Borrow, Cow},
fmt,
num::NonZeroU8,
ops::{Range, RangeInclusive},
ptr::NonNull,
};
use bitflags::bitflags;
use smallvec::SmallVec;
use thiserror::Error;
pub const MAX_ANISOTROPY: u8 = 16;
pub const MAX_BIND_GROUPS: usize = 8;
@ -49,70 +56,46 @@ pub type MemoryRange = Range<wgt::BufferAddress>;
pub type MipLevel = u8;
pub type ArrayLayer = u16;
#[derive(Debug)]
pub enum Error {
#[derive(Clone, Debug, PartialEq, Error)]
pub enum DeviceError {
#[error("out of memory")]
OutOfMemory,
DeviceLost,
#[error("device is lost")]
Lost,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::OutOfMemory => write!(f, "out of memory"),
Self::DeviceLost => write!(f, "device is lost"),
}
}
}
impl std::error::Error for Error {}
#[derive(Debug)]
#[derive(Clone, Debug, PartialEq, Error)]
pub enum ShaderError {
#[error("compilation failed: {0:?}")]
Compilation(String),
Device(Error),
#[error(transparent)]
Device(#[from] DeviceError),
}
impl fmt::Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Compilation(ref message) => write!(f, "compilation failed: {}", message),
Self::Device(_) => Ok(()),
}
}
}
impl std::error::Error for ShaderError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
Self::Compilation(..) => None,
Self::Device(ref parent) => Some(parent),
}
}
}
#[derive(Debug)]
#[derive(Clone, Debug, PartialEq, Error)]
pub enum PipelineError {
#[error("linkage failed for stage {0:?}: {1}")]
Linkage(wgt::ShaderStage, String),
Device(Error),
#[error(transparent)]
Device(#[from] DeviceError),
}
impl fmt::Display for PipelineError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Linkage(stage, ref message) => {
write!(f, "linkage failed for stage {:?}: {}", stage, message)
}
Self::Device(_) => Ok(()),
}
}
}
impl std::error::Error for PipelineError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
Self::Linkage(..) => None,
Self::Device(ref parent) => Some(parent),
}
}
#[derive(Clone, Debug, PartialEq, Error)]
pub enum SurfaceError {
#[error("surface is lost")]
Lost,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("other reason: {0}")]
Other(&'static str),
}
/// Marker value returned if the presentation configuration no longer matches
/// the surface properties exactly, but can still be used to present
/// to the surface successfully.
#[derive(Debug)]
pub struct Suboptimal;
pub trait Api: Clone + Sized {
type Instance: Instance<Self>;
type Surface: Surface<Self>;
@ -127,7 +110,7 @@ pub trait Api: Clone + Sized {
type Buffer: fmt::Debug + Send + Sync;
type QuerySet: fmt::Debug + Send + Sync;
type Texture: fmt::Debug + Send + Sync;
type SwapChainTexture: fmt::Debug + Send + Sync;
type SurfaceTexture: fmt::Debug + Send + Sync + Borrow<Self::Texture>;
type TextureView: fmt::Debug + Send + Sync;
type Sampler: fmt::Debug + Send + Sync;
@ -143,10 +126,23 @@ pub trait Instance<A: Api> {
unsafe fn enumerate_adapters(&self) -> Vec<ExposedAdapter<A>>;
}
pub trait Surface<A: Api> {}
pub trait Surface<A: Api> {
unsafe fn configure(
&mut self,
device: &A::Device,
config: &SurfaceConfiguration,
) -> Result<(), SurfaceError>;
unsafe fn unconfigure(&mut self, device: &A::Device);
unsafe fn acquire_texture(
&mut self,
timeout_ms: u32,
) -> Result<(A::SurfaceTexture, Option<Suboptimal>), SurfaceError>;
}
pub trait Adapter<A: Api> {
unsafe fn open(&self, features: wgt::Features) -> Result<OpenDevice<A>, Error>;
unsafe fn open(&self, features: wgt::Features) -> Result<OpenDevice<A>, DeviceError>;
unsafe fn close(&self, device: A::Device);
/// Return the set of supported capabilities for a texture format.
@ -154,19 +150,24 @@ pub trait Adapter<A: Api> {
&self,
format: wgt::TextureFormat,
) -> TextureFormatCapability;
/// Returns the list of surface formats supported for presentation, if any.
unsafe fn surface_formats(&self, surface: &A::Surface) -> Vec<wgt::TextureFormat>;
/// Returns the capabilities of working with a specified surface.
///
/// `None` means presentation is not supported for it.
unsafe fn surface_capabilities(&self, surface: &A::Surface) -> Option<SurfaceCapabilities>;
}
pub trait Device<A: Api> {
unsafe fn create_buffer(&self, desc: &wgt::BufferDescriptor<Label>)
-> Result<A::Buffer, Error>;
unsafe fn create_buffer(
&self,
desc: &wgt::BufferDescriptor<Label>,
) -> Result<A::Buffer, DeviceError>;
unsafe fn destroy_buffer(&self, buffer: A::Buffer);
unsafe fn map_buffer(
&self,
buffer: &A::Buffer,
range: MemoryRange,
) -> Result<NonNull<u8>, Error>;
) -> Result<NonNull<u8>, DeviceError>;
unsafe fn unmap_buffer(&self, buffer: &A::Buffer);
unsafe fn flush_mapped_ranges<I: Iterator<Item = MemoryRange>>(
&self,
@ -182,34 +183,37 @@ pub trait Device<A: Api> {
unsafe fn create_texture(
&self,
desc: &wgt::TextureDescriptor<Label>,
) -> Result<A::Texture, Error>;
) -> Result<A::Texture, DeviceError>;
unsafe fn destroy_texture(&self, texture: A::Texture);
unsafe fn create_texture_view(
&self,
texture: &A::Texture,
desc: &TextureViewDescriptor<Label>,
) -> Result<A::TextureView, Error>;
desc: &TextureViewDescriptor,
) -> Result<A::TextureView, DeviceError>;
unsafe fn destroy_texture_view(&self, view: A::TextureView);
unsafe fn create_sampler(&self, desc: &SamplerDescriptor) -> Result<A::Sampler, Error>;
unsafe fn create_sampler(&self, desc: &SamplerDescriptor) -> Result<A::Sampler, DeviceError>;
unsafe fn destroy_sampler(&self, sampler: A::Sampler);
unsafe fn create_command_buffer(&self) -> Result<A::CommandBuffer, Error>;
unsafe fn create_command_buffer(
&self,
desc: &CommandBufferDescriptor,
) -> Result<A::CommandBuffer, DeviceError>;
unsafe fn destroy_command_buffer(&self, cmd_buf: A::CommandBuffer);
unsafe fn create_bind_group_layout(
&self,
desc: &BindGroupLayoutDescriptor,
) -> Result<A::BindGroupLayout, Error>;
) -> Result<A::BindGroupLayout, DeviceError>;
unsafe fn destroy_bind_group_layout(&self, bg_layout: A::BindGroupLayout);
unsafe fn create_pipeline_layout(
&self,
desc: &PipelineLayoutDescriptor<A>,
) -> Result<A::PipelineLayout, Error>;
) -> Result<A::PipelineLayout, DeviceError>;
unsafe fn destroy_pipeline_layout(&self, pipeline_layout: A::PipelineLayout);
unsafe fn create_bind_group(
&self,
desc: &BindGroupDescriptor<A>,
) -> Result<A::BindGroup, Error>;
) -> Result<A::BindGroup, DeviceError>;
unsafe fn destroy_bind_group(&self, group: A::BindGroup);
unsafe fn create_shader_module(
@ -234,6 +238,8 @@ pub trait Queue<A: Api> {
unsafe fn submit<I: Iterator<Item = A::CommandBuffer>>(&mut self, command_buffers: I);
}
pub trait SwapChain<A: Api> {}
pub trait CommandBuffer<A: Api> {
unsafe fn begin(&mut self);
unsafe fn end(&mut self);
@ -372,6 +378,45 @@ pub struct ExposedAdapter<A: Api> {
pub capabilities: Capabilities,
}
/// Describes information about what a `Surface`'s presentation capabilities are.
/// Fetch this with [Adapter::surface_capabilities].
#[derive(Debug, Clone)]
pub struct SurfaceCapabilities {
/// List of supported texture formats.
///
/// Must be at least one.
pub texture_formats: Vec<wgt::TextureFormat>,
/// Range for the swap chain sizes.
///
/// - `swap_chain_sizes.start` must be at least 1.
/// - `swap_chain_sizes.end` must be larger or equal to `swap_chain_sizes.start`.
pub swap_chain_sizes: RangeInclusive<u32>,
/// Current extent of the surface, if known.
pub current_extent: Option<wgt::Extent3d>,
/// Range of supported extents.
///
/// `current_extent` must be inside this range.
pub extents: RangeInclusive<wgt::Extent3d>,
/// Supported texture usage flags.
///
/// Must have at least `TextureUse::COLOR_TARGET`
pub texture_uses: TextureUse,
/// List of supported V-sync modes.
///
/// Must be at least one.
pub vsync_modes: Vec<VsyncMode>,
/// List of supported alpha composition modes.
///
/// Must be at least one.
pub composite_alpha_modes: Vec<CompositeAlphaMode>,
}
#[derive(Debug)]
pub struct OpenDevice<A: Api> {
pub device: A::Device,
@ -379,25 +424,13 @@ pub struct OpenDevice<A: Api> {
}
#[derive(Clone, Debug)]
pub struct TextureViewDescriptor<L> {
pub label: L,
pub struct TextureViewDescriptor<'a> {
pub label: Label<'a>,
pub format: wgt::TextureFormat,
pub dimension: wgt::TextureViewDimension,
pub range: wgt::ImageSubresourceRange,
}
impl<L> TextureViewDescriptor<L> {
///
pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> TextureViewDescriptor<K> {
TextureViewDescriptor {
label: fun(&self.label),
format: self.format,
dimension: self.dimension,
range: self.range.clone(),
}
}
}
#[derive(Clone, Debug)]
pub struct SamplerDescriptor<'a> {
pub label: Label<'a>,
@ -473,6 +506,11 @@ pub struct BindGroupDescriptor<'a, A: Api> {
pub entries: Cow<'a, [BindGroupEntry<'a, A>]>,
}
#[derive(Clone, Debug)]
pub struct CommandBufferDescriptor<'a> {
pub label: Label<'a>,
}
/// Naga shader module.
pub struct NagaShader {
/// Shader module IR.
@ -548,6 +586,58 @@ pub struct RenderPipelineDescriptor<'a, A: Api> {
pub color_targets: Cow<'a, [wgt::ColorTargetState]>,
}
/// Specifies the mode regulating how a surface presents frames.
#[derive(Debug, Clone)]
pub enum VsyncMode {
/// Don't ever wait for v-sync.
Immediate,
/// Wait for v-sync, overwrite the last rendered frame.
Mailbox,
/// Present frames in the same order they are rendered.
Fifo,
/// Don't wait for the next v-sync if we just missed it.
Relaxed,
}
/// Specifies how the alpha channel of the textures should be handled during (martin mouv i step)
/// compositing.
#[derive(Debug, Clone)]
pub enum CompositeAlphaMode {
/// The alpha channel, if it exists, of the textures is ignored in the
/// compositing process. Instead, the textures is treated as if it has a
/// constant alpha of 1.0.
Opaque,
/// The alpha channel, if it exists, of the textures is respected in the
/// compositing process. The non-alpha channels of the textures are not
/// expected to already be multiplied by the alpha channel by the
/// application; instead, the compositor will multiply the non-alpha
/// channels of the texture by the alpha channel during compositing.
Alpha,
/// The alpha channel, if it exists, of the textures is respected in the
/// compositing process. The non-alpha channels of the textures are
/// expected to already be multiplied by the alpha channel by the
/// application.
PremultipliedAlpha,
}
#[derive(Debug, Clone)]
pub struct SurfaceConfiguration {
/// Number of textures in the swap chain. Must be in
/// `SurfaceCapabilities::swap_chain_size` range.
pub swap_chain_size: u32,
/// Vertical synchronization mode.
pub vsync_mode: VsyncMode,
/// Alpha composition mode.
pub composite_alpha_mode: CompositeAlphaMode,
/// Format of the surface textures.
pub format: wgt::TextureFormat,
/// Requested texture extent. Must be in
/// `SurfaceCapabilities::extents` range.
pub extent: wgt::Extent3d,
/// Allowed usage of surface textures,
pub usage: TextureUse,
}
#[test]
fn test_default_limits() {
let limits = wgt::Limits::default();