Refactor some descriptor set/allocation things (#2357)

* Fix the documentation for `DescriptorPool` methods

* Pool allocate validation

* Add `DescriptorPoolAlloc`, make `UnsafeDescriptorSet` more useful
This commit is contained in:
Rua 2023-10-13 20:00:01 +02:00 committed by GitHub
parent 4bd3b81ea8
commit e00751497d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 531 additions and 431 deletions

View File

@ -694,7 +694,7 @@ where
let descriptor_sets_vk: SmallVec<[_; 12]> = descriptor_sets
.iter()
.map(|x| x.as_ref().0.inner().handle())
.map(|x| x.as_ref().0.handle())
.collect();
let dynamic_offsets_vk: SmallVec<[_; 32]> = descriptor_sets
.iter()

View File

@ -19,11 +19,12 @@
use self::sorted_map::SortedMap;
use super::{
layout::DescriptorSetLayout,
pool::{DescriptorPool, DescriptorPoolCreateInfo, DescriptorSetAllocateInfo},
sys::UnsafeDescriptorSet,
pool::{
DescriptorPool, DescriptorPoolAlloc, DescriptorPoolCreateInfo, DescriptorSetAllocateInfo,
},
};
use crate::{
descriptor_set::layout::{DescriptorSetLayoutCreateFlags, DescriptorType},
descriptor_set::layout::DescriptorType,
device::{Device, DeviceOwned},
instance::InstanceOwnedDebugWrapper,
Validated, VulkanError,
@ -60,16 +61,16 @@ pub unsafe trait DescriptorSetAllocator: DeviceOwned {
&self,
layout: &Arc<DescriptorSetLayout>,
variable_descriptor_count: u32,
) -> Result<Self::Alloc, VulkanError>;
) -> Result<Self::Alloc, Validated<VulkanError>>;
}
/// An allocated descriptor set.
pub trait DescriptorSetAlloc: Send + Sync {
/// Returns the inner unsafe descriptor set object.
fn inner(&self) -> &UnsafeDescriptorSet;
/// Returns the internal object that contains the descriptor set.
fn inner(&self) -> &DescriptorPoolAlloc;
/// Returns the inner unsafe descriptor set object.
fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet;
/// Returns the descriptor pool that the descriptor set was allocated from.
fn pool(&self) -> &DescriptorPool;
}
/// Standard implementation of a descriptor set allocator.
@ -142,38 +143,15 @@ unsafe impl DescriptorSetAllocator for StandardDescriptorSetAllocator {
type Alloc = StandardDescriptorSetAlloc;
/// Allocates a descriptor set.
///
/// # Panics
///
/// - Panics if the provided `layout` is for push descriptors rather than regular descriptor
/// sets.
/// - Panics if the provided `variable_descriptor_count` is greater than the maximum number of
/// variable count descriptors in the set.
#[inline]
fn allocate(
&self,
layout: &Arc<DescriptorSetLayout>,
variable_descriptor_count: u32,
) -> Result<StandardDescriptorSetAlloc, VulkanError> {
assert!(
!layout
.flags()
.intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR),
"the provided descriptor set layout is for push descriptors, and cannot be used to \
build a descriptor set object",
);
) -> Result<StandardDescriptorSetAlloc, Validated<VulkanError>> {
let max_count = layout.variable_descriptor_count();
assert!(
variable_descriptor_count <= max_count,
"the provided variable_descriptor_count ({}) is greater than the maximum number of \
variable count descriptors in the set ({})",
variable_descriptor_count,
max_count,
);
let pools = self.pools.get_or(Default::default);
let entry = unsafe { &mut *pools.get() }.get_or_try_insert(layout.id(), || {
if max_count == 0 {
FixedEntry::new(layout.clone()).map(Entry::Fixed)
@ -197,7 +175,7 @@ unsafe impl<T: DescriptorSetAllocator> DescriptorSetAllocator for Arc<T> {
&self,
layout: &Arc<DescriptorSetLayout>,
variable_descriptor_count: u32,
) -> Result<Self::Alloc, VulkanError> {
) -> Result<Self::Alloc, Validated<VulkanError>> {
(**self).allocate(layout, variable_descriptor_count)
}
}
@ -221,7 +199,7 @@ struct FixedEntry {
}
impl FixedEntry {
fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, VulkanError> {
fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, Validated<VulkanError>> {
Ok(FixedEntry {
pool: FixedPool::new(&layout, MAX_SETS)?,
set_count: MAX_SETS,
@ -229,7 +207,7 @@ impl FixedEntry {
})
}
fn allocate(&mut self) -> Result<StandardDescriptorSetAlloc, VulkanError> {
fn allocate(&mut self) -> Result<StandardDescriptorSetAlloc, Validated<VulkanError>> {
let inner = if let Some(inner) = self.pool.reserve.pop() {
inner
} else {
@ -250,14 +228,17 @@ impl FixedEntry {
struct FixedPool {
// The actual Vulkan descriptor pool. This field isn't actually used anywhere, but we need to
// keep the pool alive in order to keep the descriptor sets valid.
_inner: DescriptorPool,
inner: DescriptorPool,
// List of descriptor sets. When `alloc` is called, a descriptor will be extracted from this
// list. When a `SingleLayoutPoolAlloc` is dropped, its descriptor set is put back in this list.
reserve: ArrayQueue<UnsafeDescriptorSet>,
reserve: ArrayQueue<DescriptorPoolAlloc>,
}
impl FixedPool {
fn new(layout: &Arc<DescriptorSetLayout>, set_count: usize) -> Result<Arc<Self>, VulkanError> {
fn new(
layout: &Arc<DescriptorSetLayout>,
set_count: usize,
) -> Result<Arc<Self>, Validated<VulkanError>> {
let inner = DescriptorPool::new(
layout.device().clone(),
DescriptorPoolCreateInfo {
@ -275,28 +256,28 @@ impl FixedPool {
)
.map_err(Validated::unwrap)?;
let allocate_infos = (0..set_count).map(|_| DescriptorSetAllocateInfo {
layout,
variable_descriptor_count: 0,
});
let allocate_infos = (0..set_count).map(|_| DescriptorSetAllocateInfo::new(layout.clone()));
let allocs = unsafe {
inner
.allocate_descriptor_sets(allocate_infos)
.map_err(|err| match err {
VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
VulkanError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
VulkanError::OutOfPoolMemory => {
// We created the pool with an exact size.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
Validated::ValidationError(_) => err,
Validated::Error(vk_err) => match vk_err {
VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
VulkanError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
VulkanError::OutOfPoolMemory => {
// We created the pool with an exact size.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
},
})?
};
@ -305,10 +286,7 @@ impl FixedPool {
let _ = reserve.push(alloc);
}
Ok(Arc::new(FixedPool {
_inner: inner,
reserve,
}))
Ok(Arc::new(FixedPool { inner, reserve }))
}
}
@ -326,7 +304,7 @@ struct VariableEntry {
}
impl VariableEntry {
fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, VulkanError> {
fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, Validated<VulkanError>> {
let reserve = Arc::new(ArrayQueue::new(MAX_POOLS));
Ok(VariableEntry {
@ -340,7 +318,7 @@ impl VariableEntry {
fn allocate(
&mut self,
variable_descriptor_count: u32,
) -> Result<StandardDescriptorSetAlloc, VulkanError> {
) -> Result<StandardDescriptorSetAlloc, Validated<VulkanError>> {
if self.allocations >= MAX_SETS {
self.pool = if let Some(inner) = self.reserve.pop() {
Arc::new(VariablePool {
@ -354,8 +332,8 @@ impl VariableEntry {
}
let allocate_info = DescriptorSetAllocateInfo {
layout: &self.layout,
variable_descriptor_count,
..DescriptorSetAllocateInfo::new(self.layout.clone())
};
let mut sets = unsafe {
@ -363,19 +341,22 @@ impl VariableEntry {
.inner
.allocate_descriptor_sets([allocate_info])
.map_err(|err| match err {
VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
VulkanError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
VulkanError::OutOfPoolMemory => {
// We created the pool to fit the maximum variable descriptor count.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
Validated::ValidationError(_) => err,
Validated::Error(vk_err) => match vk_err {
VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
VulkanError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
VulkanError::OutOfPoolMemory => {
// We created the pool to fit the maximum variable descriptor count.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
},
})?
};
self.allocations += 1;
@ -446,7 +427,7 @@ impl Drop for VariablePool {
#[derive(Debug)]
pub struct StandardDescriptorSetAlloc {
// The actual descriptor set.
inner: ManuallyDrop<UnsafeDescriptorSet>,
inner: ManuallyDrop<DescriptorPoolAlloc>,
// The pool where we allocated from. Needed for our `Drop` impl.
parent: AllocParent,
}
@ -457,6 +438,16 @@ enum AllocParent {
Variable(Arc<VariablePool>),
}
// Identifies which kind of pool a `StandardDescriptorSetAlloc` was allocated
// from, so its `Drop` impl can hand the set back to the right owner.
impl AllocParent {
    /// Returns the underlying `DescriptorPool`, regardless of whether this
    /// allocation came from a fixed-size or a variable-count pool.
    #[inline]
    fn pool(&self) -> &DescriptorPool {
        match self {
            Self::Fixed(pool) => &pool.inner,
            Self::Variable(pool) => &pool.inner,
        }
    }
}
// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
// `Send + Sync`. `FixedPool` and `VariablePool` are `Send + !Sync` because `DescriptorPool` is
// `!Sync`. That's fine however because we never access the `DescriptorPool` concurrently.
@ -465,13 +456,13 @@ unsafe impl Sync for StandardDescriptorSetAlloc {}
impl DescriptorSetAlloc for StandardDescriptorSetAlloc {
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
fn inner(&self) -> &DescriptorPoolAlloc {
&self.inner
}
#[inline]
fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
&mut self.inner
fn pool(&self) -> &DescriptorPool {
self.parent.pool()
}
}
@ -567,7 +558,7 @@ mod tests {
let pool1 =
if let AllocParent::Fixed(pool) = &allocator.allocate(&layout, 0).unwrap().parent {
pool._inner.handle()
pool.inner.handle()
} else {
unreachable!()
};
@ -575,7 +566,7 @@ mod tests {
thread::spawn(move || {
let pool2 =
if let AllocParent::Fixed(pool) = &allocator.allocate(&layout, 0).unwrap().parent {
pool._inner.handle()
pool.inner.handle()
} else {
unreachable!()
};

View File

@ -87,16 +87,16 @@ pub use self::{
WriteDescriptorSetElements,
},
};
use self::{layout::DescriptorSetLayout, sys::UnsafeDescriptorSet};
use self::{layout::DescriptorSetLayout, pool::DescriptorPoolAlloc};
use crate::{
acceleration_structure::AccelerationStructure,
buffer::view::BufferView,
descriptor_set::layout::{
DescriptorBindingFlags, DescriptorSetLayoutCreateFlags, DescriptorType,
},
device::{DeviceOwned, DeviceOwnedDebugWrapper},
device::DeviceOwned,
image::{sampler::Sampler, ImageLayout},
ValidationError, VulkanObject,
VulkanObject,
};
use ahash::HashMap;
use smallvec::{smallvec, SmallVec};
@ -116,15 +116,23 @@ mod update;
/// Trait for objects that contain a collection of resources that will be accessible by shaders.
///
/// Objects of this type can be passed when submitting a draw command.
pub unsafe trait DescriptorSet: DeviceOwned + Send + Sync {
/// Returns the inner `UnsafeDescriptorSet`.
fn inner(&self) -> &UnsafeDescriptorSet;
pub unsafe trait DescriptorSet:
VulkanObject<Handle = ash::vk::DescriptorSet> + DeviceOwned + Send + Sync
{
/// Returns the allocation of the descriptor set.
fn alloc(&self) -> &DescriptorPoolAlloc;
/// Returns the layout of this descriptor set.
fn layout(&self) -> &Arc<DescriptorSetLayout>;
#[inline]
fn layout(&self) -> &Arc<DescriptorSetLayout> {
self.alloc().layout()
}
/// Returns the variable descriptor count that this descriptor set was allocated with.
fn variable_descriptor_count(&self) -> u32;
#[inline]
fn variable_descriptor_count(&self) -> u32 {
self.alloc().variable_descriptor_count()
}
/// Creates a [`DescriptorSetWithOffsets`] with the given dynamic offsets.
fn offsets(
@ -144,7 +152,7 @@ pub unsafe trait DescriptorSet: DeviceOwned + Send + Sync {
impl PartialEq for dyn DescriptorSet {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner()
self.alloc() == other.alloc()
}
}
@ -152,160 +160,7 @@ impl Eq for dyn DescriptorSet {}
impl Hash for dyn DescriptorSet {
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
}
}
pub(crate) struct DescriptorSetInner {
layout: DeviceOwnedDebugWrapper<Arc<DescriptorSetLayout>>,
variable_descriptor_count: u32,
resources: DescriptorSetResources,
}
impl DescriptorSetInner {
pub(crate) fn new(
handle: ash::vk::DescriptorSet,
layout: Arc<DescriptorSetLayout>,
variable_descriptor_count: u32,
descriptor_writes: impl IntoIterator<Item = WriteDescriptorSet>,
descriptor_copies: impl IntoIterator<Item = CopyDescriptorSet>,
) -> Result<Self, Box<ValidationError>> {
assert!(
!layout
.flags()
.intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR),
"the provided descriptor set layout is for push descriptors, and cannot be used to \
build a descriptor set object",
);
let max_variable_descriptor_count = layout.variable_descriptor_count();
assert!(
variable_descriptor_count <= max_variable_descriptor_count,
"the provided variable_descriptor_count ({}) is greater than the maximum number of \
variable count descriptors in the layout ({})",
variable_descriptor_count,
max_variable_descriptor_count,
);
let mut resources = DescriptorSetResources::new(&layout, variable_descriptor_count);
struct PerDescriptorWrite {
write_info: DescriptorWriteInfo,
acceleration_structures: ash::vk::WriteDescriptorSetAccelerationStructureKHR,
inline_uniform_block: ash::vk::WriteDescriptorSetInlineUniformBlock,
}
let writes_iter = descriptor_writes.into_iter();
let (lower_size_bound, _) = writes_iter.size_hint();
let mut writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
let mut per_writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
for (index, write) in writes_iter.enumerate() {
write
.validate(&layout, variable_descriptor_count)
.map_err(|err| err.add_context(format!("descriptor_writes[{}]", index)))?;
resources.write(&write, &layout);
let layout_binding = &layout.bindings()[&write.binding()];
writes_vk.push(write.to_vulkan(handle, layout_binding.descriptor_type));
per_writes_vk.push(PerDescriptorWrite {
write_info: write.to_vulkan_info(layout_binding.descriptor_type),
acceleration_structures: Default::default(),
inline_uniform_block: Default::default(),
});
}
if !writes_vk.is_empty() {
for (write_vk, per_write_vk) in writes_vk.iter_mut().zip(per_writes_vk.iter_mut()) {
match &mut per_write_vk.write_info {
DescriptorWriteInfo::Image(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_image_info = info.as_ptr();
}
DescriptorWriteInfo::Buffer(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_buffer_info = info.as_ptr();
}
DescriptorWriteInfo::BufferView(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_texel_buffer_view = info.as_ptr();
}
DescriptorWriteInfo::InlineUniformBlock(data) => {
write_vk.descriptor_count = data.len() as u32;
write_vk.p_next = &per_write_vk.inline_uniform_block as *const _ as _;
per_write_vk.inline_uniform_block.data_size = write_vk.descriptor_count;
per_write_vk.inline_uniform_block.p_data = data.as_ptr() as *const _;
}
DescriptorWriteInfo::AccelerationStructure(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_next = &per_write_vk.acceleration_structures as *const _ as _;
per_write_vk
.acceleration_structures
.acceleration_structure_count = write_vk.descriptor_count;
per_write_vk
.acceleration_structures
.p_acceleration_structures = info.as_ptr();
}
}
}
}
let copies_iter = descriptor_copies.into_iter();
let (lower_size_bound, _) = copies_iter.size_hint();
let mut copies_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
for (index, copy) in copies_iter.enumerate() {
copy.validate(&layout, variable_descriptor_count)
.map_err(|err| err.add_context(format!("descriptor_copies[{}]", index)))?;
resources.copy(&copy);
let &CopyDescriptorSet {
ref src_set,
src_binding,
src_first_array_element,
dst_binding,
dst_first_array_element,
descriptor_count,
_ne: _,
} = &copy;
copies_vk.push(ash::vk::CopyDescriptorSet {
src_set: src_set.inner().handle(),
src_binding,
src_array_element: src_first_array_element,
dst_set: handle,
dst_binding,
dst_array_element: dst_first_array_element,
descriptor_count,
..Default::default()
});
}
unsafe {
let fns = layout.device().fns();
(fns.v1_0.update_descriptor_sets)(
layout.device().handle(),
writes_vk.len() as u32,
writes_vk.as_ptr(),
copies_vk.len() as u32,
copies_vk.as_ptr(),
);
}
Ok(DescriptorSetInner {
layout: DeviceOwnedDebugWrapper(layout),
variable_descriptor_count,
resources,
})
}
pub(crate) fn layout(&self) -> &Arc<DescriptorSetLayout> {
&self.layout
}
pub(crate) fn resources(&self) -> &DescriptorSetResources {
&self.resources
self.alloc().hash(state);
}
}

View File

@ -21,18 +21,17 @@
//! # Examples
//! TODO:
use super::CopyDescriptorSet;
use super::{pool::DescriptorPoolAlloc, sys::UnsafeDescriptorSet, CopyDescriptorSet};
use crate::{
descriptor_set::{
allocator::{DescriptorSetAlloc, DescriptorSetAllocator, StandardDescriptorSetAlloc},
layout::DescriptorSetLayoutCreateFlags,
update::WriteDescriptorSet,
DescriptorSet, DescriptorSetInner, DescriptorSetLayout, DescriptorSetResources,
UnsafeDescriptorSet,
DescriptorSet, DescriptorSetLayout, DescriptorSetResources,
},
device::{Device, DeviceOwned},
Validated, VulkanError, VulkanObject,
Validated, ValidationError, VulkanError, VulkanObject,
};
use smallvec::SmallVec;
use std::{
hash::{Hash, Hasher},
sync::Arc,
@ -40,8 +39,8 @@ use std::{
/// A simple, immutable descriptor set that is expected to be long-lived.
pub struct PersistentDescriptorSet<P = StandardDescriptorSetAlloc> {
alloc: P,
inner: DescriptorSetInner,
inner: UnsafeDescriptorSet<P>,
resources: DescriptorSetResources,
}
impl PersistentDescriptorSet {
@ -78,34 +77,44 @@ impl PersistentDescriptorSet {
where
A: DescriptorSetAllocator + ?Sized,
{
assert!(
!layout
.flags()
.intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR),
"the provided descriptor set layout is for push descriptors, and cannot be used to \
build a descriptor set object",
);
let mut set = PersistentDescriptorSet {
inner: UnsafeDescriptorSet::new(allocator, &layout, variable_descriptor_count)?,
resources: DescriptorSetResources::new(&layout, variable_descriptor_count),
};
let max_count = layout.variable_descriptor_count();
unsafe {
set.update(descriptor_writes, descriptor_copies)?;
}
assert!(
variable_descriptor_count <= max_count,
"the provided variable_descriptor_count ({}) is greater than the maximum number of \
variable count descriptors in the set ({})",
variable_descriptor_count,
max_count,
);
Ok(Arc::new(set))
}
}
let alloc = allocator.allocate(&layout, variable_descriptor_count)?;
let inner = DescriptorSetInner::new(
alloc.inner().handle(),
layout,
variable_descriptor_count,
descriptor_writes,
descriptor_copies,
)?;
impl<P> PersistentDescriptorSet<P>
where
P: DescriptorSetAlloc,
{
unsafe fn update(
&mut self,
descriptor_writes: impl IntoIterator<Item = WriteDescriptorSet>,
descriptor_copies: impl IntoIterator<Item = CopyDescriptorSet>,
) -> Result<(), Box<ValidationError>> {
let descriptor_writes: SmallVec<[_; 8]> = descriptor_writes.into_iter().collect();
let descriptor_copies: SmallVec<[_; 8]> = descriptor_copies.into_iter().collect();
Ok(Arc::new(PersistentDescriptorSet { alloc, inner }))
unsafe {
self.inner.update(&descriptor_writes, &descriptor_copies)?;
}
for write in descriptor_writes {
self.resources.write(&write, self.inner.layout());
}
for copy in descriptor_copies {
self.resources.copy(&copy);
}
Ok(())
}
}
@ -113,20 +122,23 @@ unsafe impl<P> DescriptorSet for PersistentDescriptorSet<P>
where
P: DescriptorSetAlloc,
{
fn inner(&self) -> &UnsafeDescriptorSet {
self.alloc.inner()
}
fn layout(&self) -> &Arc<DescriptorSetLayout> {
self.inner.layout()
}
fn variable_descriptor_count(&self) -> u32 {
self.inner.variable_descriptor_count
fn alloc(&self) -> &DescriptorPoolAlloc {
self.inner.alloc().inner()
}
fn resources(&self) -> &DescriptorSetResources {
self.inner.resources()
&self.resources
}
}
unsafe impl<P> VulkanObject for PersistentDescriptorSet<P>
where
P: DescriptorSetAlloc,
{
type Handle = ash::vk::DescriptorSet;
fn handle(&self) -> Self::Handle {
self.inner.handle()
}
}
@ -135,7 +147,7 @@ where
P: DescriptorSetAlloc,
{
fn device(&self) -> &Arc<Device> {
self.inner.layout().device()
self.layout().device()
}
}
@ -144,7 +156,7 @@ where
P: DescriptorSetAlloc,
{
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner()
self.inner == other.inner
}
}
@ -155,6 +167,6 @@ where
P: DescriptorSetAlloc,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
self.inner.hash(state);
}
}

View File

@ -12,7 +12,7 @@ use crate::{
layout::{DescriptorSetLayout, DescriptorSetLayoutCreateFlags, DescriptorType},
sys::UnsafeDescriptorSet,
},
device::{Device, DeviceOwned},
device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
instance::InstanceOwnedDebugWrapper,
macros::{impl_id_counter, vulkan_bitflags},
Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version, VulkanError,
@ -53,6 +53,18 @@ impl DescriptorPool {
unsafe { Ok(Self::new_unchecked(device, create_info)?) }
}
    /// Validates the arguments of [`DescriptorPool::new`] without creating
    /// the pool. Delegates to `DescriptorPoolCreateInfo::validate`, adding
    /// the `create_info` context so errors point at the offending argument.
    fn validate_new(
        device: &Device,
        create_info: &DescriptorPoolCreateInfo,
    ) -> Result<(), Box<ValidationError>> {
        // VUID-vkCreateDescriptorPool-pCreateInfo-parameter
        create_info
            .validate(device)
            .map_err(|err| err.add_context("create_info"))?;

        Ok(())
    }
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn new_unchecked(
device: Arc<Device>,
@ -113,18 +125,6 @@ impl DescriptorPool {
unsafe { Ok(Self::from_handle(device, handle, create_info)) }
}
fn validate_new(
device: &Device,
create_info: &DescriptorPoolCreateInfo,
) -> Result<(), Box<ValidationError>> {
// VUID-vkCreateDescriptorPool-pCreateInfo-parameter
create_info
.validate(device)
.map_err(|err| err.add_context("create_info"))?;
Ok(())
}
/// Creates a new `DescriptorPool` from a raw object handle.
///
/// # Safety
@ -184,54 +184,80 @@ impl DescriptorPool {
self.max_inline_uniform_block_bindings
}
/// Allocates descriptor sets from the pool, one for each element in `create_info`.
/// Allocates descriptor sets from the pool, one for each element in `allocate_info`.
/// Returns an iterator to the allocated sets, or an error.
///
/// The `FragmentedPool` errors often can't be prevented. If the function returns this error,
/// you should just create a new pool.
///
/// # Panics
///
/// - Panics if one of the layouts wasn't created with the same device as the pool.
///
/// # Safety
///
/// See also the `new` function.
/// - When the pool is dropped, the returned descriptor sets must not be in use by either
/// the host or device.
/// - If the device API version is less than 1.1, and the [`khr_maintenance1`] extension is not
/// enabled on the device, then
/// the length of `allocate_infos` must not be greater than the number of descriptor sets
/// remaining in the pool, and
/// the total number of descriptors of each type being allocated must not be greater than the
/// number of descriptors of that type remaining in the pool.
///
/// - The total descriptors of the layouts must fit in the pool.
/// - The total number of descriptor sets allocated from the pool must not overflow the pool.
/// - You must ensure that the allocated descriptor sets are no longer in use when the pool
/// is destroyed, as destroying the pool is equivalent to freeing all the sets.
pub unsafe fn allocate_descriptor_sets<'a>(
/// [`khr_maintenance1`]: crate::device::DeviceExtensions::khr_maintenance1
#[inline]
pub unsafe fn allocate_descriptor_sets(
&self,
allocate_info: impl IntoIterator<Item = DescriptorSetAllocateInfo<'a>>,
) -> Result<impl ExactSizeIterator<Item = UnsafeDescriptorSet>, VulkanError> {
let (layouts, variable_descriptor_counts): (SmallVec<[_; 1]>, SmallVec<[_; 1]>) =
allocate_info
.into_iter()
.map(|info| {
assert_eq!(self.device.handle(), info.layout.device().handle(),);
debug_assert!(!info
.layout
.flags()
.intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR));
debug_assert!(
info.variable_descriptor_count <= info.layout.variable_descriptor_count()
);
allocate_infos: impl IntoIterator<Item = DescriptorSetAllocateInfo>,
) -> Result<impl ExactSizeIterator<Item = DescriptorPoolAlloc>, Validated<VulkanError>> {
let allocate_infos: SmallVec<[_; 1]> = allocate_infos.into_iter().collect();
self.validate_allocate_descriptor_sets(&allocate_infos)?;
(info.layout.handle(), info.variable_descriptor_count)
})
.unzip();
Ok(self.allocate_descriptor_sets_unchecked(allocate_infos)?)
}
let output = if layouts.is_empty() {
vec![]
} else {
    /// Validates every element of `allocate_infos` against this pool's device,
    /// tagging any error with its index (`allocate_infos[i]`) so the caller
    /// can identify which entry failed.
    fn validate_allocate_descriptor_sets(
        &self,
        allocate_infos: &[DescriptorSetAllocateInfo],
    ) -> Result<(), Box<ValidationError>> {
        for (index, info) in allocate_infos.iter().enumerate() {
            info.validate(self.device())
                .map_err(|err| err.add_context(format!("allocate_infos[{}]", index)))?;
        }

        Ok(())
    }
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn allocate_descriptor_sets_unchecked(
&self,
allocate_infos: impl IntoIterator<Item = DescriptorSetAllocateInfo>,
) -> Result<impl ExactSizeIterator<Item = DescriptorPoolAlloc>, VulkanError> {
let allocate_infos = allocate_infos.into_iter();
let (lower_size_bound, _) = allocate_infos.size_hint();
let mut layouts_vk = Vec::with_capacity(lower_size_bound);
let mut variable_descriptor_counts = Vec::with_capacity(lower_size_bound);
let mut layouts = Vec::with_capacity(lower_size_bound);
for info in allocate_infos {
let DescriptorSetAllocateInfo {
layout,
variable_descriptor_count,
_ne: _,
} = info;
layouts_vk.push(layout.handle());
variable_descriptor_counts.push(variable_descriptor_count);
layouts.push(layout);
}
let mut output = vec![];
if !layouts_vk.is_empty() {
let variable_desc_count_alloc_info = if (self.device.api_version() >= Version::V1_2
|| self.device.enabled_extensions().ext_descriptor_indexing)
&& variable_descriptor_counts.iter().any(|c| *c != 0)
{
Some(ash::vk::DescriptorSetVariableDescriptorCountAllocateInfo {
descriptor_set_count: layouts.len() as u32,
descriptor_set_count: layouts_vk.len() as u32,
p_descriptor_counts: variable_descriptor_counts.as_ptr(),
..Default::default()
})
@ -239,10 +265,10 @@ impl DescriptorPool {
None
};
let infos = ash::vk::DescriptorSetAllocateInfo {
let info_vk = ash::vk::DescriptorSetAllocateInfo {
descriptor_pool: self.handle,
descriptor_set_count: layouts.len() as u32,
p_set_layouts: layouts.as_ptr(),
descriptor_set_count: layouts_vk.len() as u32,
p_set_layouts: layouts_vk.as_ptr(),
p_next: if let Some(next) = variable_desc_count_alloc_info.as_ref() {
next as *const _ as *const _
} else {
@ -251,27 +277,41 @@ impl DescriptorPool {
..Default::default()
};
let mut output = Vec::with_capacity(layouts.len());
output.reserve(layouts_vk.len());
let fns = self.device.fns();
(fns.v1_0.allocate_descriptor_sets)(self.device.handle(), &infos, output.as_mut_ptr())
.result()
.map_err(VulkanError::from)
.map_err(|err| match err {
VulkanError::OutOfHostMemory
| VulkanError::OutOfDeviceMemory
| VulkanError::OutOfPoolMemory => err,
// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after
// version 1.0 of Vulkan, any negative return value except out-of-memory errors
// must be considered as a fragmented pool error.
_ => VulkanError::FragmentedPool,
})?;
(fns.v1_0.allocate_descriptor_sets)(
self.device.handle(),
&info_vk,
output.as_mut_ptr(),
)
.result()
.map_err(VulkanError::from)
.map_err(|err| match err {
VulkanError::OutOfHostMemory
| VulkanError::OutOfDeviceMemory
| VulkanError::OutOfPoolMemory => err,
// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after
// version 1.0 of Vulkan, any negative return value except out-of-memory errors
// must be considered as a fragmented pool error.
_ => VulkanError::FragmentedPool,
})?;
output.set_len(layouts.len());
output
};
output.set_len(layouts_vk.len());
}
Ok(output.into_iter().map(UnsafeDescriptorSet::new))
Ok(output
.into_iter()
.zip(layouts)
.zip(variable_descriptor_counts)
.map(
move |((handle, layout), variable_descriptor_count)| DescriptorPoolAlloc {
handle,
id: DescriptorPoolAlloc::next_id(),
layout: DeviceOwnedDebugWrapper(layout),
variable_descriptor_count,
},
))
}
/// Frees some descriptor sets.
@ -281,13 +321,39 @@ impl DescriptorPool {
///
/// # Safety
///
/// - The pool must have been created with `free_descriptor_set_bit` set to `true`.
/// - The descriptor sets must have been allocated from the pool.
/// - The descriptor sets must not be free'd twice.
/// - The descriptor sets must not be in use by the GPU.
/// - All elements of `descriptor_sets` must have been allocated from `self`,
/// and not freed previously.
/// - All elements of `descriptor_sets` must not be in use by the host or device.
#[inline]
pub unsafe fn free_descriptor_sets(
&self,
descriptor_sets: impl IntoIterator<Item = UnsafeDescriptorSet>,
) -> Result<(), Validated<VulkanError>> {
self.validate_free_descriptor_sets()?;
Ok(self.free_descriptor_sets_unchecked(descriptor_sets)?)
}
    /// Checks that individually freeing descriptor sets is allowed: the pool
    /// must have been created with `FREE_DESCRIPTOR_SET`, otherwise sets can
    /// only be reclaimed by resetting or destroying the whole pool.
    fn validate_free_descriptor_sets(&self) -> Result<(), Box<ValidationError>> {
        if !self
            .flags
            .intersects(DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET)
        {
            return Err(Box::new(ValidationError {
                context: "self.flags()".into(),
                problem: "does not contain `DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET`".into(),
                vuids: &["VUID-vkFreeDescriptorSets-descriptorPool-00312"],
                ..Default::default()
            }));
        }

        Ok(())
    }
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn free_descriptor_sets_unchecked(
&self,
descriptor_sets: impl IntoIterator<Item = UnsafeDescriptorSet>,
) -> Result<(), VulkanError> {
let sets: SmallVec<[_; 8]> = descriptor_sets.into_iter().map(|s| s.handle()).collect();
if !sets.is_empty() {
@ -308,6 +374,11 @@ impl DescriptorPool {
/// Resets the pool.
///
/// This destroys all descriptor sets and empties the pool.
///
/// # Safety
///
/// - All descriptor sets that were previously allocated from `self` must not be in use by the
/// host or device.
#[inline]
pub unsafe fn reset(&self) -> Result<(), VulkanError> {
let fns = self.device.fns();
@ -489,29 +560,126 @@ vulkan_bitflags! {
/* TODO: enable
// TODO: document
UPDATE_AFTER_BIND = UPDATE_AFTER_BIND {
api_version: V1_2,
device_extensions: [ext_descriptor_indexing],
}, */
UPDATE_AFTER_BIND = UPDATE_AFTER_BIND
RequiresOneOf([
RequiresAllOf([APIVersion(V1_2)]),
RequiresAllOf([DeviceExtension(ext_descriptor_indexing)]),
]), */
/* TODO: enable
// TODO: document
HOST_ONLY = HOST_ONLY_EXT {
device_extensions: [ext_mutable_descriptor_type, valve_mutable_descriptor_type],
}, */
HOST_ONLY = HOST_ONLY_EXT
RequiresOneOf([
RequiresAllOf([DeviceExtension(ext_mutable_descriptor_type)]),
RequiresAllOf([DeviceExtension(valve_mutable_descriptor_type)]),
]), */
}
/// Parameters to allocate a new `UnsafeDescriptorSet` from an `UnsafeDescriptorPool`.
#[derive(Clone, Debug)]
pub struct DescriptorSetAllocateInfo<'a> {
pub struct DescriptorSetAllocateInfo {
/// The descriptor set layout to create the set for.
pub layout: &'a DescriptorSetLayout,
///
/// There is no default value.
pub layout: Arc<DescriptorSetLayout>,
/// For layouts with a variable-count binding, the number of descriptors to allocate for that
/// binding. This should be 0 for layouts that don't have a variable-count binding.
///
/// The default value is 0.
pub variable_descriptor_count: u32,
pub _ne: crate::NonExhaustive,
}
impl DescriptorSetAllocateInfo {
    /// Returns a `DescriptorSetAllocateInfo` with the specified `layout`.
    #[inline]
    pub fn new(layout: Arc<DescriptorSetLayout>) -> Self {
        Self {
            layout,
            // 0 is the correct value for layouts without a variable-count binding.
            variable_descriptor_count: 0,
            _ne: crate::NonExhaustive(()),
        }
    }

    /// Validates this allocate-info against `device`: the layout must belong
    /// to the same device, must not be a push-descriptor layout, and
    /// `variable_descriptor_count` must not exceed the layout's maximum.
    pub(crate) fn validate(&self, device: &Device) -> Result<(), Box<ValidationError>> {
        let &Self {
            ref layout,
            variable_descriptor_count,
            _ne,
        } = self;

        // VUID-VkDescriptorSetAllocateInfo-commonparent
        assert_eq!(device, layout.device().as_ref());

        // Push-descriptor layouts cannot be used to allocate real descriptor sets.
        if layout
            .flags()
            .intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR)
        {
            return Err(Box::new(ValidationError {
                context: "layout.flags()".into(),
                problem: "contains `DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR`".into(),
                vuids: &["VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308"],
                ..Default::default()
            }));
        }

        if variable_descriptor_count > layout.variable_descriptor_count() {
            return Err(Box::new(ValidationError {
                problem: "`variable_descriptor_count` is greater than
                `layout.variable_descriptor_count()`"
                    .into(),
                // vuids: https://github.com/KhronosGroup/Vulkan-Docs/issues/2244
                ..Default::default()
            }));
        }

        Ok(())
    }
}
/// Opaque type that represents a descriptor set allocated from a pool.
#[derive(Debug)]
pub struct DescriptorPoolAlloc {
    // Raw Vulkan descriptor set handle.
    handle: ash::vk::DescriptorSet,
    // Unique ID populated via the crate's ID counter (see `impl_id_counter!` below).
    id: NonZeroU64,
    // The layout this set was allocated with, in the crate's device-owned debug wrapper.
    layout: DeviceOwnedDebugWrapper<Arc<DescriptorSetLayout>>,
    // The variable descriptor count that was requested at allocation time.
    variable_descriptor_count: u32,
}
impl DescriptorPoolAlloc {
    /// Returns the descriptor set layout of the descriptor set.
    #[inline]
    pub fn layout(&self) -> &Arc<DescriptorSetLayout> {
        // Deref-coerces through the debug wrapper to the inner `Arc`.
        &self.layout
    }

    /// Returns the variable descriptor count that this descriptor set was allocated with.
    ///
    /// This is expected to be 0 when the layout has no variable-count binding
    /// (see `DescriptorSetAllocateInfo::variable_descriptor_count`).
    #[inline]
    pub fn variable_descriptor_count(&self) -> u32 {
        self.variable_descriptor_count
    }
}
unsafe impl VulkanObject for DescriptorPoolAlloc {
    type Handle = ash::vk::DescriptorSet;

    /// Returns the raw `VkDescriptorSet` handle.
    #[inline]
    fn handle(&self) -> Self::Handle {
        self.handle
    }
}
unsafe impl DeviceOwned for DescriptorPoolAlloc {
    /// The owning device is the one the descriptor set layout was created with.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.layout().device()
    }
}
// Provides the `next_id` machinery that populates the `id` field, giving each
// `DescriptorPoolAlloc` a process-unique identity.
impl_id_counter!(DescriptorPoolAlloc);
#[cfg(test)]
mod tests {
use super::{DescriptorPool, DescriptorPoolCreateInfo};
@ -601,10 +769,7 @@ mod tests {
.unwrap();
unsafe {
let sets = pool
.allocate_descriptor_sets([DescriptorSetAllocateInfo {
layout: set_layout.as_ref(),
variable_descriptor_count: 0,
}])
.allocate_descriptor_sets([DescriptorSetAllocateInfo::new(set_layout)])
.unwrap();
assert_eq!(sets.count(), 1);
}
@ -643,10 +808,7 @@ mod tests {
.unwrap();
unsafe {
let _ = pool.allocate_descriptor_sets([DescriptorSetAllocateInfo {
layout: set_layout.as_ref(),
variable_descriptor_count: 0,
}]);
let _ = pool.allocate_descriptor_sets([DescriptorSetAllocateInfo::new(set_layout)]);
}
});
}

View File

@ -9,56 +9,113 @@
//! Low-level descriptor set.
use super::CopyDescriptorSet;
use super::{
allocator::{DescriptorSetAlloc, DescriptorSetAllocator, StandardDescriptorSetAlloc},
CopyDescriptorSet,
};
use crate::{
descriptor_set::{
layout::DescriptorSetLayout,
update::{DescriptorWriteInfo, WriteDescriptorSet},
},
device::DeviceOwned,
macros::impl_id_counter,
VulkanObject,
device::{Device, DeviceOwned},
Validated, ValidationError, VulkanError, VulkanObject,
};
use smallvec::SmallVec;
use std::{fmt::Debug, num::NonZeroU64};
use std::{fmt::Debug, hash::Hash, sync::Arc};
/// Low-level descriptor set.
///
/// Contrary to most other objects in this library, this one doesn't free itself automatically and
/// doesn't hold the pool or the device it is associated to.
/// Instead it is an object meant to be used with the [`DescriptorPool`].
///
/// [`DescriptorPool`]: super::pool::DescriptorPool
/// This descriptor set does not keep track of synchronization,
/// nor does it store any information on what resources have been written to each descriptor.
#[derive(Debug)]
pub struct UnsafeDescriptorSet {
handle: ash::vk::DescriptorSet,
id: NonZeroU64,
pub struct UnsafeDescriptorSet<P = StandardDescriptorSetAlloc> {
alloc: P,
}
impl UnsafeDescriptorSet {
pub(crate) fn new(handle: ash::vk::DescriptorSet) -> Self {
Self {
handle,
id: Self::next_id(),
}
/// Allocates a new descriptor set and returns it.
#[inline]
pub fn new<A>(
allocator: &A,
layout: &Arc<DescriptorSetLayout>,
variable_descriptor_count: u32,
) -> Result<UnsafeDescriptorSet<A::Alloc>, Validated<VulkanError>>
where
A: DescriptorSetAllocator + ?Sized,
{
Ok(UnsafeDescriptorSet {
alloc: allocator.allocate(layout, variable_descriptor_count)?,
})
}
}
impl<P> UnsafeDescriptorSet<P>
where
P: DescriptorSetAlloc,
{
    /// Returns the allocation of this descriptor set.
    #[inline]
    pub fn alloc(&self) -> &P {
        &self.alloc
    }

    /// Returns the layout of this descriptor set.
    #[inline]
    pub fn layout(&self) -> &Arc<DescriptorSetLayout> {
        // Delegates to the `DescriptorPoolAlloc` held by the allocation.
        self.alloc.inner().layout()
    }

    /// Returns the variable descriptor count that this descriptor set was allocated with.
    #[inline]
    pub fn variable_descriptor_count(&self) -> u32 {
        self.alloc.inner().variable_descriptor_count()
    }
/// Updates the descriptor set with new values.
///
/// # Safety
///
/// - The `Device` must be the device the pool of this set was created with.
/// - Doesn't verify that the things you write in the descriptor set match its layout.
/// - Doesn't keep the resources alive. You have to do that yourself.
/// - Updating a descriptor set obeys synchronization rules that aren't checked here. Once a
/// command buffer contains a pointer/reference to a descriptor set, it is illegal to write
/// to it.
pub unsafe fn update<'a>(
/// - The resources in `descriptor_writes` and `descriptor_copies` must be kept alive for as
/// long as `self` is in use.
/// - The descriptor set must not be in use by the device,
/// or be recorded to a command buffer as part of a bind command.
#[inline]
pub unsafe fn update(
&mut self,
layout: &DescriptorSetLayout,
descriptor_writes: impl IntoIterator<Item = &'a WriteDescriptorSet>,
descriptor_copies: impl IntoIterator<Item = &'a CopyDescriptorSet>,
descriptor_writes: &[WriteDescriptorSet],
descriptor_copies: &[CopyDescriptorSet],
) -> Result<(), Box<ValidationError>> {
self.validate_update(descriptor_writes, descriptor_copies)?;
self.update_unchecked(descriptor_writes, descriptor_copies);
Ok(())
}
fn validate_update(
&self,
descriptor_writes: &[WriteDescriptorSet],
descriptor_copies: &[CopyDescriptorSet],
) -> Result<(), Box<ValidationError>> {
for (index, write) in descriptor_writes.iter().enumerate() {
write
.validate(self.layout(), self.variable_descriptor_count())
.map_err(|err| err.add_context(format!("descriptor_writes[{}]", index)))?;
}
for (index, copy) in descriptor_copies.iter().enumerate() {
copy.validate(self.layout(), self.variable_descriptor_count())
.map_err(|err| err.add_context(format!("descriptor_copies[{}]", index)))?;
}
Ok(())
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn update_unchecked(
&mut self,
descriptor_writes: &[WriteDescriptorSet],
descriptor_copies: &[CopyDescriptorSet],
) {
struct PerDescriptorWrite {
write_info: DescriptorWriteInfo,
@ -66,14 +123,12 @@ impl UnsafeDescriptorSet {
inline_uniform_block: ash::vk::WriteDescriptorSetInlineUniformBlock,
}
let writes_iter = descriptor_writes.into_iter();
let (lower_size_bound, _) = writes_iter.size_hint();
let mut writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
let mut per_writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
let mut writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_writes.len());
let mut per_writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_writes.len());
for write in writes_iter {
let layout_binding = &layout.bindings()[&write.binding()];
writes_vk.push(write.to_vulkan(self.handle, layout_binding.descriptor_type));
for write in descriptor_writes {
let layout_binding = &self.layout().bindings()[&write.binding()];
writes_vk.push(write.to_vulkan(self.handle(), layout_binding.descriptor_type));
per_writes_vk.push(PerDescriptorWrite {
write_info: write.to_vulkan_info(layout_binding.descriptor_type),
acceleration_structures: Default::default(),
@ -81,12 +136,6 @@ impl UnsafeDescriptorSet {
});
}
// It is forbidden to call `vkUpdateDescriptorSets` with 0 writes, so we need to perform
// this emptiness check.
if writes_vk.is_empty() {
return;
}
for (write_vk, per_write_vk) in writes_vk.iter_mut().zip(per_writes_vk.iter_mut()) {
match &mut per_write_vk.write_info {
DescriptorWriteInfo::Image(info) => {
@ -122,11 +171,9 @@ impl UnsafeDescriptorSet {
debug_assert!(write_vk.descriptor_count != 0);
}
let copies_iter = descriptor_copies.into_iter();
let (lower_size_bound, _) = copies_iter.size_hint();
let mut copies_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(lower_size_bound);
let mut copies_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_copies.len());
for copy in copies_iter {
for copy in descriptor_copies {
let &CopyDescriptorSet {
ref src_set,
src_binding,
@ -138,10 +185,10 @@ impl UnsafeDescriptorSet {
} = copy;
copies_vk.push(ash::vk::CopyDescriptorSet {
src_set: src_set.inner().handle(),
src_set: src_set.handle(),
src_binding,
src_array_element: src_first_array_element,
dst_set: self.handle,
dst_set: self.handle(),
dst_binding,
dst_array_element: dst_first_array_element,
descriptor_count,
@ -149,9 +196,9 @@ impl UnsafeDescriptorSet {
});
}
let fns = layout.device().fns();
let fns = self.device().fns();
(fns.v1_0.update_descriptor_sets)(
layout.device().handle(),
self.device().handle(),
writes_vk.len() as u32,
writes_vk.as_ptr(),
copies_vk.len() as u32,
@ -160,13 +207,46 @@ impl UnsafeDescriptorSet {
}
}
unsafe impl VulkanObject for UnsafeDescriptorSet {
unsafe impl<P> VulkanObject for UnsafeDescriptorSet<P>
where
P: DescriptorSetAlloc,
{
type Handle = ash::vk::DescriptorSet;
#[inline]
fn handle(&self) -> Self::Handle {
self.handle
self.alloc.inner().handle()
}
}
impl_id_counter!(UnsafeDescriptorSet);
unsafe impl<P> DeviceOwned for UnsafeDescriptorSet<P>
where
    P: DescriptorSetAlloc,
{
    /// The device is taken from the underlying pool allocation.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        let inner = self.alloc.inner();
        inner.device()
    }
}
impl<P> PartialEq for UnsafeDescriptorSet<P>
where
    P: DescriptorSetAlloc,
{
    /// Two descriptor sets compare equal when their pool allocations do.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(self.alloc.inner(), other.alloc.inner())
    }
}
// Marker impl: equality fully delegates to the pool allocation's `Eq`.
impl<P> Eq for UnsafeDescriptorSet<P> where P: DescriptorSetAlloc {}
impl<P> Hash for UnsafeDescriptorSet<P>
where
    P: DescriptorSetAlloc,
{
    /// Hashes the pool allocation, keeping `Hash` consistent with `PartialEq`.
    #[inline]
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        Hash::hash(self.alloc.inner(), state);
    }
}